From dae2b80b4fa5d3a48867e5e19fc39ed13f09c1d3 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 13 Oct 2025 13:54:45 +1100
Subject: [PATCH 01/20] tidy: removing unused code paths 1

---
 invokeai/app/api/routers/app_info.py | 3 -
 invokeai/app/api/routers/board_videos.py | 39 -
 invokeai/app/api/routers/videos.py | 119 -
 invokeai/app/api/routers/workflows.py | 2 -
 invokeai/app/api_app.py | 4 -
 invokeai/app/invocations/fields.py | 25 -
 invokeai/app/invocations/primitives.py | 25 -
 invokeai/app/services/boards/boards_common.py | 5 +-
 .../app/services/boards/boards_default.py | 12 +-
 invokeai/app/services/videos_common.py | 179 -
 .../workflow_records/workflow_records_base.py | 3 -
 .../workflow_records_common.py | 2 -
 .../workflow_records_sqlite.py | 3 -
 .../backend/model_manager/configs/base.py | 15 +-
 .../backend/model_manager/configs/factory.py | 16 -
 .../backend/model_manager/configs/main.py | 46 -
 invokeai/backend/model_manager/taxonomy.py | 17 -
 invokeai/frontend/web/.storybook/preview.tsx | 2 -
 .../frontend/web/src/app/components/App.tsx | 17 +-
 .../components/AppErrorBoundaryFallback.tsx | 28 +-
 .../src/app/components/GlobalHookIsolator.tsx | 84 +-
 .../app/components/GlobalModalIsolator.tsx | 6 -
 .../web/src/app/components/InvokeAIUI.tsx | 296 +-
 .../frontend/web/src/app/components/types.ts | 43 -
 .../web/src/app/hooks/useStudioInitAction.ts | 262 -
 .../frontend/web/src/app/logging/logger.ts | 9 +
 .../store/enhancers/reduxRemember/driver.ts | 29 +-
 .../store/enhancers/reduxRemember/errors.ts | 7 +-
 .../listeners/boardIdSelected.ts | 69 +-
 .../listeners/getOpenAPISchema.ts | 5 +-
 .../listeners/imageUploaded.ts | 43 +-
 .../listeners/modelSelected.ts | 27 +-
 .../listeners/modelsLoaded.ts | 33 -
 .../listeners/socketConnected.ts | 13 +-
 .../store/nanostores/accountSettingsLink.ts | 3 -
 .../app/store/nanostores/accountTypeText.ts | 3 -
 .../web/src/app/store/nanostores/authToken.ts | 11 -
 .../web/src/app/store/nanostores/baseUrl.ts | 6 -
 .../store/nanostores/customNavComponent.ts | 4 -
 .../src/app/store/nanostores/customStarUI.ts | 14 -
 .../src/app/store/nanostores/isDebugging.ts | 3 -
 .../web/src/app/store/nanostores/logo.ts | 4 -
 .../nanostores/onClickGoToModelManager.ts | 3 -
 .../app/store/nanostores/openAPISchemaUrl.ts | 3 -
 .../web/src/app/store/nanostores/projectId.ts | 9 -
 .../web/src/app/store/nanostores/queueId.ts | 5 -
 .../web/src/app/store/nanostores/toastMap.ts | 4 -
 .../store/nanostores/videoUpsellComponent.ts | 4 -
 .../web/src/app/store/nanostores/whatsNew.ts | 4 -
 invokeai/frontend/web/src/app/store/store.ts | 10 -
 .../frontend/web/src/app/types/invokeai.ts | 12 +-
 .../src/common/hooks/useClientSideUpload.ts | 121 -
 .../common/hooks/useCopyImageToClipboard.ts | 6 +-
 .../web/src/common/hooks/useDownloadImage.ts | 18 +-
 .../web/src/common/hooks/useGlobalHotkeys.ts | 26 +-
 .../src/common/hooks/useImageUploadButton.tsx | 40 +-
 .../src/common/util/convertImageUrlToBlob.ts | 4 +-
 .../components/ChangeBoardModal.tsx | 24 +-
 .../CanvasAlertsInvocationProgress.tsx | 8 +-
 .../components/Filters/FilterTypeSelect.tsx | 14 +-
 .../components/ParamDenoisingStrength.tsx | 32 +-
 .../components/common/Weight.tsx | 36 +-
 .../controlLayers/hooks/addLayerHooks.ts | 17 +-
 .../hooks/useIsEntityTypeEnabled.ts | 26 +-
 .../konva/CanvasBackgroundModule.ts | 3 +-
 .../CanvasEntityObjectRenderer.ts | 3 +-
 .../konva/CanvasObject/CanvasObjectImage.ts | 4 +-
 .../konva/CanvasTool/CanvasBboxToolModule.ts | 12 +-
 .../src/features/controlLayers/konva/util.ts | 
16 +- .../controlLayers/store/canvasSlice.ts | 48 - .../controlLayers/store/paramsSlice.ts | 67 - .../controlLayers/store/refImagesSlice.ts | 43 +- .../src/features/controlLayers/store/types.ts | 74 - .../web/src/features/cropper/lib/editor.ts | 3 +- .../hooks/use-delete-video.ts | 28 - .../components/DeleteVideoButton.tsx | 36 - .../components/DeleteVideoModal.tsx | 43 - .../features/deleteVideoModal/store/state.ts | 111 - .../dnd/DndDragPreviewMultipleVideo.tsx | 63 - .../dnd/DndDragPreviewSingleVideo.tsx | 69 - .../web/src/features/dnd/DndImage.tsx | 4 - .../src/features/dnd/FullscreenDropzone.tsx | 32 +- invokeai/frontend/web/src/features/dnd/dnd.ts | 135 +- .../web/src/features/dnd/useDndMonitor.ts | 15 +- .../ParamDynamicPromptsMaxPrompts.tsx | 24 +- .../hooks/useDynamicPromptsWatcher.tsx | 9 +- .../components/Boards/BoardContextMenu.tsx | 12 +- .../Boards/BoardsList/AddBoardButton.tsx | 29 +- .../Boards/BoardsList/BoardTooltip.tsx | 4 - .../Boards/BoardsList/BoardsList.tsx | 66 +- .../Boards/BoardsList/BoardsListWrapper.tsx | 6 +- .../Boards/BoardsList/GalleryBoard.tsx | 6 +- .../Boards/BoardsList/NoBoardBoard.tsx | 24 +- .../Boards/NoBoardBoardContextMenu.tsx | 11 +- .../components/BoardsListPanelContent.tsx | 4 - .../MenuItems/ContextMenuItemChangeBoard.tsx | 14 +- .../MenuItems/ContextMenuItemCopy.tsx | 13 +- .../MenuItems/ContextMenuItemDeleteImage.tsx | 11 +- .../MenuItems/ContextMenuItemDeleteVideo.tsx | 35 - .../MenuItems/ContextMenuItemDownload.tsx | 13 +- .../MenuItems/ContextMenuItemLoadWorkflow.tsx | 20 +- .../ContextMenuItemLocateInGalery.tsx | 31 +- ...etadataRecallActionsCanvasGenerateTabs.tsx | 18 +- ...enuItemMetadataRecallActionsUpscaleTab.tsx | 10 +- ...ntextMenuItemNewCanvasFromImageSubMenu.tsx | 4 +- ...ontextMenuItemNewLayerFromImageSubMenu.tsx | 10 +- .../MenuItems/ContextMenuItemOpenInNewTab.tsx | 17 +- .../MenuItems/ContextMenuItemOpenInViewer.tsx | 17 +- .../ContextMenuItemSelectForCompare.tsx | 20 +- .../ContextMenuItemSendToUpscale.tsx | 4 +- .../MenuItems/ContextMenuItemSendToVideo.tsx | 28 - .../MenuItems/ContextMenuItemStarUnstar.tsx | 37 +- .../ContextMenuItemUseAsPromptTemplate.tsx | 4 +- .../ContextMenuItemUseAsRefImage.tsx | 4 +- .../ContextMenuItemUseForPromptGeneration.tsx | 46 - .../MultipleSelectionMenuItems.tsx | 22 +- .../MultipleSelectionVideoMenuItems.tsx | 58 - .../ContextMenu/SingleSelectionMenuItems.tsx | 12 +- .../SingleSelectionVideoMenuItems.tsx | 33 - .../ContextMenu/VideoContextMenu.tsx | 279 - .../features/gallery/components/Gallery.tsx | 19 +- .../gallery/components/GalleryHeader.tsx | 21 - .../components/ImageGrid/GalleryImage.tsx | 2 +- .../ImageGrid/GalleryItemDeleteIconButton.tsx | 20 +- .../ImageGrid/GalleryItemHoverIcons.tsx | 14 +- .../GalleryItemOpenInViewerIconButton.tsx | 17 +- .../ImageGrid/GalleryItemSizeBadge.tsx | 8 +- .../ImageGrid/GalleryItemStarIconButton.tsx | 45 +- .../components/ImageGrid/GalleryVideo.tsx | 218 - .../ImageGrid/GalleryVideoPlaceholder.tsx | 11 - .../ImageMetadataActions.tsx | 23 - .../VideoMetadataViewer.tsx | 80 - .../ImageViewer/CurrentImageButtons.tsx | 5 +- .../ImageViewer/CurrentVideoButtons.tsx | 116 - .../ImageViewer/CurrentVideoPreview.tsx | 85 - .../ImageViewer/ImageComparisonHover.tsx | 6 - .../ImageViewer/ImageComparisonSideBySide.tsx | 5 - .../ImageViewer/ImageComparisonSlider.tsx | 5 - .../ImageViewer/ImageViewerPanel.tsx | 4 +- .../ImageViewer/NoContentForViewer.tsx | 47 +- .../components/ImageViewer/VideoViewer.tsx | 28 - .../ImageViewer/VideoViewerToolbar.tsx | 22 - 
.../components/NextPrevItemButtons.tsx | 24 +- .../gallery/components/VideoGallery.tsx | 390 - .../components/use-gallery-video-ids.ts | 21 - .../gallery/contexts/ImageDTOContext.ts | 13 + .../gallery/contexts/ItemDTOContext.ts | 20 - .../hooks/useRangeBasedVideoFetching.ts | 78 - .../web/src/features/gallery/store/actions.ts | 16 - .../gallery/store/gallerySelectors.ts | 13 +- .../web/src/features/imageActions/actions.ts | 13 - .../features/lora/components/LoRASelect.tsx | 6 +- .../web/src/features/metadata/parsing.tsx | 116 +- .../hooks/useMainModelDefaultSettings.ts | 60 +- .../hooks/useStarterModelsToast.tsx | 6 +- .../web/src/features/modelManagerV2/models.ts | 81 +- .../HuggingFaceFolder/HFToken.tsx | 5 +- .../HuggingFaceFolder/HuggingFaceForm.tsx | 4 +- .../ModelManagerPanel/ModelFormatBadge.tsx | 2 - .../DefaultCfgRescaleMultiplier.tsx | 26 +- .../DefaultCfgScale.tsx | 26 +- .../DefaultGuidance.tsx | 30 +- .../DefaultHeight.tsx | 25 +- .../MainModelDefaultSettings/DefaultSteps.tsx | 26 +- .../MainModelDefaultSettings/DefaultWidth.tsx | 25 +- .../flow/AddNodeCmdk/AddNodeCmdk.tsx | 6 +- .../features/nodes/components/flow/Flow.tsx | 53 +- .../nodes/Invocation/InvocationNodeFooter.tsx | 4 +- .../Invocation/fields/InputFieldHandle.tsx | 19 +- .../Invocation/fields/OutputFieldHandle.tsx | 20 +- .../flow/nodes/common/NodeWrapper.tsx | 3 - .../nodes/common/NonInvocationNodeWrapper.tsx | 3 - .../components/flow/nodes/common/shared.ts | 6 - .../flow/panels/TopPanel/TopLeftPanel.tsx | 56 +- .../flow/panels/TopPanel/TopRightPanel.tsx | 6 - .../ActiveWorkflowNameAndActions.tsx | 4 +- .../sidePanel/WorkflowsTabLeftPanel.tsx | 16 +- .../IsolatedWorkflowBuilderWatcher.tsx | 3 +- .../workflow/PublishWorkflowPanelContent.tsx | 475 - .../WorkflowLibrary/ShareWorkflowModal.tsx | 93 - .../ShareWorkflow.tsx | 35 - .../WorkflowLibrarySideNav.tsx | 5 - .../workflow/WorkflowLibrary/WorkflowList.tsx | 1 - .../WorkflowLibrary/WorkflowListItem.tsx | 24 +- .../sidePanel/workflow/WorkflowPanel.tsx | 5 - .../components/sidePanel/workflow/publish.ts | 157 - .../nodes/hooks/useIsWorkflowEditorLocked.ts | 15 - .../src/features/nodes/hooks/useWithFooter.ts | 9 +- .../src/features/nodes/store/nodesSlice.ts | 2 +- .../web/src/features/nodes/types/common.ts | 35 +- .../web/src/features/nodes/types/workflow.ts | 1 - .../util/graph/buildLinearBatchConfig.ts | 8 +- .../nodes/util/graph/generation/Graph.test.ts | 1 - .../graph/generation/buildChatGPT4oGraph.ts | 143 - .../graph/generation/buildFluxKontextGraph.ts | 124 - .../graph/generation/buildGemini2_5Graph.ts | 81 - .../graph/generation/buildImagen3Graph.ts | 76 - .../graph/generation/buildImagen4Graph.ts | 75 - .../graph/generation/buildRunwayVideoGraph.ts | 87 - .../graph/generation/buildVeo3VideoGraph.ts | 89 - .../Advanced/ParamCFGRescaleMultiplier.tsx | 32 +- .../components/Advanced/ParamClipSkip.tsx | 26 +- .../components/Bbox/BboxAspectRatioSelect.tsx | 27 +- .../parameters/components/Bbox/BboxHeight.tsx | 29 +- .../components/Bbox/BboxScaledHeight.tsx | 28 +- .../components/Bbox/BboxScaledWidth.tsx | 28 +- .../components/Bbox/BboxSettings.tsx | 30 +- .../parameters/components/Bbox/BboxWidth.tsx | 29 +- .../Bbox/use-is-bbox-size-locked.ts | 5 +- .../ParamCanvasCoherenceEdgeSize.tsx | 32 +- .../MaskAdjustment/ParamMaskBlur.tsx | 32 +- .../ParamInfillPatchmatchDownscaleSize.tsx | 32 +- .../InfillAndScaling/ParamInfillTilesize.tsx | 32 +- .../components/Core/ParamCFGScale.tsx | 42 +- .../components/Core/ParamGuidance.tsx | 50 +- 
.../components/Core/ParamPositivePrompt.tsx | 31 +- .../parameters/components/Core/ParamSteps.tsx | 42 +- .../components/Dimensions/Dimensions.tsx | 30 +- .../DimensionsAspectRatioSelect.tsx | 29 +- .../Dimensions/DimensionsHeight.tsx | 34 +- .../DimensionsLockAspectRatioButton.tsx | 8 +- .../DimensionsSetOptimalSizeButton.tsx | 9 +- .../components/Dimensions/DimensionsWidth.tsx | 36 +- .../MainModel/DisabledModelWarning.tsx | 39 - .../NavigateToModelManagerButton.tsx | 13 +- .../parameters/components/ModelPicker.tsx | 59 +- .../PixelDimensionsUnsupportedAlert.tsx | 14 - .../parameters/components/Prompts/Prompts.tsx | 6 +- .../Upscale/ParamUpscaleCFGScale.tsx | 42 +- .../components/Video/ParamDuration.tsx | 56 - .../components/Video/ParamResolution.tsx | 50 - .../components/Video/VideoDimensions.tsx | 22 - .../VideoDimensionsAspectRatioSelect.tsx | 54 - .../Video/VideoDimensionsPreview.tsx | 88 - .../hooks/useCurrentVideoDimensions.ts | 54 - .../parameters/hooks/useIsModelDisabled.ts | 16 - .../hooks/useIsTooLargeToUpscale.ts | 29 - .../features/parameters/store/videoSlice.ts | 168 - .../features/parameters/types/constants.ts | 20 - .../parameters/types/parameterSchemas.ts | 9 + .../parameters/util/optimalDimension.ts | 8 - .../PromptExpansion/PromptExpansionMenu.tsx | 80 - .../PromptExpansionOverlay.tsx | 68 - .../PromptExpansionResultOverlay.tsx | 76 - .../features/prompt/PromptExpansion/expand.ts | 42 - .../features/prompt/PromptExpansion/graph.ts | 43 - .../features/prompt/PromptExpansion/state.ts | 98 - .../components/ClearModelCacheButton.tsx | 6 - .../InvokeButtonTooltip.tsx | 15 +- .../components/QueueActionsMenuButton.tsx | 60 +- .../components/QueueIterationsNumberInput.tsx | 8 +- .../QueueList/QueueItemComponent.tsx | 15 +- .../components/QueueList/QueueItemDetail.tsx | 6 +- .../components/QueueList/QueueListHeader.tsx | 11 - .../queue/components/QueueTabContent.tsx | 5 +- .../components/QueueTabQueueControls.tsx | 18 +- .../features/queue/hooks/useEnqueueCanvas.ts | 15 - .../queue/hooks/useEnqueueGenerate.ts | 15 - .../features/queue/hooks/useEnqueueVideo.ts | 127 - .../queue/hooks/useEnqueueWorkflows.ts | 68 +- .../web/src/features/queue/hooks/useInvoke.ts | 18 +- .../web/src/features/queue/store/readiness.ts | 161 +- .../SDXLRefiner/ParamSDXLRefinerCFGScale.tsx | 42 +- .../SDXLRefiner/ParamSDXLRefinerSteps.tsx | 43 +- .../GenerationSettingsAccordion.tsx | 47 +- .../UpscaleTabGenerationSettingsAccordion.tsx | 42 +- .../CanvasTabImageSettingsAccordion.tsx | 100 +- .../GenerateTabImageSettingsAccordion.tsx | 54 +- .../UpscaleWarning.tsx | 33 +- .../VideoSettingsAccordion/CreditEstimate.tsx | 55 - .../StartingFrameImage.tsx | 182 - .../VideoModelPicker.tsx | 40 - .../VideoSettingsAccordion.tsx | 45 - .../StylePresetForm/StylePresetForm.tsx | 6 +- .../components/StylePresetImage.tsx | 6 - .../components/StylePresetMenu.tsx | 5 - .../components/HotkeysModal/useHotkeyData.ts | 30 +- .../components/InvokeAILogoComponent.tsx | 7 - .../SettingsModal/SettingsLanguageSelect.tsx | 4 +- .../components/SettingsModal/SettingsMenu.tsx | 42 +- .../features/system/hooks/useFeatureStatus.ts | 24 - .../features/system/store/configSelectors.ts | 4 - .../src/features/system/store/configSlice.ts | 101 - .../features/toast/ErrorToastDescription.tsx | 62 +- .../src/features/ui/components/AppContent.tsx | 30 +- .../features/ui/components/Notifications.tsx | 17 +- .../ParametersPanelCanvas.tsx | 8 +- .../ParametersPanelGenerate.tsx | 6 +- .../ParametersPanels/ParametersPanelVideo.tsx | 45 - 
.../features/ui/components/VerticalNavBar.tsx | 40 +- .../src/features/ui/components/WhatsNew.tsx | 25 +- .../ui/layouts/DockviewTabLaunchpad.tsx | 2 - .../layouts/LaunchpadStartingFrameButton.tsx | 50 - .../ui/layouts/VideoLaunchpadPanel.tsx | 48 - .../features/ui/layouts/VideoTabLeftPanel.tsx | 16 - .../ui/layouts/video-tab-auto-layout.tsx | 278 - .../web/src/features/ui/store/uiTypes.ts | 2 +- .../features/video/components/VideoPlayer.tsx | 53 - .../video/components/VideoPlayerControls.tsx | 66 - .../features/video/components/VideoView.tsx | 26 - .../video/context/VideoViewerContext.tsx | 24 - .../video/hooks/useCaptureVideoFrame.ts | 90 - .../components/SaveWorkflowAsDialog.tsx | 1 - .../SaveWorkflowMenuItem.tsx | 4 +- .../hooks/useSaveOrSaveAsWorkflow.ts | 6 +- invokeai/frontend/web/src/index.ts | 76 - .../src/services/api/authToastMiddleware.ts | 81 - .../web/src/services/api/endpoints/appInfo.ts | 7 +- .../web/src/services/api/endpoints/boards.ts | 14 - .../web/src/services/api/endpoints/images.ts | 5 - .../web/src/services/api/endpoints/queue.ts | 3 +- .../web/src/services/api/endpoints/videos.ts | 239 - .../src/services/api/hooks/modelsByType.ts | 21 +- .../api/hooks/useDebouncedImageWorkflow.ts | 6 +- .../api/hooks/useDebouncedMetadata.ts | 20 +- .../api/hooks/useSelectedModelConfig.ts | 9 - .../frontend/web/src/services/api/index.ts | 22 +- .../frontend/web/src/services/api/schema.ts | 8682 +++++++---------- .../frontend/web/src/services/api/types.ts | 44 - .../services/api/util/optimisticUpdates.ts | 59 +- .../services/events/onInvocationComplete.tsx | 176 +- .../services/events/onModelInstallError.tsx | 5 +- .../src/services/events/setEventListeners.tsx | 74 +- .../web/src/services/events/stores.ts | 4 +- .../web/src/services/events/useSocketIO.ts | 31 +- invokeai/frontend/web/vite.config.mts | 59 - invokeai/invocation_api/__init__.py | 4 - 327 files changed, 4628 insertions(+), 16492 deletions(-) delete mode 100644 invokeai/app/api/routers/board_videos.py delete mode 100644 invokeai/app/api/routers/videos.py delete mode 100644 invokeai/app/services/videos_common.py delete mode 100644 invokeai/frontend/web/src/app/components/types.ts delete mode 100644 invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/authToken.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/logo.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/projectId.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/queueId.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/toastMap.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts delete mode 100644 
invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts delete mode 100644 invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts delete mode 100644 invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx delete mode 100644 invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx delete mode 100644 invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts delete mode 100644 invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/GalleryHeader.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryVideoPlaceholder.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/VideoMetadataViewer.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoButtons.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx delete mode 100644 invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts create mode 100644 invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts delete mode 100644 invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts delete mode 100644 invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts delete mode 100644 invokeai/frontend/web/src/features/gallery/store/actions.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/hooks/useIsWorkflowEditorLocked.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts delete mode 100644 
invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildGemini2_5Graph.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildRunwayVideoGraph.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildVeo3VideoGraph.ts delete mode 100644 invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/PixelDimensionsUnsupportedAlert.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Video/ParamDuration.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Video/ParamResolution.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensions.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsAspectRatioSelect.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Video/VideoDimensionsPreview.tsx delete mode 100644 invokeai/frontend/web/src/features/parameters/hooks/useCurrentVideoDimensions.ts delete mode 100644 invokeai/frontend/web/src/features/parameters/hooks/useIsModelDisabled.ts delete mode 100644 invokeai/frontend/web/src/features/parameters/hooks/useIsTooLargeToUpscale.ts delete mode 100644 invokeai/frontend/web/src/features/parameters/store/videoSlice.ts delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionMenu.tsx delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionOverlay.tsx delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/PromptExpansionResultOverlay.tsx delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/expand.ts delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/graph.ts delete mode 100644 invokeai/frontend/web/src/features/prompt/PromptExpansion/state.ts delete mode 100644 invokeai/frontend/web/src/features/queue/hooks/useEnqueueVideo.ts delete mode 100644 invokeai/frontend/web/src/features/settingsAccordions/components/VideoSettingsAccordion/CreditEstimate.tsx delete mode 100644 invokeai/frontend/web/src/features/settingsAccordions/components/VideoSettingsAccordion/StartingFrameImage.tsx delete mode 100644 invokeai/frontend/web/src/features/settingsAccordions/components/VideoSettingsAccordion/VideoModelPicker.tsx delete mode 100644 invokeai/frontend/web/src/features/settingsAccordions/components/VideoSettingsAccordion/VideoSettingsAccordion.tsx delete mode 100644 invokeai/frontend/web/src/features/system/hooks/useFeatureStatus.ts delete mode 100644 invokeai/frontend/web/src/features/system/store/configSelectors.ts delete mode 100644 invokeai/frontend/web/src/features/system/store/configSlice.ts delete mode 100644 invokeai/frontend/web/src/features/ui/components/ParametersPanels/ParametersPanelVideo.tsx delete mode 100644 invokeai/frontend/web/src/features/ui/layouts/LaunchpadStartingFrameButton.tsx delete mode 100644 invokeai/frontend/web/src/features/ui/layouts/VideoLaunchpadPanel.tsx delete mode 100644 
invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx delete mode 100644 invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx delete mode 100644 invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx delete mode 100644 invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx delete mode 100644 invokeai/frontend/web/src/features/video/components/VideoView.tsx delete mode 100644 invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx delete mode 100644 invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts delete mode 100644 invokeai/frontend/web/src/index.ts delete mode 100644 invokeai/frontend/web/src/services/api/authToastMiddleware.ts delete mode 100644 invokeai/frontend/web/src/services/api/endpoints/videos.ts diff --git a/invokeai/app/api/routers/app_info.py b/invokeai/app/api/routers/app_info.py index 5d66c2559ec..c71739fbd2a 100644 --- a/invokeai/app/api/routers/app_info.py +++ b/invokeai/app/api/routers/app_info.py @@ -2,7 +2,6 @@ from enum import Enum from importlib.metadata import distributions from pathlib import Path -from typing import Optional import torch from fastapi import Body @@ -40,8 +39,6 @@ class AppVersion(BaseModel): version: str = Field(description="App version") - highlights: Optional[list[str]] = Field(default=None, description="Highlights of release") - class AppConfig(BaseModel): """App Config Response""" diff --git a/invokeai/app/api/routers/board_videos.py b/invokeai/app/api/routers/board_videos.py deleted file mode 100644 index 1db8f2784be..00000000000 --- a/invokeai/app/api/routers/board_videos.py +++ /dev/null @@ -1,39 +0,0 @@ -from fastapi import Body, HTTPException -from fastapi.routing import APIRouter - -from invokeai.app.services.videos_common import AddVideosToBoardResult, RemoveVideosFromBoardResult - -board_videos_router = APIRouter(prefix="/v1/board_videos", tags=["boards"]) - - -@board_videos_router.post( - "/batch", - operation_id="add_videos_to_board", - responses={ - 201: {"description": "Videos were added to board successfully"}, - }, - status_code=201, - response_model=AddVideosToBoardResult, -) -async def add_videos_to_board( - board_id: str = Body(description="The id of the board to add to"), - video_ids: list[str] = Body(description="The ids of the videos to add", embed=True), -) -> AddVideosToBoardResult: - """Adds a list of videos to a board""" - raise HTTPException(status_code=501, detail="Not implemented") - - -@board_videos_router.post( - "/batch/delete", - operation_id="remove_videos_from_board", - responses={ - 201: {"description": "Videos were removed from board successfully"}, - }, - status_code=201, - response_model=RemoveVideosFromBoardResult, -) -async def remove_videos_from_board( - video_ids: list[str] = Body(description="The ids of the videos to remove", embed=True), -) -> RemoveVideosFromBoardResult: - """Removes a list of videos from their board, if they had one""" - raise HTTPException(status_code=501, detail="Not implemented") diff --git a/invokeai/app/api/routers/videos.py b/invokeai/app/api/routers/videos.py deleted file mode 100644 index 36ead345c9a..00000000000 --- a/invokeai/app/api/routers/videos.py +++ /dev/null @@ -1,119 +0,0 @@ -from typing import Optional - -from fastapi import Body, HTTPException, Path, Query -from fastapi.routing import APIRouter - -from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection -from 
invokeai.app.services.videos_common import ( - DeleteVideosResult, - StarredVideosResult, - UnstarredVideosResult, - VideoDTO, - VideoIdsResult, - VideoRecordChanges, -) - -videos_router = APIRouter(prefix="/v1/videos", tags=["videos"]) - - -@videos_router.patch( - "/i/{video_id}", - operation_id="update_video", - response_model=VideoDTO, -) -async def update_video( - video_id: str = Path(description="The id of the video to update"), - video_changes: VideoRecordChanges = Body(description="The changes to apply to the video"), -) -> VideoDTO: - """Updates a video""" - - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.get( - "/i/{video_id}", - operation_id="get_video_dto", - response_model=VideoDTO, -) -async def get_video_dto( - video_id: str = Path(description="The id of the video to get"), -) -> VideoDTO: - """Gets a video's DTO""" - - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.post("/delete", operation_id="delete_videos_from_list", response_model=DeleteVideosResult) -async def delete_videos_from_list( - video_ids: list[str] = Body(description="The list of ids of videos to delete", embed=True), -) -> DeleteVideosResult: - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.post("/star", operation_id="star_videos_in_list", response_model=StarredVideosResult) -async def star_videos_in_list( - video_ids: list[str] = Body(description="The list of ids of videos to star", embed=True), -) -> StarredVideosResult: - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.post("/unstar", operation_id="unstar_videos_in_list", response_model=UnstarredVideosResult) -async def unstar_videos_in_list( - video_ids: list[str] = Body(description="The list of ids of videos to unstar", embed=True), -) -> UnstarredVideosResult: - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.delete("/uncategorized", operation_id="delete_uncategorized_videos", response_model=DeleteVideosResult) -async def delete_uncategorized_videos() -> DeleteVideosResult: - """Deletes all videos that are uncategorized""" - - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.get("/", operation_id="list_video_dtos", response_model=OffsetPaginatedResults[VideoDTO]) -async def list_video_dtos( - is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."), - board_id: Optional[str] = Query( - default=None, - description="The board id to filter by. Use 'none' to find videos without a board.", - ), - offset: int = Query(default=0, description="The page offset"), - limit: int = Query(default=10, description="The number of videos per page"), - order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"), - starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"), - search_term: Optional[str] = Query(default=None, description="The term to search for"), -) -> OffsetPaginatedResults[VideoDTO]: - """Lists video DTOs""" - - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.get("/ids", operation_id="get_video_ids") -async def get_video_ids( - is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."), - board_id: Optional[str] = Query( - default=None, - description="The board id to filter by. 
Use 'none' to find videos without a board.", - ), - order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"), - starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"), - search_term: Optional[str] = Query(default=None, description="The term to search for"), -) -> VideoIdsResult: - """Gets ordered list of video ids with metadata for optimistic updates""" - - raise HTTPException(status_code=501, detail="Not implemented") - - -@videos_router.post( - "/videos_by_ids", - operation_id="get_videos_by_ids", - responses={200: {"model": list[VideoDTO]}}, -) -async def get_videos_by_ids( - video_ids: list[str] = Body(embed=True, description="Object containing list of video ids to fetch DTOs for"), -) -> list[VideoDTO]: - """Gets video DTOs for the specified video ids. Maintains order of input ids.""" - - raise HTTPException(status_code=501, detail="Not implemented") diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py index 35b928a45af..5a37a75dcf9 100644 --- a/invokeai/app/api/routers/workflows.py +++ b/invokeai/app/api/routers/workflows.py @@ -106,7 +106,6 @@ async def list_workflows( tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"), query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"), has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"), - is_published: Optional[bool] = Query(default=None, description="Whether to include/exclude published workflows"), ) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]: """Gets a page of workflows""" workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = [] @@ -119,7 +118,6 @@ async def list_workflows( categories=categories, tags=tags, has_been_opened=has_been_opened, - is_published=is_published, ) for workflow in workflows.items: workflows_with_thumbnails.append( diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index ce1a2193dff..335327f532b 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -18,7 +18,6 @@ from invokeai.app.api.routers import ( app_info, board_images, - board_videos, boards, client_state, download_queue, @@ -28,7 +27,6 @@ session_queue, style_presets, utilities, - videos, workflows, ) from invokeai.app.api.sockets import SocketIO @@ -127,10 +125,8 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint): app.include_router(model_manager.model_manager_router, prefix="/api") app.include_router(download_queue.download_queue_router, prefix="/api") app.include_router(images.images_router, prefix="/api") -app.include_router(videos.videos_router, prefix="/api") app.include_router(boards.boards_router, prefix="/api") app.include_router(board_images.board_images_router, prefix="/api") -app.include_router(board_videos.board_videos_router, prefix="/api") app.include_router(model_relationships.model_relationships_router, prefix="/api") app.include_router(app_info.app_router, prefix="/api") app.include_router(session_queue.session_queue_router, prefix="/api") diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index 9e2e982df5a..5a2d0810356 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -235,10 +235,6 @@ class ImageField(BaseModel): image_name: str = Field(description="The name of the image") -class 
VideoField(BaseModel): - """A video primitive field""" - - video_id: str = Field(description="The id of the video") class BoardField(BaseModel): @@ -549,27 +545,6 @@ def migrate_model_ui_type(ui_type: UIType | str, json_schema_extra: dict[str, An ui_model_type = [ModelType.FluxRedux] case UIType.LlavaOnevisionModel: ui_model_type = [ModelType.LlavaOnevision] - case UIType.Imagen3Model: - ui_model_base = [BaseModelType.Imagen3] - ui_model_type = [ModelType.Main] - case UIType.Imagen4Model: - ui_model_base = [BaseModelType.Imagen4] - ui_model_type = [ModelType.Main] - case UIType.ChatGPT4oModel: - ui_model_base = [BaseModelType.ChatGPT4o] - ui_model_type = [ModelType.Main] - case UIType.Gemini2_5Model: - ui_model_base = [BaseModelType.Gemini2_5] - ui_model_type = [ModelType.Main] - case UIType.FluxKontextModel: - ui_model_base = [BaseModelType.FluxKontext] - ui_model_type = [ModelType.Main] - case UIType.Veo3Model: - ui_model_base = [BaseModelType.Veo3] - ui_model_type = [ModelType.Video] - case UIType.RunwayModel: - ui_model_base = [BaseModelType.Runway] - ui_model_type = [ModelType.Video] case _: pass diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 1dc470b9705..10703a620cd 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -27,7 +27,6 @@ SD3ConditioningField, TensorField, UIComponent, - VideoField, ) from invokeai.app.services.images.images_common import ImageDTO from invokeai.app.services.shared.invocation_context import InvocationContext @@ -288,30 +287,6 @@ def invoke(self, context: InvocationContext) -> ImageCollectionOutput: return ImageCollectionOutput(collection=self.collection) -# endregion - -# region Video - - -@invocation_output("video_output") -class VideoOutput(BaseInvocationOutput): - """Base class for nodes that output a video""" - - video: VideoField = OutputField(description="The output video") - width: int = OutputField(description="The width of the video in pixels") - height: int = OutputField(description="The height of the video in pixels") - duration_seconds: float = OutputField(description="The duration of the video in seconds") - - @classmethod - def build(cls, video_id: str, width: int, height: int, duration_seconds: float) -> "VideoOutput": - return cls( - video=VideoField(video_id=video_id), - width=width, - height=height, - duration_seconds=duration_seconds, - ) - - # endregion # region DenoiseMask diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py index d25bb9d9da8..68cd3603287 100644 --- a/invokeai/app/services/boards/boards_common.py +++ b/invokeai/app/services/boards/boards_common.py @@ -14,12 +14,10 @@ class BoardDTO(BoardRecord): """The number of images in the board.""" asset_count: int = Field(description="The number of assets in the board.") """The number of assets in the board.""" - video_count: int = Field(description="The number of videos in the board.") - """The number of videos in the board.""" def board_record_to_dto( - board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int, video_count: int + board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int ) -> BoardDTO: """Converts a board record to a board DTO.""" return BoardDTO( @@ -27,5 +25,4 @@ def board_record_to_dto( cover_image_name=cover_image_name, image_count=image_count, asset_count=asset_count, - video_count=video_count, ) diff --git 
a/invokeai/app/services/boards/boards_default.py b/invokeai/app/services/boards/boards_default.py index df161086459..6efeaa1fea8 100644 --- a/invokeai/app/services/boards/boards_default.py +++ b/invokeai/app/services/boards/boards_default.py @@ -28,8 +28,7 @@ def get_dto(self, board_id: str) -> BoardDTO: cover_image_name = None image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id) asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id) - video_count = 0 # noop for OSS - return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count) + return board_record_to_dto(board_record, cover_image_name, image_count, asset_count) def update( self, @@ -45,8 +44,7 @@ def update( image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id) asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id) - video_count = 0 # noop for OSS - return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count) + return board_record_to_dto(board_record, cover_image_name, image_count, asset_count) def delete(self, board_id: str) -> None: self.__invoker.services.board_records.delete(board_id) @@ -72,8 +70,7 @@ def get_many( image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id) asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id) - video_count = 0 # noop for OSS - board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count)) + board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count)) return OffsetPaginatedResults[BoardDTO](items=board_dtos, offset=offset, limit=limit, total=len(board_dtos)) @@ -91,7 +88,6 @@ def get_all( image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id) asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id) - video_count = 0 # noop for OSS - board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count)) + board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count)) return board_dtos diff --git a/invokeai/app/services/videos_common.py b/invokeai/app/services/videos_common.py deleted file mode 100644 index a1b8d762287..00000000000 --- a/invokeai/app/services/videos_common.py +++ /dev/null @@ -1,179 +0,0 @@ -import datetime -from typing import Optional, Union - -from pydantic import BaseModel, Field, StrictBool, StrictStr - -from invokeai.app.util.misc import get_iso_timestamp -from invokeai.app.util.model_exclude_null import BaseModelExcludeNull - -VIDEO_DTO_COLS = ", ".join( - [ - "videos." + c - for c in [ - "video_id", - "width", - "height", - "session_id", - "node_id", - "is_intermediate", - "created_at", - "updated_at", - "deleted_at", - "starred", - ] - ] -) - - -class VideoRecord(BaseModelExcludeNull): - """Deserialized video record without metadata.""" - - video_id: str = Field(description="The unique id of the video.") - """The unique id of the video.""" - width: int = Field(description="The width of the video in px.") - """The actual width of the video in px. This may be different from the width in metadata.""" - height: int = Field(description="The height of the video in px.") - """The actual height of the video in px. 
This may be different from the height in metadata.""" - created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the video.") - """The created timestamp of the video.""" - updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the video.") - """The updated timestamp of the video.""" - deleted_at: Optional[Union[datetime.datetime, str]] = Field( - default=None, description="The deleted timestamp of the video." - ) - """The deleted timestamp of the video.""" - is_intermediate: bool = Field(description="Whether this is an intermediate video.") - """Whether this is an intermediate video.""" - session_id: Optional[str] = Field( - default=None, - description="The session ID that generated this video, if it is a generated video.", - ) - """The session ID that generated this video, if it is a generated video.""" - node_id: Optional[str] = Field( - default=None, - description="The node ID that generated this video, if it is a generated video.", - ) - """The node ID that generated this video, if it is a generated video.""" - starred: bool = Field(description="Whether this video is starred.") - """Whether this video is starred.""" - - -class VideoRecordChanges(BaseModelExcludeNull): - """A set of changes to apply to a video record. - - Only limited changes are valid: - - `session_id`: change the session associated with a video - - `is_intermediate`: change the video's `is_intermediate` flag - - `starred`: change whether the video is starred - """ - - session_id: Optional[StrictStr] = Field( - default=None, - description="The video's new session ID.", - ) - """The video's new session ID.""" - is_intermediate: Optional[StrictBool] = Field(default=None, description="The video's new `is_intermediate` flag.") - """The video's new `is_intermediate` flag.""" - starred: Optional[StrictBool] = Field(default=None, description="The video's new `starred` state") - """The video's new `starred` state.""" - - -def deserialize_video_record(video_dict: dict) -> VideoRecord: - """Deserializes a video record.""" - - # Retrieve all the values, setting "reasonable" defaults if they are not present. 
- video_id = video_dict.get("video_id", "unknown") - width = video_dict.get("width", 0) - height = video_dict.get("height", 0) - session_id = video_dict.get("session_id", None) - node_id = video_dict.get("node_id", None) - created_at = video_dict.get("created_at", get_iso_timestamp()) - updated_at = video_dict.get("updated_at", get_iso_timestamp()) - deleted_at = video_dict.get("deleted_at", get_iso_timestamp()) - is_intermediate = video_dict.get("is_intermediate", False) - starred = video_dict.get("starred", False) - - return VideoRecord( - video_id=video_id, - width=width, - height=height, - session_id=session_id, - node_id=node_id, - created_at=created_at, - updated_at=updated_at, - deleted_at=deleted_at, - is_intermediate=is_intermediate, - starred=starred, - ) - - -class VideoCollectionCounts(BaseModel): - starred_count: int = Field(description="The number of starred videos in the collection.") - unstarred_count: int = Field(description="The number of unstarred videos in the collection.") - - -class VideoIdsResult(BaseModel): - """Response containing ordered video ids with metadata for optimistic updates.""" - - video_ids: list[str] = Field(description="Ordered list of video ids") - starred_count: int = Field(description="Number of starred videos (when starred_first=True)") - total_count: int = Field(description="Total number of videos matching the query") - - -class VideoUrlsDTO(BaseModelExcludeNull): - """The URLs for an image and its thumbnail.""" - - video_id: str = Field(description="The unique id of the video.") - """The unique id of the video.""" - video_url: str = Field(description="The URL of the video.") - """The URL of the video.""" - thumbnail_url: str = Field(description="The URL of the video's thumbnail.") - """The URL of the video's thumbnail.""" - - -class VideoDTO(VideoRecord, VideoUrlsDTO): - """Deserialized video record, enriched for the frontend.""" - - board_id: Optional[str] = Field( - default=None, description="The id of the board the image belongs to, if one exists." 
- ) - """The id of the board the image belongs to, if one exists.""" - - -def video_record_to_dto( - video_record: VideoRecord, - video_url: str, - thumbnail_url: str, - board_id: Optional[str], -) -> VideoDTO: - """Converts a video record to a video DTO.""" - return VideoDTO( - **video_record.model_dump(), - video_url=video_url, - thumbnail_url=thumbnail_url, - board_id=board_id, - ) - - -class ResultWithAffectedBoards(BaseModel): - affected_boards: list[str] = Field(description="The ids of boards affected by the delete operation") - - -class DeleteVideosResult(ResultWithAffectedBoards): - deleted_videos: list[str] = Field(description="The ids of the videos that were deleted") - - -class StarredVideosResult(ResultWithAffectedBoards): - starred_videos: list[str] = Field(description="The ids of the videos that were starred") - - -class UnstarredVideosResult(ResultWithAffectedBoards): - unstarred_videos: list[str] = Field(description="The ids of the videos that were unstarred") - - -class AddVideosToBoardResult(ResultWithAffectedBoards): - added_videos: list[str] = Field(description="The video ids that were added to the board") - - -class RemoveVideosFromBoardResult(ResultWithAffectedBoards): - removed_videos: list[str] = Field(description="The video ids that were removed from their board") diff --git a/invokeai/app/services/workflow_records/workflow_records_base.py b/invokeai/app/services/workflow_records/workflow_records_base.py index bf91363281b..5bf42ed2533 100644 --- a/invokeai/app/services/workflow_records/workflow_records_base.py +++ b/invokeai/app/services/workflow_records/workflow_records_base.py @@ -47,7 +47,6 @@ def get_many( query: Optional[str], tags: Optional[list[str]], has_been_opened: Optional[bool], - is_published: Optional[bool], ) -> PaginatedResults[WorkflowRecordListItemDTO]: """Gets many workflows.""" pass @@ -57,7 +56,6 @@ def counts_by_category( self, categories: list[WorkflowCategory], has_been_opened: Optional[bool] = None, - is_published: Optional[bool] = None, ) -> dict[str, int]: """Gets a dictionary of counts for each of the provided categories.""" pass @@ -68,7 +66,6 @@ def counts_by_tag( tags: list[str], categories: Optional[list[WorkflowCategory]] = None, has_been_opened: Optional[bool] = None, - is_published: Optional[bool] = None, ) -> dict[str, int]: """Gets a dictionary of counts for each of the provided tags.""" pass diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py index fe203ab8c96..909ed3b463b 100644 --- a/invokeai/app/services/workflow_records/workflow_records_common.py +++ b/invokeai/app/services/workflow_records/workflow_records_common.py @@ -67,7 +67,6 @@ class WorkflowWithoutID(BaseModel): # This is typed as optional to prevent errors when pulling workflows from the DB. The frontend adds a default form if # it is None. form: dict[str, JsonValue] | None = Field(default=None, description="The form of the workflow.") - is_published: bool | None = Field(default=None, description="Whether the workflow is published or not.") model_config = ConfigDict(extra="ignore") @@ -102,7 +101,6 @@ class WorkflowRecordDTOBase(BaseModel): opened_at: Optional[Union[datetime.datetime, str]] = Field( default=None, description="The opened timestamp of the workflow." 
) - is_published: bool | None = Field(default=None, description="Whether the workflow is published or not.") class WorkflowRecordDTO(WorkflowRecordDTOBase): diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py index 72f37469de8..d6a94d156f0 100644 --- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py +++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py @@ -104,7 +104,6 @@ def get_many( query: Optional[str] = None, tags: Optional[list[str]] = None, has_been_opened: Optional[bool] = None, - is_published: Optional[bool] = None, ) -> PaginatedResults[WorkflowRecordListItemDTO]: with self._db.transaction() as cursor: # sanitize! @@ -227,7 +226,6 @@ def counts_by_tag( tags: list[str], categories: Optional[list[WorkflowCategory]] = None, has_been_opened: Optional[bool] = None, - is_published: Optional[bool] = None, ) -> dict[str, int]: if not tags: return {} @@ -279,7 +277,6 @@ def counts_by_category( self, categories: list[WorkflowCategory], has_been_opened: Optional[bool] = None, - is_published: Optional[bool] = None, ) -> dict[str, int]: with self._db.transaction() as cursor: result: dict[str, int] = {} diff --git a/invokeai/backend/model_manager/configs/base.py b/invokeai/backend/model_manager/configs/base.py index 8de9a2b8316..43c31c7e4fd 100644 --- a/invokeai/backend/model_manager/configs/base.py +++ b/invokeai/backend/model_manager/configs/base.py @@ -28,6 +28,17 @@ pass +class URLModelSource(BaseModel): + type: Literal[ModelSourceType.Url] = Field(default=ModelSourceType.Url) + url: str = Field( + description="The URL from which the model was installed.", + ) + api_response: str | None = Field( + default=None, + description="The original API response from the source, as stringified JSON.", + ) + + class Config_Base(ABC, BaseModel): """ Abstract base class for model configurations. A model config describes a specific combination of model base, type and @@ -81,10 +92,6 @@ class Config_Base(ABC, BaseModel): default=None, description="Url for image to preview model", ) - usage_info: str | None = Field( - default=None, - description="Usage information for this model", - ) CONFIG_CLASSES: ClassVar[set[Type["Config_Base"]]] = set() """Set of all non-abstract subclasses of Config_Base, for use during model probing. 
In other words, this is the set diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py index dcd7c4c0edc..6b8d122d615 100644 --- a/invokeai/backend/model_manager/configs/factory.py +++ b/invokeai/backend/model_manager/configs/factory.py @@ -64,15 +64,8 @@ Main_Diffusers_SD3_Config, Main_Diffusers_SDXL_Config, Main_Diffusers_SDXLRefiner_Config, - Main_ExternalAPI_ChatGPT4o_Config, - Main_ExternalAPI_FluxKontext_Config, - Main_ExternalAPI_Gemini2_5_Config, - Main_ExternalAPI_Imagen3_Config, - Main_ExternalAPI_Imagen4_Config, Main_GGUF_FLUX_Config, MainModelDefaultSettings, - Video_ExternalAPI_Runway_Config, - Video_ExternalAPI_Veo3_Config, ) from invokeai.backend.model_manager.configs.siglip import SigLIP_Diffusers_Config from invokeai.backend.model_manager.configs.spandrel import Spandrel_Checkpoint_Config @@ -218,15 +211,6 @@ Annotated[SigLIP_Diffusers_Config, SigLIP_Diffusers_Config.get_tag()], Annotated[FLUXRedux_Checkpoint_Config, FLUXRedux_Checkpoint_Config.get_tag()], Annotated[LlavaOnevision_Diffusers_Config, LlavaOnevision_Diffusers_Config.get_tag()], - # Main - external API - Annotated[Main_ExternalAPI_ChatGPT4o_Config, Main_ExternalAPI_ChatGPT4o_Config.get_tag()], - Annotated[Main_ExternalAPI_Gemini2_5_Config, Main_ExternalAPI_Gemini2_5_Config.get_tag()], - Annotated[Main_ExternalAPI_Imagen3_Config, Main_ExternalAPI_Imagen3_Config.get_tag()], - Annotated[Main_ExternalAPI_Imagen4_Config, Main_ExternalAPI_Imagen4_Config.get_tag()], - Annotated[Main_ExternalAPI_FluxKontext_Config, Main_ExternalAPI_FluxKontext_Config.get_tag()], - # Video - external API - Annotated[Video_ExternalAPI_Veo3_Config, Video_ExternalAPI_Veo3_Config.get_tag()], - Annotated[Video_ExternalAPI_Runway_Config, Video_ExternalAPI_Runway_Config.get_tag()], # Unknown model (fallback) Annotated[Unknown_Config, Unknown_Config.get_tag()], ], diff --git a/invokeai/backend/model_manager/configs/main.py b/invokeai/backend/model_manager/configs/main.py index dcb948d99bb..03c44e1a778 100644 --- a/invokeai/backend/model_manager/configs/main.py +++ b/invokeai/backend/model_manager/configs/main.py @@ -657,49 +657,3 @@ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) - **override_fields, repo_variant=repo_variant, ) - - -class ExternalAPI_Config_Base(ABC, BaseModel): - """Model config for API-based models.""" - - format: Literal[ModelFormat.Api] = Field(default=ModelFormat.Api) - - @classmethod - def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: - raise NotAMatchError("External API models cannot be built from disk") - - -class Main_ExternalAPI_ChatGPT4o_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base): - base: Literal[BaseModelType.ChatGPT4o] = Field(default=BaseModelType.ChatGPT4o) - - -class Main_ExternalAPI_Gemini2_5_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base): - base: Literal[BaseModelType.Gemini2_5] = Field(default=BaseModelType.Gemini2_5) - - -class Main_ExternalAPI_Imagen3_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base): - base: Literal[BaseModelType.Imagen3] = Field(default=BaseModelType.Imagen3) - - -class Main_ExternalAPI_Imagen4_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base): - base: Literal[BaseModelType.Imagen4] = Field(default=BaseModelType.Imagen4) - - -class Main_ExternalAPI_FluxKontext_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base): - base: Literal[BaseModelType.FluxKontext] = 
Field(default=BaseModelType.FluxKontext) - - -class Video_Config_Base(ABC, BaseModel): - type: Literal[ModelType.Video] = Field(default=ModelType.Video) - trigger_phrases: set[str] | None = Field(description="Set of trigger phrases for this model", default=None) - default_settings: MainModelDefaultSettings | None = Field( - description="Default settings for this model", default=None - ) - - -class Video_ExternalAPI_Veo3_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base): - base: Literal[BaseModelType.Veo3] = Field(default=BaseModelType.Veo3) - - -class Video_ExternalAPI_Runway_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base): - base: Literal[BaseModelType.Runway] = Field(default=BaseModelType.Runway) diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py index 99a31f438d1..38afd44fcb6 100644 --- a/invokeai/backend/model_manager/taxonomy.py +++ b/invokeai/backend/model_manager/taxonomy.py @@ -48,21 +48,6 @@ class BaseModelType(str, Enum): """Indicates the model is associated with FLUX.1 model architecture, including FLUX Dev, Schnell and Fill.""" CogView4 = "cogview4" """Indicates the model is associated with CogView 4 model architecture.""" - Imagen3 = "imagen3" - """Indicates the model is associated with Google Imagen 3 model architecture. This is an external API model.""" - Imagen4 = "imagen4" - """Indicates the model is associated with Google Imagen 4 model architecture. This is an external API model.""" - Gemini2_5 = "gemini-2.5" - """Indicates the model is associated with Google Gemini 2.5 Flash Image model architecture. This is an external API model.""" - ChatGPT4o = "chatgpt-4o" - """Indicates the model is associated with OpenAI ChatGPT 4o Image model architecture. This is an external API model.""" - FluxKontext = "flux-kontext" - """Indicates the model is associated with FLUX Kontext model architecture. This is an external API model; local FLUX - Kontext models use the base `Flux`.""" - Veo3 = "veo3" - """Indicates the model is associated with Google Veo 3 video model architecture. This is an external API model.""" - Runway = "runway" - """Indicates the model is associated with Runway video model architecture. 
This is an external API model.""" Unknown = "unknown" """Indicates the model's base architecture is unknown.""" @@ -86,7 +71,6 @@ class ModelType(str, Enum): SigLIP = "siglip" FluxRedux = "flux_redux" LlavaOnevision = "llava_onevision" - Video = "video" Unknown = "unknown" @@ -145,7 +129,6 @@ class ModelFormat(str, Enum): BnbQuantizedLlmInt8b = "bnb_quantized_int8b" BnbQuantizednf4b = "bnb_quantized_nf4b" GGUFQuantized = "gguf_quantized" - Api = "api" Unknown = "unknown" diff --git a/invokeai/frontend/web/.storybook/preview.tsx b/invokeai/frontend/web/.storybook/preview.tsx index 4f1cc0ed9fb..eb3d0391db4 100644 --- a/invokeai/frontend/web/.storybook/preview.tsx +++ b/invokeai/frontend/web/.storybook/preview.tsx @@ -10,7 +10,6 @@ import { Provider } from 'react-redux'; // @ts-ignore import translationEN from '../public/locales/en.json'; import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider'; -import { $baseUrl } from '../src/app/store/nanostores/baseUrl'; import { createStore } from '../src/app/store/store'; import { ReduxInit } from './ReduxInit'; @@ -28,7 +27,6 @@ i18n.use(initReactI18next).init({ const store = createStore(); $store.set(store); -$baseUrl.set('http://localhost:9090'); const preview: Preview = { decorators: [ diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index 0f23e05a71f..bae4f4cf633 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -1,27 +1,15 @@ import { Box } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator'; import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator'; -import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction'; import { clearStorage } from 'app/store/enhancers/reduxRemember/driver'; -import type { PartialAppConfig } from 'app/types/invokeai'; -import Loading from 'common/components/Loading/Loading'; import { AppContent } from 'features/ui/components/AppContent'; import { memo, useCallback } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import ThemeLocaleProvider from './ThemeLocaleProvider'; -const DEFAULT_CONFIG = {}; - -interface Props { - config?: PartialAppConfig; - studioInitAction?: StudioInitAction; -} - -const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => { - const didStudioInit = useStore($didStudioInit); +const App = () => { const handleReset = useCallback(() => { clearStorage(); location.reload(); @@ -33,9 +21,8 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => { - {!didStudioInit && } - + diff --git a/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx b/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx index f061ba15f9b..f22a94c33fc 100644 --- a/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx +++ b/invokeai/frontend/web/src/app/components/AppErrorBoundaryFallback.tsx @@ -1,8 +1,5 @@ import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library'; -import { createSelector } from '@reduxjs/toolkit'; -import { useAppSelector } from 'app/store/storeHooks'; import { useClipboard } from 'common/hooks/useClipboard'; -import { selectConfigSlice } from 'features/system/store/configSlice'; import { toast } from 'features/toast/toast'; import 
newGithubIssueUrl from 'new-github-issue-url'; import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg'; @@ -16,11 +13,8 @@ type Props = { resetErrorBoundary: () => void; }; -const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal); - const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => { const { t } = useTranslation(); - const isLocal = useAppSelector(selectIsLocal); const clipboard = useClipboard(); const handleCopy = useCallback(() => { @@ -34,17 +28,13 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => { }, [clipboard, error, t]); const url = useMemo(() => { - if (isLocal) { - return newGithubIssueUrl({ - user: 'invoke-ai', - repo: 'InvokeAI', - template: 'BUG_REPORT.yml', - title: `[bug]: ${error.name}: ${error.message}`, - }); - } else { - return 'https://support.invoke.ai/support/tickets/new'; - } - }, [error.message, error.name, isLocal]); + return newGithubIssueUrl({ + user: 'invoke-ai', + repo: 'InvokeAI', + template: 'BUG_REPORT.yml', + title: `[bug]: ${error.name}: ${error.message}`, + }); + }, [error.message, error.name]); return ( @@ -75,9 +65,7 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => { {t('common.copyError')} - + diff --git a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx index 0a21348e984..a4345f373a6 100644 --- a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx +++ b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx @@ -1,14 +1,10 @@ import { useGlobalModifiersInit } from '@invoke-ai/ui-library'; import { setupListeners } from '@reduxjs/toolkit/query'; -import type { StudioInitAction } from 'app/hooks/useStudioInitAction'; -import { useStudioInitAction } from 'app/hooks/useStudioInitAction'; import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection'; import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus'; -import { useLogger } from 'app/logging/useLogger'; import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig'; import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import type { PartialAppConfig } from 'app/types/invokeai'; import { useFocusRegionWatcher } from 'common/hooks/focus'; import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix'; import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys'; @@ -19,7 +15,6 @@ import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/w import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState'; import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators'; import { useReadinessWatcher } from 'features/queue/store/readiness'; -import { configChanged } from 'features/system/store/configSlice'; import { selectLanguage } from 'features/system/store/systemSelectors'; import { useNavigationApi } from 'features/ui/layouts/use-navigation-api'; import i18n from 'i18n'; @@ -34,55 +29,46 @@ const queueCountArg = { destination: 'canvas' }; * GlobalHookIsolator is a logical component that runs global hooks in an isolated component, so that they do not * cause needless re-renders of any other components. 
*/ -export const GlobalHookIsolator = memo( - ({ config, studioInitAction }: { config: PartialAppConfig; studioInitAction?: StudioInitAction }) => { - const language = useAppSelector(selectLanguage); - const logger = useLogger('system'); - const dispatch = useAppDispatch(); +export const GlobalHookIsolator = memo(() => { + const language = useAppSelector(selectLanguage); + const dispatch = useAppDispatch(); - // singleton! - useReadinessWatcher(); - useSocketIO(); - useGlobalModifiersInit(); - useGlobalHotkeys(); - useGetOpenAPISchemaQuery(); - useSyncLoggingConfig(); - useCloseChakraTooltipsOnDragFix(); - useNavigationApi(); - useDndMonitor(); - useSyncNodeErrors(); - useSyncLangDirection(); + // singleton! + useReadinessWatcher(); + useSocketIO(); + useGlobalModifiersInit(); + useGlobalHotkeys(); + useGetOpenAPISchemaQuery(); + useSyncLoggingConfig(); + useCloseChakraTooltipsOnDragFix(); + useNavigationApi(); + useDndMonitor(); + useSyncNodeErrors(); + useSyncLangDirection(); - // Persistent subscription to the queue counts query - canvas relies on this to know if there are pending - // and/or in progress canvas sessions. - useGetQueueCountsByDestinationQuery(queueCountArg); - useSyncExecutionState(); + // Persistent subscription to the queue counts query - canvas relies on this to know if there are pending + // and/or in progress canvas sessions. + useGetQueueCountsByDestinationQuery(queueCountArg); + useSyncExecutionState(); - useEffect(() => { - i18n.changeLanguage(language); - }, [language]); + useEffect(() => { + i18n.changeLanguage(language); + }, [language]); - useEffect(() => { - logger.info({ config }, 'Received config'); - dispatch(configChanged(config)); - }, [dispatch, config, logger]); + useEffect(() => { + dispatch(appStarted()); + }, [dispatch]); - useEffect(() => { - dispatch(appStarted()); - }, [dispatch]); + useEffect(() => { + return setupListeners(dispatch); + }, [dispatch]); - useEffect(() => { - return setupListeners(dispatch); - }, [dispatch]); + useStarterModelsToast(); + useSyncQueueStatus(); + useFocusRegionWatcher(); + useWorkflowBuilderWatcher(); + useDynamicPromptsWatcher(); - useStudioInitAction(studioInitAction); - useStarterModelsToast(); - useSyncQueueStatus(); - useFocusRegionWatcher(); - useWorkflowBuilderWatcher(); - useDynamicPromptsWatcher(); - - return null; - } -); + return null; +}); GlobalHookIsolator.displayName = 'GlobalHookIsolator'; diff --git a/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx b/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx index b5aec1dd561..5c1446662ef 100644 --- a/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx +++ b/invokeai/frontend/web/src/app/components/GlobalModalIsolator.tsx @@ -4,13 +4,10 @@ import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteM import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate'; import { CropImageModal } from 'features/cropper/components/CropImageModal'; import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal'; -import { DeleteVideoModal } from 'features/deleteVideoModal/components/DeleteVideoModal'; import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone'; import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal'; import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal'; import { ImageContextMenu } from 
'features/gallery/components/ContextMenu/ImageContextMenu'; -import { VideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu'; -import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal'; import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal'; import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog'; import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog'; @@ -34,7 +31,6 @@ export const GlobalModalIsolator = memo(() => { return ( <> - @@ -46,12 +42,10 @@ export const GlobalModalIsolator = memo(() => { - - diff --git a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx index 21eee66513f..1fa0a5f3cd9 100644 --- a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx +++ b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx @@ -1,309 +1,23 @@ import 'i18n'; -import type { InvokeAIUIProps } from 'app/components/types'; -import { $didStudioInit } from 'app/hooks/useStudioInitAction'; -import { $loggingOverrides, configureLogging } from 'app/logging/logger'; import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver'; -import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink'; -import { $accountTypeText } from 'app/store/nanostores/accountTypeText'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { $baseUrl } from 'app/store/nanostores/baseUrl'; -import { $customNavComponent } from 'app/store/nanostores/customNavComponent'; -import { $customStarUI } from 'app/store/nanostores/customStarUI'; -import { $isDebugging } from 'app/store/nanostores/isDebugging'; -import { $logo } from 'app/store/nanostores/logo'; -import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager'; -import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl'; -import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/projectId'; -import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId'; import { $store } from 'app/store/nanostores/store'; -import { $toastMap } from 'app/store/nanostores/toastMap'; -import { $videoUpsellComponent } from 'app/store/nanostores/videoUpsellComponent'; -import { $whatsNew } from 'app/store/nanostores/whatsNew'; import { createStore } from 'app/store/store'; import Loading from 'common/components/Loading/Loading'; -import { - $workflowLibraryCategoriesOptions, - $workflowLibrarySortOptions, - $workflowLibraryTagCategoriesOptions, - DEFAULT_WORKFLOW_LIBRARY_CATEGORIES, - DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS, - DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES, -} from 'features/nodes/store/workflowLibrarySlice'; -import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react'; +import React, { lazy, memo, useEffect, useState } from 'react'; import { Provider } from 'react-redux'; -import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares'; -import { $socketOptions } from 'services/events/stores'; const App = lazy(() => import('./App')); -const InvokeAIUI = ({ - apiUrl, - openAPISchemaUrl, - token, - config, - customNavComponent, - accountSettingsLink, - middleware, - projectId, - projectName, - projectUrl, - queueId, - studioInitAction, - customStarUi, - socketOptions, - 
isDebugging = false, - logo, - toastMap, - accountTypeText, - videoUpsellComponent, - workflowCategories, - workflowTagCategories, - workflowSortOptions, - loggingOverrides, - onClickGoToModelManager, - whatsNew, - storagePersistDebounce = 300, -}: InvokeAIUIProps) => { +const InvokeAIUI = () => { const [store, setStore] = useState | undefined>(undefined); const [didRehydrate, setDidRehydrate] = useState(false); - useLayoutEffect(() => { - /* - * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first - * possible opportunity. - * - * Once redux initializes, we will check the user's settings and update the logging config accordingly. See - * `useSyncLoggingConfig`. - */ - $loggingOverrides.set(loggingOverrides); - - // Until we get the user's settings, we will use the overrides OR default values. - configureLogging( - loggingOverrides?.logIsEnabled ?? true, - loggingOverrides?.logLevel ?? 'debug', - loggingOverrides?.logNamespaces ?? '*' - ); - }, [loggingOverrides]); - - useLayoutEffect(() => { - if (studioInitAction) { - $didStudioInit.set(false); - } - }, [studioInitAction]); - - useEffect(() => { - // configure API client token - if (token) { - $authToken.set(token); - } - - // configure API client base url - if (apiUrl) { - $baseUrl.set(apiUrl); - } - - // configure API client project header - if (projectId) { - $projectId.set(projectId); - } - - // configure API client project header - if (queueId) { - $queueId.set(queueId); - } - - // reset dynamically added middlewares - resetMiddlewares(); - - // TODO: at this point, after resetting the middleware, we really ought to clean up the socket - // stuff by calling `dispatch(socketReset())`. but we cannot dispatch from here as we are - // outside the provider. it's not needed until there is the possibility that we will change - // the `apiUrl`/`token` dynamically. 
- - // rebuild socket middleware with token and apiUrl - if (middleware && middleware.length > 0) { - addMiddleware(...middleware); - } - - return () => { - // Reset the API client token and base url on unmount - $baseUrl.set(undefined); - $authToken.set(undefined); - $projectId.set(undefined); - $queueId.set(DEFAULT_QUEUE_ID); - }; - }, [apiUrl, token, middleware, projectId, queueId, projectName, projectUrl]); - - useEffect(() => { - if (customStarUi) { - $customStarUI.set(customStarUi); - } - - return () => { - $customStarUI.set(undefined); - }; - }, [customStarUi]); - - useEffect(() => { - if (accountTypeText) { - $accountTypeText.set(accountTypeText); - } - - return () => { - $accountTypeText.set(''); - }; - }, [accountTypeText]); - - useEffect(() => { - if (videoUpsellComponent) { - $videoUpsellComponent.set(videoUpsellComponent); - } - - return () => { - $videoUpsellComponent.set(undefined); - }; - }, [videoUpsellComponent]); - - useEffect(() => { - if (customNavComponent) { - $customNavComponent.set(customNavComponent); - } - - return () => { - $customNavComponent.set(undefined); - }; - }, [customNavComponent]); - - useEffect(() => { - if (accountSettingsLink) { - $accountSettingsLink.set(accountSettingsLink); - } - - return () => { - $accountSettingsLink.set(undefined); - }; - }, [accountSettingsLink]); - - useEffect(() => { - if (openAPISchemaUrl) { - $openAPISchemaUrl.set(openAPISchemaUrl); - } - - return () => { - $openAPISchemaUrl.set(undefined); - }; - }, [openAPISchemaUrl]); - - useEffect(() => { - $projectName.set(projectName); - - return () => { - $projectName.set(undefined); - }; - }, [projectName]); - - useEffect(() => { - $projectUrl.set(projectUrl); - - return () => { - $projectUrl.set(undefined); - }; - }, [projectUrl]); - - useEffect(() => { - if (logo) { - $logo.set(logo); - } - - return () => { - $logo.set(undefined); - }; - }, [logo]); - - useEffect(() => { - if (toastMap) { - $toastMap.set(toastMap); - } - - return () => { - $toastMap.set(undefined); - }; - }, [toastMap]); - - useEffect(() => { - if (whatsNew) { - $whatsNew.set(whatsNew); - } - - return () => { - $whatsNew.set(undefined); - }; - }, [whatsNew]); - - useEffect(() => { - if (onClickGoToModelManager) { - $onClickGoToModelManager.set(onClickGoToModelManager); - } - - return () => { - $onClickGoToModelManager.set(undefined); - }; - }, [onClickGoToModelManager]); - - useEffect(() => { - if (workflowCategories) { - $workflowLibraryCategoriesOptions.set(workflowCategories); - } - - return () => { - $workflowLibraryCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_CATEGORIES); - }; - }, [workflowCategories]); - - useEffect(() => { - if (workflowTagCategories) { - $workflowLibraryTagCategoriesOptions.set(workflowTagCategories); - } - - return () => { - $workflowLibraryTagCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES); - }; - }, [workflowTagCategories]); - - useEffect(() => { - if (workflowSortOptions) { - $workflowLibrarySortOptions.set(workflowSortOptions); - } - - return () => { - $workflowLibrarySortOptions.set(DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS); - }; - }, [workflowSortOptions]); - - useEffect(() => { - if (socketOptions) { - $socketOptions.set(socketOptions); - } - return () => { - $socketOptions.set({}); - }; - }, [socketOptions]); - - useEffect(() => { - if (isDebugging) { - $isDebugging.set(isDebugging); - } - return () => { - $isDebugging.set(false); - }; - }, [isDebugging]); - useEffect(() => { const onRehydrated = () => { setDidRehydrate(true); }; - const store = createStore({ 
persist: true, persistDebounce: storagePersistDebounce, onRehydrated }); + const store = createStore({ persist: true, persistDebounce: 300, onRehydrated }); setStore(store); $store.set(store); if (import.meta.env.MODE === 'development') { @@ -318,7 +32,7 @@ const InvokeAIUI = ({ window.$store = undefined; } }; - }, [storagePersistDebounce]); + }, []); if (!store || !didRehydrate) { return ; @@ -328,7 +42,7 @@ const InvokeAIUI = ({ }> - + diff --git a/invokeai/frontend/web/src/app/components/types.ts b/invokeai/frontend/web/src/app/components/types.ts deleted file mode 100644 index dbec6a72a86..00000000000 --- a/invokeai/frontend/web/src/app/components/types.ts +++ /dev/null @@ -1,43 +0,0 @@ -import type { Middleware } from '@reduxjs/toolkit'; -import type { StudioInitAction } from 'app/hooks/useStudioInitAction'; -import type { LoggingOverrides } from 'app/logging/logger'; -import type { CustomStarUi } from 'app/store/nanostores/customStarUI'; -import type { PartialAppConfig } from 'app/types/invokeai'; -import type { SocketOptions } from 'dgram'; -import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice'; -import type { WorkflowCategory } from 'features/nodes/types/workflow'; -import type { ToastConfig } from 'features/toast/toast'; -import type { PropsWithChildren, ReactNode } from 'react'; -import type { ManagerOptions } from 'socket.io-client'; - -export interface InvokeAIUIProps extends PropsWithChildren { - apiUrl?: string; - openAPISchemaUrl?: string; - token?: string; - config?: PartialAppConfig; - customNavComponent?: ReactNode; - accountSettingsLink?: string; - middleware?: Middleware[]; - projectId?: string; - projectName?: string; - projectUrl?: string; - queueId?: string; - studioInitAction?: StudioInitAction; - customStarUi?: CustomStarUi; - socketOptions?: Partial; - isDebugging?: boolean; - logo?: ReactNode; - toastMap?: Record; - accountTypeText?: string; - videoUpsellComponent?: ReactNode; - whatsNew?: ReactNode[]; - workflowCategories?: WorkflowCategory[]; - workflowTagCategories?: WorkflowTagCategory[]; - workflowSortOptions?: WorkflowSortOption[]; - loggingOverrides?: LoggingOverrides; - /** - * If provided, overrides in-app navigation to the model manager - */ - onClickGoToModelManager?: () => void; - storagePersistDebounce?: number; -} diff --git a/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts b/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts deleted file mode 100644 index 80c76e31484..00000000000 --- a/invokeai/frontend/web/src/app/hooks/useStudioInitAction.ts +++ /dev/null @@ -1,262 +0,0 @@ -import { useStore } from '@nanostores/react'; -import { useAppStore } from 'app/store/storeHooks'; -import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; -import { withResultAsync } from 'common/util/result'; -import { canvasReset } from 'features/controlLayers/store/actions'; -import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice'; -import type { CanvasRasterLayerState } from 'features/controlLayers/store/types'; -import { imageDTOToImageObject } from 'features/controlLayers/store/util'; -import { sentImageToCanvas } from 'features/gallery/store/actions'; -import { MetadataUtils } from 'features/metadata/parsing'; -import { $hasTemplates } from 'features/nodes/store/nodesSlice'; -import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibraryModal'; -import { - $workflowLibraryTagOptions, - workflowLibraryTagsReset, - workflowLibraryTagToggled, - 
workflowLibraryViewChanged, -} from 'features/nodes/store/workflowLibrarySlice'; -import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice'; -import { toast } from 'features/toast/toast'; -import { navigationApi } from 'features/ui/layouts/navigation-api'; -import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared'; -import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog'; -import { atom } from 'nanostores'; -import { useCallback, useEffect } from 'react'; -import { useTranslation } from 'react-i18next'; -import { getImageDTO, getImageMetadata } from 'services/api/endpoints/images'; -import { getStylePreset } from 'services/api/endpoints/stylePresets'; - -type _StudioInitAction = { type: T; data: U }; - -type LoadWorkflowAction = _StudioInitAction<'loadWorkflow', { workflowId: string }>; -type SelectStylePresetAction = _StudioInitAction<'selectStylePreset', { stylePresetId: string }>; -type SendToCanvasAction = _StudioInitAction<'sendToCanvas', { imageName: string }>; -type UseAllParametersAction = _StudioInitAction<'useAllParameters', { imageName: string }>; -type StudioDestinationAction = _StudioInitAction< - 'goToDestination', - { - destination: - | 'generation' - | 'canvas' - | 'workflows' - | 'upscaling' - | 'video' - | 'viewAllWorkflows' - | 'viewAllWorkflowsRecommended' - | 'viewAllStylePresets'; - } ->; -// Use global state to show loader until we are ready to render the studio. -export const $didStudioInit = atom(false); - -export type StudioInitAction = - | LoadWorkflowAction - | SelectStylePresetAction - | SendToCanvasAction - | UseAllParametersAction - | StudioDestinationAction; - -/** - * A hook that performs an action when the studio is initialized. This is useful for deep linking into the studio. - * - * The action is performed only once, when the hook is first run. - * - * In this hook, we prefer to use imperative APIs over hooks to avoid re-rendering the parent component. 
For example: - * - Use `getImageDTO` helper instead of `useGetImageDTO` - * - Usee the `$imageViewer` atom instead of `useImageViewer` - */ -export const useStudioInitAction = (action?: StudioInitAction) => { - useAssertSingleton('useStudioInitAction'); - const { t } = useTranslation(); - const didParseOpenAPISchema = useStore($hasTemplates); - const store = useAppStore(); - const loadWorkflowWithDialog = useLoadWorkflowWithDialog(); - const workflowLibraryTagOptions = useStore($workflowLibraryTagOptions); - - const handleSendToCanvas = useCallback( - async (imageName: string) => { - // Try to the image DTO - use an imperative helper, rather than `useGetImageDTO`, so that we aren't re-rendering - // the parent of this hook whenever the image name changes - const getImageDTOResult = await withResultAsync(() => getImageDTO(imageName)); - if (getImageDTOResult.isErr()) { - toast({ - title: t('toast.unableToLoadImage'), - status: 'error', - }); - return; - } - const imageDTO = getImageDTOResult.value; - const imageObject = imageDTOToImageObject(imageDTO); - const overrides: Partial = { - objects: [imageObject], - }; - await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); - store.dispatch(canvasReset()); - store.dispatch(rasterLayerAdded({ overrides, isSelected: true })); - store.dispatch(sentImageToCanvas()); - toast({ - title: t('toast.sentToCanvas'), - status: 'info', - }); - }, - [store, t] - ); - - const handleUseAllMetadata = useCallback( - async (imageName: string) => { - // Try to the image metadata - use an imperative helper, rather than `useGetImageMetadata`, so that we aren't - // re-rendering the parent of this hook whenever the image name changes - const getImageMetadataResult = await withResultAsync(() => getImageMetadata(imageName)); - if (getImageMetadataResult.isErr()) { - toast({ - title: t('toast.unableToLoadImageMetadata'), - status: 'error', - }); - return; - } - const metadata = getImageMetadataResult.value; - store.dispatch(canvasReset()); - // This shows a toast - await MetadataUtils.recallAllImageMetadata(metadata, store); - }, - [store, t] - ); - - const handleLoadWorkflow = useCallback( - (workflowId: string) => { - // This shows a toast - loadWorkflowWithDialog({ - type: 'library', - data: workflowId, - onSuccess: () => { - navigationApi.switchToTab('workflows'); - }, - }); - }, - [loadWorkflowWithDialog] - ); - - const handleSelectStylePreset = useCallback( - async (stylePresetId: string) => { - const getStylePresetResult = await withResultAsync(() => getStylePreset(stylePresetId)); - if (getStylePresetResult.isErr()) { - toast({ - title: t('toast.unableToLoadStylePreset'), - status: 'error', - }); - return; - } - store.dispatch(activeStylePresetIdChanged(stylePresetId)); - navigationApi.switchToTab('canvas'); - toast({ - title: t('toast.stylePresetLoaded'), - status: 'info', - }); - }, - [store, t] - ); - - const handleGoToDestination = useCallback( - async (destination: StudioDestinationAction['data']['destination']) => { - switch (destination) { - case 'generation': - // Go to the generate tab, open the launchpad - await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID); - break; - case 'canvas': - // Go to the canvas tab, open the launchpad - await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); - break; - case 'workflows': - // Go to the workflows tab - navigationApi.switchToTab('workflows'); - break; - case 'upscaling': - // Go to the upscaling tab - navigationApi.switchToTab('upscaling'); - break; - case 'video': - // Go to the video 
tab - await navigationApi.focusPanel('video', LAUNCHPAD_PANEL_ID); - break; - case 'viewAllWorkflows': - // Go to the workflows tab and open the workflow library modal - navigationApi.switchToTab('workflows'); - $isWorkflowLibraryModalOpen.set(true); - break; - case 'viewAllWorkflowsRecommended': - // Go to the workflows tab and open the workflow library modal with the recommended workflows view - navigationApi.switchToTab('workflows'); - $isWorkflowLibraryModalOpen.set(true); - store.dispatch(workflowLibraryViewChanged('defaults')); - store.dispatch(workflowLibraryTagsReset()); - for (const tag of workflowLibraryTagOptions) { - if (tag.recommended) { - store.dispatch(workflowLibraryTagToggled(tag.label)); - } - } - break; - case 'viewAllStylePresets': - // Go to the canvas tab and open the style presets menu - navigationApi.switchToTab('canvas'); - $isStylePresetsMenuOpen.set(true); - break; - } - }, - [store, workflowLibraryTagOptions] - ); - - const handleStudioInitAction = useCallback( - async (action: StudioInitAction) => { - // This cannot be in the useEffect below because we need to await some of the actions before setting didStudioInit. - switch (action.type) { - case 'loadWorkflow': - await handleLoadWorkflow(action.data.workflowId); - break; - case 'selectStylePreset': - await handleSelectStylePreset(action.data.stylePresetId); - break; - - case 'sendToCanvas': - await handleSendToCanvas(action.data.imageName); - break; - - case 'useAllParameters': - await handleUseAllMetadata(action.data.imageName); - break; - - case 'goToDestination': - handleGoToDestination(action.data.destination); - break; - - default: - break; - } - $didStudioInit.set(true); - }, - [handleGoToDestination, handleLoadWorkflow, handleSelectStylePreset, handleSendToCanvas, handleUseAllMetadata] - ); - - useEffect(() => { - if ($didStudioInit.get() || !didParseOpenAPISchema) { - return; - } - - if (!action) { - $didStudioInit.set(true); - return; - } - - handleStudioInitAction(action); - }, [ - handleSendToCanvas, - handleUseAllMetadata, - action, - handleSelectStylePreset, - handleGoToDestination, - handleLoadWorkflow, - didParseOpenAPISchema, - handleStudioInitAction, - ]); -}; diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index 1f753f97bb7..2428638dd12 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -98,3 +98,12 @@ export const configureLogging = ( ROARR.write = createLogWriter({ styleOutput }); }; + +/* + * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first + * possible opportunity. + * + * Once redux initializes, we will check the user's settings and update the logging config accordingly. See + * `useSyncLoggingConfig`. 
+ */ +configureLogging(true, 'debug', '*'); diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts index ef42a5fa2db..9e67770b436 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts +++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts @@ -1,8 +1,5 @@ import { logger } from 'app/logging/logger'; import { StorageError } from 'app/store/enhancers/reduxRemember/errors'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { $projectId } from 'app/store/nanostores/projectId'; -import { $queueId } from 'app/store/nanostores/queueId'; import type { UseStore } from 'idb-keyval'; import { createStore as idbCreateStore, del as idbDel, get as idbGet } from 'idb-keyval'; import type { Driver } from 'redux-remember'; @@ -19,24 +16,11 @@ const getUrl = (endpoint: 'get_by_key' | 'set_by_key' | 'delete', key?: string) query['key'] = key; } - const path = buildV1Url(`client_state/${$queueId.get()}/${endpoint}`, query); + const path = buildV1Url(`client_state/default/${endpoint}`, query); const url = `${baseUrl}/${path}`; return url; }; -const getHeaders = () => { - const headers = new Headers(); - const authToken = $authToken.get(); - const projectId = $projectId.get(); - if (authToken) { - headers.set('Authorization', `Bearer ${authToken}`); - } - if (projectId) { - headers.set('project-id', projectId); - } - return headers; -}; - // Persistence happens per slice. To track when persistence is in progress, maintain a ref count, incrementing // it when a slice is being persisted and decrementing it when the persistence is done. let persistRefCount = 0; @@ -87,8 +71,7 @@ const getIdbKey = (key: string) => { const getItem = async (key: string) => { try { const url = getUrl('get_by_key', key); - const headers = getHeaders(); - const res = await fetch(url, { method: 'GET', headers }); + const res = await fetch(url, { method: 'GET' }); if (!res.ok) { throw new Error(`Response status: ${res.status}`); } @@ -130,7 +113,6 @@ const getItem = async (key: string) => { } catch (originalError) { throw new StorageError({ key, - projectId: $projectId.get(), originalError, }); } @@ -148,8 +130,7 @@ const setItem = async (key: string, value: string) => { } log.trace({ key, last: lastPersistedState.get(key), next: value }, `Persisting state for ${key}`); const url = getUrl('set_by_key', key); - const headers = getHeaders(); - const res = await fetch(url, { method: 'POST', headers, body: value }); + const res = await fetch(url, { method: 'POST', body: value }); if (!res.ok) { throw new Error(`Response status: ${res.status}`); } @@ -160,7 +141,6 @@ const setItem = async (key: string, value: string) => { throw new StorageError({ key, value, - projectId: $projectId.get(), originalError, }); } finally { @@ -178,8 +158,7 @@ export const clearStorage = async () => { try { persistRefCount++; const url = getUrl('delete'); - const headers = getHeaders(); - const res = await fetch(url, { method: 'POST', headers }); + const res = await fetch(url, { method: 'POST' }); if (!res.ok) { throw new Error(`Response status: ${res.status}`); } diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts index 9266ee478ff..87c89b27f51 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts +++ 
b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts @@ -7,7 +7,6 @@ type StorageErrorArgs = { /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct value?: any; originalError?: unknown; - projectId?: string; }; export class StorageError extends Error { @@ -15,18 +14,14 @@ export class StorageError extends Error { /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct value?: any; originalError?: Error; - projectId?: string; - constructor({ key, value, originalError, projectId }: StorageErrorArgs) { + constructor({ key, value, originalError }: StorageErrorArgs) { super(`Error setting ${key}`); this.name = 'StorageSetError'; this.key = key; if (value !== undefined) { this.value = value; } - if (projectId !== undefined) { - this.projectId = projectId; - } if (originalError instanceof Error) { this.originalError = originalError; } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts index 3a42213e535..a408a94c041 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts @@ -1,14 +1,8 @@ import { isAnyOf } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/store'; -import { - selectGalleryView, - selectGetImageNamesQueryArgs, - selectGetVideoIdsQueryArgs, - selectSelectedBoardId, -} from 'features/gallery/store/gallerySelectors'; +import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors'; import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; -import { videosApi } from 'services/api/endpoints/videos'; export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => { startAppListening({ @@ -25,56 +19,29 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening) const state = getState(); const board_id = selectSelectedBoardId(state); - const view = selectGalleryView(state); - if (view === 'images' || view === 'assets') { - const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id }; - // wait until the board has some images - maybe it already has some from a previous fetch - // must use getState() to ensure we do not have stale state - const isSuccess = await condition( - () => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess, - 5000 - ); + const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id }; + // wait until the board has some images - maybe it already has some from a previous fetch + // must use getState() to ensure we do not have stale state + const isSuccess = await condition( + () => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess, + 5000 + ); - if (!isSuccess) { - dispatch(itemSelected(null)); - return; - } + if (!isSuccess) { + dispatch(itemSelected(null)); + return; + } - // the board was just changed - we can select the first image - const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names; + // the board was just changed - we can select the first image + const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names; - const 
imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null; + const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null; - if (imageToSelect) { - dispatch(itemSelected({ type: 'image', id: imageToSelect })); - } else { - dispatch(itemSelected(null)); - } + if (imageToSelect) { + dispatch(itemSelected({ type: 'image', id: imageToSelect })); } else { - const queryArgs = { ...selectGetVideoIdsQueryArgs(state), board_id }; - // wait until the board has some images - maybe it already has some from a previous fetch - // must use getState() to ensure we do not have stale state - const isSuccess = await condition( - () => videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).isSuccess, - 5000 - ); - - if (!isSuccess) { - dispatch(itemSelected(null)); - return; - } - - // the board was just changed - we can select the first image - const videoIds = videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).data?.video_ids; - - const videoToSelect = videoIds && videoIds.length > 0 ? videoIds[0] : null; - - if (videoToSelect) { - dispatch(itemSelected({ type: 'video', id: videoToSelect })); - } else { - dispatch(itemSelected(null)); - } + dispatch(itemSelected(null)); } }, }); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts index f266f147ee8..416c77b9dd7 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts @@ -13,13 +13,12 @@ const log = logger('system'); export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled, - effect: (action, { getState }) => { + effect: (action) => { const schemaJSON = action.payload; log.debug({ schemaJSON: parseify(schemaJSON) } as JsonObject, 'Received OpenAPI schema'); - const { nodesAllowlist, nodesDenylist } = getState().config; - const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist); + const nodeTemplates = parseSchema(schemaJSON); log.debug({ nodeTemplates } as JsonObject, `Built ${size(nodeTemplates)} node templates`); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts index 562dd7c27f3..f421009030f 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts @@ -1,8 +1,6 @@ -import { isAnyOf } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; import type { AppStartListening, RootState } from 'app/store/store'; import { omit } from 'es-toolkit/compat'; -import { imageUploadedClientSide } from 'features/gallery/store/actions'; import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors'; import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice'; import { toast } from 'features/toast/toast'; @@ -10,7 +8,6 @@ import { t } from 'i18next'; import { boardsApi } from 'services/api/endpoints/boards'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; -import { 
getCategories, getListImagesUrl } from 'services/api/util'; const log = logger('gallery'); /** @@ -36,52 +33,20 @@ let lastUploadedToastTimeout: number | null = null; export const addImageUploadedFulfilledListener = (startAppListening: AppStartListening) => { startAppListening({ - matcher: isAnyOf(imagesApi.endpoints.uploadImage.matchFulfilled, imageUploadedClientSide), + matcher: imagesApi.endpoints.uploadImage.matchFulfilled, effect: (action, { dispatch, getState }) => { let imageDTO: ImageDTO; let silent; let isFirstUploadOfBatch = true; - - if (imageUploadedClientSide.match(action)) { - imageDTO = action.payload.imageDTO; - silent = action.payload.silent; - isFirstUploadOfBatch = action.payload.isFirstUploadOfBatch; - } else if (imagesApi.endpoints.uploadImage.matchFulfilled(action)) { - imageDTO = action.payload; - silent = action.meta.arg.originalArgs.silent; - isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true; - } else { - return; - } + imageDTO = action.payload; + silent = action.meta.arg.originalArgs.silent; + isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true; if (silent || imageDTO.is_intermediate) { // If the image is silent or intermediate, we don't want to show a toast return; } - if (imageUploadedClientSide.match(action)) { - const categories = getCategories(imageDTO); - const boardId = imageDTO.board_id ?? 'none'; - dispatch( - imagesApi.util.invalidateTags([ - { - type: 'ImageList', - id: getListImagesUrl({ - board_id: boardId, - categories, - }), - }, - { - type: 'Board', - id: boardId, - }, - { - type: 'BoardImagesTotal', - id: boardId, - }, - ]) - ); - } const state = getState(); log.debug({ imageDTO }, 'Image uploaded'); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts index e53fc977b98..d5c58528cb9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts @@ -17,14 +17,8 @@ import { zParameterModel } from 'features/parameters/types/parameterSchemas'; import { toast } from 'features/toast/toast'; import { t } from 'i18next'; import { selectGlobalRefImageModels, selectRegionalRefImageModels } from 'services/api/hooks/modelsByType'; -import type { AnyModelConfig } from 'services/api/types'; -import { - isChatGPT4oModelConfig, - isFluxKontextApiModelConfig, - isFluxKontextModelConfig, - isFluxReduxModelConfig, - isGemini2_5ModelConfig, -} from 'services/api/types'; +import type { FLUXKontextModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types'; +import { isFluxKontextModelConfig, isFluxReduxModelConfig } from 'services/api/types'; const log = logger('models'); @@ -68,26 +62,19 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = // to choose the best available model based on the new main model. const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase); - let newGlobalRefImageModel = null; + let newGlobalRefImageModel: IPAdapterModelConfig | FLUXKontextModelConfig | FLUXReduxModelConfig | null = + null; // Certain models require the ref image model to be the same as the main model - others just need a matching // base. Helper to grab the first exact match or the first available model if no exact match is found. 
- const exactMatchOrFirst = (candidates: T[]): T | null => - candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null; + const exactMatchOrFirst = ( + candidates: T[] + ): T | null => candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null; // The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) { const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig); newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels); - } else if (newModel.base === 'chatgpt-4o') { - const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig); - newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels); - } else if (newModel.base === 'gemini-2.5') { - const gemini2_5Models = allRefImageModels.filter(isGemini2_5ModelConfig); - newGlobalRefImageModel = exactMatchOrFirst(gemini2_5Models); - } else if (newModel.base === 'flux-kontext') { - const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig); - newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels); } else if (newModel.base === 'flux') { const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig); newGlobalRefImageModel = fluxReduxModels[0] ?? null; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts index 63602339a9b..8cbbc72343b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts @@ -19,14 +19,12 @@ import { isRegionalGuidanceFLUXReduxConfig, isRegionalGuidanceIPAdapterConfig, } from 'features/controlLayers/store/types'; -import { zModelIdentifierField } from 'features/nodes/types/common'; import { modelSelected } from 'features/parameters/store/actions'; import { postProcessingModelChanged, tileControlnetModelChanged, upscaleModelChanged, } from 'features/parameters/store/upscaleSlice'; -import { videoModelChanged } from 'features/parameters/store/videoSlice'; import { zParameterCLIPEmbedModel, zParameterSpandrelImageToImageModel, @@ -49,7 +47,6 @@ import { isRefinerMainModelModelConfig, isSpandrelImageToImageModelConfig, isT5EncoderModelConfigOrSubmodel, - isVideoModelConfig, } from 'services/api/types'; import type { JsonObject } from 'type-fest'; @@ -90,7 +87,6 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) => handleCLIPEmbedModels(models, state, dispatch, log); handleFLUXVAEModels(models, state, dispatch, log); handleFLUXReduxModels(models, state, dispatch, log); - handleVideoModels(models, state, dispatch, log); }, }); }; @@ -123,19 +119,6 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => { return; } - // If we have a default model, try to use it - if (state.config.sd.defaultModel) { - const defaultModel = allMainModels.find((m) => m.key === state.config.sd.defaultModel); - if (defaultModel) { - log.debug( - { selectedMainModel, defaultModel }, - 'No selected main model or selected main model is not available, selecting default model' - ); - dispatch(modelSelected(defaultModel)); - return; - } - } - log.debug( { selectedMainModel, firstModel }, 'No selected main model or selected main model is not available, selecting first available model' @@ 
-203,22 +186,6 @@ const handleLoRAModels: ModelHandler = (models, state, dispatch, log) => { }); }; -const handleVideoModels: ModelHandler = (models, state, dispatch, log) => { - const videoModels = models.filter(isVideoModelConfig); - const selectedVideoModel = state.video.videoModel; - - if (selectedVideoModel && videoModels.some((m) => m.key === selectedVideoModel.key)) { - return; - } - - const firstModel = videoModels[0] || null; - if (firstModel) { - log.debug({ firstModel }, 'No video model selected, selecting first available video model'); - dispatch(videoModelChanged({ videoModel: zModelIdentifierField.parse(firstModel) })); - return; - } -}; - const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log) => { const caModels = models.filter(isControlLayerModelConfig); selectCanvasSlice(state).controlLayers.entities.forEach((entity) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts index 64370eee8bc..f8f7d1659a1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketConnected.ts @@ -1,7 +1,6 @@ import { objectEquals } from '@observ33r/object-equals'; import { createAction } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; -import { $baseUrl } from 'app/store/nanostores/baseUrl'; import type { AppStartListening } from 'app/store/store'; import { atom } from 'nanostores'; import { api } from 'services/api'; @@ -16,7 +15,7 @@ export const socketConnected = createAction('socket/connected'); export const addSocketConnectedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketConnected, - effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => { + effect: async (action, { dispatch, getState }) => { /** * The rest of this listener has recovery logic for when the socket disconnects and reconnects. * @@ -43,20 +42,12 @@ export const addSocketConnectedEventListener = (startAppListening: AppStartListe // Else, we need to compare the last-known queue status with the current queue status, re-fetching // everything if it has changed. - - if ($baseUrl.get()) { - // If we have a baseUrl (e.g. 
not localhost), we need to debounce the re-fetch to not hammer server - cancelActiveListeners(); - // Add artificial jitter to the debounce - await delay(1000 + Math.random() * 1000); - } - const prevQueueStatusData = selectQueueStatus(getState()).data; try { // Fetch the queue status again const queueStatusRequest = dispatch( - await queueApi.endpoints.getQueueStatus.initiate(undefined, { + queueApi.endpoints.getQueueStatus.initiate(undefined, { forceRefetch: true, subscribe: false, }) diff --git a/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts b/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts deleted file mode 100644 index cf41facb7c3..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/accountSettingsLink.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { atom } from 'nanostores'; - -export const $accountSettingsLink = atom(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts b/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts deleted file mode 100644 index 4008b86cef2..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/accountTypeText.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { atom } from 'nanostores'; - -export const $accountTypeText = atom(''); diff --git a/invokeai/frontend/web/src/app/store/nanostores/authToken.ts b/invokeai/frontend/web/src/app/store/nanostores/authToken.ts deleted file mode 100644 index 1b1e2137309..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/authToken.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { atom, computed } from 'nanostores'; - -/** - * The user's auth token. - */ -export const $authToken = atom(); - -/** - * The crossOrigin value to use for all image loading. Depends on whether the user is authenticated. - */ -export const $crossOrigin = computed($authToken, (token) => (token ? 'use-credentials' : 'anonymous')); diff --git a/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts b/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts deleted file mode 100644 index 19bebab0ef8..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/baseUrl.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { atom } from 'nanostores'; - -/** - * The OpenAPI base url. 
- */ -export const $baseUrl = atom(); diff --git a/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts b/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts deleted file mode 100644 index 1a6a5571a03..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/customNavComponent.ts +++ /dev/null @@ -1,4 +0,0 @@ -import { atom } from 'nanostores'; -import type { ReactNode } from 'react'; - -export const $customNavComponent = atom(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts b/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts deleted file mode 100644 index 9f6628ac9cd..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/customStarUI.ts +++ /dev/null @@ -1,14 +0,0 @@ -import type { MenuItemProps } from '@invoke-ai/ui-library'; -import { atom } from 'nanostores'; - -export type CustomStarUi = { - on: { - icon: MenuItemProps['icon']; - text: string; - }; - off: { - icon: MenuItemProps['icon']; - text: string; - }; -}; -export const $customStarUI = atom(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts b/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts deleted file mode 100644 index b71cab53088..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/isDebugging.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { atom } from 'nanostores'; - -export const $isDebugging = atom(false); diff --git a/invokeai/frontend/web/src/app/store/nanostores/logo.ts b/invokeai/frontend/web/src/app/store/nanostores/logo.ts deleted file mode 100644 index 5fd94ebd901..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/logo.ts +++ /dev/null @@ -1,4 +0,0 @@ -import { atom } from 'nanostores'; -import type { ReactNode } from 'react'; - -export const $logo = atom(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts b/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts deleted file mode 100644 index fdc0d8a788b..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/onClickGoToModelManager.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { atom } from 'nanostores'; - -export const $onClickGoToModelManager = atom<(() => void) | undefined>(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts b/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts deleted file mode 100644 index 124815f7ead..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/openAPISchemaUrl.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { atom } from 'nanostores'; - -export const $openAPISchemaUrl = atom(undefined); diff --git a/invokeai/frontend/web/src/app/store/nanostores/projectId.ts b/invokeai/frontend/web/src/app/store/nanostores/projectId.ts deleted file mode 100644 index c2b14e91acb..00000000000 --- a/invokeai/frontend/web/src/app/store/nanostores/projectId.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { atom } from 'nanostores'; - -/** - * The optional project-id header. 
- */
-export const $projectId = atom();
-
-export const $projectName = atom();
-export const $projectUrl = atom();
diff --git a/invokeai/frontend/web/src/app/store/nanostores/queueId.ts b/invokeai/frontend/web/src/app/store/nanostores/queueId.ts
deleted file mode 100644
index 462cf69d0a6..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/queueId.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import { atom } from 'nanostores';
-
-export const DEFAULT_QUEUE_ID = 'default';
-
-export const $queueId = atom(DEFAULT_QUEUE_ID);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts b/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts
deleted file mode 100644
index 10f7795a8a0..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/toastMap.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import type { ToastConfig } from 'features/toast/toast';
-import { atom } from 'nanostores';
-
-export const $toastMap = atom | undefined>(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts b/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts
deleted file mode 100644
index f36512d7441..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/videoUpsellComponent.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $videoUpsellComponent = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts b/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts
deleted file mode 100644
index 5e8361412e2..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/whatsNew.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-import { atom } from 'nanostores';
-import type { ReactNode } from 'react';
-
-export const $whatsNew = atom(undefined);
diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts
index 12fcfa5a406..90c6b47b195 100644
--- a/invokeai/frontend/web/src/app/store/store.ts
+++ b/invokeai/frontend/web/src/app/store/store.ts
@@ -34,20 +34,16 @@ import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
 import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
 import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
 import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
-import { videoSliceConfig } from 'features/parameters/store/videoSlice';
 import { queueSliceConfig } from 'features/queue/store/queueSlice';
 import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
-import { configSliceConfig } from 'features/system/store/configSlice';
 import { systemSliceConfig } from 'features/system/store/systemSlice';
 import { uiSliceConfig } from 'features/ui/store/uiSlice';
 import { diff } from 'jsondiffpatch';
-import dynamicMiddlewares from 'redux-dynamic-middlewares';
 import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
 import { REMEMBER_REHYDRATED, rememberEnhancer, rememberReducer } from 'redux-remember';
 import undoable, { newHistory } from 'redux-undo';
 import { serializeError } from 'serialize-error';
 import { api } from 'services/api';
-import { authToastMiddleware } from 'services/api/authToastMiddleware';
 import type { JsonObject } from 'type-fest';

 import { reduxRememberDriver } from './enhancers/reduxRemember/driver';
@@ -67,7 +63,6 @@ const SLICE_CONFIGS = {
   [canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig,
[canvasSliceConfig.slice.reducerPath]: canvasSliceConfig, [changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig, - [configSliceConfig.slice.reducerPath]: configSliceConfig, [dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig, [gallerySliceConfig.slice.reducerPath]: gallerySliceConfig, [lorasSliceConfig.slice.reducerPath]: lorasSliceConfig, @@ -80,7 +75,6 @@ const SLICE_CONFIGS = { [systemSliceConfig.slice.reducerPath]: systemSliceConfig, [uiSliceConfig.slice.reducerPath]: uiSliceConfig, [upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig, - [videoSliceConfig.slice.reducerPath]: videoSliceConfig, [workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig, [workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig, }; @@ -97,7 +91,6 @@ const ALL_REDUCERS = { canvasSliceConfig.undoableConfig?.reduxUndoOptions ), [changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig.slice.reducer, - [configSliceConfig.slice.reducerPath]: configSliceConfig.slice.reducer, [dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig.slice.reducer, [gallerySliceConfig.slice.reducerPath]: gallerySliceConfig.slice.reducer, [lorasSliceConfig.slice.reducerPath]: lorasSliceConfig.slice.reducer, @@ -114,7 +107,6 @@ const ALL_REDUCERS = { [systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer, [uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer, [upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer, - [videoSliceConfig.slice.reducerPath]: videoSliceConfig.slice.reducer, [workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer, [workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer, }; @@ -197,8 +189,6 @@ export const createStore = (options?: { persist?: boolean; persistDebounce?: num immutableCheck: import.meta.env.MODE === 'development', }) .concat(api.middleware) - .concat(dynamicMiddlewares) - .concat(authToastMiddleware) // .concat(getDebugLoggerMiddleware({ withDiff: true, withNextState: true })) .prepend(listenerMiddleware.middleware), enhancers: (getDefaultEnhancers) => { diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts index b24f83a1b15..664ac002e95 100644 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ b/invokeai/frontend/web/src/app/types/invokeai.ts @@ -60,6 +60,16 @@ const zNumericalParameterConfig = z.object({ }); export type NumericalParameterConfig = z.infer; +const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; + /** * Configuration options for the InvokeAI UI. * Distinct from system settings which may be changed inside the app. 
@@ -143,7 +153,7 @@ export const getDefaultAppConfig = (): AppConfig => ({ allowPromptExpansion: false, allowVideo: false, // used to determine if video is enabled vs upsell shouldShowCredits: false, - disabledTabs: ['video'], // used to determine if video functionality is visible + disabledTabs: [], // used to determine if video functionality is visible disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[], disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[], sd: { diff --git a/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts b/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts deleted file mode 100644 index f5cc5a7f3f2..00000000000 --- a/invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts +++ /dev/null @@ -1,121 +0,0 @@ -import { useStore } from '@nanostores/react'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { imageUploadedClientSide } from 'features/gallery/store/actions'; -import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors'; -import { useCallback } from 'react'; -import { useCreateImageUploadEntryMutation } from 'services/api/endpoints/images'; -import type { ImageDTO } from 'services/api/types'; - -type PresignedUrlResponse = { - fullUrl: string; - thumbnailUrl: string; -}; - -const isPresignedUrlResponse = (response: unknown): response is PresignedUrlResponse => { - return typeof response === 'object' && response !== null && 'fullUrl' in response && 'thumbnailUrl' in response; -}; - -export const useClientSideUpload = () => { - const dispatch = useAppDispatch(); - const autoAddBoardId = useAppSelector(selectAutoAddBoardId); - const authToken = useStore($authToken); - const [createImageUploadEntry] = useCreateImageUploadEntryMutation(); - - const clientSideUpload = useCallback( - async (file: File, i: number): Promise => { - const image = new Image(); - const objectURL = URL.createObjectURL(file); - image.src = objectURL; - let width = 0; - let height = 0; - let thumbnail: Blob | undefined; - - await new Promise((resolve) => { - image.onload = () => { - width = image.naturalWidth; - height = image.naturalHeight; - - // Calculate thumbnail dimensions maintaining aspect ratio - let thumbWidth = width; - let thumbHeight = height; - if (width > height && width > 256) { - thumbWidth = 256; - thumbHeight = Math.round((height * 256) / width); - } else if (height > 256) { - thumbHeight = 256; - thumbWidth = Math.round((width * 256) / height); - } - - const canvas = document.createElement('canvas'); - canvas.width = thumbWidth; - canvas.height = thumbHeight; - const ctx = canvas.getContext('2d'); - ctx?.drawImage(image, 0, 0, thumbWidth, thumbHeight); - - canvas.toBlob( - (blob) => { - if (blob) { - thumbnail = blob; - // Clean up resources - URL.revokeObjectURL(objectURL); - image.src = ''; // Clear image source - image.remove(); // Remove the image element - canvas.width = 0; // Clear canvas - canvas.height = 0; - resolve(); - } - }, - 'image/webp', - 0.8 - ); - }; - - // Handle load errors - image.onerror = () => { - URL.revokeObjectURL(objectURL); - image.remove(); - resolve(); - }; - }); - const { presigned_url, image_dto } = await createImageUploadEntry({ - width, - height, - board_id: autoAddBoardId === 'none' ? 
undefined : autoAddBoardId, - }).unwrap(); - - const response = await fetch(presigned_url, { - method: 'GET', - ...(authToken && { - headers: { - Authorization: `Bearer ${authToken}`, - }, - }), - }).then((res) => res.json()); - - if (!isPresignedUrlResponse(response)) { - throw new Error('Invalid response'); - } - - const fullUrl = response.fullUrl; - const thumbnailUrl = response.thumbnailUrl; - - await fetch(fullUrl, { - method: 'PUT', - body: file, - }); - - await fetch(thumbnailUrl, { - method: 'PUT', - body: thumbnail, - }); - - dispatch(imageUploadedClientSide({ imageDTO: image_dto, silent: false, isFirstUploadOfBatch: i === 0 })); - - return image_dto; - }, - [autoAddBoardId, authToken, createImageUploadEntry, dispatch] - ); - - return clientSideUpload; -}; diff --git a/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts b/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts index d90ec3f2ed1..e46227b2f5d 100644 --- a/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts +++ b/invokeai/frontend/web/src/common/hooks/useCopyImageToClipboard.ts @@ -1,7 +1,5 @@ -import { useAppDispatch } from 'app/store/storeHooks'; import { useClipboard } from 'common/hooks/useClipboard'; import { convertImageUrlToBlob } from 'common/util/convertImageUrlToBlob'; -import { imageCopiedToClipboard } from 'features/gallery/store/actions'; import { toast } from 'features/toast/toast'; import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -9,7 +7,6 @@ import { useTranslation } from 'react-i18next'; export const useCopyImageToClipboard = () => { const { t } = useTranslation(); const clipboard = useClipboard(); - const dispatch = useAppDispatch(); const copyImageToClipboard = useCallback( async (image_url: string) => { @@ -26,7 +23,6 @@ export const useCopyImageToClipboard = () => { title: t('toast.imageCopied'), status: 'success', }); - dispatch(imageCopiedToClipboard()); }); } catch (err) { toast({ @@ -37,7 +33,7 @@ export const useCopyImageToClipboard = () => { }); } }, - [clipboard, t, dispatch] + [clipboard, t] ); return copyImageToClipboard; diff --git a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts index 1309afdbf56..33b90e1d7fe 100644 --- a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts +++ b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts @@ -1,27 +1,14 @@ -import { useStore } from '@nanostores/react'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { imageDownloaded } from 'features/gallery/store/actions'; import { toast } from 'features/toast/toast'; import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; export const useDownloadItem = () => { const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const authToken = useStore($authToken); const downloadItem = useCallback( async (item_url: string, item_id: string) => { try { - const requestOpts = authToken - ? 
{ - headers: { - Authorization: `Bearer ${authToken}`, - }, - } - : {}; - const blob = await fetch(item_url, requestOpts).then((resp) => resp.blob()); + const blob = await fetch(item_url).then((resp) => resp.blob()); if (!blob) { throw new Error('Unable to create Blob'); } @@ -34,7 +21,6 @@ export const useDownloadItem = () => { document.body.appendChild(a); a.click(); window.URL.revokeObjectURL(url); - dispatch(imageDownloaded()); } catch (err) { toast({ id: 'PROBLEM_DOWNLOADING_IMAGE', @@ -44,7 +30,7 @@ export const useDownloadItem = () => { }); } }, - [t, dispatch, authToken] + [t] ); return { downloadItem }; diff --git a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts index a3ccdc01f2a..69b0b2b0fc2 100644 --- a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts +++ b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts @@ -1,20 +1,16 @@ import { useAppStore } from 'app/store/storeHooks'; import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state'; -import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state'; import { selectSelection } from 'features/gallery/store/gallerySelectors'; import { useClearQueue } from 'features/queue/hooks/useClearQueue'; import { useDeleteCurrentQueueItem } from 'features/queue/hooks/useDeleteCurrentQueueItem'; import { useInvoke } from 'features/queue/hooks/useInvoke'; import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { getFocusedRegion } from './focus'; export const useGlobalHotkeys = () => { const { dispatch, getState } = useAppStore(); - const isVideoEnabled = useFeatureStatus('video'); - const isModelManagerEnabled = useFeatureStatus('modelManager'); const queue = useInvoke(); useRegisteredHotkeys({ @@ -94,18 +90,6 @@ export const useGlobalHotkeys = () => { dependencies: [dispatch], }); - useRegisteredHotkeys({ - id: 'selectVideoTab', - category: 'app', - callback: () => { - navigationApi.switchToTab('video'); - }, - options: { - enabled: isVideoEnabled, - }, - dependencies: [dispatch], - }); - useRegisteredHotkeys({ id: 'selectWorkflowsTab', category: 'app', @@ -121,10 +105,7 @@ export const useGlobalHotkeys = () => { callback: () => { navigationApi.switchToTab('models'); }, - options: { - enabled: isModelManagerEnabled, - }, - dependencies: [dispatch, isModelManagerEnabled], + dependencies: [dispatch], }); useRegisteredHotkeys({ @@ -133,11 +114,10 @@ export const useGlobalHotkeys = () => { callback: () => { navigationApi.switchToTab('queue'); }, - dependencies: [dispatch, isModelManagerEnabled], + dependencies: [dispatch], }); const deleteImageModalApi = useDeleteImageModalApi(); - const deleteVideoModalApi = useDeleteVideoModalApi(); useRegisteredHotkeys({ id: 'deleteSelection', @@ -153,8 +133,6 @@ export const useGlobalHotkeys = () => { } if (selection.every(({ type }) => type === 'image')) { deleteImageModalApi.delete(selection.map((s) => s.id)); - } else if (selection.every(({ type }) => type === 'video')) { - deleteVideoModalApi.delete(selection.map((s) => s.id)); } else { // no-op, we expect selections to always be only images or only video } diff --git a/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx b/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx index 9a06dab6398..445f58c9a66 100644 --- 
a/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx +++ b/invokeai/frontend/web/src/common/hooks/useImageUploadButton.tsx @@ -3,7 +3,6 @@ import { Button, IconButton } from '@invoke-ai/ui-library'; import { logger } from 'app/logging/logger'; import { useAppSelector } from 'app/store/storeHooks'; import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors'; -import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice'; import { toast } from 'features/toast/toast'; import { memo, useCallback } from 'react'; import type { Accept, FileRejection } from 'react-dropzone'; @@ -27,7 +26,6 @@ export const dropzoneAccept: Accept = { 'image/webp': ['.webp'].reduce(addUpperCaseReducer, [] as string[]), }; -import { useClientSideUpload } from './useClientSideUpload'; type UseImageUploadButtonArgs = | { isDisabled?: boolean; @@ -73,9 +71,7 @@ export const useImageUploadButton = ({ onError, }: UseImageUploadButtonArgs) => { const autoAddBoardId = useAppSelector(selectAutoAddBoardId); - const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled); const [uploadImage, request] = useUploadImageMutation(); - const clientSideUpload = useClientSideUpload(); const { t } = useTranslation(); const onDropAccepted = useCallback( @@ -108,20 +104,16 @@ export const useImageUploadButton = ({ onUploadStarted?.(files); let imageDTOs: ImageDTO[] = []; - if (isClientSideUploadEnabled && files.length > 1) { - imageDTOs = await Promise.all(files.map((file, i) => clientSideUpload(file, i))); - } else { - imageDTOs = await uploadImages( - files.map((file, i) => ({ - file, - image_category: 'user', - is_intermediate: false, - board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId, - silent: false, - isFirstUploadOfBatch: i === 0, - })) - ); - } + imageDTOs = await uploadImages( + files.map((file, i) => ({ + file, + image_category: 'user', + is_intermediate: false, + board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId, + silent: false, + isFirstUploadOfBatch: i === 0, + })) + ); if (onUpload) { onUpload(imageDTOs); } @@ -135,17 +127,7 @@ export const useImageUploadButton = ({ }); } }, - [ - allowMultiple, - onUploadStarted, - uploadImage, - autoAddBoardId, - onUpload, - isClientSideUploadEnabled, - clientSideUpload, - onError, - t, - ] + [allowMultiple, onUploadStarted, uploadImage, autoAddBoardId, onUpload, onError, t] ); const onDropRejected = useCallback( diff --git a/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts b/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts index 5d1ff434bc9..69816bd0284 100644 --- a/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts +++ b/invokeai/frontend/web/src/common/util/convertImageUrlToBlob.ts @@ -1,5 +1,3 @@ -import { $authToken } from 'app/store/nanostores/authToken'; - /** * Converts an image URL to a Blob by creating an element, drawing it to canvas * and then converting the canvas to a Blob. @@ -40,6 +38,6 @@ export const convertImageUrlToBlob = (url: string) => reject(new Error('Image failed to load. The URL may be invalid or the object may not exist.')); }; - img.crossOrigin = $authToken.get() ? 
'use-credentials' : 'anonymous'; + img.crossOrigin = 'anonymous'; img.src = url; }); diff --git a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx index 3413a38e520..e8885097d17 100644 --- a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx +++ b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx @@ -13,7 +13,6 @@ import { memo, useCallback, useMemo, useState } from 'react'; import { useTranslation } from 'react-i18next'; import { useListAllBoardsQuery } from 'services/api/endpoints/boards'; import { useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation } from 'services/api/endpoints/images'; -import { useAddVideosToBoardMutation, useRemoveVideosFromBoardMutation } from 'services/api/endpoints/videos'; const selectImagesToChange = createSelector( selectChangeBoardModalSlice, @@ -41,8 +40,6 @@ const ChangeBoardModal = () => { const videosToChange = useAppSelector(selectVideosToChange); const [addImagesToBoard] = useAddImagesToBoardMutation(); const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation(); - const [addVideosToBoard] = useAddVideosToBoardMutation(); - const [removeVideosFromBoard] = useRemoveVideosFromBoardMutation(); const { t } = useTranslation(); const options = useMemo(() => { @@ -80,27 +77,8 @@ const ChangeBoardModal = () => { }); } } - if (videosToChange.length) { - if (selectedBoardId === 'none') { - removeVideosFromBoard({ video_ids: videosToChange }); - } else { - addVideosToBoard({ - video_ids: videosToChange, - board_id: selectedBoardId, - }); - } - } dispatch(changeBoardReset()); - }, [ - addImagesToBoard, - dispatch, - imagesToChange, - videosToChange, - removeImagesFromBoard, - selectedBoardId, - addVideosToBoard, - removeVideosFromBoard, - ]); + }, [addImagesToBoard, dispatch, imagesToChange, videosToChange, removeImagesFromBoard, selectedBoardId]); const onChange = useCallback((v) => { if (!v) { diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx index 4e9dd5ec512..eb2a043864b 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx @@ -2,7 +2,6 @@ import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-li import { useStore } from '@nanostores/react'; import { useAppSelector } from 'app/store/storeHooks'; import { useDeferredModelLoadingInvocationProgressMessage } from 'features/controlLayers/hooks/useDeferredModelLoadingInvocationProgressMessage'; -import { selectIsLocal } from 'features/system/store/configSlice'; import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -44,13 +43,8 @@ CanvasAlertsInvocationProgressContentCommercial.displayName = 'CanvasAlertsInvoc export const CanvasAlertsInvocationProgress = memo(() => { const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail); - const isLocal = useAppSelector(selectIsLocal); - if (!isLocal) { - return ; - } - - // OSS user setting + // user setting if 
(!shouldShowInvocationProgressDetail) { return null; } diff --git a/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx b/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx index a21c303d13e..2b3ff537026 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/Filters/FilterTypeSelect.tsx @@ -1,18 +1,13 @@ import type { ComboboxOnChange } from '@invoke-ai/ui-library'; import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { createSelector } from '@reduxjs/toolkit'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; -import { includes, map } from 'es-toolkit/compat'; +import { map } from 'es-toolkit/compat'; import type { FilterConfig } from 'features/controlLayers/store/filters'; import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/filters'; -import { selectConfigSlice } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { assert } from 'tsafe'; -const selectDisabledProcessors = createSelector(selectConfigSlice, (config) => config.sd.disabledControlNetProcessors); - type Props = { filterType: FilterConfig['type']; onChange: (filterType: FilterConfig['type']) => void; @@ -20,12 +15,9 @@ type Props = { export const FilterTypeSelect = memo(({ filterType, onChange }: Props) => { const { t } = useTranslation(); - const disabledProcessors = useAppSelector(selectDisabledProcessors); const options = useMemo(() => { - return map(IMAGE_FILTERS, (data, type) => ({ value: type, label: t(`controlLayers.filter.${type}.label`) })).filter( - (o) => !includes(disabledProcessors, o.value) - ); - }, [disabledProcessors, t]); + return map(IMAGE_FILTERS, (data, type) => ({ value: type, label: t(`controlLayers.filter.${type}.label`) })); + }, [t]); const _onChange = useCallback( (v) => { diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx index 49a289b875c..34fb96f063d 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx @@ -13,12 +13,21 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf import WavyLine from 'common/components/WavyLine'; import { selectImg2imgStrength, setImg2imgStrength } from 'features/controlLayers/store/paramsSlice'; import { selectActiveRasterLayerEntities } from 'features/controlLayers/store/selectors'; -import { selectImg2imgStrengthConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { useSelectedModelConfig } from 'services/api/hooks/useSelectedModelConfig'; import { isFluxFillMainModelModelConfig } from 'services/api/types'; +const CONSTRAINTS = { + initial: 0.7, + sliderMin: 0, + sliderMax: 1, + numberInputMin: 0, + numberInputMax: 1, + fineStep: 0.01, + coarseStep: 0.05, +}; + const selectHasRasterLayersWithContent = createSelector( selectActiveRasterLayerEntities, (entities) => entities.length > 0 @@ -37,7 +46,6 @@ export const 
ParamDenoisingStrength = memo(() => { [dispatch] ); - const config = useAppSelector(selectImg2imgStrengthConfig); const { t } = useTranslation(); const [invokeBlue300] = useToken('colors', ['invokeBlue.300']); @@ -67,20 +75,20 @@ export const ParamDenoisingStrength = memo(() => { {!isDisabled ? ( <> void; @@ -14,11 +21,8 @@ type Props = { const formatValue = (v: number) => v.toFixed(2); const marks = [0, 1, 2]; -const selectWeightConfig = createSelector(selectConfigSlice, (config) => config.sd.ca.weight); - export const Weight = memo(({ weight, onChange }: Props) => { const { t } = useTranslation(); - const config = useAppSelector(selectWeightConfig); return ( @@ -28,23 +32,23 @@ export const Weight = memo(({ weight, onChange }: Props) => { ); diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts index 062937edcd0..8f0a1a11a6f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts @@ -34,10 +34,8 @@ import type { T2IAdapterConfig, } from 'features/controlLayers/store/types'; import { - initialChatGPT4oReferenceImage, initialControlNet, initialFluxKontextReferenceImage, - initialGemini2_5ReferenceImage, initialIPAdapter, initialRegionalGuidanceIPAdapter, initialT2IAdapter, @@ -92,25 +90,12 @@ export const getDefaultRefImageConfig = ( const base = mainModelConfig?.base; - // For ChatGPT-4o, the ref image model is the model itself. - if (base === 'chatgpt-4o') { - const config = deepClone(initialChatGPT4oReferenceImage); - config.model = zModelIdentifierField.parse(mainModelConfig); - return config; - } - - if (base === 'flux-kontext' || (base === 'flux' && mainModelConfig?.name?.toLowerCase().includes('kontext'))) { + if (base === 'flux' && mainModelConfig?.name?.toLowerCase().includes('kontext')) { const config = deepClone(initialFluxKontextReferenceImage); config.model = zModelIdentifierField.parse(mainModelConfig); return config; } - if (base === 'gemini-2.5') { - const config = deepClone(initialGemini2_5ReferenceImage); - config.model = zModelIdentifierField.parse(mainModelConfig); - return config; - } - // Otherwise, find the first compatible IP Adapter model. 
const modelConfig = ipAdapterModelConfigs.find((m) => m.base === base); diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts index d31f18ad6c2..b852d119149 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts @@ -1,13 +1,5 @@ import { useAppSelector } from 'app/store/storeHooks'; -import { - selectIsChatGPT4o, - selectIsCogView4, - selectIsFluxKontext, - selectIsGemini2_5, - selectIsImagen3, - selectIsImagen4, - selectIsSD3, -} from 'features/controlLayers/store/paramsSlice'; +import { selectIsCogView4, selectIsFluxKontext, selectIsSD3 } from 'features/controlLayers/store/paramsSlice'; import type { CanvasEntityType } from 'features/controlLayers/store/types'; import { useMemo } from 'react'; import type { Equals } from 'tsafe'; @@ -16,26 +8,24 @@ import { assert } from 'tsafe'; export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => { const isSD3 = useAppSelector(selectIsSD3); const isCogView4 = useAppSelector(selectIsCogView4); - const isImagen3 = useAppSelector(selectIsImagen3); - const isImagen4 = useAppSelector(selectIsImagen4); const isFluxKontext = useAppSelector(selectIsFluxKontext); - const isChatGPT4o = useAppSelector(selectIsChatGPT4o); - const isGemini2_5 = useAppSelector(selectIsGemini2_5); + // TODO(psyche): consider using a constant to define which entity types are supported by which model, + // see invokeai/frontend/web/src/features/modelManagerV2/models.ts for ref const isEntityTypeEnabled = useMemo(() => { switch (entityType) { case 'regional_guidance': - return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5; + return !isSD3 && !isCogView4 && !isFluxKontext; case 'control_layer': - return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5; + return !isSD3 && !isCogView4 && !isFluxKontext; case 'inpaint_mask': - return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5; + return !isFluxKontext; case 'raster_layer': - return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5; + return !isFluxKontext; default: assert>(false); } - }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, isGemini2_5]); + }, [entityType, isSD3, isCogView4, isFluxKontext]); return isEntityTypeEnabled; }; diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts index f57c7038e14..b700392c05f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts @@ -1,5 +1,4 @@ import { getArbitraryBaseColor } from '@invoke-ai/ui-library'; -import { $authToken } from 'app/store/nanostores/authToken'; import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager'; import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase'; import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern'; @@ -95,7 +94,7 @@ export class CanvasBackgroundModule extends CanvasModuleBase { 
this.konva.patternRect.fillPatternImage(this.checkboardPattern); this.render(); }; - this.checkboardPattern.src = $authToken.get() ? 'use-credentials' : 'anonymous'; + this.checkboardPattern.src = 'anonymous'; this.checkboardPattern.src = this.config.CHECKERBOARD_PATTERN_DATAURL; this.render(); }; diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts index f2d7140d68a..8bb8ec25319 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts @@ -1,4 +1,3 @@ -import { $authToken } from 'app/store/nanostores/authToken'; import { rgbColorToString } from 'common/util/colorCodeTransformers'; import { SyncableMap } from 'common/util/SyncableMap/SyncableMap'; import { throttle } from 'es-toolkit/compat'; @@ -38,7 +37,7 @@ function setFillPatternImage(shape: Konva.Shape, ...args: Parameters { shape.fillPatternImage(imageElement); }; - imageElement.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous'; + imageElement.crossOrigin = 'anonymous'; imageElement.src = getPatternSVG(...args); return imageElement; } diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts index c684739ede9..0aab41ee7e3 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts @@ -126,7 +126,7 @@ export class CanvasObjectImage extends CanvasModuleBase { return; } - const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url, true)); + const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url)); if (imageElementResult.isErr()) { // Image loading failed (e.g. the URL to the "physical" image is invalid) this.onFailedToLoadImage( @@ -152,7 +152,7 @@ export class CanvasObjectImage extends CanvasModuleBase { this.konva.placeholder.text.text(t('common.loadingImage', 'Loading Image')); } - const imageElementResult = await withResultAsync(() => loadImage(dataURL, false)); + const imageElementResult = await withResultAsync(() => loadImage(dataURL)); if (imageElementResult.isErr()) { // Image loading failed (e.g. 
the URL to the "physical" image is invalid) this.onFailedToLoadImage( diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts index 088e7f265a4..ecf9a5d1c7c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts @@ -13,8 +13,6 @@ import { selectBboxOverlay } from 'features/controlLayers/store/canvasSettingsSl import { selectModel } from 'features/controlLayers/store/paramsSlice'; import { selectBbox } from 'features/controlLayers/store/selectors'; import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types'; -import { API_BASE_MODELS } from 'features/modelManagerV2/models'; -import type { ModelIdentifierField } from 'features/nodes/types/common'; import Konva from 'konva'; import { atom } from 'nanostores'; import type { Logger } from 'roarr'; @@ -238,22 +236,16 @@ export class CanvasBboxToolModule extends CanvasModuleBase { this.syncOverlay(); - const model = this.manager.stateApi.runSelector(selectModel); - this.konva.transformer.setAttrs({ listening: tool === 'bbox', - enabledAnchors: this.getEnabledAnchors(tool, model), + enabledAnchors: this.getEnabledAnchors(tool), }); }; - getEnabledAnchors = (tool: Tool, model?: ModelIdentifierField | null): string[] => { + getEnabledAnchors = (tool: Tool): string[] => { if (tool !== 'bbox') { return NO_ANCHORS; } - if (model?.base && API_BASE_MODELS.includes(model.base)) { - // The bbox is not resizable in these modes - return NO_ANCHORS; - } return ALL_ANCHORS; }; diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/util.ts b/invokeai/frontend/web/src/features/controlLayers/konva/util.ts index 8e34f2169c5..6189b3eef78 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/util.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/util.ts @@ -1,5 +1,4 @@ import type { Selector, Store } from '@reduxjs/toolkit'; -import { $authToken, $crossOrigin } from 'app/store/nanostores/authToken'; import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple'; import { clamp } from 'es-toolkit/compat'; import type { @@ -364,7 +363,7 @@ export const dataURLToImageData = (dataURL: string, width: number, height: numbe reject(e); }; - image.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous'; + image.crossOrigin = 'anonymous'; image.src = dataURL; }); }; @@ -478,23 +477,14 @@ export function getImageDataTransparency(imageData: ImageData): Transparency { /** * Loads an image from a URL and returns a promise that resolves with the loaded image element. * @param src The image source URL - * @param fetchUrlFirst Whether to fetch the image's URL first, assuming the provided `src` will redirect to a different URL. This addresses an issue where CORS headers are dropped during a redirect. 
* @returns A promise that resolves with the loaded image element */ -export async function loadImage(src: string, fetchUrlFirst?: boolean): Promise { - const authToken = $authToken.get(); - let url = src; - if (authToken && fetchUrlFirst) { - const response = await fetch(`${src}?url_only=true`, { credentials: 'include' }); - const data = await response.json(); - url = data.url; - } - +export function loadImage(url: string): Promise { return new Promise((resolve, reject) => { const imageElement = new Image(); imageElement.onload = () => resolve(imageElement); imageElement.onerror = (error) => reject(error); - imageElement.crossOrigin = $crossOrigin.get(); + imageElement.crossOrigin = 'anonymous'; imageElement.src = url; }); } diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts index b8664aeb5ed..f7eef4a6454 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts @@ -35,7 +35,6 @@ import { getScaledBoundingBoxDimensions, } from 'features/controlLayers/util/getScaledBoundingBoxDimensions'; import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify'; -import { API_BASE_MODELS } from 'features/modelManagerV2/models'; import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common'; import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension'; import type { IRect } from 'konva/lib/types'; @@ -73,17 +72,9 @@ import type { } from './types'; import { ASPECT_RATIO_MAP, - CHATGPT_ASPECT_RATIOS, DEFAULT_ASPECT_RATIO_CONFIG, - FLUX_KONTEXT_ASPECT_RATIOS, - GEMINI_2_5_ASPECT_RATIOS, getEntityIdentifier, getInitialCanvasState, - IMAGEN_ASPECT_RATIOS, - isChatGPT4oAspectRatioID, - isFluxKontextAspectRatioID, - isGemini2_5AspectRatioID, - isImagenAspectRatioID, isRegionalGuidanceFLUXReduxConfig, isRegionalGuidanceIPAdapterConfig, zCanvasState, @@ -1227,33 +1218,6 @@ const slice = createSlice({ state.bbox.aspectRatio.id = id; if (id === 'Free') { state.bbox.aspectRatio.isLocked = false; - } else if ( - (state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'imagen4') && - isImagenAspectRatioID(id) - ) { - const { width, height } = IMAGEN_ASPECT_RATIOS[id]; - state.bbox.rect.width = width; - state.bbox.rect.height = height; - state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height; - state.bbox.aspectRatio.isLocked = true; - } else if (state.bbox.modelBase === 'chatgpt-4o' && isChatGPT4oAspectRatioID(id)) { - const { width, height } = CHATGPT_ASPECT_RATIOS[id]; - state.bbox.rect.width = width; - state.bbox.rect.height = height; - state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height; - state.bbox.aspectRatio.isLocked = true; - } else if (state.bbox.modelBase === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) { - const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id]; - state.bbox.rect.width = width; - state.bbox.rect.height = height; - state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height; - state.bbox.aspectRatio.isLocked = true; - } else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) { - const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id]; - state.bbox.rect.width = width; - state.bbox.rect.height = height; - state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height; - 
state.bbox.aspectRatio.isLocked = true; } else { state.bbox.aspectRatio.isLocked = true; state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio; @@ -1700,14 +1664,6 @@ const slice = createSlice({ const base = model?.base; if (isMainModelBase(base) && state.bbox.modelBase !== base) { state.bbox.modelBase = base; - if (API_BASE_MODELS.includes(base)) { - state.bbox.aspectRatio.isLocked = true; - state.bbox.aspectRatio.value = 1; - state.bbox.aspectRatio.id = '1:1'; - state.bbox.rect.width = 1024; - state.bbox.rect.height = 1024; - } - syncScaledSize(state); } }); @@ -1832,10 +1788,6 @@ export const { } = slice.actions; const syncScaledSize = (state: CanvasState) => { - if (API_BASE_MODELS.includes(state.bbox.modelBase)) { - // Imagen3 has fixed sizes. Scaled bbox is not supported. - return; - } if (state.bbox.scaleMethod === 'auto') { // Sync both aspect ratio and size const { width, height } = state.bbox.rect; diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts index 609478b4c0c..90eb53124a1 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts @@ -9,28 +9,16 @@ import { clamp } from 'es-toolkit/compat'; import type { AspectRatioID, ParamsState, RgbaColor } from 'features/controlLayers/store/types'; import { ASPECT_RATIO_MAP, - CHATGPT_ASPECT_RATIOS, DEFAULT_ASPECT_RATIO_CONFIG, - FLUX_KONTEXT_ASPECT_RATIOS, - GEMINI_2_5_ASPECT_RATIOS, getInitialParamsState, - IMAGEN_ASPECT_RATIOS, - isChatGPT4oAspectRatioID, - isFluxKontextAspectRatioID, - isGemini2_5AspectRatioID, - isImagenAspectRatioID, MAX_POSITIVE_PROMPT_HISTORY, zParamsState, } from 'features/controlLayers/store/types'; import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions'; import { - API_BASE_MODELS, - SUPPORTS_ASPECT_RATIO_BASE_MODELS, SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS, SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS, - SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS, SUPPORTS_REF_IMAGES_BASE_MODELS, - SUPPORTS_SEED_BASE_MODELS, } from 'features/modelManagerV2/models'; import { CLIP_SKIP_MAP } from 'features/parameters/types/constants'; import type { @@ -121,14 +109,6 @@ const slice = createSlice({ return; } - if (API_BASE_MODELS.includes(model.base)) { - state.dimensions.aspectRatio.isLocked = true; - state.dimensions.aspectRatio.value = 1; - state.dimensions.aspectRatio.id = '1:1'; - state.dimensions.width = 1024; - state.dimensions.height = 1024; - } - applyClipSkip(state, model, state.clipSkip); }, vaeSelected: (state, action: PayloadAction) => { @@ -318,30 +298,6 @@ const slice = createSlice({ state.dimensions.aspectRatio.id = id; if (id === 'Free') { state.dimensions.aspectRatio.isLocked = false; - } else if ((state.model?.base === 'imagen3' || state.model?.base === 'imagen4') && isImagenAspectRatioID(id)) { - const { width, height } = IMAGEN_ASPECT_RATIOS[id]; - state.dimensions.width = width; - state.dimensions.height = height; - state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height; - state.dimensions.aspectRatio.isLocked = true; - } else if (state.model?.base === 'chatgpt-4o' && isChatGPT4oAspectRatioID(id)) { - const { width, height } = CHATGPT_ASPECT_RATIOS[id]; - state.dimensions.width = width; - state.dimensions.height = height; - state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height; - 
state.dimensions.aspectRatio.isLocked = true; - } else if (state.model?.base === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) { - const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id]; - state.dimensions.width = width; - state.dimensions.height = height; - state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height; - state.dimensions.aspectRatio.isLocked = true; - } else if (state.model?.base === 'flux-kontext' && isFluxKontextAspectRatioID(id)) { - const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id]; - state.dimensions.width = width; - state.dimensions.height = height; - state.dimensions.aspectRatio.value = state.dimensions.width / state.dimensions.height; - state.dimensions.aspectRatio.isLocked = true; } else { state.dimensions.aspectRatio.isLocked = true; state.dimensions.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio; @@ -541,19 +497,12 @@ export const selectIsSDXL = createParamsSelector((params) => params.model?.base export const selectIsFLUX = createParamsSelector((params) => params.model?.base === 'flux'); export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3'); export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4'); -export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3'); -export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4'); export const selectIsFluxKontext = createParamsSelector((params) => { - if (params.model?.base === 'flux-kontext') { - return true; - } if (params.model?.base === 'flux' && params.model?.name.toLowerCase().includes('kontext')) { return true; } return false; }); -export const selectIsChatGPT4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o'); -export const selectIsGemini2_5 = createParamsSelector((params) => params.model?.base === 'gemini-2.5'); export const selectModel = createParamsSelector((params) => params.model); export const selectModelKey = createParamsSelector((params) => params.model?.key); @@ -592,26 +541,10 @@ export const selectModelSupportsNegativePrompt = createSelector( selectModel, (model) => !!model && SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS.includes(model.base) ); -export const selectModelSupportsSeed = createSelector( - selectModel, - (model) => !!model && SUPPORTS_SEED_BASE_MODELS.includes(model.base) -); export const selectModelSupportsRefImages = createSelector( selectModel, (model) => !!model && SUPPORTS_REF_IMAGES_BASE_MODELS.includes(model.base) ); -export const selectModelSupportsAspectRatio = createSelector( - selectModel, - (model) => !!model && SUPPORTS_ASPECT_RATIO_BASE_MODELS.includes(model.base) -); -export const selectModelSupportsPixelDimensions = createSelector( - selectModel, - (model) => !!model && SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS.includes(model.base) -); -export const selectIsApiBaseModel = createSelector( - selectModel, - (model) => !!model && API_BASE_MODELS.includes(model.base) -); export const selectModelSupportsOptimizedDenoising = createSelector( selectModel, (model) => !!model && SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS.includes(model.base) diff --git a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts index e787d08fca0..ea440786dda 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts @@ 
-12,25 +12,13 @@ import type { RefImagesState, } from 'features/controlLayers/store/types'; import { zModelIdentifierField } from 'features/nodes/types/common'; -import type { - ChatGPT4oModelConfig, - FLUXKontextModelConfig, - FLUXReduxModelConfig, - IPAdapterModelConfig, -} from 'services/api/types'; +import type { FLUXKontextModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types'; import { assert } from 'tsafe'; import type { PartialDeep } from 'type-fest'; import type { CLIPVisionModelV2, IPMethodV2, RefImageState } from './types'; import { getInitialRefImagesState, isFLUXReduxConfig, isIPAdapterConfig, zRefImagesState } from './types'; -import { - getReferenceImageState, - initialChatGPT4oReferenceImage, - initialFluxKontextReferenceImage, - initialFLUXRedux, - initialGemini2_5ReferenceImage, - initialIPAdapter, -} from './util'; +import { getReferenceImageState, initialFluxKontextReferenceImage, initialFLUXRedux, initialIPAdapter } from './util'; type PayloadActionWithId = T extends void ? PayloadAction<{ id: string }> @@ -103,7 +91,7 @@ const slice = createSlice({ refImageModelChanged: ( state, action: PayloadActionWithId<{ - modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig | null; + modelConfig: IPAdapterModelConfig | FLUXKontextModelConfig | FLUXReduxModelConfig | null; }> ) => { const { id, modelConfig } = action.payload; @@ -129,30 +117,7 @@ const slice = createSlice({ // The type of ref image depends on the model. When the user switches the model, we rebuild the ref image. // When we switch the model, we keep the image the same, but change the other parameters. - if (entity.config.model.base === 'chatgpt-4o') { - // Switching to chatgpt-4o ref image - entity.config = { - ...initialChatGPT4oReferenceImage, - image: entity.config.image, - model: entity.config.model, - }; - return; - } - - if (entity.config.model.base === 'gemini-2.5') { - // Switching to Gemini 2.5 Flash Preview (nano banana) ref image - entity.config = { - ...initialGemini2_5ReferenceImage, - image: entity.config.image, - model: entity.config.model, - }; - return; - } - - if ( - entity.config.model.base === 'flux-kontext' || - (entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext')) - ) { + if (entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext')) { // Switching to flux-kontext ref image entity.config = { ...initialFluxKontextReferenceImage, diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 3163bd85b2a..2b327ae8ca9 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -555,35 +555,6 @@ export const ASPECT_RATIO_MAP: Record, { ratio: n '9:21': { ratio: 9 / 21, inverseID: '21:9' }, }; -export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']); -type ImagenAspectRatio = z.infer; -export const isImagenAspectRatioID = (v: unknown): v is ImagenAspectRatio => zImagen3AspectRatioID.safeParse(v).success; -export const IMAGEN_ASPECT_RATIOS: Record = { - '16:9': { width: 1408, height: 768 }, - '4:3': { width: 1280, height: 896 }, - '1:1': { width: 1024, height: 1024 }, - '3:4': { width: 896, height: 1280 }, - '9:16': { width: 768, height: 1408 }, -}; - -export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']); -type 
ChatGPT4oAspectRatio = z.infer; -export const isChatGPT4oAspectRatioID = (v: unknown): v is ChatGPT4oAspectRatio => - zChatGPT4oAspectRatioID.safeParse(v).success; -export const CHATGPT_ASPECT_RATIOS: Record = { - '3:2': { width: 1536, height: 1024 }, - '1:1': { width: 1024, height: 1024 }, - '2:3': { width: 1024, height: 1536 }, -} as const; - -export const zGemini2_5AspectRatioID = z.enum(['1:1']); -type Gemini2_5AspectRatio = z.infer; -export const isGemini2_5AspectRatioID = (v: unknown): v is Gemini2_5AspectRatio => - zGemini2_5AspectRatioID.safeParse(v).success; -export const GEMINI_2_5_ASPECT_RATIOS: Record = { - '1:1': { width: 1024, height: 1024 }, -} as const; - export const zFluxKontextAspectRatioID = z.enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']); type FluxKontextAspectRatio = z.infer; export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer => @@ -598,33 +569,6 @@ export const FLUX_KONTEXT_ASPECT_RATIOS: Record; -export const isVeo3AspectRatioID = (v: unknown): v is Veo3AspectRatio => zVeo3AspectRatioID.safeParse(v).success; - -export const zRunwayAspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16', '21:9']); -type RunwayAspectRatio = z.infer; -export const isRunwayAspectRatioID = (v: unknown): v is RunwayAspectRatio => zRunwayAspectRatioID.safeParse(v).success; - -export const zVideoAspectRatio = z.union([zVeo3AspectRatioID, zRunwayAspectRatioID]); -export type VideoAspectRatio = z.infer; -export const isVideoAspectRatio = (v: unknown): v is VideoAspectRatio => zVideoAspectRatio.safeParse(v).success; - -export const zVeo3Resolution = z.enum(['720p', '1080p']); -type Veo3Resolution = z.infer; -export const isVeo3Resolution = (v: unknown): v is Veo3Resolution => zVeo3Resolution.safeParse(v).success; -export const RESOLUTION_MAP: Record = { - '720p': { width: 1280, height: 720 }, - '1080p': { width: 1920, height: 1080 }, -}; - -export const zRunwayResolution = z.enum(['720p']); -type RunwayResolution = z.infer; -export const isRunwayResolution = (v: unknown): v is RunwayResolution => zRunwayResolution.safeParse(v).success; - -export const zVideoResolution = z.union([zVeo3Resolution, zRunwayResolution]); -export type VideoResolution = z.infer; - const zAspectRatioConfig = z.object({ id: zAspectRatioID, value: z.number().gt(0), @@ -638,24 +582,6 @@ export const DEFAULT_ASPECT_RATIO_CONFIG: AspectRatioConfig = { isLocked: false, }; -const zVeo3DurationID = z.enum(['8']); -type Veo3Duration = z.infer; -export const isVeo3DurationID = (v: unknown): v is Veo3Duration => zVeo3DurationID.safeParse(v).success; -export const VEO3_DURATIONS: Record = { - '8': '8 seconds', -}; - -const zRunwayDurationID = z.enum(['5', '10']); -type RunwayDuration = z.infer; -export const isRunwayDurationID = (v: unknown): v is RunwayDuration => zRunwayDurationID.safeParse(v).success; -export const RUNWAY_DURATIONS: Record = { - '5': '5 seconds', - '10': '10 seconds', -}; - -export const zVideoDuration = z.union([zVeo3DurationID, zRunwayDurationID]); -export type VideoDuration = z.infer; - const zBboxState = z.object({ rect: z.object({ x: z.number().int(), diff --git a/invokeai/frontend/web/src/features/cropper/lib/editor.ts b/invokeai/frontend/web/src/features/cropper/lib/editor.ts index 6249e3bb255..62ce5ca9df9 100644 --- a/invokeai/frontend/web/src/features/cropper/lib/editor.ts +++ b/invokeai/frontend/web/src/features/cropper/lib/editor.ts @@ -1,4 +1,3 @@ -import { $crossOrigin } from 'app/store/nanostores/authToken'; import { 
TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern'; import Konva from 'konva'; import type { KonvaEventObject } from 'konva/lib/Node'; @@ -1098,7 +1097,7 @@ export class Editor { return new Promise((resolve, reject) => { const img = new Image(); - img.crossOrigin = $crossOrigin.get(); + img.crossOrigin = 'anonymous'; img.onload = () => { this.originalImage = img; diff --git a/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts b/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts deleted file mode 100644 index b14cd70ebe8..00000000000 --- a/invokeai/frontend/web/src/features/deleteImageModal/hooks/use-delete-video.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state'; -import { useCallback, useMemo } from 'react'; -import type { VideoDTO } from 'services/api/types'; - -export const useDeleteVideo = (videoDTO?: VideoDTO | null) => { - const deleteImageModal = useDeleteVideoModalApi(); - - const isEnabled = useMemo(() => { - if (!videoDTO) { - return; - } - return true; - }, [videoDTO]); - const _delete = useCallback(() => { - if (!videoDTO) { - return; - } - if (!isEnabled) { - return; - } - deleteImageModal.delete([videoDTO.video_id]); - }, [deleteImageModal, videoDTO, isEnabled]); - - return { - delete: _delete, - isEnabled, - }; -}; diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx b/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx deleted file mode 100644 index 9e56bfba7dc..00000000000 --- a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoButton.tsx +++ /dev/null @@ -1,36 +0,0 @@ -import type { IconButtonProps } from '@invoke-ai/ui-library'; -import { IconButton } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectSelectionCount } from 'features/gallery/store/gallerySelectors'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiTrashSimpleBold } from 'react-icons/pi'; -import { $isConnected } from 'services/events/stores'; - -type Props = Omit & { - onClick: () => void; -}; - -export const DeleteVideoButton = memo((props: Props) => { - const { onClick, isDisabled } = props; - const { t } = useTranslation(); - const isConnected = useStore($isConnected); - const count = useAppSelector(selectSelectionCount); - const labelMessage: string = `${t('gallery.deleteVideo', { count })} (Del)`; - - return ( - } - tooltip={labelMessage} - aria-label={labelMessage} - isDisabled={isDisabled || !isConnected} - colorScheme="error" - variant="link" - alignSelf="stretch" - /> - ); -}); - -DeleteVideoButton.displayName = 'DeleteVideoButton'; diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx b/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx deleted file mode 100644 index 662e7e6f773..00000000000 --- a/invokeai/frontend/web/src/features/deleteVideoModal/components/DeleteVideoModal.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import { ConfirmationAlertDialog, Flex, FormControl, FormLabel, Switch, Text } from '@invoke-ai/ui-library'; -import { useAppSelector, useAppStore } from 'app/store/storeHooks'; -import { useDeleteVideoModalApi, useDeleteVideoModalState } from 'features/deleteVideoModal/store/state'; 
-import { selectSystemShouldConfirmOnDelete, setShouldConfirmOnDelete } from 'features/system/store/systemSlice'; -import type { ChangeEvent } from 'react'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; - -export const DeleteVideoModal = memo(() => { - const state = useDeleteVideoModalState(); - const api = useDeleteVideoModalApi(); - const { dispatch } = useAppStore(); - const { t } = useTranslation(); - const shouldConfirmOnDelete = useAppSelector(selectSystemShouldConfirmOnDelete); - - const handleChangeShouldConfirmOnDelete = useCallback( - (e: ChangeEvent) => dispatch(setShouldConfirmOnDelete(!e.target.checked)), - [dispatch] - ); - - return ( - - - {t('gallery.deleteVideoPermanent')} - {t('common.areYouSure')} - - {t('common.dontAskMeAgain')} - - - - - ); -}); -DeleteVideoModal.displayName = 'DeleteVideoModal'; diff --git a/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts b/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts deleted file mode 100644 index 4e7580ce301..00000000000 --- a/invokeai/frontend/web/src/features/deleteVideoModal/store/state.ts +++ /dev/null @@ -1,111 +0,0 @@ -import { useStore } from '@nanostores/react'; -import type { AppStore } from 'app/store/store'; -import { useAppStore } from 'app/store/storeHooks'; -import { intersection } from 'es-toolkit/compat'; -import { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors'; -import { itemSelected } from 'features/gallery/store/gallerySlice'; -import { selectSystemShouldConfirmOnDelete } from 'features/system/store/systemSlice'; -import { atom } from 'nanostores'; -import { useMemo } from 'react'; -import { videosApi } from 'services/api/endpoints/videos'; - -// Implements an awaitable modal dialog for deleting images - -type DeleteVideosModalState = { - video_ids: string[]; - isOpen: boolean; - resolve?: () => void; - reject?: (reason?: string) => void; -}; - -const getInitialState = (): DeleteVideosModalState => ({ - video_ids: [], - isOpen: false, -}); - -const $deleteVideosModalState = atom(getInitialState()); - -const deleteVideosWithDialog = async (video_ids: string[], store: AppStore): Promise => { - const { getState } = store; - const shouldConfirmOnDelete = selectSystemShouldConfirmOnDelete(getState()); - - if (!shouldConfirmOnDelete) { - // If we don't need to confirm and the resources are not in use, delete them directly - await handleDeletions(video_ids, store); - return; - } - - return new Promise((resolve, reject) => { - $deleteVideosModalState.set({ - video_ids, - isOpen: true, - resolve, - reject, - }); - }); -}; - -const handleDeletions = async (video_ids: string[], store: AppStore) => { - try { - const { dispatch, getState } = store; - const state = getState(); - const { data } = videosApi.endpoints.getVideoIds.select(selectGetVideoIdsQueryArgs(state))(state); - const index = data?.video_ids.findIndex((id) => id === video_ids[0]); - const { deleted_videos } = await dispatch( - videosApi.endpoints.deleteVideos.initiate({ video_ids }, { track: false }) - ).unwrap(); - - const newVideoIds = data?.video_ids.filter((id) => !deleted_videos.includes(id)) || []; - const newSelectedVideoId = newVideoIds[index ?? 
0] || null; - - if ( - intersection( - state.gallery.selection.map((s) => s.id), - video_ids - ).length > 0 && - newSelectedVideoId - ) { - // Some selected images were deleted, clear selection - dispatch(itemSelected({ type: 'video', id: newSelectedVideoId })); - } - } catch { - // no-op - } -}; - -const confirmDeletion = async (store: AppStore) => { - const state = $deleteVideosModalState.get(); - await handleDeletions(state.video_ids, store); - state.resolve?.(); - closeSilently(); -}; - -const cancelDeletion = () => { - const state = $deleteVideosModalState.get(); - state.reject?.('User canceled'); - closeSilently(); -}; - -const closeSilently = () => { - $deleteVideosModalState.set(getInitialState()); -}; - -export const useDeleteVideoModalState = () => { - const state = useStore($deleteVideosModalState); - return state; -}; - -export const useDeleteVideoModalApi = () => { - const store = useAppStore(); - const api = useMemo( - () => ({ - delete: (video_ids: string[]) => deleteVideosWithDialog(video_ids, store), - confirm: () => confirmDeletion(store), - cancel: cancelDeletion, - close: closeSilently, - }), - [store] - ); - - return api; -}; diff --git a/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx b/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx deleted file mode 100644 index 6ccb7e48503..00000000000 --- a/invokeai/frontend/web/src/features/dnd/DndDragPreviewMultipleVideo.tsx +++ /dev/null @@ -1,63 +0,0 @@ -import type { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter'; -import { setCustomNativeDragPreview } from '@atlaskit/pragmatic-drag-and-drop/element/set-custom-native-drag-preview'; -import { Flex, Heading } from '@invoke-ai/ui-library'; -import type { MultipleVideoDndSourceData } from 'features/dnd/dnd'; -import { DND_IMAGE_DRAG_PREVIEW_SIZE, preserveOffsetOnSourceFallbackCentered } from 'features/dnd/util'; -import { memo } from 'react'; -import { createPortal } from 'react-dom'; -import { useTranslation } from 'react-i18next'; -import type { Param0 } from 'tsafe'; - -const DndDragPreviewMultipleVideo = memo(({ video_ids }: { video_ids: string[] }) => { - const { t } = useTranslation(); - return ( - - {video_ids.length} - {t('parameters.videos_withCount', { count: video_ids.length })} - - ); -}); - -DndDragPreviewMultipleVideo.displayName = 'DndDragPreviewMultipleVideo'; - -export type DndDragPreviewMultipleVideoState = { - type: 'multiple-video'; - container: HTMLElement; - video_ids: string[]; -}; - -export const createMultipleVideoDragPreview = (arg: DndDragPreviewMultipleVideoState) => - createPortal(, arg.container); - -type SetMultipleDragPreviewArg = { - multipleVideoDndData: MultipleVideoDndSourceData; - setDragPreviewState: (dragPreviewState: DndDragPreviewMultipleVideoState | null) => void; - onGenerateDragPreviewArgs: Param0['onGenerateDragPreview']>; -}; - -export const setMultipleVideoDragPreview = ({ - multipleVideoDndData, - onGenerateDragPreviewArgs, - setDragPreviewState, -}: SetMultipleDragPreviewArg) => { - const { nativeSetDragImage, source, location } = onGenerateDragPreviewArgs; - setCustomNativeDragPreview({ - render({ container }) { - setDragPreviewState({ type: 'multiple-video', container, video_ids: multipleVideoDndData.payload.video_ids }); - return () => setDragPreviewState(null); - }, - nativeSetDragImage, - getOffset: preserveOffsetOnSourceFallbackCentered({ - element: source.element, - input: location.current.input, - }), - }); -}; diff --git 
a/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx b/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx deleted file mode 100644 index 0fe0fcec752..00000000000 --- a/invokeai/frontend/web/src/features/dnd/DndDragPreviewSingleVideo.tsx +++ /dev/null @@ -1,69 +0,0 @@ -import type { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter'; -import { setCustomNativeDragPreview } from '@atlaskit/pragmatic-drag-and-drop/element/set-custom-native-drag-preview'; -import { chakra, Flex } from '@invoke-ai/ui-library'; -import type { SingleVideoDndSourceData } from 'features/dnd/dnd'; -import { DND_IMAGE_DRAG_PREVIEW_SIZE, preserveOffsetOnSourceFallbackCentered } from 'features/dnd/util'; -import { GalleryVideoPlaceholder } from 'features/gallery/components/ImageGrid/GalleryVideoPlaceholder'; -import { memo } from 'react'; -import { createPortal } from 'react-dom'; -import type { VideoDTO } from 'services/api/types'; -import type { Param0 } from 'tsafe'; - -const ChakraImg = chakra('img'); - -const DndDragPreviewSingleVideo = memo(({ videoDTO }: { videoDTO: VideoDTO }) => { - return ( - - - - - ); -}); - -DndDragPreviewSingleVideo.displayName = 'DndDragPreviewSingleVideo'; - -export type DndDragPreviewSingleVideoState = { - type: 'single-video'; - container: HTMLElement; - videoDTO: VideoDTO; -}; - -export const createSingleVideoDragPreview = (arg: DndDragPreviewSingleVideoState) => - createPortal(, arg.container); - -type SetSingleDragPreviewArg = { - singleVideoDndData: SingleVideoDndSourceData; - setDragPreviewState: (dragPreviewState: DndDragPreviewSingleVideoState | null) => void; - onGenerateDragPreviewArgs: Param0['onGenerateDragPreview']>; -}; - -export const setSingleVideoDragPreview = ({ - singleVideoDndData, - onGenerateDragPreviewArgs, - setDragPreviewState, -}: SetSingleDragPreviewArg) => { - const { nativeSetDragImage, source, location } = onGenerateDragPreviewArgs; - setCustomNativeDragPreview({ - render({ container }) { - setDragPreviewState({ type: 'single-video', container, videoDTO: singleVideoDndData.payload.videoDTO }); - return () => setDragPreviewState(null); - }, - nativeSetDragImage, - getOffset: preserveOffsetOnSourceFallbackCentered({ - element: source.element, - input: location.current.input, - }), - }); -}; diff --git a/invokeai/frontend/web/src/features/dnd/DndImage.tsx b/invokeai/frontend/web/src/features/dnd/DndImage.tsx index 71488500b88..2c7e4e8ad30 100644 --- a/invokeai/frontend/web/src/features/dnd/DndImage.tsx +++ b/invokeai/frontend/web/src/features/dnd/DndImage.tsx @@ -2,8 +2,6 @@ import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine'; import { draggable } from '@atlaskit/pragmatic-drag-and-drop/element/adapter'; import type { ImageProps, SystemStyleObject } from '@invoke-ai/ui-library'; import { Image } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $crossOrigin } from 'app/store/nanostores/authToken'; import { useAppStore } from 'app/store/storeHooks'; import { singleImageDndSource } from 'features/dnd/dnd'; import type { DndDragPreviewSingleImageState } from 'features/dnd/DndDragPreviewSingleImage'; @@ -31,7 +29,6 @@ type Props = { export const DndImage = memo( forwardRef(({ imageDTO, asThumbnail, ...rest }: Props, forwardedRef) => { const store = useAppStore(); - const crossOrigin = useStore($crossOrigin); const [isDragging, setIsDragging] = useState(false); const ref = useRef(null); @@ -80,7 +77,6 @@ export const DndImage = memo( height={imageDTO.height} 
sx={sx} data-is-dragging={isDragging} - crossOrigin={!asThumbnail ? crossOrigin : undefined} {...rest} /> {dragPreviewState?.type === 'single-image' ? createSingleImageDragPreview(dragPreviewState) : null} diff --git a/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx b/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx index f10b5b9d598..e5d7df68f28 100644 --- a/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx +++ b/invokeai/frontend/web/src/features/dnd/FullscreenDropzone.tsx @@ -7,12 +7,10 @@ import { Box, Flex, Heading } from '@invoke-ai/ui-library'; import { getStore } from 'app/store/nanostores/store'; import { useAppSelector } from 'app/store/storeHooks'; import { getFocusedRegion } from 'common/hooks/focus'; -import { useClientSideUpload } from 'common/hooks/useClientSideUpload'; import { setFileToPaste } from 'features/controlLayers/components/CanvasPasteModal'; import { DndDropOverlay } from 'features/dnd/DndDropOverlay'; import type { DndTargetState } from 'features/dnd/types'; import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors'; -import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice'; import { toast } from 'features/toast/toast'; import { selectActiveTab } from 'features/ui/store/uiSelectors'; import { memo, useCallback, useEffect, useRef, useState } from 'react'; @@ -68,11 +66,9 @@ export const FullscreenDropzone = memo(() => { const ref = useRef(null); const [dndState, setDndState] = useState('idle'); const activeTab = useAppSelector(selectActiveTab); - const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled); - const clientSideUpload = useClientSideUpload(); const validateAndUploadFiles = useCallback( - async (files: File[]) => { + (files: File[]) => { const { getState } = getStore(); const parseResult = z.array(zUploadFile).safeParse(files); @@ -100,23 +96,17 @@ export const FullscreenDropzone = memo(() => { const autoAddBoardId = selectAutoAddBoardId(getState()); - if (isClientSideUploadEnabled && files.length > 1) { - for (const [i, file] of files.entries()) { - await clientSideUpload(file, i); - } - } else { - const uploadArgs: UploadImageArg[] = files.map((file, i) => ({ - file, - image_category: 'user', - is_intermediate: false, - board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId, - isFirstUploadOfBatch: i === 0, - })); - - uploadImages(uploadArgs); - } + const uploadArgs: UploadImageArg[] = files.map((file, i) => ({ + file, + image_category: 'user', + is_intermediate: false, + board_id: autoAddBoardId === 'none' ? 
undefined : autoAddBoardId, + isFirstUploadOfBatch: i === 0, + })); + + uploadImages(uploadArgs); }, - [activeTab, t, isClientSideUploadEnabled, clientSideUpload] + [activeTab, t] ); const onPaste = useCallback( diff --git a/invokeai/frontend/web/src/features/dnd/dnd.ts b/invokeai/frontend/web/src/features/dnd/dnd.ts index 0aef104869f..f5e38d4b944 100644 --- a/invokeai/frontend/web/src/features/dnd/dnd.ts +++ b/invokeai/frontend/web/src/features/dnd/dnd.ts @@ -9,11 +9,9 @@ import { selectComparisonImages } from 'features/gallery/components/ImageViewer/ import type { BoardId } from 'features/gallery/store/types'; import { addImagesToBoard, - addVideosToBoard, createNewCanvasEntityFromImage, newCanvasFromImage, removeImagesFromBoard, - removeVideosFromBoard, replaceCanvasEntityObjectsWithImage, setComparisonImage, setGlobalReferenceImage, @@ -24,10 +22,7 @@ import { import { fieldImageCollectionValueChanged } from 'features/nodes/store/nodesSlice'; import { selectFieldInputInstanceSafe, selectNodesSlice } from 'features/nodes/store/selectors'; import { type FieldIdentifier, isImageFieldCollectionInputInstance } from 'features/nodes/types/field'; -import { startingFrameImageChanged } from 'features/parameters/store/videoSlice'; -import { expandPrompt } from 'features/prompt/PromptExpansion/expand'; -import { promptExpansionApi } from 'features/prompt/PromptExpansion/state'; -import type { ImageDTO, VideoDTO } from 'services/api/types'; +import type { ImageDTO } from 'services/api/types'; import type { JsonObject } from 'type-fest'; const log = logger('dnd'); @@ -74,34 +69,6 @@ type DndSource = { getData: ReturnType>; }; -//#region Single Video -const _singleVideo = buildTypeAndKey('single-video'); -export type SingleVideoDndSourceData = DndData< - typeof _singleVideo.type, - typeof _singleVideo.key, - { videoDTO: VideoDTO } ->; -export const singleVideoDndSource: DndSource = { - ..._singleVideo, - typeGuard: buildTypeGuard(_singleVideo.key), - getData: buildGetData(_singleVideo.key, _singleVideo.type), -}; -//#endregion - -//#region Multiple Image -const _multipleVideo = buildTypeAndKey('multiple-video'); -export type MultipleVideoDndSourceData = DndData< - typeof _multipleVideo.type, - typeof _multipleVideo.key, - { video_ids: string[]; board_id: BoardId } ->; -export const multipleVideoDndSource: DndSource = { - ..._multipleVideo, - typeGuard: buildTypeGuard(_multipleVideo.key), - getData: buildGetData(_multipleVideo.key, _multipleVideo.type), -}; -//#endregion - //#region Single Image const _singleImage = buildTypeAndKey('single-image'); export type SingleImageDndSourceData = DndData< @@ -475,22 +442,12 @@ export type AddImageToBoardDndTargetData = DndData< >; export const addImageToBoardDndTarget: DndTarget< AddImageToBoardDndTargetData, - SingleImageDndSourceData | MultipleImageDndSourceData | SingleVideoDndSourceData | MultipleVideoDndSourceData + SingleImageDndSourceData | MultipleImageDndSourceData > = { ..._addToBoard, typeGuard: buildTypeGuard(_addToBoard.key), getData: buildGetData(_addToBoard.key, _addToBoard.type), isValid: ({ sourceData, targetData }) => { - if (singleVideoDndSource.typeGuard(sourceData)) { - const currentBoard = sourceData.payload.videoDTO.board_id ?? 
'none'; - const destinationBoard = targetData.payload.boardId; - return currentBoard !== destinationBoard; - } - if (multipleVideoDndSource.typeGuard(sourceData)) { - const currentBoard = sourceData.payload.board_id; - const destinationBoard = targetData.payload.boardId; - return currentBoard !== destinationBoard; - } if (singleImageDndSource.typeGuard(sourceData)) { const currentBoard = sourceData.payload.imageDTO.board_id ?? 'none'; const destinationBoard = targetData.payload.boardId; @@ -504,18 +461,6 @@ export const addImageToBoardDndTarget: DndTarget< return false; }, handler: ({ sourceData, targetData, dispatch }) => { - if (singleVideoDndSource.typeGuard(sourceData)) { - const { videoDTO } = sourceData.payload; - const { boardId } = targetData.payload; - addVideosToBoard({ video_ids: [videoDTO.video_id], boardId, dispatch }); - } - - if (multipleVideoDndSource.typeGuard(sourceData)) { - const { video_ids } = sourceData.payload; - const { boardId } = targetData.payload; - addVideosToBoard({ video_ids, boardId, dispatch }); - } - if (singleImageDndSource.typeGuard(sourceData)) { const { imageDTO } = sourceData.payload; const { boardId } = targetData.payload; @@ -541,7 +486,7 @@ export type RemoveImageFromBoardDndTargetData = DndData< >; export const removeImageFromBoardDndTarget: DndTarget< RemoveImageFromBoardDndTargetData, - SingleImageDndSourceData | MultipleImageDndSourceData | SingleVideoDndSourceData | MultipleVideoDndSourceData + SingleImageDndSourceData | MultipleImageDndSourceData > = { ..._removeFromBoard, typeGuard: buildTypeGuard(_removeFromBoard.key), @@ -557,16 +502,6 @@ export const removeImageFromBoardDndTarget: DndTarget< return currentBoard !== 'none'; } - if (singleVideoDndSource.typeGuard(sourceData)) { - const currentBoard = sourceData.payload.videoDTO.board_id ?? 
'none'; - return currentBoard !== 'none'; - } - - if (multipleVideoDndSource.typeGuard(sourceData)) { - const currentBoard = sourceData.payload.board_id; - return currentBoard !== 'none'; - } - return false; }, handler: ({ sourceData, dispatch }) => { @@ -579,71 +514,9 @@ export const removeImageFromBoardDndTarget: DndTarget< const { image_names } = sourceData.payload; removeImagesFromBoard({ image_names, dispatch }); } - - if (singleVideoDndSource.typeGuard(sourceData)) { - const { videoDTO } = sourceData.payload; - removeVideosFromBoard({ video_ids: [videoDTO.video_id], dispatch }); - } - - if (multipleVideoDndSource.typeGuard(sourceData)) { - const { video_ids } = sourceData.payload; - removeVideosFromBoard({ video_ids, dispatch }); - } - }, -}; - -//#endregion - -//#region Prompt Generation From Image -const _promptGenerationFromImage = buildTypeAndKey('prompt-generation-from-image'); -type PromptGenerationFromImageDndTargetData = DndData< - typeof _promptGenerationFromImage.type, - typeof _promptGenerationFromImage.key, - void ->; -export const promptGenerationFromImageDndTarget: DndTarget< - PromptGenerationFromImageDndTargetData, - SingleImageDndSourceData -> = { - ..._promptGenerationFromImage, - typeGuard: buildTypeGuard(_promptGenerationFromImage.key), - getData: buildGetData(_promptGenerationFromImage.key, _promptGenerationFromImage.type), - isValid: ({ sourceData }) => { - if (singleImageDndSource.typeGuard(sourceData)) { - return true; - } - return false; - }, - handler: ({ sourceData, dispatch, getState }) => { - const { imageDTO } = sourceData.payload; - promptExpansionApi.setPending(imageDTO); - expandPrompt({ dispatch, getState, imageDTO }); }, }; -//#endregion -//#region Video Frame From Image -const _videoFrameFromImage = buildTypeAndKey('video-frame-from-image'); -type VideoFrameFromImageDndTargetData = DndData< - typeof _videoFrameFromImage.type, - typeof _videoFrameFromImage.key, - { frame: 'start' | 'end' } ->; -export const videoFrameFromImageDndTarget: DndTarget = { - ..._videoFrameFromImage, - typeGuard: buildTypeGuard(_videoFrameFromImage.key), - getData: buildGetData(_videoFrameFromImage.key, _videoFrameFromImage.type), - isValid: ({ sourceData }) => { - if (singleImageDndSource.typeGuard(sourceData)) { - return true; - } - return false; - }, - handler: ({ sourceData, dispatch }) => { - const { imageDTO } = sourceData.payload; - dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO))); - }, -}; //#endregion export const dndTargets = [ @@ -659,8 +532,6 @@ export const dndTargets = [ replaceCanvasEntityObjectsWithImageDndTarget, addImageToBoardDndTarget, removeImageFromBoardDndTarget, - promptGenerationFromImageDndTarget, - videoFrameFromImageDndTarget, ] as const; export type AnyDndTarget = (typeof dndTargets)[number]; diff --git a/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts b/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts index 8d2aeb30e74..24d6bea1680 100644 --- a/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts +++ b/invokeai/frontend/web/src/features/dnd/useDndMonitor.ts @@ -4,13 +4,7 @@ import { logger } from 'app/logging/logger'; import { getStore } from 'app/store/nanostores/store'; import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; import { parseify } from 'common/util/serialize'; -import { - dndTargets, - multipleImageDndSource, - multipleVideoDndSource, - singleImageDndSource, - singleVideoDndSource, -} from 'features/dnd/dnd'; +import { dndTargets, multipleImageDndSource, 
singleImageDndSource } from 'features/dnd/dnd';
 import { useEffect } from 'react';
 
 const log = logger('dnd');
@@ -25,12 +19,7 @@ export const useDndMonitor = () => {
       const sourceData = source.data;
 
       // Check for allowed sources
-      if (
-        !singleImageDndSource.typeGuard(sourceData) &&
-        !multipleImageDndSource.typeGuard(sourceData) &&
-        !singleVideoDndSource.typeGuard(sourceData) &&
-        !multipleVideoDndSource.typeGuard(sourceData)
-      ) {
+      if (!singleImageDndSource.typeGuard(sourceData) && !multipleImageDndSource.typeGuard(sourceData)) {
         return false;
       }
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
index cc363c8122c..e17743207fa 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
@@ -6,13 +6,21 @@ import {
   selectDynamicPromptsCombinatorial,
   selectDynamicPromptsMaxPrompts,
 } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
-import { selectMaxPromptsConfig } from 'features/system/store/configSlice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 
+const CONSTRAINTS = {
+  initial: 100,
+  sliderMin: 1,
+  sliderMax: 1000,
+  numberInputMin: 1,
+  numberInputMax: 10000,
+  fineStep: 1,
+  coarseStep: 10,
+};
+
 const ParamDynamicPromptsMaxPrompts = () => {
   const maxPrompts = useAppSelector(selectDynamicPromptsMaxPrompts);
-  const config = useAppSelector(selectMaxPromptsConfig);
   const combinatorial = useAppSelector(selectDynamicPromptsCombinatorial);
   const dispatch = useAppDispatch();
   const { t } = useTranslation();
@@ -30,18 +38,18 @@ const ParamDynamicPromptsMaxPrompts = () => {
       {t('dynamicPrompts.maxPrompts')}
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
index 011ca414f08..2fd6b18b06a 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/hooks/useDynamicPromptsWatcher.tsx
@@ -9,7 +9,6 @@ import {
 } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
 import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
 import { selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
 import { useEffect, useMemo } from 'react';
 import { utilitiesApi } from 'services/api/endpoints/utilities';
 
@@ -24,8 +23,6 @@ export const useDynamicPromptsWatcher = () => {
   const presetModifiedPrompts = useAppSelector(selectPresetModifiedPrompts);
   const maxPrompts = useAppSelector(selectDynamicPromptsMaxPrompts);
 
-  const dynamicPrompting = useFeatureStatus('dynamicPrompting');
-
   const debouncedUpdateDynamicPrompts = useMemo(
     () =>
       debounce(async (positivePrompt: string, maxPrompts: number) => {
@@ -55,10 +52,6 @@ export const useDynamicPromptsWatcher = () => {
   );
 
   useEffect(() => {
-    if (!dynamicPrompting) {
-      return;
-    }
-
     // Before we execute, imperatively check the dynamic prompts query cache to see if we have already fetched this prompt
 
     const state = getState();
@@ -88,5 +81,5 @@ export const useDynamicPromptsWatcher = () => {
     }
 
     debouncedUpdateDynamicPrompts(presetModifiedPrompts.positive,
maxPrompts); - }, [debouncedUpdateDynamicPrompts, dispatch, dynamicPrompting, getState, maxPrompts, presetModifiedPrompts]); + }, [debouncedUpdateDynamicPrompts, dispatch, getState, maxPrompts, presetModifiedPrompts]); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx index 721792dbd54..5cc25f6c038 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx @@ -5,7 +5,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { $boardToDelete } from 'features/gallery/components/Boards/DeleteBoardModal'; import { selectAutoAddBoardId, selectAutoAssignBoardOnClick } from 'features/gallery/store/gallerySelectors'; import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { toast } from 'features/toast/toast'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -33,7 +32,6 @@ const BoardContextMenu = ({ board, children }: Props) => { const isSelectedForAutoAdd = useAppSelector(selectIsSelectedForAutoAdd); const boardName = useBoardName(board.board_id); - const isBulkDownloadEnabled = useFeatureStatus('bulkDownload'); const [bulkDownload] = useBulkDownloadImagesMutation(); @@ -79,11 +77,10 @@ const BoardContextMenu = ({ board, children }: Props) => { {isSelectedForAutoAdd ? t('boards.selectedForAutoAdd') : t('boards.menuItemAutoAdd')} )} - {isBulkDownloadEnabled && ( - } onClickCapture={handleBulkDownload}> - {t('boards.downloadBoard')} - - )} + + } onClickCapture={handleBulkDownload}> + {t('boards.downloadBoard')} + {board.archived && ( } onClick={handleUnarchive}> @@ -109,7 +106,6 @@ const BoardContextMenu = ({ board, children }: Props) => { isSelectedForAutoAdd, handleSetAutoAdd, t, - isBulkDownloadEnabled, handleBulkDownload, board.archived, handleUnarchive, diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx index 6a9e51cb74c..9f59e60fee8 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx @@ -1,47 +1,32 @@ import { IconButton } from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { useAppDispatch } from 'app/store/storeHooks'; import { boardIdSelected, boardSearchTextChanged } from 'features/gallery/store/gallerySlice'; -import { selectAllowPrivateBoards } from 'features/system/store/configSelectors'; -import { memo, useCallback, useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiPlusBold } from 'react-icons/pi'; import { useCreateBoardMutation } from 'services/api/endpoints/boards'; -type Props = { - isPrivateBoard: boolean; -}; - -const AddBoardButton = ({ isPrivateBoard }: Props) => { +const AddBoardButton = () => { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards); const [createBoard, { isLoading }] = useCreateBoardMutation(); - const label = useMemo(() => 
{ - if (!allowPrivateBoards) { - return t('boards.addBoard'); - } - if (isPrivateBoard) { - return t('boards.addPrivateBoard'); - } - return t('boards.addSharedBoard'); - }, [allowPrivateBoards, isPrivateBoard, t]); const handleCreateBoard = useCallback(async () => { try { - const board = await createBoard({ board_name: t('boards.myBoard'), is_private: isPrivateBoard }).unwrap(); + const board = await createBoard({ board_name: t('boards.myBoard') }).unwrap(); dispatch(boardIdSelected({ boardId: board.board_id })); dispatch(boardSearchTextChanged('')); } catch { //no-op } - }, [t, createBoard, isPrivateBoard, dispatch]); + }, [t, createBoard, dispatch]); return ( } isLoading={isLoading} - tooltip={label} - aria-label={label} + tooltip={t('boards.addBoard')} + aria-label={t('boards.addBoard')} onClick={handleCreateBoard} size="md" data-testid="add-board-button" diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx index ce524bac801..8877b22612f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardTooltip.tsx @@ -1,6 +1,5 @@ import { Flex, Image, Text } from '@invoke-ai/ui-library'; import { skipToken } from '@reduxjs/toolkit/query'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { useTranslation } from 'react-i18next'; import { useGetImageDTOQuery } from 'services/api/endpoints/images'; import type { BoardDTO } from 'services/api/types'; @@ -10,13 +9,11 @@ type Props = { boardCounts: { image_count: number; asset_count: number; - video_count: number; }; }; export const BoardTooltip = ({ board, boardCounts }: Props) => { const { t } = useTranslation(); - const isVideoEnabled = useFeatureStatus('video'); const { currentData: coverImage } = useGetImageDTOQuery(board?.cover_image_name ?? 
skipToken); @@ -39,7 +36,6 @@ export const BoardTooltip = ({ board, boardCounts }: Props) => { {t('boards.imagesWithCount', { count: boardCounts.image_count })},{' '} {t('boards.assetsWithCount', { count: boardCounts.asset_count })} - {isVideoEnabled && {t('boards.videosWithCount', { count: boardCounts.video_count })}} {board?.archived && ({t('boards.archived')})} diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx index 3b48882b5c5..2d37a03f69f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx @@ -1,4 +1,4 @@ -import { Button, Collapse, Flex, Icon, Text, useDisclosure } from '@invoke-ai/ui-library'; +import { Collapse, Flex, Text, useDisclosure } from '@invoke-ai/ui-library'; import { EMPTY_ARRAY } from 'app/store/constants'; import { useAppSelector } from 'app/store/storeHooks'; import { fixTooltipCloseOnScrollStyles } from 'common/util/fixTooltipCloseOnScrollStyles'; @@ -7,50 +7,38 @@ import { selectListBoardsQueryArgs, selectSelectedBoardId, } from 'features/gallery/store/gallerySelectors'; -import { selectAllowPrivateBoards } from 'features/system/store/configSelectors'; import { memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiCaretDownBold } from 'react-icons/pi'; import { useListAllBoardsQuery } from 'services/api/endpoints/boards'; import AddBoardButton from './AddBoardButton'; import GalleryBoard from './GalleryBoard'; import NoBoardBoard from './NoBoardBoard'; -type Props = { - isPrivate: boolean; -}; - -export const BoardsList = memo(({ isPrivate }: Props) => { +export const BoardsList = memo(() => { const { t } = useTranslation(); const selectedBoardId = useAppSelector(selectSelectedBoardId); const boardSearchText = useAppSelector(selectBoardSearchText); const queryArgs = useAppSelector(selectListBoardsQueryArgs); const { data: boards } = useListAllBoardsQuery(queryArgs); - const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards); - const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); + const { isOpen } = useDisclosure({ defaultIsOpen: true }); const filteredBoards = useMemo(() => { if (!boards) { return EMPTY_ARRAY; } - return boards.filter((board) => { - if (boardSearchText.length) { - return board.is_private === isPrivate && board.board_name.toLowerCase().includes(boardSearchText.toLowerCase()); - } else { - return board.is_private === isPrivate; - } - }); - }, [boardSearchText, boards, isPrivate]); + if (boardSearchText.length) { + return boards.filter((board) => board.board_name.toLowerCase().includes(boardSearchText.toLowerCase())); + } + + return boards; + }, [boardSearchText, boards]); const boardElements = useMemo(() => { const elements = []; - if (allowPrivateBoards && isPrivate && !boardSearchText.length) { - elements.push(); - } - if (!allowPrivateBoards && !boardSearchText.length) { + if (!boardSearchText.length) { elements.push(); } @@ -61,15 +49,7 @@ export const BoardsList = memo(({ isPrivate }: Props) => { }); return elements; - }, [allowPrivateBoards, isPrivate, boardSearchText.length, filteredBoards, selectedBoardId]); - - const boardListTitle = useMemo(() => { - if (allowPrivateBoards) { - return isPrivate ? 
t('boards.private') : t('boards.shared'); - } else { - return t('boards.boards'); - } - }, [isPrivate, allowPrivateBoards, t]); + }, [boardSearchText.length, filteredBoards, selectedBoardId]); return ( @@ -84,26 +64,10 @@ export const BoardsList = memo(({ isPrivate }: Props) => { top={0} bg="base.900" > - {allowPrivateBoards ? ( - - ) : ( - - {boardListTitle} - - )} - + + {t('boards.boards')} + + diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx index 4b6c4030205..e7fa512d067 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsListWrapper.tsx @@ -2,9 +2,7 @@ import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine'; import { autoScrollForElements } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/element'; import { autoScrollForExternal } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/external'; import { Box } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants'; -import { selectAllowPrivateBoards } from 'features/system/store/configSelectors'; import type { OverlayScrollbarsComponentRef } from 'overlayscrollbars-react'; import { OverlayScrollbarsComponent } from 'overlayscrollbars-react'; import type { CSSProperties } from 'react'; @@ -18,7 +16,6 @@ const overlayScrollbarsStyles: CSSProperties = { }; export const BoardsListWrapper = memo(() => { - const allowPrivateBoards = useAppSelector(selectAllowPrivateBoards); const [os, osRef] = useState(null); useEffect(() => { const osInstance = os?.osInstance(); @@ -48,8 +45,7 @@ export const BoardsListWrapper = memo(() => { style={overlayScrollbarsStyles} options={overlayScrollbarsParams.options} > - {allowPrivateBoards && } - + diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx index 772606ec86f..1ddc4b0db36 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx @@ -15,7 +15,6 @@ import { selectSelectedBoardId, } from 'features/gallery/store/gallerySelectors'; import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiArchiveBold, PiImageSquare } from 'react-icons/pi'; @@ -37,7 +36,6 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => { const autoAddBoardId = useAppSelector(selectAutoAddBoardId); const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick); const selectedBoardId = useAppSelector(selectSelectedBoardId); - const isVideoEnabled = useFeatureStatus('video'); const onClick = useCallback(() => { if (selectedBoardId !== board.board_id) { dispatch(boardIdSelected({ boardId: board.board_id })); @@ -56,7 +54,6 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => { () => ({ image_count: board.image_count, asset_count: board.asset_count, - video_count: board.video_count, 
}), [board] ); @@ -95,8 +92,7 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => { {board.archived && } - {board.image_count} | {isVideoEnabled && `${board.video_count} | `} - {board.asset_count} + {board.image_count} | {board.asset_count} diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx index 22ecb71ae8e..900799f8ed7 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx @@ -13,14 +13,9 @@ import { selectBoardSearchText, } from 'features/gallery/store/gallerySelectors'; import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { - useGetBoardAssetsTotalQuery, - useGetBoardImagesTotalQuery, - useGetBoardVideosTotalQuery, -} from 'services/api/endpoints/boards'; +import { useGetBoardAssetsTotalQuery, useGetBoardImagesTotalQuery } from 'services/api/endpoints/boards'; import { useBoardName } from 'services/api/hooks/useBoardName'; interface Props { @@ -33,7 +28,6 @@ const _hover: SystemStyleObject = { const NoBoardBoard = memo(({ isSelected }: Props) => { const dispatch = useAppDispatch(); - const isVideoEnabled = useFeatureStatus('video'); const { imagesTotal } = useGetBoardImagesTotalQuery('none', { selectFromResult: ({ data }) => { return { imagesTotal: data?.total ?? 0 }; @@ -44,12 +38,6 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { return { assetsTotal: data?.total ?? 0 }; }, }); - const { videoTotal } = useGetBoardVideosTotalQuery('none', { - skip: !isVideoEnabled, - selectFromResult: ({ data }) => { - return { videoTotal: data?.total ?? 
0 }; - }, - }); const autoAddBoardId = useAppSelector(selectAutoAddBoardId); const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick); const boardSearchText = useAppSelector(selectBoardSearchText); @@ -74,12 +62,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { {(ref) => ( - } + label={} openDelay={1000} placement="right" closeOnScroll @@ -120,8 +103,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { {autoAddBoardId === 'none' && } - {imagesTotal} | {isVideoEnabled && `${videoTotal} | `} - {assetsTotal} + {imagesTotal} | {assetsTotal} diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx index d4c71580836..b77cfb05ea0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardBoardContextMenu.tsx @@ -4,7 +4,6 @@ import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { selectAutoAddBoardId, selectAutoAssignBoardOnClick } from 'features/gallery/store/gallerySelectors'; import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi'; @@ -23,7 +22,6 @@ const NoBoardBoardContextMenu = ({ children }: Props) => { const dispatch = useAppDispatch(); const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick); const isSelectedForAutoAdd = useAppSelector(selectIsSelectedForAutoAdd); - const isBulkDownloadEnabled = useFeatureStatus('bulkDownload'); const [bulkDownload] = useBulkDownloadImagesMutation(); @@ -48,11 +46,9 @@ const NoBoardBoardContextMenu = ({ children }: Props) => { {isSelectedForAutoAdd ? 
t('boards.selectedForAutoAdd') : t('boards.menuItemAutoAdd')} )} - {isBulkDownloadEnabled && ( - } onClickCapture={handleBulkDownload}> - {t('boards.downloadBoard')} - - )} + } onClickCapture={handleBulkDownload}> + {t('boards.downloadBoard')} + } @@ -68,7 +64,6 @@ const NoBoardBoardContextMenu = ({ children }: Props) => { autoAssignBoardOnClick, handleBulkDownload, handleSetAutoAdd, - isBulkDownloadEnabled, isSelectedForAutoAdd, t, setUncategorizedImagesAsToBeDeleted, diff --git a/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx b/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx index f414df82f1e..fa93c597f3e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/BoardsListPanelContent.tsx @@ -5,7 +5,6 @@ import { useDisclosure } from 'common/hooks/useBoolean'; import { BoardsListWrapper } from 'features/gallery/components/Boards/BoardsList/BoardsListWrapper'; import { BoardsSearch } from 'features/gallery/components/Boards/BoardsList/BoardsSearch'; import { BoardsSettingsPopover } from 'features/gallery/components/Boards/BoardsSettingsPopover'; -import { GalleryHeader } from 'features/gallery/components/GalleryHeader'; import { selectBoardSearchText } from 'features/gallery/store/gallerySelectors'; import { boardSearchTextChanged } from 'features/gallery/store/gallerySlice'; import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context'; @@ -62,9 +61,6 @@ export const BoardsPanel = memo(() => { {t('boards.boards')} - - - { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - dispatch(imagesToChangeSelected([itemDTO.image_name])); - } else { - dispatch(videosToChangeSelected([itemDTO.video_id])); - } + dispatch(imagesToChangeSelected([imageDTO.image_name])); dispatch(isModalOpenChanged(true)); - }, [dispatch, itemDTO]); + }, [dispatch, imageDTO]); return ( } onClickCapture={onClick}> diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx index 35608d6ddee..1df70a4a429 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx @@ -1,23 +1,18 @@ import { IconMenuItem } from 'common/components/IconMenuItem'; import { useCopyImageToClipboard } from 'common/hooks/useCopyImageToClipboard'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiCopyBold } from 'react-icons/pi'; -import { isImageDTO } from 'services/api/types'; export const ContextMenuItemCopy = memo(() => { const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const copyImageToClipboard = useCopyImageToClipboard(); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - copyImageToClipboard(itemDTO.image_url); - } else { - // copyVideoToClipboard(itemDTO.video_url); - } - }, [copyImageToClipboard, itemDTO]); + 
copyImageToClipboard(imageDTO.image_url); + }, [copyImageToClipboard, imageDTO]); return ( { const { t } = useTranslation(); const deleteImageModal = useDeleteImageModalApi(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const onClick = useCallback(async () => { try { - if (isImageDTO(itemDTO)) { - await deleteImageModal.delete([itemDTO.image_name]); - } + await deleteImageModal.delete([imageDTO.image_name]); } catch { // noop; } - }, [deleteImageModal, itemDTO]); + }, [deleteImageModal, imageDTO]); return ( { - const { t } = useTranslation(); - const deleteVideoModal = useDeleteVideoModalApi(); - const itemDTO = useItemDTOContext(); - - const onClick = useCallback(async () => { - try { - if (isVideoDTO(itemDTO)) { - await deleteVideoModal.delete([itemDTO.video_id]); - } - } catch { - // noop; - } - }, [deleteVideoModal, itemDTO]); - - return ( - } - onClickCapture={onClick} - aria-label={t('gallery.deleteVideo', { count: 1 })} - tooltip={t('gallery.deleteVideo', { count: 1 })} - isDestructive - /> - ); -}); - -ContextMenuItemDeleteVideo.displayName = 'ContextMenuItemDeleteVideo'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx index b53ba238910..723d806f484 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload.tsx @@ -1,23 +1,18 @@ import { IconMenuItem } from 'common/components/IconMenuItem'; import { useDownloadItem } from 'common/hooks/useDownloadImage'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiDownloadSimpleBold } from 'react-icons/pi'; -import { isImageDTO } from 'services/api/types'; export const ContextMenuItemDownload = memo(() => { const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const { downloadItem } = useDownloadItem(); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - downloadItem(itemDTO.image_url, itemDTO.image_name); - } else { - downloadItem(itemDTO.video_url, itemDTO.video_id); - } - }, [downloadItem, itemDTO]); + downloadItem(imageDTO.image_url, imageDTO.image_name); + }, [downloadItem, imageDTO]); return ( { const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const loadWorkflowWithDialog = useLoadWorkflowWithDialog(); const hasTemplates = useStore($hasTemplates); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - loadWorkflowWithDialog({ type: 'image', data: itemDTO.image_name }); - } else { - // loadWorkflowWithDialog({ type: 'video', data: itemDTO.video_id }); - } - }, [loadWorkflowWithDialog, itemDTO]); + loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name }); + }, [loadWorkflowWithDialog, imageDTO]); const isDisabled = useMemo(() => { - if (isImageDTO(itemDTO)) { - return !itemDTO.has_workflow || !hasTemplates; - } - return false; - }, [itemDTO, hasTemplates]); + return !imageDTO.has_workflow || !hasTemplates; + }, [imageDTO, hasTemplates]); return ( } onClickCapture={onClick} isDisabled={isDisabled}> diff 
--git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx index 8055e592cfe..b5342d50f3b 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx @@ -1,6 +1,6 @@ import { MenuItem } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { boardIdSelected } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { navigationApi } from 'features/ui/layouts/navigation-api'; @@ -10,48 +10,33 @@ import { memo, useCallback, useMemo } from 'react'; import { flushSync } from 'react-dom'; import { useTranslation } from 'react-i18next'; import { PiCrosshairBold } from 'react-icons/pi'; -import { isImageDTO } from 'services/api/types'; export const ContextMenuItemLocateInGalery = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const activeTab = useAppSelector(selectActiveTab); const galleryPanel = useGalleryPanel(activeTab); const isGalleryImage = useMemo(() => { - return !itemDTO.is_intermediate; - }, [itemDTO]); + return !imageDTO.is_intermediate; + }, [imageDTO]); const onClick = useCallback(() => { navigationApi.expandRightPanel(); galleryPanel.expand(); - if (isImageDTO(itemDTO)) { flushSync(() => { dispatch( boardIdSelected({ - boardId: itemDTO.board_id ?? 'none', + boardId: imageDTO.board_id ?? 'none', select: { - selection: [{ type: 'image', id: itemDTO.image_name }], - galleryView: IMAGE_CATEGORIES.includes(itemDTO.image_category) ? 'images' : 'assets', + selection: [{ type: 'image', id: imageDTO.image_name }], + galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 'images' : 'assets', }, }) ); }); - } else { - flushSync(() => { - dispatch( - boardIdSelected({ - boardId: itemDTO.board_id ?? 
'none', - select: { - selection: [{ type: 'video', id: itemDTO.video_id }], - galleryView: 'videos', - }, - }) - ); - }); - } - }, [dispatch, galleryPanel, itemDTO]); + }, [dispatch, galleryPanel, imageDTO]); return ( } onClickCapture={onClick} isDisabled={!isGalleryImage}> diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx index b4c30ce3dc7..1965b2d698f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx @@ -1,6 +1,6 @@ import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library'; import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { useRecallAll } from 'features/gallery/hooks/useRecallAllImageMetadata'; import { useRecallCLIPSkip } from 'features/gallery/hooks/useRecallCLIPSkip'; import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions'; @@ -17,21 +17,19 @@ import { PiQuotesBold, PiRulerBold, } from 'react-icons/pi'; -import type { ImageDTO } from 'services/api/types'; export const ContextMenuItemMetadataRecallActionsCanvasGenerateTabs = memo(() => { const { t } = useTranslation(); const subMenu = useSubMenu(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); - // TODO: Implement video recall metadata actions - const recallAll = useRecallAll(itemDTO as ImageDTO); - const recallRemix = useRecallRemix(itemDTO as ImageDTO); - const recallPrompts = useRecallPrompts(itemDTO as ImageDTO); - const recallSeed = useRecallSeed(itemDTO as ImageDTO); - const recallDimensions = useRecallDimensions(itemDTO as ImageDTO); - const recallCLIPSkip = useRecallCLIPSkip(itemDTO as ImageDTO); + const recallAll = useRecallAll(imageDTO ); + const recallRemix = useRecallRemix(imageDTO ); + const recallPrompts = useRecallPrompts(imageDTO ); + const recallSeed = useRecallSeed(imageDTO ); + const recallDimensions = useRecallDimensions(imageDTO ); + const recallCLIPSkip = useRecallCLIPSkip(imageDTO ); return ( }> diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx index d3511e29e0b..d4aa0a4296b 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsUpscaleTab.tsx @@ -1,22 +1,20 @@ import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library'; import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts'; import { useRecallSeed } 
from 'features/gallery/hooks/useRecallSeed'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiArrowBendUpLeftBold, PiPlantBold, PiQuotesBold } from 'react-icons/pi'; -import type { ImageDTO } from 'services/api/types'; export const ContextMenuItemMetadataRecallActionsUpscaleTab = memo(() => { const { t } = useTranslation(); const subMenu = useSubMenu(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); - // TODO: Implement video recall metadata actions - const recallPrompts = useRecallPrompts(itemDTO as ImageDTO); - const recallSeed = useRecallSeed(itemDTO as ImageDTO); + const recallPrompts = useRecallPrompts(imageDTO); + const recallSeed = useRecallSeed(imageDTO); return ( }> diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx index b40525ae474..4aa8f9bb3e4 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewCanvasFromImageSubMenu.tsx @@ -3,7 +3,7 @@ import { useAppStore } from 'app/store/storeHooks'; import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu'; import { useCanvasIsBusySafe } from 'features/controlLayers/hooks/useCanvasIsBusy'; import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice'; -import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { newCanvasFromImage } from 'features/imageActions/actions'; import { toast } from 'features/toast/toast'; import { navigationApi } from 'features/ui/layouts/navigation-api'; @@ -16,7 +16,7 @@ export const ContextMenuItemNewCanvasFromImageSubMenu = memo(() => { const { t } = useTranslation(); const subMenu = useSubMenu(); const store = useAppStore(); - const imageDTO = useItemDTOContextImageOnly(); + const imageDTO = useImageDTOContext(); const isBusy = useCanvasIsBusySafe(); const isStaging = useCanvasIsStaging(); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx index 710a381d937..0bc680c7fee 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemNewLayerFromImageSubMenu.tsx @@ -3,8 +3,7 @@ import { useAppStore } from 'app/store/storeHooks'; import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu'; import { NewLayerIcon } from 'features/controlLayers/components/common/icons'; import { useCanvasIsBusySafe } from 'features/controlLayers/hooks/useCanvasIsBusy'; -import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext'; -import { sentImageToCanvas } from 'features/gallery/store/actions'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { createNewCanvasEntityFromImage } from 'features/imageActions/actions'; import { toast } from 'features/toast/toast'; import { 
navigationApi } from 'features/ui/layouts/navigation-api'; @@ -17,14 +16,13 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => { const { t } = useTranslation(); const subMenu = useSubMenu(); const store = useAppStore(); - const imageDTO = useItemDTOContextImageOnly(); + const imageDTO = useImageDTOContext(); const isBusy = useCanvasIsBusySafe(); const onClickNewRasterLayerFromImage = useCallback(async () => { const { dispatch, getState } = store; await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); createNewCanvasEntityFromImage({ imageDTO, type: 'raster_layer', dispatch, getState }); - dispatch(sentImageToCanvas()); toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), @@ -36,7 +34,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => { const { dispatch, getState } = store; await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); createNewCanvasEntityFromImage({ imageDTO, type: 'control_layer', dispatch, getState }); - dispatch(sentImageToCanvas()); toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), @@ -48,7 +45,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => { const { dispatch, getState } = store; await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); createNewCanvasEntityFromImage({ imageDTO, type: 'inpaint_mask', dispatch, getState }); - dispatch(sentImageToCanvas()); toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), @@ -60,7 +56,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => { const { dispatch, getState } = store; await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); createNewCanvasEntityFromImage({ imageDTO, type: 'regional_guidance', dispatch, getState }); - dispatch(sentImageToCanvas()); toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), @@ -72,7 +67,6 @@ export const ContextMenuItemNewLayerFromImageSubMenu = memo(() => { const { dispatch, getState } = store; await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID); createNewCanvasEntityFromImage({ imageDTO, type: 'regional_guidance_with_reference_image', dispatch, getState }); - dispatch(sentImageToCanvas()); toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx index e2b9f42363b..80e99fbfb7a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab.tsx @@ -1,24 +1,15 @@ -import { useAppDispatch } from 'app/store/storeHooks'; import { IconMenuItem } from 'common/components/IconMenuItem'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; -import { imageOpenedInNewTab } from 'features/gallery/store/actions'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiArrowSquareOutBold } from 'react-icons/pi'; -import { isImageDTO } from 'services/api/types'; export const ContextMenuItemOpenInNewTab = memo(() => { const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); - const dispatch = useAppDispatch(); + const imageDTO = useImageDTOContext(); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - 
window.open(itemDTO.image_url, '_blank'); - dispatch(imageOpenedInNewTab()); - } else { - window.open(itemDTO.video_url, '_blank'); - } - }, [itemDTO, dispatch]); + window.open(imageDTO.image_url, '_blank'); + }, [imageDTO]); return ( { const dispatch = useAppDispatch(); const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - dispatch(imageToCompareChanged(null)); - dispatch(itemSelected({ type: 'image', id: itemDTO.image_name })); - navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID); - } else { - // TODO: Implement video open in viewer - } - }, [dispatch, itemDTO]); + dispatch(imageToCompareChanged(null)); + dispatch(itemSelected({ type: 'image', id: imageDTO.image_name })); + navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID); + }, [dispatch, imageDTO]); return ( { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const itemDTO = useItemDTOContext(); + const imageDTO = useImageDTOContext(); const selectMaySelectForCompare = useMemo( () => createSelector(selectGallerySlice, (gallery) => { - if (isImageDTO(itemDTO)) { - return gallery.imageToCompare !== itemDTO.image_name; - } - return false; + return gallery.imageToCompare !== imageDTO.image_name; }), - [itemDTO] + [imageDTO] ); const maySelectForCompare = useAppSelector(selectMaySelectForCompare); const onClick = useCallback(() => { - if (isImageDTO(itemDTO)) { - dispatch(imageToCompareChanged(itemDTO.image_name)); - } else { - // TODO: Implement video select for compare - } - }, [dispatch, itemDTO]); + dispatch(imageToCompareChanged(imageDTO.image_name)); + }, [dispatch, imageDTO]); return ( { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const imageDTO = useItemDTOContextImageOnly(); + const imageDTO = useImageDTOContext(); const handleSendToCanvas = useCallback(() => { dispatch(upscaleInitialImageChanged(imageDTOToImageWithDims(imageDTO))); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx deleted file mode 100644 index b59f0addd7d..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import { MenuItem } from '@invoke-ai/ui-library'; -import { imageDTOToCroppableImage } from 'features/controlLayers/store/util'; -import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext'; -import { startingFrameImageChanged } from 'features/parameters/store/videoSlice'; -import { navigationApi } from 'features/ui/layouts/navigation-api'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiVideoBold } from 'react-icons/pi'; -import { useDispatch } from 'react-redux'; - -export const ContextMenuItemSendToVideo = memo(() => { - const { t } = useTranslation(); - const imageDTO = useItemDTOContextImageOnly(); - const dispatch = useDispatch(); - - const onClick = useCallback(() => { - dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO))); - navigationApi.switchToTab('video'); - }, [imageDTO, dispatch]); - - return ( - } onClickCapture={onClick} aria-label={t('parameters.sendToVideo')}> - {t('parameters.sendToVideo')} - - ); -}); - -ContextMenuItemSendToVideo.displayName = 'ContextMenuItemSendToVideo'; diff --git 
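The hunks above all converge on one shape: each single-selection menu item reads the image directly from ImageDTOContext and acts on it, with no image/video branching left. A minimal sketch of that shape, illustrative only; the component name and translation key are placeholders, and the MenuItem props follow the usage visible elsewhere in this diff:

import { MenuItem } from '@invoke-ai/ui-library';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowSquareOutBold } from 'react-icons/pi';

// Placeholder component, not part of the patch: the post-cleanup menu-item shape.
export const ContextMenuItemExample = memo(() => {
  const { t } = useTranslation();
  const imageDTO = useImageDTOContext();

  const onClick = useCallback(() => {
    // Act on the image from context; no isImageDTO/isVideoDTO branching remains.
    window.open(imageDTO.image_url, '_blank');
  }, [imageDTO]);

  return (
    <MenuItem icon={<PiArrowSquareOutBold />} onClickCapture={onClick}>
      {t('common.openInNewTab') /* placeholder translation key */}
    </MenuItem>
  );
});
ContextMenuItemExample.displayName = 'ContextMenuItemExample';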
a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx index 4828fb5e9ce..b6e465db463 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar.tsx @@ -1,50 +1,35 @@ import { MenuItem } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $customStarUI } from 'app/store/nanostores/customStarUI'; -import { useItemDTOContext } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiStarBold, PiStarFill } from 'react-icons/pi'; import { useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images'; -import { useStarVideosMutation, useUnstarVideosMutation } from 'services/api/endpoints/videos'; -import { isImageDTO, isVideoDTO } from 'services/api/types'; export const ContextMenuItemStarUnstar = memo(() => { const { t } = useTranslation(); - const itemDTO = useItemDTOContext(); - const customStarUi = useStore($customStarUI); + const imageDTO = useImageDTOContext(); const [starImages] = useStarImagesMutation(); const [unstarImages] = useUnstarImagesMutation(); - const [starVideos] = useStarVideosMutation(); - const [unstarVideos] = useUnstarVideosMutation(); const starImage = useCallback(() => { - if (isImageDTO(itemDTO)) { - starImages({ image_names: [itemDTO.image_name] }); - } else if (isVideoDTO(itemDTO)) { - starVideos({ video_ids: [itemDTO.video_id] }); - } - }, [starImages, itemDTO, starVideos]); + starImages({ image_names: [imageDTO.image_name] }); + }, [starImages, imageDTO]); const unstarImage = useCallback(() => { - if (isImageDTO(itemDTO)) { - unstarImages({ image_names: [itemDTO.image_name] }); - } else if (isVideoDTO(itemDTO)) { - unstarVideos({ video_ids: [itemDTO.video_id] }); - } - }, [unstarImages, itemDTO, unstarVideos]); + unstarImages({ image_names: [imageDTO.image_name] }); + }, [unstarImages, imageDTO]); - if (itemDTO.starred) { + if (imageDTO.starred) { return ( - } onClickCapture={unstarImage}> - {customStarUi ? customStarUi.off.text : t('gallery.unstarImage')} + } onClickCapture={unstarImage}> + {t('gallery.unstarImage')} ); } return ( - } onClickCapture={starImage}> - {customStarUi ? 
customStarUi.on.text : t('gallery.starImage')} + } onClickCapture={starImage}> + {t('gallery.starImage')} ); }); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx index ae526243fe5..188be4c307d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate.tsx @@ -1,5 +1,5 @@ import { MenuItem } from '@invoke-ai/ui-library'; -import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { useCreateStylePresetFromMetadata } from 'features/gallery/hooks/useCreateStylePresetFromMetadata'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -7,7 +7,7 @@ import { PiPaintBrushBold } from 'react-icons/pi'; export const ContextMenuItemUseAsPromptTemplate = memo(() => { const { t } = useTranslation(); - const imageDTO = useItemDTOContextImageOnly(); + const imageDTO = useImageDTOContext(); const stylePreset = useCreateStylePresetFromMetadata(imageDTO); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx index 41505ae81d5..918d2850342 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage.tsx @@ -3,7 +3,7 @@ import { useAppStore } from 'app/store/storeHooks'; import { getDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks'; import { refImageAdded } from 'features/controlLayers/store/refImagesSlice'; import { imageDTOToCroppableImage } from 'features/controlLayers/store/util'; -import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext'; +import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { toast } from 'features/toast/toast'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -12,7 +12,7 @@ import { PiImageBold } from 'react-icons/pi'; export const ContextMenuItemUseAsRefImage = memo(() => { const { t } = useTranslation(); const store = useAppStore(); - const imageDTO = useItemDTOContextImageOnly(); + const imageDTO = useImageDTOContext(); const onClickNewGlobalReferenceImageFromImage = useCallback(() => { const { dispatch, getState } = store; diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx deleted file mode 100644 index 12cbb22f9c9..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration.tsx +++ /dev/null @@ -1,46 +0,0 @@ -import { MenuItem } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { useAppSelector, useAppStore } from 'app/store/storeHooks'; -import { useItemDTOContextImageOnly } from 
'features/gallery/contexts/ItemDTOContext'; -import { expandPrompt } from 'features/prompt/PromptExpansion/expand'; -import { promptExpansionApi } from 'features/prompt/PromptExpansion/state'; -import { selectAllowPromptExpansion } from 'features/system/store/configSlice'; -import { toast } from 'features/toast/toast'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiTextTBold } from 'react-icons/pi'; - -export const ContextMenuItemUseForPromptGeneration = memo(() => { - const { t } = useTranslation(); - const { dispatch, getState } = useAppStore(); - const imageDTO = useItemDTOContextImageOnly(); - const { isPending } = useStore(promptExpansionApi.$state); - const isPromptExpansionEnabled = useAppSelector(selectAllowPromptExpansion); - - const handleUseForPromptGeneration = useCallback(() => { - promptExpansionApi.setPending(imageDTO); - expandPrompt({ dispatch, getState, imageDTO }); - toast({ - id: 'PROMPT_GENERATION_STARTED', - title: t('toast.promptGenerationStarted'), - status: 'info', - }); - }, [dispatch, getState, imageDTO, t]); - - if (!isPromptExpansionEnabled) { - return null; - } - - return ( - } - onClickCapture={handleUseForPromptGeneration} - id="use-for-prompt-generation" - isDisabled={isPending} - > - {t('gallery.useForPromptGeneration')} - - ); -}); - -ContextMenuItemUseForPromptGeneration.displayName = 'ContextMenuItemUseForPromptGeneration'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx index ee21261cb31..0e086ad5e4e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx @@ -1,10 +1,7 @@ import { MenuDivider, MenuItem } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $customStarUI } from 'app/store/nanostores/customStarUI'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice'; import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiDownloadSimpleBold, PiFoldersBold, PiStarBold, PiStarFill, PiTrashSimpleBold } from 'react-icons/pi'; @@ -18,11 +15,8 @@ const MultipleSelectionMenuItems = () => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const selection = useAppSelector((s) => s.gallery.selection); - const customStarUi = useStore($customStarUI); const deleteImageModal = useDeleteImageModalApi(); - const isBulkDownloadEnabled = useFeatureStatus('bulkDownload'); - const [starImages] = useStarImagesMutation(); const [unstarImages] = useUnstarImagesMutation(); const [bulkDownload] = useBulkDownloadImagesMutation(); @@ -50,17 +44,15 @@ const MultipleSelectionMenuItems = () => { return ( <> - } onClickCapture={handleUnstarSelection}> - {customStarUi ? customStarUi.off.text : `Unstar All`} + } onClickCapture={handleUnstarSelection}> + Unstar All + + } onClickCapture={handleStarSelection}> + Star All - } onClickCapture={handleStarSelection}> - {customStarUi ? 
customStarUi.on.text : `Star All`} + } onClickCapture={handleBulkDownload}> + {t('gallery.downloadSelection')} - {isBulkDownloadEnabled && ( - } onClickCapture={handleBulkDownload}> - {t('gallery.downloadSelection')} - - )} } onClickCapture={handleChangeBoard}> {t('boards.changeBoard')} diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx deleted file mode 100644 index 47edf37d3ff..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems.tsx +++ /dev/null @@ -1,58 +0,0 @@ -import { MenuDivider, MenuItem } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $customStarUI } from 'app/store/nanostores/customStarUI'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { isModalOpenChanged, videosToChangeSelected } from 'features/changeBoardModal/store/slice'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiFoldersBold, PiStarBold, PiStarFill, PiTrashSimpleBold } from 'react-icons/pi'; -import { useDeleteVideosMutation, useStarVideosMutation, useUnstarVideosMutation } from 'services/api/endpoints/videos'; - -const MultipleSelectionMenuItems = () => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const selection = useAppSelector((s) => s.gallery.selection); - const customStarUi = useStore($customStarUI); - - const [starVideos] = useStarVideosMutation(); - const [unstarVideos] = useUnstarVideosMutation(); - const [deleteVideos] = useDeleteVideosMutation(); - - const handleChangeBoard = useCallback(() => { - dispatch(videosToChangeSelected(selection.map((s) => s.id))); - dispatch(isModalOpenChanged(true)); - }, [dispatch, selection]); - - const handleDeleteSelection = useCallback(() => { - // TODO: Add confirm on delete and video usage functionality - deleteVideos({ video_ids: selection.map((s) => s.id) }); - }, [deleteVideos, selection]); - - const handleStarSelection = useCallback(() => { - starVideos({ video_ids: selection.map((s) => s.id) }); - }, [starVideos, selection]); - - const handleUnstarSelection = useCallback(() => { - unstarVideos({ video_ids: selection.map((s) => s.id) }); - }, [unstarVideos, selection]); - - return ( - <> - } onClickCapture={handleUnstarSelection}> - {customStarUi ? customStarUi.off.text : `Unstar All`} - - } onClickCapture={handleStarSelection}> - {customStarUi ? 
customStarUi.on.text : `Star All`} - - } onClickCapture={handleChangeBoard}> - {t('boards.changeBoard')} - - - } onClickCapture={handleDeleteSelection}> - {t('gallery.deleteSelection')} - - - ); -}; - -export default memo(MultipleSelectionMenuItems); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx index 3556e1a9d48..9d64395a8c5 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionMenuItems.tsx @@ -13,13 +13,10 @@ import { ContextMenuItemOpenInNewTab } from 'features/gallery/components/Context import { ContextMenuItemOpenInViewer } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer'; import { ContextMenuItemSelectForCompare } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSelectForCompare'; import { ContextMenuItemSendToUpscale } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToUpscale'; -import { ContextMenuItemSendToVideo } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemSendToVideo'; import { ContextMenuItemStarUnstar } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemStarUnstar'; import { ContextMenuItemUseAsPromptTemplate } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsPromptTemplate'; import { ContextMenuItemUseAsRefImage } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseAsRefImage'; -import { ContextMenuItemUseForPromptGeneration } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemUseForPromptGeneration'; -import { ItemDTOContextProvider } from 'features/gallery/contexts/ItemDTOContext'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; +import { ImageDTOContextProvider } from 'features/gallery/contexts/ImageDTOContext'; import { selectActiveTab } from 'features/ui/store/uiSelectors'; import type { ImageDTO } from 'services/api/types'; @@ -32,10 +29,9 @@ type SingleSelectionMenuItemsProps = { const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) => { const tab = useAppSelector(selectActiveTab); - const isVideoEnabled = useFeatureStatus('video'); return ( - + @@ -50,8 +46,6 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) = {tab === 'upscaling' && } - {isVideoEnabled && } - {(tab === 'canvas' || tab === 'generate') && } @@ -64,7 +58,7 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) = // Only render this button on tabs with a gallery. 
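For reference, the simplified multi-selection handlers above reduce to direct calls against the images endpoints. A sketch of that callback shape, assuming that selection entries carry the image name in `id`, as the deleted video variant does with video ids; the helper name is hypothetical:

import { useAppSelector } from 'app/store/storeHooks';
import { useCallback } from 'react';
import { useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images';

// Hypothetical helper, not part of the patch: star/unstar the current selection.
const useStarSelectionHandlers = () => {
  const selection = useAppSelector((s) => s.gallery.selection);
  const [starImages] = useStarImagesMutation();
  const [unstarImages] = useUnstarImagesMutation();

  const starSelection = useCallback(() => {
    // The images endpoints accept plain lists of image names.
    starImages({ image_names: selection.map((s) => s.id) });
  }, [starImages, selection]);

  const unstarSelection = useCallback(() => {
    unstarImages({ image_names: selection.map((s) => s.id) });
  }, [unstarImages, selection]);

  return { starSelection, unstarSelection };
};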
)} - + ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx deleted file mode 100644 index d91fe886560..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems.tsx +++ /dev/null @@ -1,33 +0,0 @@ -import { MenuDivider } from '@invoke-ai/ui-library'; -import { IconMenuItemGroup } from 'common/components/IconMenuItem'; -import { ContextMenuItemChangeBoard } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard'; -import { ContextMenuItemDownload } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDownload'; -import { ContextMenuItemOpenInNewTab } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInNewTab'; -import { ContextMenuItemOpenInViewer } from 'features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer'; -import { ItemDTOContextProvider } from 'features/gallery/contexts/ItemDTOContext'; -import type { VideoDTO } from 'services/api/types'; - -import { ContextMenuItemDeleteVideo } from './MenuItems/ContextMenuItemDeleteVideo'; -import { ContextMenuItemStarUnstar } from './MenuItems/ContextMenuItemStarUnstar'; - -type SingleSelectionVideoMenuItemsProps = { - videoDTO: VideoDTO; -}; - -const SingleSelectionVideoMenuItems = ({ videoDTO }: SingleSelectionVideoMenuItemsProps) => { - return ( - - - - - - - - - - - - ); -}; - -export default SingleSelectionVideoMenuItems; diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx deleted file mode 100644 index 533a3d38b9a..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/VideoContextMenu.tsx +++ /dev/null @@ -1,279 +0,0 @@ -import type { ChakraProps } from '@invoke-ai/ui-library'; -import { Menu, MenuButton, MenuList, Portal, useGlobalMenuClose } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { useAppSelector } from 'app/store/storeHooks'; -import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; -import MultipleSelectionVideoMenuItems from 'features/gallery/components/ContextMenu/MultipleSelectionVideoMenuItems'; -import SingleSelectionVideoMenuItems from 'features/gallery/components/ContextMenu/SingleSelectionVideoMenuItems'; -import { selectSelectionCount } from 'features/gallery/store/gallerySelectors'; -import { map } from 'nanostores'; -import type { RefObject } from 'react'; -import { memo, useCallback, useEffect, useRef } from 'react'; -import type { VideoDTO } from 'services/api/types'; - -/** - * The delay in milliseconds before the context menu opens on long press. - */ -const LONGPRESS_DELAY_MS = 500; -/** - * The threshold in pixels that the pointer must move before the long press is cancelled. - */ -const LONGPRESS_MOVE_THRESHOLD_PX = 10; - -/** - * The singleton state of the context menu. - */ -const $videoContextMenuState = map<{ - isOpen: boolean; - videoDTO: VideoDTO | null; - position: { x: number; y: number }; -}>({ - isOpen: false, - videoDTO: null, - position: { x: -1, y: -1 }, -}); - -/** - * Convenience function to close the context menu. - */ -const onClose = () => { - $videoContextMenuState.setKey('isOpen', false); -}; - -/** - * Map of elements to image DTOs. 
This is used to determine which image DTO to show the context menu for, depending on - * the target of the context menu or long press event. - */ -const elToVideoMap = new Map(); - -/** - * Given a target node, find the first registered parent element that contains the target node and return the imageDTO - * associated with it. - */ -const getVideoDTOFromMap = (target: Node): VideoDTO | undefined => { - const entry = Array.from(elToVideoMap.entries()).find((entry) => entry[0].contains(target)); - return entry?.[1]; -}; - -/** - * Register a context menu for an image DTO on a target element. - * @param imageDTO The image DTO to register the context menu for. - * @param targetRef The ref of the target element that should trigger the context menu. - */ -export const useVideoContextMenu = (videoDTO: VideoDTO, ref: RefObject | (HTMLElement | null)) => { - useEffect(() => { - if (ref === null) { - return; - } - const el = ref instanceof HTMLElement ? ref : ref.current; - if (!el) { - return; - } - elToVideoMap.set(el, videoDTO); - return () => { - elToVideoMap.delete(el); - }; - }, [videoDTO, ref]); -}; - -/** - * Singleton component that renders the context menu for images. - */ -export const VideoContextMenu = memo(() => { - useAssertSingleton('VideoContextMenu'); - const state = useStore($videoContextMenuState); - useGlobalMenuClose(onClose); - - return ( - - - - - - - - ); -}); - -VideoContextMenu.displayName = 'VideoContextMenu'; - -const _hover: ChakraProps['_hover'] = { bg: 'transparent' }; - -/** - * A logical component that listens for context menu events and opens the context menu. It's separate from - * ImageContextMenu component to avoid re-rendering the whole context menu on every context menu event. - */ -const VideoContextMenuEventLogical = memo(() => { - const lastPositionRef = useRef<{ x: number; y: number }>({ x: -1, y: -1 }); - const longPressTimeoutRef = useRef(0); - const animationTimeoutRef = useRef(0); - - const onContextMenu = useCallback((e: MouseEvent | PointerEvent) => { - if (e.shiftKey) { - // This is a shift + right click event, which should open the native context menu - onClose(); - return; - } - - const videoDTO = getVideoDTOFromMap(e.target as Node); - - if (!videoDTO) { - // Can't find the image DTO, close the context menu - onClose(); - return; - } - - // clear pending delayed open - window.clearTimeout(animationTimeoutRef.current); - e.preventDefault(); - - if (lastPositionRef.current.x !== e.pageX || lastPositionRef.current.y !== e.pageY) { - // if the mouse moved, we need to close, wait for animation and reopen the menu at the new position - if ($videoContextMenuState.get().isOpen) { - onClose(); - } - animationTimeoutRef.current = window.setTimeout(() => { - // Open the menu after the animation with the new state - $videoContextMenuState.set({ - isOpen: true, - position: { x: e.pageX, y: e.pageY }, - videoDTO, - }); - }, 100); - } else { - // else we can just open the menu at the current position w/ new state - $videoContextMenuState.set({ - isOpen: true, - position: { x: e.pageX, y: e.pageY }, - videoDTO, - }); - } - - // Always sync the last position - lastPositionRef.current = { x: e.pageX, y: e.pageY }; - }, []); - - // Use a long press to open the context menu on touch devices - const onPointerDown = useCallback( - (e: PointerEvent) => { - if (e.pointerType === 'mouse') { - // Bail out if it's a mouse event - this is for touch/pen only - return; - } - - longPressTimeoutRef.current = window.setTimeout(() => { - onContextMenu(e); - }, 
LONGPRESS_DELAY_MS); - - lastPositionRef.current = { x: e.pageX, y: e.pageY }; - }, - [onContextMenu] - ); - - const onPointerMove = useCallback((e: PointerEvent) => { - if (e.pointerType === 'mouse') { - // Bail out if it's a mouse event - this is for touch/pen only - return; - } - if (longPressTimeoutRef.current === null) { - return; - } - - // If the pointer has moved more than the threshold, cancel the long press - const lastPosition = lastPositionRef.current; - - const distanceFromLastPosition = Math.hypot(e.pageX - lastPosition.x, e.pageY - lastPosition.y); - - if (distanceFromLastPosition > LONGPRESS_MOVE_THRESHOLD_PX) { - clearTimeout(longPressTimeoutRef.current); - } - }, []); - - const onPointerUp = useCallback((e: PointerEvent) => { - if (e.pointerType === 'mouse') { - // Bail out if it's a mouse event - this is for touch/pen only - return; - } - if (longPressTimeoutRef.current) { - clearTimeout(longPressTimeoutRef.current); - } - }, []); - - const onPointerCancel = useCallback((e: PointerEvent) => { - if (e.pointerType === 'mouse') { - // Bail out if it's a mouse event - this is for touch/pen only - return; - } - if (longPressTimeoutRef.current) { - clearTimeout(longPressTimeoutRef.current); - } - }, []); - - useEffect(() => { - const controller = new AbortController(); - - // Context menu events - window.addEventListener('contextmenu', onContextMenu, { signal: controller.signal }); - - // Long press events - window.addEventListener('pointerdown', onPointerDown, { signal: controller.signal }); - window.addEventListener('pointerup', onPointerUp, { signal: controller.signal }); - window.addEventListener('pointercancel', onPointerCancel, { signal: controller.signal }); - window.addEventListener('pointermove', onPointerMove, { signal: controller.signal }); - - return () => { - controller.abort(); - }; - }, [onContextMenu, onPointerCancel, onPointerDown, onPointerMove, onPointerUp]); - - useEffect( - () => () => { - // Clean up any timeouts when we unmount - window.clearTimeout(animationTimeoutRef.current); - window.clearTimeout(longPressTimeoutRef.current); - }, - [] - ); - - return null; -}); - -VideoContextMenuEventLogical.displayName = 'VideoContextMenuEventLogical'; - -// The content of the context menu, which changes based on the selection count. Split out and memoized to avoid -// re-rendering the whole context menu too often. 
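The deleted context-menu file above detects a long press by starting a timer on pointerdown, cancelling it when the pointer drifts past a small threshold, and clearing it on pointerup or pointercancel. A standalone sketch of that pattern with the same constants; the helper name is illustrative and not part of the patch:

// Illustrative helper mirroring the long-press logic above.
const LONGPRESS_DELAY_MS = 500;
const LONGPRESS_MOVE_THRESHOLD_PX = 10;

const attachLongPress = (el: HTMLElement, onLongPress: (e: PointerEvent) => void) => {
  let timeoutId = 0;
  let start = { x: 0, y: 0 };

  el.addEventListener('pointerdown', (e) => {
    if (e.pointerType === 'mouse') {
      return; // long press is for touch/pen only
    }
    start = { x: e.pageX, y: e.pageY };
    timeoutId = window.setTimeout(() => onLongPress(e), LONGPRESS_DELAY_MS);
  });

  el.addEventListener('pointermove', (e) => {
    // Cancel the pending long press if the pointer moves too far from the start.
    if (Math.hypot(e.pageX - start.x, e.pageY - start.y) > LONGPRESS_MOVE_THRESHOLD_PX) {
      window.clearTimeout(timeoutId);
    }
  });

  const cancel = () => window.clearTimeout(timeoutId);
  el.addEventListener('pointerup', cancel);
  el.addEventListener('pointercancel', cancel);
};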
-const MenuContent = memo(() => { - const selectionCount = useAppSelector(selectSelectionCount); - const state = useStore($videoContextMenuState); - - if (!state.videoDTO) { - return null; - } - - if (selectionCount > 1) { - return ( - - - - ); - } - - return ( - - - - ); -}); - -MenuContent.displayName = 'MenuContent'; diff --git a/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx b/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx index 11291bd5c72..3864094604d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx @@ -6,7 +6,6 @@ import { useDisclosure } from 'common/hooks/useBoolean'; import { useGallerySearchTerm } from 'features/gallery/components/ImageGrid/useGallerySearchTerm'; import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors'; import { galleryViewChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context'; import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel'; import type { CSSProperties } from 'react'; @@ -19,7 +18,6 @@ import { GallerySettingsPopover } from './GallerySettingsPopover/GallerySettings import { GalleryUploadButton } from './GalleryUploadButton'; import { GallerySearch } from './ImageGrid/GallerySearch'; import { ImageGallery } from './NewGallery'; -import { VideoGallery } from './VideoGallery'; const COLLAPSE_STYLES: CSSProperties = { flexShrink: 0, minHeight: 0, width: '100%' }; @@ -44,10 +42,6 @@ export const GalleryPanel = memo(() => { dispatch(galleryViewChanged('assets')); }, [dispatch]); - const handleClickVideos = useCallback(() => { - dispatch(galleryViewChanged('videos')); - }, [dispatch]); - const handleClickSearch = useCallback(() => { onResetSearchTerm(); if (!searchDisclosure.isOpen && galleryPanel.$isCollapsed.get()) { @@ -58,7 +52,6 @@ export const GalleryPanel = memo(() => { const selectedBoardId = useAppSelector(selectSelectedBoardId); const boardName = useBoardName(selectedBoardId); - const isVideoEnabled = useFeatureStatus('video'); return ( @@ -83,16 +76,6 @@ export const GalleryPanel = memo(() => { {t('parameters.images')} - {isVideoEnabled && ( - - )} - - - - {doesTabHaveGallery && ( - <> - } - aria-label={t('boards.locateInGalery')} - tooltip={t('boards.locateInGalery')} - onClick={locateInGallery} - variant="link" - size="sm" - alignSelf="stretch" - /> - - - )} - - - - ); -}); - -CurrentVideoButtons.displayName = 'CurrentVideoButtons'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx deleted file mode 100644 index 25c9806ab72..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentVideoPreview.tsx +++ /dev/null @@ -1,85 +0,0 @@ -import { Box, Flex } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import VideoMetadataViewer from 'features/gallery/components/ImageMetadataViewer/VideoMetadataViewer'; -import NextPrevItemButtons from 'features/gallery/components/NextPrevItemButtons'; -import { selectShouldShowItemDetails } from 'features/ui/store/uiSelectors'; -import { VideoView } from 'features/video/components/VideoView'; -import type { AnimationProps } from 'framer-motion'; -import { 
AnimatePresence, motion } from 'framer-motion'; -import { memo, useCallback, useRef, useState } from 'react'; -import type { VideoDTO } from 'services/api/types'; - -import { NoContentForViewer } from './NoContentForViewer'; - -export const CurrentVideoPreview = memo(({ videoDTO }: { videoDTO: VideoDTO | null }) => { - const shouldShowItemDetails = useAppSelector(selectShouldShowItemDetails); - - // Show and hide the next/prev buttons on mouse move - const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] = useState(false); - const timeoutId = useRef(0); - const onMouseOver = useCallback(() => { - setShouldShowNextPrevButtons(true); - window.clearTimeout(timeoutId.current); - }, []); - const onMouseOut = useCallback(() => { - timeoutId.current = window.setTimeout(() => { - setShouldShowNextPrevButtons(false); - }, 500); - }, []); - - return ( - - {videoDTO && videoDTO.video_url && ( - - - - )} - {!videoDTO && } - {shouldShowItemDetails && videoDTO && ( - - - - )} - - {shouldShowNextPrevButtons && videoDTO && ( - - - - )} - - - ); -}); -CurrentVideoPreview.displayName = 'CurrentVideoPreview'; - -const initial: AnimationProps['initial'] = { - opacity: 0, -}; -const animateArrows: AnimationProps['animate'] = { - opacity: 1, - transition: { duration: 0.07 }, -}; -const exit: AnimationProps['exit'] = { - opacity: 0, - transition: { duration: 0.07 }, -}; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx index 7123a8fbf37..edfada8bc2d 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonHover.tsx @@ -1,6 +1,4 @@ import { Box, Flex, Image } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $crossOrigin } from 'app/store/nanostores/authToken'; import { useAppSelector } from 'app/store/storeHooks'; import { useBoolean } from 'common/hooks/useBoolean'; import { preventDefault } from 'common/util/stopPropagation'; @@ -14,8 +12,6 @@ import type { ComparisonProps } from './common'; import { fitDimsToContainer, getSecondImageDims } from './common'; export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: ComparisonProps) => { - const crossOrigin = useStore($crossOrigin); - const comparisonFit = useAppSelector(selectComparisonFit); const imageContainerRef = useRef(null); const mouseOver = useBoolean(false); @@ -57,7 +53,6 @@ export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: Com id="image-comparison-hover-first-image" src={firstImage.image_url} fallbackSrc={firstImage.thumbnail_url} - crossOrigin={crossOrigin} w={fittedDims.width} h={fittedDims.height} maxW="full" @@ -94,7 +89,6 @@ export const ImageComparisonHover = memo(({ firstImage, secondImage, rect }: Com id="image-comparison-hover-second-image" src={secondImage.image_url} fallbackSrc={secondImage.thumbnail_url} - crossOrigin={crossOrigin} w={compareImageDims.width} h={compareImageDims.height} maxW={fittedDims.width} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx index 45c4201c19d..a84e842dccc 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx +++ 
b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSideBySide.tsx @@ -1,6 +1,4 @@ import { Flex, Image } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $crossOrigin } from 'app/store/nanostores/authToken'; import type { ComparisonProps } from 'features/gallery/components/ImageViewer/common'; import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel'; import { VerticalResizeHandle } from 'features/ui/components/tabs/ResizeHandle'; @@ -43,8 +41,6 @@ export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: Comp ImageComparisonSideBySide.displayName = 'ImageComparisonSideBySide'; const SideBySideImage = memo(({ imageDTO, type }: { imageDTO: ImageDTO; type: 'first' | 'second' }) => { - const crossOrigin = useStore($crossOrigin); - return ( @@ -56,7 +52,6 @@ const SideBySideImage = memo(({ imageDTO, type }: { imageDTO: ImageDTO; type: 'f maxH="full" src={imageDTO.image_url} fallbackSrc={imageDTO.thumbnail_url} - crossOrigin={crossOrigin} objectFit="contain" borderRadius="base" /> diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx index ce4bc5f083d..1f0d64aafeb 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparisonSlider.tsx @@ -1,6 +1,4 @@ import { Box, Flex, Icon, Image } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $crossOrigin } from 'app/store/nanostores/authToken'; import { useAppSelector } from 'app/store/storeHooks'; import { preventDefault } from 'common/util/stopPropagation'; import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern'; @@ -23,7 +21,6 @@ const HANDLE_LEFT_INITIAL_PX = `calc(${INITIAL_POS} - ${HANDLE_HITBOX / 2}px)`; export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: ComparisonProps) => { const comparisonFit = useAppSelector(selectComparisonFit); - const crossOrigin = useStore($crossOrigin); // How far the handle is from the left - this will be a CSS calculation that takes into account the handle width const [left, setLeft] = useState(HANDLE_LEFT_INITIAL_PX); @@ -135,7 +132,6 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: Co id="image-comparison-second-image" src={secondImage.image_url} fallbackSrc={secondImage.thumbnail_url} - crossOrigin={crossOrigin} w={compareImageDims.width} h={compareImageDims.height} maxW={fittedDims.width} @@ -158,7 +154,6 @@ export const ImageComparisonSlider = memo(({ firstImage, secondImage, rect }: Co id="image-comparison-first-image" src={firstImage.image_url} fallbackSrc={firstImage.thumbnail_url} - crossOrigin={crossOrigin} w={fittedDims.width} h={fittedDims.height} objectFit="cover" diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx index cea53931924..9e829ea8dcf 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerPanel.tsx @@ -6,7 +6,6 @@ import { memo } from 'react'; import { 
ImageViewerContextProvider } from './context'; import { ImageComparison } from './ImageComparison'; import { ImageViewer } from './ImageViewer'; -import { VideoViewer } from './VideoViewer'; const selectIsComparing = createSelector( [selectLastSelectedItem, selectImageToCompare], @@ -23,8 +22,7 @@ export const ImageViewerPanel = memo(() => { // The image viewer renders progress images - if no image is selected, show the image viewer anyway !isComparing && !lastSelectedItem && } - {!isComparing && lastSelectedItem?.type === 'image' && } - {!isComparing && lastSelectedItem?.type === 'video' && } + {!isComparing && } {isComparing && } ); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx index 62248a1883c..1649a14c511 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx @@ -1,14 +1,10 @@ import type { ButtonProps } from '@invoke-ai/ui-library'; import { Alert, AlertDescription, AlertIcon, Button, Divider, Flex, Link, Spinner, Text } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import { InvokeLogoIcon } from 'common/components/InvokeLogoIcon'; import { LOADING_SYMBOL, useHasImages } from 'features/gallery/hooks/useHasImages'; import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { selectIsLocal } from 'features/system/store/configSlice'; import { navigationApi } from 'features/ui/layouts/navigation-api'; -import { selectActiveTab } from 'features/ui/store/uiSelectors'; import type { PropsWithChildren } from 'react'; import { memo, useCallback, useMemo } from 'react'; import { Trans, useTranslation } from 'react-i18next'; @@ -18,14 +14,11 @@ import { useMainModels } from 'services/api/hooks/modelsByType'; export const NoContentForViewer = memo(() => { const hasImages = useHasImages(); const [mainModels, { data }] = useMainModels(); - const isLocal = useAppSelector(selectIsLocal); - const isEnabled = useFeatureStatus('starterModels'); - const activeTab = useAppSelector(selectActiveTab); const { t } = useTranslation(); const showStarterBundles = useMemo(() => { - return isEnabled && data && mainModels.length === 0; - }, [mainModels.length, data, isEnabled]); + return data && mainModels.length === 0; + }, [mainModels.length, data]); if (hasImages === LOADING_SYMBOL) { // Blank bg w/ a spinner. The new user experience components below have an invoke logo, but it's not centered. @@ -43,11 +36,10 @@ export const NoContentForViewer = memo(() => { - {isLocal ? : activeTab === 'workflows' ? 
: } + {showStarterBundles && } - - {isLocal && } + ); @@ -97,37 +89,6 @@ const GetStartedLocal = () => { ); }; -const GetStartedCommercial = () => { - return ( - - - - ); -}; - -const GetStartedWorkflows = () => { - return ( - - - - ); -}; - -const GettingStartedVideosCallout = () => { - return ( - - - ), - }} - /> - - ); -}; - const StarterBundlesCallout = () => { const handleClickDownloadStarterModels = useCallback(() => { navigationApi.switchToTab('models'); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx deleted file mode 100644 index b06ac30c172..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewer.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import { Divider, Flex } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { VideoViewerContextProvider } from 'features/video/context/VideoViewerContext'; -import { memo } from 'react'; -import { useVideoDTO } from 'services/api/endpoints/videos'; - -import { CurrentVideoPreview } from './CurrentVideoPreview'; -import { VideoViewerToolbar } from './VideoViewerToolbar'; - -export const VideoViewer = memo(() => { - const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const videoDTO = useVideoDTO(lastSelectedItem?.type === 'video' ? lastSelectedItem.id : null); - - return ( - - - - - - - - - - ); -}); - -VideoViewer.displayName = 'VideoViewer'; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx deleted file mode 100644 index 994da11ae14..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/VideoViewerToolbar.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { Flex } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { ToggleMetadataViewerButton } from 'features/gallery/components/ImageViewer/ToggleMetadataViewerButton'; -import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { memo } from 'react'; -import { useVideoDTO } from 'services/api/endpoints/videos'; - -import { CurrentVideoButtons } from './CurrentVideoButtons'; - -export const VideoViewerToolbar = memo(() => { - const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const videoDTO = useVideoDTO(lastSelectedItem?.type === 'video' ? 
lastSelectedItem.id : null); - - return ( - - {videoDTO && } - {videoDTO && } - - ); -}); - -VideoViewerToolbar.displayName = 'VideoViewerToolbar'; diff --git a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx index c7a6823f4d5..b59965c28e3 100644 --- a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx @@ -9,14 +9,12 @@ import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; import { useGalleryImageNames } from './use-gallery-image-names'; -import { useGalleryVideoIds } from './use-gallery-video-ids'; const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineStart' | 'insetInlineEnd'] }) => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const lastSelectedItem = useAppSelector(selectLastSelectedItem); const { imageNames, isFetching } = useGalleryImageNames(); - const { videoIds, isFetching: isFetchingVideos } = useGalleryVideoIds(); const isOnFirstItem = useMemo( () => (lastSelectedItem ? imageNames.at(0) === lastSelectedItem.id : false), @@ -28,26 +26,24 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt ); const onClickLeftArrow = useCallback(() => { - const items = lastSelectedItem?.type === 'image' ? imageNames : videoIds; - const targetIndex = lastSelectedItem ? items.findIndex((n) => n === lastSelectedItem.id) - 1 : 0; - const clampedIndex = clamp(targetIndex, 0, items.length - 1); - const n = items.at(clampedIndex); + const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem.id) - 1 : 0; + const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1); + const n = imageNames.at(clampedIndex); if (!n) { return; } dispatch(itemSelected({ type: lastSelectedItem?.type ?? 'image', id: n })); - }, [dispatch, imageNames, lastSelectedItem, videoIds]); + }, [dispatch, imageNames, lastSelectedItem]); const onClickRightArrow = useCallback(() => { - const items = lastSelectedItem?.type === 'image' ? imageNames : videoIds; - const targetIndex = lastSelectedItem ? items.findIndex((n) => n === lastSelectedItem.id) + 1 : 0; - const clampedIndex = clamp(targetIndex, 0, items.length - 1); - const n = items.at(clampedIndex); + const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem.id) + 1 : 0; + const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1); + const n = imageNames.at(clampedIndex); if (!n) { return; } dispatch(itemSelected({ type: lastSelectedItem?.type ?? 
'image', id: n })); - }, [dispatch, imageNames, lastSelectedItem, videoIds]); + }, [dispatch, imageNames, lastSelectedItem]); return ( @@ -60,7 +56,7 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt icon={} variant="unstyled" onClick={onClickLeftArrow} - isDisabled={isFetching || isFetchingVideos} + isDisabled={isFetching} color="base.100" pointerEvents="auto" insetInlineStart={inset} @@ -75,7 +71,7 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt icon={} variant="unstyled" onClick={onClickRightArrow} - isDisabled={isFetching || isFetchingVideos} + isDisabled={isFetching} color="base.100" pointerEvents="auto" insetInlineEnd={inset} diff --git a/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx deleted file mode 100644 index 766971e76ec..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/VideoGallery.tsx +++ /dev/null @@ -1,390 +0,0 @@ -import { Box, Flex, forwardRef, Grid, GridItem, Spinner, Text } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { createSelector } from '@reduxjs/toolkit'; -import { $accountTypeText } from 'app/store/nanostores/accountTypeText'; -import { useAppSelector, useAppStore } from 'app/store/storeHooks'; -import { getFocusedRegion } from 'common/hooks/focus'; -import { useRangeBasedVideoFetching } from 'features/gallery/hooks/useRangeBasedVideoFetching'; -import type { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors'; -import { selectGalleryImageMinimumWidth, selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { selectionChanged } from 'features/gallery/store/gallerySlice'; -import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData'; -import { selectAllowVideo } from 'features/system/store/configSlice'; -import type { MutableRefObject } from 'react'; -import React, { memo, useCallback, useEffect, useMemo, useRef } from 'react'; -import type { - GridComponents, - GridComputeItemKey, - GridItemContent, - ListRange, - ScrollSeekConfiguration, - VirtuosoGridHandle, -} from 'react-virtuoso'; -import { VirtuosoGrid } from 'react-virtuoso'; -import { videosApi } from 'services/api/endpoints/videos'; -import { useDebounce } from 'use-debounce'; - -import { getItemIndex } from './getItemIndex'; -import { getItemsPerRow } from './getItemsPerRow'; -import { GallerySelectionCountTag } from './ImageGrid/GallerySelectionCountTag'; -import { GalleryVideo } from './ImageGrid/GalleryVideo'; -import { GalleryVideoPlaceholder } from './ImageGrid/GalleryVideoPlaceholder'; -import { scrollIntoView } from './scrollIntoView'; -import { useGalleryVideoIds } from './use-gallery-video-ids'; -import { useScrollableGallery } from './useScrollableGallery'; - -type ListVideoIdsQueryArgs = ReturnType; - -type GridContext = { - queryArgs: ListVideoIdsQueryArgs; - videoIds: string[]; -}; - -const VideoAtPosition = memo(({ videoId }: { index: number; videoId: string }) => { - /* - * We rely on the useRangeBasedImageFetching to fetch all image DTOs, caching them with RTK Query. - * - * In this component, we just want to consume that cache. Unforutnately, RTK Query does not provide a way to - * subscribe to a query without triggering a new fetch. 
- * - * There is a hack, though: - * - https://github.com/reduxjs/redux-toolkit/discussions/4213 - * - * This essentially means "subscribe to the query once it has some data". - * One issue with this approach. When an item DTO is already cached - for example, because it is selected and - * rendered in the viewer - it will show up in the grid before the other items have loaded. This is most - * noticeable when first loading a board. The first item in the board is selected and rendered immediately in - * the viewer, caching the DTO. The gallery grid renders, and that first item displays as a thumbnail while the - * others are still placeholders. After a moment, the rest of the items load up and display as thumbnails. - */ - - // Use `currentData` instead of `data` to prevent a flash of previous image rendered at this index - const { currentData: videoDTO, isUninitialized } = videosApi.endpoints.getVideoDTO.useQueryState(videoId); - videosApi.endpoints.getVideoDTO.useQuerySubscription(videoId, { skip: isUninitialized }); - - if (!videoDTO) { - return ; - } - - return ; -}); -VideoAtPosition.displayName = 'VideoAtPosition'; - -const computeItemKey: GridComputeItemKey = (index, itemId, { queryArgs }) => { - return `${JSON.stringify(queryArgs)}-${itemId ?? index}`; -}; - -/** - * Handles keyboard navigation for the gallery. - */ -const useKeyboardNavigation = ( - itemIds: string[], - virtuosoRef: React.RefObject, - rootRef: React.RefObject -) => { - const { dispatch, getState } = useAppStore(); - - const handleKeyDown = useCallback( - (event: KeyboardEvent) => { - if (getFocusedRegion() !== 'gallery') { - // Only handle keyboard navigation when the gallery is focused - return; - } - // Only handle arrow keys - if (!['ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(event.key)) { - return; - } - // Don't interfere if user is typing in an input - if (event.target instanceof HTMLInputElement || event.target instanceof HTMLTextAreaElement) { - return; - } - - const rootEl = rootRef.current; - const virtuosoGridHandle = virtuosoRef.current; - - if (!rootEl || !virtuosoGridHandle) { - return; - } - - if (itemIds.length === 0) { - return; - } - - const itemsPerRow = getItemsPerRow(rootEl); - - if (itemsPerRow === 0) { - // This can happen if the grid is not yet rendered or has no items - return; - } - - event.preventDefault(); - - const state = getState(); - const itemId = selectLastSelectedItem(state)?.id; - - const currentIndex = getItemIndex(itemId, itemIds); - - let newIndex = currentIndex; - - switch (event.key) { - case 'ArrowLeft': - if (currentIndex > 0) { - newIndex = currentIndex - 1; - } - break; - case 'ArrowRight': - if (currentIndex < itemIds.length - 1) { - newIndex = currentIndex + 1; - } - break; - case 'ArrowUp': - // If on first row, stay on current item - if (currentIndex < itemsPerRow) { - newIndex = currentIndex; - } else { - newIndex = Math.max(0, currentIndex - itemsPerRow); - } - break; - case 'ArrowDown': - // If no items below, stay on current item - if (currentIndex >= itemIds.length - itemsPerRow) { - newIndex = currentIndex; - } else { - newIndex = Math.min(itemIds.length - 1, currentIndex + itemsPerRow); - } - break; - } - - if (newIndex !== currentIndex && newIndex >= 0 && newIndex < itemIds.length) { - const nextItemId = itemIds[newIndex]; - if (nextItemId) { - dispatch(selectionChanged([{ type: 'video', id: nextItemId }])); - } - } - }, - [rootRef, virtuosoRef, itemIds, getState, dispatch] - ); - - useRegisteredHotkeys({ - id: 'galleryNavLeft', - category: 
'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavRight', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavUp', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavDown', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavLeftAlt', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavRightAlt', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavUpAlt', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); - - useRegisteredHotkeys({ - id: 'galleryNavDownAlt', - category: 'gallery', - callback: handleKeyDown, - options: { preventDefault: true }, - dependencies: [handleKeyDown], - }); -}; - -/** - * Keeps the last selected image in view when the gallery is scrolled. - * This is useful for keyboard navigation and ensuring the user can see their selection. - * It only tracks the last selected image, not the image to compare. - */ -const useKeepSelectedVideoInView = ( - videoIds: string[], - virtuosoRef: React.RefObject, - rootRef: React.RefObject, - rangeRef: MutableRefObject -) => { - const targetVideoId = useAppSelector(selectLastSelectedItem)?.id; - - useEffect(() => { - const virtuosoGridHandle = virtuosoRef.current; - const rootEl = rootRef.current; - const range = rangeRef.current; - - if (!virtuosoGridHandle || !rootEl || !targetVideoId || !videoIds || videoIds.length === 0) { - return; - } - scrollIntoView(targetVideoId, videoIds, rootEl, virtuosoGridHandle, range); - }, [targetVideoId, videoIds, rangeRef, rootRef, virtuosoRef]); -}; - -export const VideoGallery = memo(() => { - const virtuosoRef = useRef(null); - const rangeRef = useRef({ startIndex: 0, endIndex: 0 }); - const rootRef = useRef(null); - - // Get the ordered list of image names - this is our primary data source for virtualization - const { queryArgs, videoIds, isLoading } = useGalleryVideoIds(); - - // Use range-based fetching for bulk loading image DTOs into cache based on the visible range - const { onRangeChanged } = useRangeBasedVideoFetching({ - videoIds, - enabled: !isLoading, - }); - - useKeepSelectedVideoInView(videoIds, virtuosoRef, rootRef, rangeRef); - useKeyboardNavigation(videoIds, virtuosoRef, rootRef); - const scrollerRef = useScrollableGallery(rootRef); - - /* - * We have to keep track of the visible range for keep-selected-image-in-view functionality and push the range to - * the range-based image fetching hook. 
- */ - const handleRangeChanged = useCallback( - (range: ListRange) => { - rangeRef.current = range; - onRangeChanged(range); - }, - [onRangeChanged] - ); - - const context = useMemo(() => ({ videoIds, queryArgs }), [videoIds, queryArgs]); - - const isVideoEnabled = useAppSelector(selectAllowVideo); - const accountTypeText = useStore($accountTypeText); - - if (!isVideoEnabled) { - return ( - - - Video generation is not enabled for {accountTypeText} accounts - - - ); - } - - if (isLoading) { - return ( - - - Loading gallery... - - ); - } - - if (videoIds.length === 0) { - return ( - - No videos found - - ); - } - - return ( - // This wrapper component is necessary to initialize the overlay scrollbars! - - - ref={virtuosoRef} - context={context} - data={videoIds} - increaseViewportBy={4096} - itemContent={itemContent} - computeItemKey={computeItemKey} - components={components} - style={virtuosoGridStyle} - scrollerRef={scrollerRef} - scrollSeekConfiguration={scrollSeekConfiguration} - rangeChanged={handleRangeChanged} - /> - - - ); -}); - -VideoGallery.displayName = 'VideoGallery'; - -const scrollSeekConfiguration: ScrollSeekConfiguration = { - enter: (velocity) => { - return Math.abs(velocity) > 2048; - }, - exit: (velocity) => { - return velocity === 0; - }, -}; - -// Styles -const virtuosoGridStyle = { height: '100%', width: '100%' }; - -const selectGridTemplateColumns = createSelector( - selectGalleryImageMinimumWidth, - (galleryImageMinimumWidth) => `repeat(auto-fill, minmax(${galleryImageMinimumWidth}px, 1fr))` -); - -// Grid components -const ListComponent: GridComponents['List'] = forwardRef(({ context: _, ...rest }, ref) => { - const _gridTemplateColumns = useAppSelector(selectGridTemplateColumns); - const [gridTemplateColumns] = useDebounce(_gridTemplateColumns, 300); - - return ; -}); -ListComponent.displayName = 'ListComponent'; - -const itemContent: GridItemContent = (index, videoId) => { - return ; -}; - -const ItemComponent: GridComponents['Item'] = forwardRef(({ context: _, ...rest }, ref) => ( - -)); -ItemComponent.displayName = 'ItemComponent'; - -const ScrollSeekPlaceholderComponent: GridComponents['ScrollSeekPlaceholder'] = (props) => ( - - - -); - -ScrollSeekPlaceholderComponent.displayName = 'ScrollSeekPlaceholderComponent'; - -const components: GridComponents = { - Item: ItemComponent, - List: ListComponent, - ScrollSeekPlaceholder: ScrollSeekPlaceholderComponent, -}; diff --git a/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts b/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts deleted file mode 100644 index 5bb9c5c0b72..00000000000 --- a/invokeai/frontend/web/src/features/gallery/components/use-gallery-video-ids.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { EMPTY_ARRAY } from 'app/store/constants'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectGetVideoIdsQueryArgs } from 'features/gallery/store/gallerySelectors'; -import { useGetVideoIdsQuery } from 'services/api/endpoints/videos'; -import { useDebounce } from 'use-debounce'; - -const getVideoIdsQueryOptions = { - refetchOnReconnect: true, - selectFromResult: ({ currentData, isLoading, isFetching }) => ({ - videoIds: currentData?.video_ids ?? 
EMPTY_ARRAY, - isLoading, - isFetching, - }), -} satisfies Parameters[1]; - -export const useGalleryVideoIds = () => { - const _queryArgs = useAppSelector(selectGetVideoIdsQueryArgs); - const [queryArgs] = useDebounce(_queryArgs, 300); - const { videoIds, isLoading, isFetching } = useGetVideoIdsQuery(queryArgs, getVideoIdsQueryOptions); - return { videoIds, isLoading, isFetching, queryArgs }; -}; diff --git a/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts b/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts new file mode 100644 index 00000000000..e4ebbf3ec82 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/contexts/ImageDTOContext.ts @@ -0,0 +1,13 @@ +import { createContext, useContext } from 'react'; +import type { ImageDTO } from 'services/api/types'; +import { assert } from 'tsafe'; + +const ImageDTOContext = createContext(null); + +export const ImageDTOContextProvider = ImageDTOContext.Provider; + +export const useImageDTOContext = () => { + const dto = useContext(ImageDTOContext); + assert(dto !== null, 'useImageDTOContext must be used within ImageDTOContextProvider'); + return dto; +}; diff --git a/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts b/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts deleted file mode 100644 index 847a1c5d07f..00000000000 --- a/invokeai/frontend/web/src/features/gallery/contexts/ItemDTOContext.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { createContext, useContext } from 'react'; -import { type ImageDTO, isImageDTO, type VideoDTO } from 'services/api/types'; -import { assert } from 'tsafe'; - -const ItemDTOContext = createContext(null); - -export const ItemDTOContextProvider = ItemDTOContext.Provider; - -export const useItemDTOContext = () => { - const itemDTO = useContext(ItemDTOContext); - assert(itemDTO !== null, 'useItemDTOContext must be used within ItemDTOContextProvider'); - return itemDTO; -}; - -export const useItemDTOContextImageOnly = (): ImageDTO => { - const itemDTO = useContext(ItemDTOContext); - assert(itemDTO !== null, 'useItemDTOContext must be used within ItemDTOContextProvider'); - assert(isImageDTO(itemDTO), 'ItemDTO is not an image'); - return itemDTO as ImageDTO; -}; diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts b/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts deleted file mode 100644 index 4808ea6e623..00000000000 --- a/invokeai/frontend/web/src/features/gallery/hooks/useRangeBasedVideoFetching.ts +++ /dev/null @@ -1,78 +0,0 @@ -import { useAppStore } from 'app/store/storeHooks'; -import { useCallback, useEffect, useState } from 'react'; -import type { ListRange } from 'react-virtuoso'; -import { useGetVideoDTOsByNamesMutation, videosApi } from 'services/api/endpoints/videos'; -import { useThrottledCallback } from 'use-debounce'; - -interface UseRangeBasedVideoFetchingArgs { - videoIds: string[]; - enabled: boolean; -} - -interface UseRangeBasedVideoFetchingReturn { - onRangeChanged: (range: ListRange) => void; -} - -const getUncachedIds = (videoIds: string[], cachedVideoIds: string[], ranges: ListRange[]): string[] => { - const uncachedIdsSet = new Set(); - const cachedVideoIdsSet = new Set(cachedVideoIds); - - for (const range of ranges) { - for (let i = range.startIndex; i <= range.endIndex; i++) { - const id = videoIds[i]!; - if (id && !cachedVideoIdsSet.has(id)) { - uncachedIdsSet.add(id); - } - } - } - - return Array.from(uncachedIdsSet); -}; - -/** - * 
Hook for bulk fetching image DTOs based on the visible range from virtuoso. - * Individual image components should use `useGetImageDTOQuery(imageName)` to get their specific DTO. - * This hook ensures DTOs are bulk fetched and cached efficiently. - */ -export const useRangeBasedVideoFetching = ({ - videoIds, - enabled, -}: UseRangeBasedVideoFetchingArgs): UseRangeBasedVideoFetchingReturn => { - const store = useAppStore(); - const [getVideoDTOsByNames] = useGetVideoDTOsByNamesMutation(); - const [lastRange, setLastRange] = useState(null); - const [pendingRanges, setPendingRanges] = useState([]); - - const fetchVideos = useCallback( - (ranges: ListRange[], videoIds: string[]) => { - if (!enabled) { - return; - } - const cachedVideoIds = videosApi.util.selectCachedArgsForQuery(store.getState(), 'getVideoDTO'); - const uncachedIds = getUncachedIds(videoIds, cachedVideoIds, ranges); - // console.log('uncachedIds', uncachedIds); - if (uncachedIds.length === 0) { - return; - } - getVideoDTOsByNames({ video_ids: uncachedIds }); - setPendingRanges([]); - }, - [enabled, getVideoDTOsByNames, store] - ); - - const throttledFetchVideos = useThrottledCallback(fetchVideos, 500); - - const onRangeChanged = useCallback((range: ListRange) => { - setLastRange(range); - setPendingRanges((prev) => [...prev, range]); - }, []); - - useEffect(() => { - const combinedRanges = lastRange ? [...pendingRanges, lastRange] : pendingRanges; - throttledFetchVideos(combinedRanges, videoIds); - }, [videoIds, lastRange, pendingRanges, throttledFetchVideos]); - - return { - onRangeChanged, - }; -}; diff --git a/invokeai/frontend/web/src/features/gallery/store/actions.ts b/invokeai/frontend/web/src/features/gallery/store/actions.ts deleted file mode 100644 index 8d13c449369..00000000000 --- a/invokeai/frontend/web/src/features/gallery/store/actions.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { createAction } from '@reduxjs/toolkit'; -import type { ImageDTO } from 'services/api/types'; - -export const sentImageToCanvas = createAction('gallery/sentImageToCanvas'); - -export const imageDownloaded = createAction('gallery/imageDownloaded'); - -export const imageCopiedToClipboard = createAction('gallery/imageCopiedToClipboard'); - -export const imageOpenedInNewTab = createAction('gallery/imageOpenedInNewTab'); - -export const imageUploadedClientSide = createAction<{ - imageDTO: ImageDTO; - silent: boolean; - isFirstUploadOfBatch: boolean; -}>('gallery/imageUploadedClientSide'); diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts index 536fbd6d2a7..ab6b584a15e 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts @@ -2,7 +2,7 @@ import { createSelector } from '@reduxjs/toolkit'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types'; -import type { GetImageNamesArgs, GetVideoIdsArgs, ListBoardsArgs } from 'services/api/types'; +import type { GetImageNamesArgs, ListBoardsArgs } from 'services/api/types'; export const selectFirstSelectedItem = createSelector(selectGallerySlice, (gallery) => gallery.selection.at(0)); export const selectLastSelectedItem = createSelector(selectGallerySlice, (gallery) => gallery.selection.at(-1)); @@ -51,17 +51,6 @@ export const 
selectGetImageNamesQueryArgs = createMemoizedSelector( }) ); -export const selectGetVideoIdsQueryArgs = createMemoizedSelector( - [selectSelectedBoardId, selectGallerySearchTerm, selectGalleryOrderDir, selectGalleryStarredFirst], - (board_id, search_term, order_dir, starred_first): GetVideoIdsArgs => ({ - board_id, - search_term, - order_dir, - starred_first, - is_intermediate: false, - }) -); - export const selectAutoAssignBoardOnClick = createSelector( selectGallerySlice, (gallery) => gallery.autoAssignBoardOnClick diff --git a/invokeai/frontend/web/src/features/imageActions/actions.ts b/invokeai/frontend/web/src/features/imageActions/actions.ts index 14d27e900c1..2c9293127b4 100644 --- a/invokeai/frontend/web/src/features/imageActions/actions.ts +++ b/invokeai/frontend/web/src/features/imageActions/actions.ts @@ -42,7 +42,6 @@ import { getOptimalDimension } from 'features/parameters/util/optimalDimension'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared'; import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images'; -import { videosApi } from 'services/api/endpoints/videos'; import type { ImageDTO } from 'services/api/types'; import type { Equals } from 'tsafe'; import { assert } from 'tsafe'; @@ -324,15 +323,3 @@ export const removeImagesFromBoard = (arg: { image_names: string[]; dispatch: Ap dispatch(imagesApi.endpoints.removeImagesFromBoard.initiate({ image_names }, { track: false })); dispatch(selectionChanged([])); }; - -export const addVideosToBoard = (arg: { video_ids: string[]; boardId: BoardId; dispatch: AppDispatch }) => { - const { video_ids, boardId, dispatch } = arg; - dispatch(videosApi.endpoints.addVideosToBoard.initiate({ video_ids, board_id: boardId }, { track: false })); - dispatch(selectionChanged([])); -}; - -export const removeVideosFromBoard = (arg: { video_ids: string[]; dispatch: AppDispatch }) => { - const { video_ids, dispatch } = arg; - dispatch(videosApi.endpoints.removeVideosFromBoard.initiate({ video_ids }, { track: false })); - dispatch(selectionChanged([])); -}; diff --git a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx index f0d1ffe878b..748aa9ca65c 100644 --- a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx +++ b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx @@ -6,7 +6,6 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf import type { GroupStatusMap } from 'common/components/Picker/Picker'; import { loraAdded, selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice'; import { selectBase } from 'features/controlLayers/store/paramsSlice'; -import { API_BASE_MODELS } from 'features/modelManagerV2/models'; import { ModelPicker } from 'features/parameters/components/ModelPicker'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -69,11 +68,8 @@ const LoRASelect = () => { return undefined; } - // Determine the group ID for the current base model - const groupId = API_BASE_MODELS.includes(currentBaseModel) ? 
'api' : currentBaseModel; - // Return a map with only the current base model group enabled - return { [groupId]: true } satisfies GroupStatusMap; + return { [currentBaseModel]: true } satisfies GroupStatusMap; }, [currentBaseModel]); return ( diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx index a2363004ef3..d964279c2aa 100644 --- a/invokeai/frontend/web/src/features/metadata/parsing.tsx +++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx @@ -33,32 +33,12 @@ import { widthChanged, } from 'features/controlLayers/store/paramsSlice'; import { refImagesRecalled } from 'features/controlLayers/store/refImagesSlice'; -import type { - CanvasMetadata, - LoRA, - RefImageState, - VideoAspectRatio as ParameterVideoAspectRatio, - VideoDuration as ParameterVideoDuration, - VideoResolution as ParameterVideoResolution, -} from 'features/controlLayers/store/types'; -import { - zCanvasMetadata, - zCanvasReferenceImageState_OLD, - zRefImageState, - zVideoAspectRatio, - zVideoDuration, - zVideoResolution, -} from 'features/controlLayers/store/types'; +import type { CanvasMetadata, LoRA, RefImageState } from 'features/controlLayers/store/types'; +import { zCanvasMetadata, zCanvasReferenceImageState_OLD, zRefImageState } from 'features/controlLayers/store/types'; import type { ModelIdentifierField, ModelType } from 'features/nodes/types/common'; import { zModelIdentifierField } from 'features/nodes/types/common'; import { zModelIdentifier } from 'features/nodes/types/v2/common'; import { modelSelected } from 'features/parameters/store/actions'; -import { - videoAspectRatioChanged, - videoDurationChanged, - videoModelChanged, - videoResolutionChanged, -} from 'features/parameters/store/videoSlice'; import type { ParameterCFGRescaleMultiplier, ParameterCFGScale, @@ -714,87 +694,6 @@ const VAEModel: SingleMetadataHandler = { }; //#endregion VAEModel -//#region VideoModel -const VideoModel: SingleMetadataHandler = { - [SingleMetadataKey]: true, - type: 'VideoModel', - parse: async (metadata, store) => { - const raw = getProperty(metadata, 'model'); - const parsed = await parseModelIdentifier(raw, store, 'video'); - assert(parsed.type === 'video'); - return Promise.resolve(parsed); - }, - recall: (value, store) => { - store.dispatch(videoModelChanged({ videoModel: value })); - }, - i18nKey: 'metadata.videoModel', - LabelComponent: MetadataLabel, - ValueComponent: ({ value }: SingleMetadataValueProps) => ( - - ), -}; -//#endregion VideoModel - -//#region VideoDuration -const VideoDuration: SingleMetadataHandler = { - [SingleMetadataKey]: true, - type: 'VideoDuration', - parse: (metadata) => { - const raw = getProperty(metadata, 'duration'); - const parsed = zVideoDuration.parse(raw); - return Promise.resolve(parsed); - }, - recall: (value, store) => { - store.dispatch(videoDurationChanged(value)); - }, - i18nKey: 'metadata.videoDuration', - LabelComponent: MetadataLabel, - ValueComponent: ({ value }: SingleMetadataValueProps) => ( - - ), -}; -//#endregion VideoDuration - -//#region VideoResolution -const VideoResolution: SingleMetadataHandler = { - [SingleMetadataKey]: true, - type: 'VideoResolution', - parse: (metadata) => { - const raw = getProperty(metadata, 'resolution'); - const parsed = zVideoResolution.parse(raw); - return Promise.resolve(parsed); - }, - recall: (value, store) => { - store.dispatch(videoResolutionChanged(value)); - }, - i18nKey: 'metadata.videoResolution', - LabelComponent: MetadataLabel, - ValueComponent: 
({ value }: SingleMetadataValueProps) => ( - - ), -}; -//#endregion VideoResolution - -//#region VideoAspectRatio -const VideoAspectRatio: SingleMetadataHandler = { - [SingleMetadataKey]: true, - type: 'VideoAspectRatio', - parse: (metadata) => { - const raw = getProperty(metadata, 'aspect_ratio'); - const parsed = zVideoAspectRatio.parse(raw); - return Promise.resolve(parsed); - }, - recall: (value, store) => { - store.dispatch(videoAspectRatioChanged(value)); - }, - i18nKey: 'metadata.videoAspectRatio', - LabelComponent: MetadataLabel, - ValueComponent: ({ value }: SingleMetadataValueProps) => ( - - ), -}; -//#endregion VideoAspectRatio - //#region LoRAs const LoRAs: CollectionMetadataHandler = { [CollectionMetadataKey]: true, @@ -1044,17 +943,6 @@ export const ImageMetadataHandlers = { // ipAdapterToIPAdapterLayer: parseIPAdapterToIPAdapterLayer, } as const; -export const VideoMetadataHandlers = { - CreatedBy, - GenerationMode, - PositivePrompt, - VideoModel, - Seed, - VideoAspectRatio, - VideoDuration, - VideoResolution, -}; - const successToast = (parameter: string) => { toast({ id: 'PARAMETER_SET', diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts index f3a1d0c6434..dfab2d251f9 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useMainModelDefaultSettings.ts @@ -1,88 +1,48 @@ -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { useAppSelector } from 'app/store/storeHooks'; import { isNil } from 'es-toolkit/compat'; -import { selectConfigSlice } from 'features/system/store/configSlice'; import { useMemo } from 'react'; import type { MainModelConfig } from 'services/api/types'; -const initialStatesSelector = createMemoizedSelector(selectConfigSlice, (config) => { - const { steps, guidance, scheduler, cfgRescaleMultiplier, vaePrecision, width, height } = config.sd; - const { guidance: fluxGuidance } = config.flux; - - return { - initialSteps: steps.initial, - initialCfg: guidance.initial, - initialScheduler: scheduler, - initialCfgRescaleMultiplier: cfgRescaleMultiplier.initial, - initialVaePrecision: vaePrecision, - initialWidth: width.initial, - initialHeight: height.initial, - initialGuidance: fluxGuidance.initial, - }; -}); - export const useMainModelDefaultSettings = (modelConfig: MainModelConfig) => { - const { - initialSteps, - initialCfg, - initialScheduler, - initialCfgRescaleMultiplier, - initialVaePrecision, - initialWidth, - initialHeight, - initialGuidance, - } = useAppSelector(initialStatesSelector); - const defaultSettingsDefaults = useMemo(() => { return { vae: { isEnabled: !isNil(modelConfig?.default_settings?.vae), - value: modelConfig?.default_settings?.vae || 'default', + value: modelConfig?.default_settings?.vae ?? 'default', }, vaePrecision: { isEnabled: !isNil(modelConfig?.default_settings?.vae_precision), - value: modelConfig?.default_settings?.vae_precision || initialVaePrecision || 'fp32', + value: modelConfig?.default_settings?.vae_precision ?? 'fp32', }, scheduler: { isEnabled: !isNil(modelConfig?.default_settings?.scheduler), - value: modelConfig?.default_settings?.scheduler || initialScheduler || 'dpmpp_3m_k', + value: modelConfig?.default_settings?.scheduler ?? 
'dpmpp_3m_k', }, steps: { isEnabled: !isNil(modelConfig?.default_settings?.steps), - value: modelConfig?.default_settings?.steps || initialSteps, + value: modelConfig?.default_settings?.steps ?? 30, }, cfgScale: { isEnabled: !isNil(modelConfig?.default_settings?.cfg_scale), - value: modelConfig?.default_settings?.cfg_scale || initialCfg, + value: modelConfig?.default_settings?.cfg_scale ?? 7, }, cfgRescaleMultiplier: { isEnabled: !isNil(modelConfig?.default_settings?.cfg_rescale_multiplier), - value: modelConfig?.default_settings?.cfg_rescale_multiplier || initialCfgRescaleMultiplier, + value: modelConfig?.default_settings?.cfg_rescale_multiplier ?? 0, }, width: { isEnabled: !isNil(modelConfig?.default_settings?.width), - value: modelConfig?.default_settings?.width || initialWidth, + value: modelConfig?.default_settings?.width ?? 512, }, height: { isEnabled: !isNil(modelConfig?.default_settings?.height), - value: modelConfig?.default_settings?.height || initialHeight, + value: modelConfig?.default_settings?.height ?? 512, }, guidance: { isEnabled: !isNil(modelConfig?.default_settings?.guidance), - value: modelConfig?.default_settings?.guidance || initialGuidance, + value: modelConfig?.default_settings?.guidance ?? 4, }, }; - }, [ - modelConfig, - initialVaePrecision, - initialScheduler, - initialSteps, - initialCfg, - initialCfgRescaleMultiplier, - initialWidth, - initialHeight, - initialGuidance, - ]); + }, [modelConfig]); return defaultSettingsDefaults; }; diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx index c3de0dd3fb5..f7e91af62fd 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx @@ -1,6 +1,5 @@ import { Button, Text, useToast } from '@invoke-ai/ui-library'; import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { useCallback, useEffect, useState } from 'react'; import { useTranslation } from 'react-i18next'; @@ -10,7 +9,6 @@ const TOAST_ID = 'starterModels'; export const useStarterModelsToast = () => { const { t } = useTranslation(); - const isEnabled = useFeatureStatus('starterModels'); const [didToast, setDidToast] = useState(false); const [mainModels, { data }] = useMainModels(); const toast = useToast(); @@ -23,7 +21,7 @@ export const useStarterModelsToast = () => { toast.close(TOAST_ID); } } - if (data && mainModels.length === 0 && !didToast && isEnabled) { + if (data && mainModels.length === 0 && !didToast) { toast({ id: TOAST_ID, title: t('modelManager.noModelsInstalled'), @@ -34,7 +32,7 @@ export const useStarterModelsToast = () => { onCloseComplete: () => setDidToast(true), }); } - }, [data, didToast, isEnabled, mainModels.length, t, toast]); + }, [data, didToast, mainModels.length, t, toast]); }; const ToastDescription = () => { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts index 0b4096e010b..11b19b3937f 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts @@ -18,7 +18,6 @@ import { isTIModelConfig, isUnknownModelConfig, isVAEModelConfig, - 
isVideoModelConfig, } from 'services/api/types'; import { objectEntries } from 'tsafe'; @@ -116,11 +115,6 @@ export const MODEL_CATEGORIES: Record = i18nKey: 'modelManager.llavaOnevision', filter: isLLaVAModelConfig, }, - video: { - category: 'video', - i18nKey: 'Video', - filter: isVideoModelConfig, - }, }; export const MODEL_CATEGORIES_AS_LIST = objectEntries(MODEL_CATEGORIES).map(([category, { i18nKey, filter }]) => ({ @@ -141,13 +135,6 @@ export const MODEL_BASE_TO_COLOR: Record = { 'sdxl-refiner': 'invokeBlue', flux: 'gold', cogview4: 'red', - imagen3: 'pink', - imagen4: 'pink', - 'chatgpt-4o': 'pink', - 'flux-kontext': 'pink', - 'gemini-2.5': 'pink', - veo3: 'purple', - runway: 'green', unknown: 'red', }; @@ -171,7 +158,6 @@ export const MODEL_TYPE_TO_LONG_NAME: Record = { clip_embed: 'CLIP Embed', siglip: 'SigLIP', flux_redux: 'FLUX Redux', - video: 'Video', unknown: 'Unknown', }; @@ -187,13 +173,6 @@ export const MODEL_BASE_TO_LONG_NAME: Record = { 'sdxl-refiner': 'Stable Diffusion XL Refiner', flux: 'FLUX', cogview4: 'CogView4', - imagen3: 'Imagen3', - imagen4: 'Imagen4', - 'chatgpt-4o': 'ChatGPT 4o', - 'flux-kontext': 'Flux Kontext', - 'gemini-2.5': 'Gemini 2.5', - veo3: 'Veo3', - runway: 'Runway', unknown: 'Unknown', }; @@ -209,13 +188,6 @@ export const MODEL_BASE_TO_SHORT_NAME: Record = { 'sdxl-refiner': 'SDXLR', flux: 'FLUX', cogview4: 'CogView4', - imagen3: 'Imagen3', - imagen4: 'Imagen4', - 'chatgpt-4o': 'ChatGPT 4o', - 'flux-kontext': 'Flux Kontext', - 'gemini-2.5': 'Gemini 2.5', - veo3: 'Veo3', - runway: 'Runway', unknown: 'Unknown', }; @@ -244,60 +216,11 @@ export const MODEL_FORMAT_TO_LONG_NAME: Record = { bnb_quantized_int8b: 'BNB Quantized (int8b)', bnb_quantized_nf4b: 'BNB Quantized (nf4b)', gguf_quantized: 'GGUF Quantized', - api: 'API', unknown: 'Unknown', }; -/** - * List of base models that make API requests - */ -export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5']; - -export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4']; - export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3']; -export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [ - 'sd-1', - 'sdxl', - 'flux', - 'flux-kontext', - 'chatgpt-4o', - 'gemini-2.5', -]; - -export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [ - 'sd-1', - 'sd-2', - 'sdxl', - 'cogview4', - 'sd-3', - 'imagen3', - 'imagen4', -]; - -export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [ - 'sd-1', - 'sd-2', - 'sd-3', - 'sdxl', - 'flux', - 'cogview4', -]; - -export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [ - 'sd-1', - 'sd-2', - 'sd-3', - 'sdxl', - 'flux', - 'cogview4', - 'imagen3', - 'imagen4', - 'flux-kontext', - 'chatgpt-4o', -]; - -export const VIDEO_BASE_MODELS = ['veo3', 'runway']; +export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = ['sd-1', 'sdxl', 'flux']; -export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway']; +export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sdxl', 'cogview4', 'sd-3']; diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx index cdf8bceaf97..5d68f3fdc5b 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx +++ 
b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HFToken.tsx @@ -8,8 +8,6 @@ import { FormLabel, Input, } from '@invoke-ai/ui-library'; -import { skipToken } from '@reduxjs/toolkit/query'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { toast } from 'features/toast/toast'; import type { ChangeEvent } from 'react'; import { memo, useCallback, useMemo, useState } from 'react'; @@ -24,8 +22,7 @@ import { assert } from 'tsafe'; export const HFToken = () => { const { t } = useTranslation(); - const isHFTokenEnabled = useFeatureStatus('hfToken'); - const { currentData } = useGetHFTokenStatusQuery(isHFTokenEnabled ? undefined : skipToken); + const { currentData } = useGetHFTokenStatusQuery(); const error = useMemo(() => { switch (currentData) { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx index 0875256d463..ba1d80aa007 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/HuggingFaceFolder/HuggingFaceForm.tsx @@ -1,6 +1,5 @@ import { Button, Flex, FormControl, FormErrorMessage, FormHelperText, FormLabel, Input } from '@invoke-ai/ui-library'; import { useInstallModel } from 'features/modelManagerV2/hooks/useInstallModel'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import type { ChangeEventHandler } from 'react'; import { memo, useCallback, useState } from 'react'; import { useTranslation } from 'react-i18next'; @@ -14,7 +13,6 @@ export const HuggingFaceForm = memo(() => { const [displayResults, setDisplayResults] = useState(false); const [errorMessage, setErrorMessage] = useState(''); const { t } = useTranslation(); - const isHFTokenEnabled = useFeatureStatus('hfToken'); const [_getHuggingFaceModels, { isLoading, data }] = useLazyGetHuggingFaceModelsQuery(); const [installModel] = useInstallModel(); @@ -66,7 +64,7 @@ export const HuggingFaceForm = memo(() => { {t('modelManager.huggingFaceHelper')} {!!errorMessage.length && {errorMessage}} - {isHFTokenEnabled && } + {data && data.urls && displayResults && } ); diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx index e139639f1f0..2d0192425dc 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx @@ -17,7 +17,6 @@ const FORMAT_NAME_MAP: Record = { bnb_quantized_int8b: 'bnb_quantized_int8b', bnb_quantized_nf4b: 'quantized', gguf_quantized: 'gguf', - api: 'api', omi: 'omi', unknown: 'unknown', olive: 'olive', @@ -36,7 +35,6 @@ const FORMAT_COLOR_MAP: Record = { bnb_quantized_int8b: 'base', bnb_quantized_nf4b: 'base', gguf_quantized: 'base', - api: 'base', unknown: 'red', olive: 'base', onnx: 'base', diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx index 
2fa9580e421..ca7684ae859 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgRescaleMultiplier.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectCFGRescaleMultiplierConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS } from 'features/parameters/components/Advanced/ParamCFGRescaleMultiplier'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -15,12 +14,7 @@ type DefaultCfgRescaleMultiplierType = MainModelDefaultSettingsFormData['cfgResc export const DefaultCfgRescaleMultiplier = memo((props: UseControllerProps) => { const { field } = useController(props); - const config = useAppSelector(selectCFGRescaleMultiplierConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback( (v: number) => { @@ -53,20 +47,20 @@ export const DefaultCfgRescaleMultiplier = memo((props: UseControllerProps diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx index 78558243ae6..76f698b8f7b 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultCfgScale.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectCFGScaleConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamCFGScale'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -15,12 +14,7 @@ type DefaultCfgType = MainModelDefaultSettingsFormData['cfgScale']; export const DefaultCfgScale = memo((props: UseControllerProps) => { const { field } = useController(props); - const config = useAppSelector(selectCFGScaleConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback( (v: number) => { @@ -53,20 +47,20 @@ export const DefaultCfgScale = memo((props: UseControllerProps diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx 
b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx index cb939a268cd..df8c62cbf8a 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultGuidance.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectGuidanceConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamGuidance'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -15,16 +14,7 @@ type DefaultGuidanceType = MainModelDefaultSettingsFormData['guidance']; export const DefaultGuidance = memo((props: UseControllerProps) => { const { field } = useController(props); - const config = useAppSelector(selectGuidanceConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [ - config.sliderMin, - Math.floor(config.sliderMax - (config.sliderMax - config.sliderMin) / 2), - config.sliderMax, - ], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback( (v: number) => { @@ -57,20 +47,20 @@ export const DefaultGuidance = memo((props: UseControllerProps diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx index 78706b8223d..7603007d5a0 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultHeight.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectHeightConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS } from 'features/parameters/components/Dimensions/DimensionsHeight'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -19,12 +18,8 @@ type Props = { export const DefaultHeight = memo(({ control, optimalDimension }: Props) => { const { field } = useController({ control, name: 'height' }); - const config = useAppSelector(selectHeightConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMin, optimalDimension, config.sliderMax] - ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); const onChange = useCallback( (v: number) => { @@ -57,20 +52,20 @@ export const DefaultHeight 
= memo(({ control, optimalDimension }: Props) => { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx index 42afa6a1074..0e052f569df 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultSteps.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectStepsConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS, MARKS } from 'features/parameters/components/Core/ParamSteps'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -15,12 +14,7 @@ type DefaultSteps = MainModelDefaultSettingsFormData['steps']; export const DefaultSteps = memo((props: UseControllerProps) => { const { field } = useController(props); - const config = useAppSelector(selectStepsConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback( (v: number) => { @@ -53,20 +47,20 @@ export const DefaultSteps = memo((props: UseControllerProps diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx index 66467617d02..4ffc6e8f8fb 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/MainModelDefaultSettings/DefaultWidth.tsx @@ -1,8 +1,7 @@ import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { SettingToggle } from 'features/modelManagerV2/subpanels/ModelPanel/SettingToggle'; -import { selectWidthConfig } from 'features/system/store/configSlice'; +import { CONSTRAINTS } from 'features/parameters/components/Dimensions/DimensionsWidth'; import { memo, useCallback, useMemo } from 'react'; import type { UseControllerProps } from 'react-hook-form'; import { useController } from 'react-hook-form'; @@ -19,12 +18,8 @@ type Props = { export const DefaultWidth = memo(({ control, optimalDimension }: Props) => { const { field } = useController({ control, name: 'width' }); - const config = useAppSelector(selectWidthConfig); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMin, optimalDimension, config.sliderMax] - ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); const 
onChange = useCallback( (v: number) => { @@ -57,20 +52,20 @@ export const DefaultWidth = memo(({ control, optimalDimension }: Props) => { diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx index 55c683029fd..a9a502e7795 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk.tsx @@ -19,7 +19,6 @@ import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; import { memoize } from 'es-toolkit/compat'; import { useBuildNode } from 'features/nodes/hooks/useBuildNode'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { $addNodeCmdk, $cursorPos, @@ -147,7 +146,6 @@ export const AddNodeCmdk = memo(() => { const [searchTerm, setSearchTerm] = useState(''); const addNode = useAddNode(); const tab = useAppSelector(selectActiveTab); - const isLocked = useIsWorkflowEditorLocked(); // Filtering the list is expensive - debounce the search term to avoid stutters const [debouncedSearchTerm] = useDebounce(searchTerm, 300); const isOpen = useStore($addNodeCmdk); @@ -162,8 +160,8 @@ export const AddNodeCmdk = memo(() => { id: 'addNode', category: 'workflows', callback: open, - options: { enabled: tab === 'workflows' && !isLocked, preventDefault: true }, - dependencies: [open, tab, isLocked], + options: { enabled: tab === 'workflows', preventDefault: true }, + dependencies: [open, tab], }); const onChange = useCallback((e: ChangeEvent) => { diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx index c6f29095e44..f6474dec74b 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/Flow.tsx @@ -4,7 +4,6 @@ import type { EdgeChange, HandleType, NodeChange, - NodeMouseHandler, OnEdgesChange, OnInit, OnMoveEnd, @@ -23,10 +22,8 @@ import { } from '@xyflow/react'; import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks'; import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus'; -import { $isSelectingOutputNode, $outputNodeId } from 'features/nodes/components/sidePanel/workflow/publish'; import { useConnection } from 'features/nodes/hooks/useConnection'; import { useIsValidConnection } from 'features/nodes/hooks/useIsValidConnection'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { useNodeCopyPaste } from 'features/nodes/hooks/useNodeCopyPaste'; import { $addNodeCmdk, @@ -52,7 +49,7 @@ import { import { connectionToEdge } from 'features/nodes/store/util/reactFlowUtil'; import { selectSelectionMode, selectShouldSnapToGrid } from 'features/nodes/store/workflowSettingsSlice'; import { NO_DRAG_CLASS, NO_PAN_CLASS, NO_WHEEL_CLASS } from 'features/nodes/types/constants'; -import { type AnyEdge, type AnyNode, isInvocationNode } from 'features/nodes/types/invocation'; +import type { AnyEdge, AnyNode } from 'features/nodes/types/invocation'; import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData'; import type { CSSProperties, MouseEvent } from 'react'; import { memo, useCallback, useMemo, useRef } from 'react'; @@ 
-94,7 +91,6 @@ export const Flow = memo(() => { const flowWrapper = useRef(null); const isValidConnection = useIsValidConnection(); const updateNodeInternals = useUpdateNodeInternals(); - const isLocked = useIsWorkflowEditorLocked(); useFocusRegion('workflows', flowWrapper); @@ -212,18 +208,6 @@ export const Flow = memo(() => { // #endregion - const onNodeClick = useCallback>((e, node) => { - if (!$isSelectingOutputNode.get()) { - return; - } - if (!isInvocationNode(node)) { - return; - } - const { id } = node.data; - $outputNodeId.set(id); - $isSelectingOutputNode.set(false); - }, []); - return ( <> @@ -235,7 +219,6 @@ export const Flow = memo(() => { nodes={nodes} edges={edges} onInit={onInit} - onNodeClick={onNodeClick} onMouseMove={onMouseMove} onNodesChange={onNodesChange} onEdgesChange={onEdgesChange} @@ -248,12 +231,6 @@ export const Flow = memo(() => { onMoveEnd={handleMoveEnd} connectionLineComponent={CustomConnectionLine} isValidConnection={isValidConnection} - edgesFocusable={!isLocked} - edgesReconnectable={!isLocked} - nodesDraggable={!isLocked} - nodesConnectable={!isLocked} - nodesFocusable={!isLocked} - elementsSelectable={!isLocked} minZoom={0.1} snapToGrid={shouldSnapToGrid} snapGrid={snapGrid} @@ -279,8 +256,6 @@ export const Flow = memo(() => { Flow.displayName = 'Flow'; const HotkeyIsolator = memo(() => { - const isLocked = useIsWorkflowEditorLocked(); - const mayUndo = useAppSelector(selectMayUndo); const mayRedo = useAppSelector(selectMayRedo); @@ -295,7 +270,7 @@ const HotkeyIsolator = memo(() => { id: 'copySelection', category: 'workflows', callback: copySelection, - options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true }, + options: { enabled: isWorkflowsFocused, preventDefault: true }, dependencies: [copySelection], }); @@ -324,24 +299,24 @@ const HotkeyIsolator = memo(() => { id: 'selectAll', category: 'workflows', callback: selectAll, - options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true }, - dependencies: [selectAll, isWorkflowsFocused, isLocked], + options: { enabled: isWorkflowsFocused, preventDefault: true }, + dependencies: [selectAll, isWorkflowsFocused], }); useRegisteredHotkeys({ id: 'pasteSelection', category: 'workflows', callback: pasteSelection, - options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true }, - dependencies: [pasteSelection, isLocked, isWorkflowsFocused], + options: { enabled: isWorkflowsFocused, preventDefault: true }, + dependencies: [pasteSelection, isWorkflowsFocused], }); useRegisteredHotkeys({ id: 'pasteSelectionWithEdges', category: 'workflows', callback: pasteSelectionWithEdges, - options: { enabled: isWorkflowsFocused && !isLocked, preventDefault: true }, - dependencies: [pasteSelectionWithEdges, isLocked, isWorkflowsFocused], + options: { enabled: isWorkflowsFocused, preventDefault: true }, + dependencies: [pasteSelectionWithEdges, isWorkflowsFocused], }); useRegisteredHotkeys({ @@ -350,8 +325,8 @@ const HotkeyIsolator = memo(() => { callback: () => { store.dispatch(undo()); }, - options: { enabled: isWorkflowsFocused && !isLocked && mayUndo, preventDefault: true }, - dependencies: [store, mayUndo, isLocked, isWorkflowsFocused], + options: { enabled: isWorkflowsFocused && mayUndo, preventDefault: true }, + dependencies: [store, mayUndo, isWorkflowsFocused], }); useRegisteredHotkeys({ @@ -360,8 +335,8 @@ const HotkeyIsolator = memo(() => { callback: () => { store.dispatch(redo()); }, - options: { enabled: isWorkflowsFocused && !isLocked && mayRedo, preventDefault: true 
}, - dependencies: [store, mayRedo, isLocked, isWorkflowsFocused], + options: { enabled: isWorkflowsFocused && mayRedo, preventDefault: true }, + dependencies: [store, mayRedo, isWorkflowsFocused], }); const onEscapeHotkey = useCallback(() => { @@ -398,8 +373,8 @@ const HotkeyIsolator = memo(() => { id: 'deleteSelection', category: 'workflows', callback: deleteSelection, - options: { preventDefault: true, enabled: isWorkflowsFocused && !isLocked }, - dependencies: [deleteSelection, isWorkflowsFocused, isLocked], + options: { preventDefault: true, enabled: isWorkflowsFocused }, + dependencies: [deleteSelection, isWorkflowsFocused], }); return null; diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx index 851b85880f2..890666b0c4e 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx @@ -3,7 +3,6 @@ import { Flex, FormControlGroup } from '@invoke-ai/ui-library'; import { useIsExecutableNode } from 'features/nodes/hooks/useIsBatchNode'; import { useNodeHasImageOutput } from 'features/nodes/hooks/useNodeHasImageOutput'; import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { memo } from 'react'; import SaveToGalleryCheckbox from './SaveToGalleryCheckbox'; @@ -18,7 +17,6 @@ const props: ChakraProps = { w: 'unset' }; const InvocationNodeFooter = ({ nodeId }: Props) => { const hasImageOutput = useNodeHasImageOutput(); const isExecutableNode = useIsExecutableNode(); - const isCacheEnabled = useFeatureStatus('invocationCache'); return ( { justifyContent="space-between" > - {isExecutableNode && isCacheEnabled && } + {isExecutableNode && } {isExecutableNode && hasImageOutput && } diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx index 93b2518ca03..ba3282459fd 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle.tsx @@ -8,7 +8,6 @@ import { useIsConnectionStartField, } from 'features/nodes/hooks/useFieldConnectionState'; import { useInputFieldTemplateOrThrow } from 'features/nodes/hooks/useInputFieldTemplateOrThrow'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { useFieldTypeName } from 'features/nodes/hooks/usePrettyFieldType'; import { HANDLE_TOOLTIP_OPEN_DELAY } from 'features/nodes/types/constants'; import type { FieldInputTemplate } from 'features/nodes/types/field'; @@ -106,16 +105,9 @@ type HandleCommonProps = { }; const IdleHandle = memo(({ fieldTemplate, fieldTypeName, fieldColor, isModelField }: HandleCommonProps) => { - const isLocked = useIsWorkflowEditorLocked(); return ( - + { if (connectionError !== null) { @@ -149,13 +140,7 @@ const ConnectionInProgressHandle = memo( return ( - + { - const isLocked = useIsWorkflowEditorLocked(); - return ( - + { if (connectionErrorTKey !== null) { @@ -150,13 +140,7 @@ const ConnectionInProgressHandle = memo( return ( - + { const mouseOverNode = 
useMouseOverNode(nodeId); const mouseOverFormField = useMouseOverFormField(nodeId); const zoomToNode = useZoomToNode(nodeId); - const isLocked = useIsWorkflowEditorLocked(); const isInvalid = useNodeHasErrors(); const hasError = isMissingTemplate || isInvalid; @@ -74,7 +72,6 @@ const NodeWrapper = (props: NodeWrapperProps) => { sx={containerSx} width={width || NODE_WIDTH} opacity={opacity} - data-is-editor-locked={isLocked} data-is-selected={selected} data-is-mouse-over-form-field={mouseOverFormField.isMouseOverFormField} data-status={hasError ? 'error' : needsUpdate ? 'warning' : undefined} diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx index 885c4e5f146..7e2cde7093f 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NonInvocationNodeWrapper.tsx @@ -1,7 +1,6 @@ import type { ChakraProps } from '@invoke-ai/ui-library'; import { Box, useGlobalMenuClose } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode'; import { useNodeExecutionState } from 'features/nodes/hooks/useNodeExecutionState'; import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode'; @@ -23,7 +22,6 @@ const NonInvocationNodeWrapper = (props: NonInvocationNodeWrapperProps) => { const { nodeId, width, children, selected } = props; const mouseOverNode = useMouseOverNode(nodeId); const zoomToNode = useZoomToNode(nodeId); - const isLocked = useIsWorkflowEditorLocked(); const executionState = useNodeExecutionState(nodeId); const isInProgress = executionState?.status === zNodeStatus.enum.IN_PROGRESS; @@ -66,7 +64,6 @@ const NonInvocationNodeWrapper = (props: NonInvocationNodeWrapperProps) => { sx={containerSx} width={width || NODE_WIDTH} opacity={opacity} - data-is-editor-locked={isLocked} data-is-selected={selected} > diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts index 721c816b198..70e56cb4db6 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/shared.ts @@ -56,12 +56,6 @@ export const containerSx: SystemStyleObject = { display: 'block', shadow: '0 0 0 2px var(--border-color-selected)', }, - '&[data-is-editor-locked="true"]': { - '& *': { - cursor: 'not-allowed', - pointerEvents: 'none', - }, - }, }; export const shadowsSx: SystemStyleObject = { diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx index 7320c1fce77..2aaa79243c0 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopLeftPanel.tsx @@ -1,61 +1,15 @@ -import { Alert, AlertDescription, AlertIcon, AlertTitle, Box, Flex } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; +import { Flex } from '@invoke-ai/ui-library'; import AddNodeButton 
from 'features/nodes/components/flow/panels/TopPanel/AddNodeButton'; import UpdateNodesButton from 'features/nodes/components/flow/panels/TopPanel/UpdateNodesButton'; -import { - $isInPublishFlow, - $isSelectingOutputNode, - useIsValidationRunInProgress, - useIsWorkflowPublished, -} from 'features/nodes/components/sidePanel/workflow/publish'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; export const TopLeftPanel = memo(() => { - const isLocked = useIsWorkflowEditorLocked(); - const isInPublishFlow = useStore($isInPublishFlow); - const isPublished = useIsWorkflowPublished(); - const isValidationRunInProgress = useIsValidationRunInProgress(); - const isSelectingOutputNode = useStore($isSelectingOutputNode); - - const { t } = useTranslation(); return ( - {!isLocked && ( - - - - - )} - {isLocked && ( - - - - {t('workflows.builder.workflowLocked')} - {isValidationRunInProgress && ( - - {t('workflows.builder.publishingValidationRunInProgress')} - - )} - {isInPublishFlow && !isValidationRunInProgress && !isSelectingOutputNode && ( - - {t('workflows.builder.workflowLockedDuringPublishing')} - - )} - {isInPublishFlow && !isValidationRunInProgress && isSelectingOutputNode && ( - - {t('workflows.builder.selectingOutputNodeDesc')} - - )} - {isPublished && ( - - {t('workflows.builder.workflowLockedPublished')} - - )} - - - )} + + + + ); }); diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx index af778d3a9fb..5d4977db8ef 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/TopRightPanel.tsx @@ -2,21 +2,15 @@ import { Flex, IconButton } from '@invoke-ai/ui-library'; import ClearFlowButton from 'features/nodes/components/flow/panels/TopPanel/ClearFlowButton'; import SaveWorkflowButton from 'features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton'; import { useWorkflowEditorSettingsModal } from 'features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings'; -import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiGearSixFill } from 'react-icons/pi'; export const TopRightPanel = memo(() => { const modal = useWorkflowEditorSettingsModal(); - const isLocked = useIsWorkflowEditorLocked(); const { t } = useTranslation(); - if (isLocked) { - return null; - } - return ( diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx index dc8af90b176..f947e3165a8 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions.tsx @@ -1,6 +1,5 @@ import { Flex, Spacer } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; -import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish'; import { WorkflowListMenuTrigger } from 
'features/nodes/components/sidePanel/WorkflowListMenu/WorkflowListMenuTrigger'; import { WorkflowViewEditToggleButton } from 'features/nodes/components/sidePanel/WorkflowViewEditToggleButton'; import { selectWorkflowMode } from 'features/nodes/store/workflowLibrarySlice'; @@ -11,13 +10,12 @@ import SaveWorkflowButton from './SaveWorkflowButton'; export const ActiveWorkflowNameAndActions = memo(() => { const mode = useAppSelector(selectWorkflowMode); - const isPublished = useIsWorkflowPublished(); return ( - {mode === 'edit' && !isPublished && } + {mode === 'edit' && } diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx index 9f60a1d7a59..a31b71a4d44 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowsTabLeftPanel.tsx @@ -1,10 +1,6 @@ import { Flex } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { useAppSelector } from 'app/store/storeHooks'; import { EditModeLeftPanelContent } from 'features/nodes/components/sidePanel/EditModeLeftPanelContent'; -import { PublishedWorkflowPanelContent } from 'features/nodes/components/sidePanel/PublishedWorkflowPanelContent'; -import { $isInPublishFlow, useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish'; -import { PublishWorkflowPanelContent } from 'features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent'; import { ActiveWorkflowDescription } from 'features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowDescription'; import { ActiveWorkflowNameAndActions } from 'features/nodes/components/sidePanel/WorkflowListMenu/ActiveWorkflowNameAndActions'; import { selectWorkflowMode } from 'features/nodes/store/workflowLibrarySlice'; @@ -15,19 +11,15 @@ import { ViewModeLeftPanelContent } from './viewMode/ViewModeLeftPanelContent'; const WorkflowsTabLeftPanel = () => { const mode = useAppSelector(selectWorkflowMode); - const isPublished = useIsWorkflowPublished(); - const isInPublishFlow = useStore($isInPublishFlow); return ( - {isInPublishFlow && } - {!isInPublishFlow && } - {!isInPublishFlow && !isPublished && mode === 'view' && } - {!isInPublishFlow && !isPublished && mode === 'view' && } - {!isInPublishFlow && !isPublished && mode === 'edit' && } - {isPublished && } + + {mode === 'view' && } + {mode === 'view' && } + {mode === 'edit' && } ); diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx index 0b38dd014c7..60b1dc66331 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher.tsx @@ -44,9 +44,8 @@ const queryOptions = { if (!currentData) { return { serverWorkflowHash: null }; } - const { is_published: _is_published, ...serverWorkflow } = currentData.workflow; return { - serverWorkflowHash: stableHash(serverWorkflow), + serverWorkflowHash: stableHash(currentData.workflow), }; }, } satisfies Parameters[1]; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx 
b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx deleted file mode 100644 index 1f90716819b..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx +++ /dev/null @@ -1,475 +0,0 @@ -import type { ButtonProps } from '@invoke-ai/ui-library'; -import { - Button, - ButtonGroup, - Divider, - Flex, - ListItem, - Spacer, - Text, - Tooltip, - UnorderedList, -} from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { logger } from 'app/logging/logger'; -import { $projectUrl } from 'app/store/nanostores/projectId'; -import { useAppSelector } from 'app/store/storeHooks'; -import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; -import { withResultAsync } from 'common/util/result'; -import { parseify } from 'common/util/serialize'; -import { ExternalLink } from 'features/gallery/components/ImageViewer/NoContentForViewer'; -import { InvocationNodeContextProvider } from 'features/nodes/components/flow/nodes/Invocation/context'; -import { NodeFieldElementOverlay } from 'features/nodes/components/sidePanel/builder/NodeFieldElementEditMode'; -import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher'; -import { - $isInPublishFlow, - $isPublishing, - $isReadyToDoValidationRun, - $isSelectingOutputNode, - $outputNodeId, - $validationRunData, - selectHasUnpublishableNodes, - usePublishInputs, -} from 'features/nodes/components/sidePanel/workflow/publish'; -import { useInputFieldTemplateTitleOrThrow } from 'features/nodes/hooks/useInputFieldTemplateTitleOrThrow'; -import { useInputFieldUserTitleOrThrow } from 'features/nodes/hooks/useInputFieldUserTitleOrThrow'; -import { useMouseOverFormField } from 'features/nodes/hooks/useMouseOverNode'; -import { useNodeTemplateTitleOrThrow } from 'features/nodes/hooks/useNodeTemplateTitleOrThrow'; -import { useNodeUserTitleOrThrow } from 'features/nodes/hooks/useNodeUserTitleOrThrow'; -import { useOutputFieldNames } from 'features/nodes/hooks/useOutputFieldNames'; -import { useOutputFieldTemplate } from 'features/nodes/hooks/useOutputFieldTemplate'; -import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode'; -import { useEnqueueWorkflows } from 'features/queue/hooks/useEnqueueWorkflows'; -import { $isReadyToEnqueue } from 'features/queue/store/readiness'; -import { selectAllowPublishWorkflows } from 'features/system/store/configSlice'; -import { toast } from 'features/toast/toast'; -import type { PropsWithChildren } from 'react'; -import { memo, useCallback, useMemo } from 'react'; -import { Trans, useTranslation } from 'react-i18next'; -import { PiArrowLineRightBold, PiLightningFill, PiXBold } from 'react-icons/pi'; -import { serializeError } from 'serialize-error'; -import { assert } from 'tsafe'; - -const log = logger('generation'); - -export const PublishWorkflowPanelContent = memo(() => { - return ( - - - - - - - - - - - - - - - ); -}); -PublishWorkflowPanelContent.displayName = 'PublishWorkflowPanelContent'; - -const OutputFields = memo(() => { - const { t } = useTranslation(); - const outputNodeId = useStore($outputNodeId); - - return ( - - - {t('workflows.builder.publishedWorkflowOutputs')} - - - - - - {!outputNodeId && ( - - {t('workflows.builder.noOutputNodeSelected')} - - )} - {outputNodeId && ( - - - - )} - - ); -}); -OutputFields.displayName = 'OutputFields'; - -const OutputFieldsContent = memo(({ 
outputNodeId }: { outputNodeId: string }) => { - const outputFieldNames = useOutputFieldNames(); - - return ( - <> - {outputFieldNames.map((fieldName) => ( - - ))} - - ); -}); -OutputFieldsContent.displayName = 'OutputFieldsContent'; - -const PublishableInputFields = memo(() => { - const { t } = useTranslation(); - const inputs = usePublishInputs(); - - if (inputs.publishable.length === 0) { - return ( - - - {t('workflows.builder.noPublishableInputs')} - - - ); - } - - return ( - - {t('workflows.builder.publishedWorkflowInputs')} - - {inputs.publishable.map(({ nodeId, fieldName }) => { - return ( - - - - ); - })} - - ); -}); -PublishableInputFields.displayName = 'PublishableInputFields'; - -const UnpublishableInputFields = memo(() => { - const { t } = useTranslation(); - const inputs = usePublishInputs(); - - if (inputs.unpublishable.length === 0) { - return null; - } - - return ( - - - {t('workflows.builder.unpublishableInputs')} - - - {inputs.unpublishable.map(({ nodeId, fieldName }) => { - return ( - - - - ); - })} - - ); -}); -UnpublishableInputFields.displayName = 'UnpublishableInputFields'; - -const SelectOutputNodeButton = memo((props: ButtonProps) => { - const { t } = useTranslation(); - const outputNodeId = useStore($outputNodeId); - const isSelectingOutputNode = useStore($isSelectingOutputNode); - const onClick = useCallback(() => { - $outputNodeId.set(null); - $isSelectingOutputNode.set(true); - }, []); - return ( - - ); -}); -SelectOutputNodeButton.displayName = 'SelectOutputNodeButton'; - -const CancelPublishButton = memo(() => { - const { t } = useTranslation(); - const isPublishing = useStore($isPublishing); - const onClick = useCallback(() => { - $isInPublishFlow.set(false); - $isSelectingOutputNode.set(false); - $outputNodeId.set(null); - }, []); - return ( - - ); -}); -CancelPublishButton.displayName = 'CancelDeployButton'; - -const PublishWorkflowButton = memo(() => { - const { t } = useTranslation(); - const isPublishing = useStore($isPublishing); - const isReadyToDoValidationRun = useStore($isReadyToDoValidationRun); - const isReadyToEnqueue = useStore($isReadyToEnqueue); - const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges(); - const hasUnpublishableNodes = useAppSelector(selectHasUnpublishableNodes); - const outputNodeId = useStore($outputNodeId); - const isSelectingOutputNode = useStore($isSelectingOutputNode); - const inputs = usePublishInputs(); - const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows); - - const projectUrl = useStore($projectUrl); - - const enqueue = useEnqueueWorkflows(); - const onClick = useCallback(async () => { - $isPublishing.set(true); - const result = await withResultAsync(() => enqueue(true, true)); - if (result.isErr()) { - toast({ - id: 'TOAST_PUBLISH_FAILED', - status: 'error', - title: t('workflows.builder.publishFailed'), - description: t('workflows.builder.publishFailedDesc'), - duration: null, - }); - log.error({ error: serializeError(result.error) }, 'Failed to enqueue batch'); - } else { - toast({ - id: 'TOAST_PUBLISH_SUCCESSFUL', - status: 'success', - title: t('workflows.builder.publishSuccess'), - description: ( - , - }} - /> - ), - duration: null, - }); - assert(result.value.enqueueResult.batch.batch_id); - assert(result.value.batchConfig.validation_run_data); - $validationRunData.set({ - batchId: result.value.enqueueResult.batch.batch_id, - workflowId: result.value.batchConfig.validation_run_data.workflow_id, - }); - log.debug(parseify(result.value), 'Enqueued batch'); - } - 
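// A minimal sketch of the Result-style error handling used by the publish
// button above. The real helper lives in 'common/util/result'; the shape below
// (an ok/error discriminant and withResultAsyncSketch) is an assumption for
// illustration, not the app's actual API.
type Ok<T> = { ok: true; value: T };
type Err<E> = { ok: false; error: E };
type Result<T, E = Error> = Ok<T> | Err<E>;

const withResultAsyncSketch = async <T>(fn: () => Promise<T>): Promise<Result<T>> => {
  try {
    return { ok: true, value: await fn() };
  } catch (error) {
    return { ok: false, error: error instanceof Error ? error : new Error(String(error)) };
  }
};

// Hypothetical usage mirroring the deleted onClick handler: enqueue the
// validation run, then branch on the result instead of wrapping the toast and
// logging logic in try/catch.
const publishSketch = async (enqueueValidationRun: () => Promise<{ batchId: string }>) => {
  const result = await withResultAsyncSketch(enqueueValidationRun);
  if (!result.ok) {
    console.error('Failed to enqueue batch', result.error);
    return;
  }
  console.debug('Enqueued batch', result.value.batchId);
};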
$isPublishing.set(false); - }, [enqueue, projectUrl, t]); - - const isDisabled = useMemo(() => { - return ( - !allowPublishWorkflows || - !isReadyToEnqueue || - doesWorkflowHaveUnsavedChanges || - hasUnpublishableNodes || - !isReadyToDoValidationRun || - !(outputNodeId !== null && !isSelectingOutputNode) || - isPublishing - ); - }, [ - allowPublishWorkflows, - doesWorkflowHaveUnsavedChanges, - hasUnpublishableNodes, - isReadyToDoValidationRun, - isReadyToEnqueue, - isSelectingOutputNode, - outputNodeId, - isPublishing, - ]); - - return ( - 0} - hasUnpublishableInputs={inputs.unpublishable.length > 0} - > - - - ); -}); -PublishWorkflowButton.displayName = 'DoValidationRunButton'; - -const NodeInputFieldPreview = memo(({ nodeId, fieldName }: { nodeId: string; fieldName: string }) => { - const mouseOverFormField = useMouseOverFormField(nodeId); - const nodeUserTitle = useNodeUserTitleOrThrow(); - const nodeTemplateTitle = useNodeTemplateTitleOrThrow(); - const fieldUserTitle = useInputFieldUserTitleOrThrow(fieldName); - const fieldTemplateTitle = useInputFieldTemplateTitleOrThrow(fieldName); - const zoomToNode = useZoomToNode(nodeId); - - return ( - - {`${nodeUserTitle || nodeTemplateTitle} -> ${fieldUserTitle || fieldTemplateTitle}`} - {`${nodeId} -> ${fieldName}`} - - - ); -}); -NodeInputFieldPreview.displayName = 'NodeInputFieldPreview'; - -const NodeOutputFieldPreview = memo(({ nodeId, fieldName }: { nodeId: string; fieldName: string }) => { - const mouseOverFormField = useMouseOverFormField(nodeId); - const nodeUserTitle = useNodeUserTitleOrThrow(); - const nodeTemplateTitle = useNodeTemplateTitleOrThrow(); - const fieldTemplate = useOutputFieldTemplate(fieldName); - const zoomToNode = useZoomToNode(nodeId); - - return ( - - {`${nodeUserTitle || nodeTemplateTitle} -> ${fieldTemplate.title}`} - {`${nodeId} -> ${fieldName}`} - - - ); -}); -NodeOutputFieldPreview.displayName = 'NodeOutputFieldPreview'; - -export const StartPublishFlowButton = memo(() => { - const { t } = useTranslation(); - const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows); - const isReadyToEnqueue = useStore($isReadyToEnqueue); - const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges(); - const hasUnpublishableNodes = useAppSelector(selectHasUnpublishableNodes); - const inputs = usePublishInputs(); - - const onClick = useCallback(() => { - $isInPublishFlow.set(true); - }, []); - - const isDisabled = useMemo(() => { - return !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes; - }, [allowPublishWorkflows, doesWorkflowHaveUnsavedChanges, hasUnpublishableNodes, isReadyToEnqueue]); - - return ( - 0} - hasUnpublishableInputs={inputs.unpublishable.length > 0} - > - - - ); -}); - -StartPublishFlowButton.displayName = 'StartPublishFlowButton'; - -const PublishTooltip = memo( - ({ - isWorkflowSaved, - hasUnpublishableNodes, - isReadyToEnqueue, - hasOutputNode, - hasPublishableInputs, - hasUnpublishableInputs, - children, - }: PropsWithChildren<{ - isWorkflowSaved: boolean; - hasUnpublishableNodes: boolean; - isReadyToEnqueue: boolean; - hasOutputNode: boolean; - hasPublishableInputs: boolean; - hasUnpublishableInputs: boolean; - }>) => { - const { t } = useTranslation(); - const warnings = useMemo(() => { - const _warnings: string[] = []; - if (!hasPublishableInputs) { - _warnings.push(t('workflows.builder.warningWorkflowHasNoPublishableInputFields')); - } - if (hasUnpublishableInputs) { - 
_warnings.push(t('workflows.builder.warningWorkflowHasUnpublishableInputFields')); - } - return _warnings; - }, [hasPublishableInputs, hasUnpublishableInputs, t]); - const errors = useMemo(() => { - const _errors: string[] = []; - if (!isWorkflowSaved) { - _errors.push(t('workflows.builder.errorWorkflowHasUnsavedChanges')); - } - if (hasUnpublishableNodes) { - _errors.push(t('workflows.builder.errorWorkflowHasUnpublishableNodes')); - } - if (!isReadyToEnqueue) { - _errors.push(t('workflows.builder.errorWorkflowHasInvalidGraph')); - } - if (!hasOutputNode) { - _errors.push(t('workflows.builder.errorWorkflowHasNoOutputNode')); - } - return _errors; - }, [hasUnpublishableNodes, hasOutputNode, isReadyToEnqueue, isWorkflowSaved, t]); - - if (errors.length === 0 && warnings.length === 0) { - return children; - } - - return ( - - {errors.length > 0 && ( - <> - - {t('workflows.builder.cannotPublish')}: - - - {errors.map((problem, index) => ( - {problem} - ))} - - - )} - {warnings.length > 0 && ( - <> - - {t('workflows.builder.publishWarnings')}: - - - {warnings.map((problem, index) => ( - {problem} - ))} - - - )} - - } - > - {children} - - ); - } -); -PublishTooltip.displayName = 'PublishTooltip'; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx deleted file mode 100644 index b88a877e3dd..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import { - Button, - Flex, - Heading, - IconButton, - Modal, - ModalBody, - ModalCloseButton, - ModalContent, - ModalFooter, - ModalHeader, - ModalOverlay, - Text, -} from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $projectUrl } from 'app/store/nanostores/projectId'; -import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; -import { useClipboard } from 'common/hooks/useClipboard'; -import { toast } from 'features/toast/toast'; -import { atom } from 'nanostores'; -import { useCallback, useMemo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiCopyBold } from 'react-icons/pi'; -import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types'; - -const $workflowToShare = atom(null); -const clearWorkflowToShare = () => $workflowToShare.set(null); - -export const useShareWorkflow = () => { - const copyWorkflowLink = useCallback((workflow: WorkflowRecordListItemWithThumbnailDTO) => { - $workflowToShare.set(workflow); - }, []); - - return copyWorkflowLink; -}; - -export const ShareWorkflowModal = () => { - useAssertSingleton('ShareWorkflowModal'); - const workflowToShare = useStore($workflowToShare); - const projectUrl = useStore($projectUrl); - const { t } = useTranslation(); - const clipboard = useClipboard(); - const workflowLink = useMemo(() => { - if (!workflowToShare || !projectUrl) { - return null; - } - return `${window.location.origin}${projectUrl}/studio?selectedWorkflowId=${workflowToShare.workflow_id}`; - }, [projectUrl, workflowToShare]); - - const handleCopy = useCallback(() => { - if (!workflowLink) { - return; - } - clipboard.writeText(workflowLink, () => { - toast({ - status: 'success', - title: t('toast.linkCopied'), - }); - }); - $workflowToShare.set(null); - }, [workflowLink, clipboard, t]); - - return ( - - - - - - {t('workflows.copyShareLinkForWorkflow')} - 
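// Sketch of the share-link logic in the deleted ShareWorkflowModal: the studio
// URL is built from the current origin, the project URL, and the workflow id,
// then copied to the clipboard. The real component goes through the app's
// useClipboard hook and toast; the direct navigator.clipboard call below is a
// simplification for illustration only.
const buildWorkflowShareLink = (projectUrl: string, workflowId: string): string =>
  `${window.location.origin}${projectUrl}/studio?selectedWorkflowId=${workflowId}`;

const copyWorkflowShareLink = async (projectUrl: string, workflowId: string): Promise<void> => {
  const link = buildWorkflowShareLink(projectUrl, workflowId);
  // navigator.clipboard is only available in secure contexts; the hook wrapper
  // in the app handles fallbacks and the success toast.
  await navigator.clipboard.writeText(link);
};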
{workflowToShare?.name} - - - - - - {workflowLink} - } - onClick={handleCopy} - /> - - - - - - - - - ); -}; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx deleted file mode 100644 index 971e9eca78a..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow.tsx +++ /dev/null @@ -1,35 +0,0 @@ -import { IconButton, Tooltip } from '@invoke-ai/ui-library'; -import { useShareWorkflow } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal'; -import type { MouseEvent } from 'react'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiShareFatBold } from 'react-icons/pi'; -import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types'; - -export const ShareWorkflowButton = memo(({ workflow }: { workflow: WorkflowRecordListItemWithThumbnailDTO }) => { - const shareWorkflow = useShareWorkflow(); - const { t } = useTranslation(); - - const handleClickShare = useCallback( - (e: MouseEvent) => { - e.stopPropagation(); - shareWorkflow(workflow); - }, - [shareWorkflow, workflow] - ); - - return ( - - } - /> - - ); -}); - -ShareWorkflowButton.displayName = 'ShareWorkflowButton'; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx index 604040d63a8..5000d7f564b 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx @@ -26,7 +26,6 @@ import { workflowLibraryTagToggled, workflowLibraryViewChanged, } from 'features/nodes/store/workflowLibrarySlice'; -import { selectAllowPublishWorkflows } from 'features/system/store/configSlice'; import { NewWorkflowButton } from 'features/workflowLibrary/components/NewWorkflowButton'; import { UploadWorkflowButton } from 'features/workflowLibrary/components/UploadWorkflowButton'; import { OverlayScrollbarsComponent } from 'overlayscrollbars-react'; @@ -40,7 +39,6 @@ export const WorkflowLibrarySideNav = () => { const { t } = useTranslation(); const categoryOptions = useStore($workflowLibraryCategoriesOptions); const view = useAppSelector(selectWorkflowLibraryView); - const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows); return ( @@ -60,9 +58,6 @@ export const WorkflowLibrarySideNav = () => { )} - {allowPublishWorkflows && ( - {t('workflows.published')} - )} diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx index 61802b37fb7..34b40e98473 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx @@ -68,7 +68,6 @@ const useInfiniteQueryAry = () => { query: debouncedSearchTerm, tags: view === 'defaults' ? 
selectedTags : [], has_been_opened: getHasBeenOpened(view), - is_published: view === 'published' ? true : undefined, } satisfies Parameters[0]; }, [orderBy, direction, view, debouncedSearchTerm, selectedTags]); diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx index 93b8cc1c12f..34913434bc8 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx @@ -1,8 +1,6 @@ import type { SystemStyleObject } from '@invoke-ai/ui-library'; import { Badge, Flex, Icon, Image, Spacer, Text } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { LockedWorkflowIcon } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon'; -import { ShareWorkflowButton } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/ShareWorkflow'; import { selectWorkflowId } from 'features/nodes/store/selectors'; import { workflowModeChanged } from 'features/nodes/store/workflowLibrarySlice'; import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog'; @@ -82,7 +80,7 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi {workflow.name} - {isActive && !workflow.is_published && ( + {isActive && ( )} - {workflow.is_published && ( - - {t('workflows.builder.published')} - - )} {workflow.category === 'project' && } {workflow.category === 'default' && ( )} - {workflow.category === 'default' && !workflow.is_published && ( - - )} - {workflow.category !== 'default' && !workflow.is_published && ( + {workflow.category === 'default' && } + {workflow.category !== 'default' && ( <> )} - {workflow.category === 'project' && } - {workflow.is_published && } diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowPanel.tsx index 9b47484cc40..37bf9a2f195 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowPanel.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowPanel.tsx @@ -1,8 +1,5 @@ import { Spacer, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; import { WorkflowBuilder } from 'features/nodes/components/sidePanel/builder/WorkflowBuilder'; -import { StartPublishFlowButton } from 'features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent'; -import { selectAllowPublishWorkflows } from 'features/system/store/configSlice'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -11,7 +8,6 @@ import WorkflowJSONTab from './WorkflowJSONTab'; const WorkflowFieldsLinearViewPanel = () => { const { t } = useTranslation(); - const allowPublishWorkflows = useAppSelector(selectAllowPublishWorkflows); return ( @@ -19,7 +15,6 @@ const WorkflowFieldsLinearViewPanel = () => { {t('common.details')} JSON - {allowPublishWorkflows && } diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts 
b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts deleted file mode 100644 index 397d127b2f0..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { useStore } from '@nanostores/react'; -import { createSelector } from '@reduxjs/toolkit'; -import { skipToken } from '@reduxjs/toolkit/query'; -import { useAppSelector } from 'app/store/storeHooks'; -import { $templates } from 'features/nodes/store/nodesSlice'; -import { - selectNodes, - selectNodesSlice, - selectWorkflowFormNodeFieldFieldIdentifiersDeduped, - selectWorkflowId, -} from 'features/nodes/store/selectors'; -import type { Templates } from 'features/nodes/store/types'; -import type { FieldIdentifier } from 'features/nodes/types/field'; -import { isBoardFieldType } from 'features/nodes/types/field'; -import { isBatchNode, isGeneratorNode, isInvocationNode } from 'features/nodes/types/invocation'; -import { atom, computed } from 'nanostores'; -import { useMemo } from 'react'; -import { useGetBatchStatusQuery } from 'services/api/endpoints/queue'; -import { useGetWorkflowQuery } from 'services/api/endpoints/workflows'; -import { assert } from 'tsafe'; - -type FieldIdentiferWithLabel = FieldIdentifier & { label: string | null }; -type FieldIdentiferWithLabelAndType = FieldIdentiferWithLabel & { type: string }; - -export const $isPublishing = atom(false); -export const $isInPublishFlow = atom(false); -export const $outputNodeId = atom(null); -export const $isSelectingOutputNode = atom(false); -export const $isReadyToDoValidationRun = computed( - [$isInPublishFlow, $outputNodeId, $isSelectingOutputNode], - (isInPublishFlow, outputNodeId, isSelectingOutputNode) => { - return isInPublishFlow && outputNodeId !== null && !isSelectingOutputNode; - } -); -export const $validationRunData = atom<{ batchId: string; workflowId: string } | null>(null); - -export const useIsValidationRunInProgress = () => { - const validationRunData = useStore($validationRunData); - const { isValidationRunInProgress } = useGetBatchStatusQuery( - validationRunData?.batchId ? { batch_id: validationRunData.batchId } : skipToken, - { - selectFromResult: ({ currentData }) => { - if (!currentData) { - return { isValidationRunInProgress: false }; - } - if (currentData && currentData.in_progress > 0) { - return { isValidationRunInProgress: true }; - } - return { isValidationRunInProgress: false }; - }, - } - ); - return validationRunData !== null || isValidationRunInProgress; -}; - -export const selectFieldIdentifiersWithInvocationTypes = createSelector( - selectWorkflowFormNodeFieldFieldIdentifiersDeduped, - selectNodesSlice, - (fieldIdentifiers, nodes) => { - const result: FieldIdentiferWithLabelAndType[] = []; - for (const fieldIdentifier of fieldIdentifiers) { - const node = nodes.nodes.find((node) => node.id === fieldIdentifier.nodeId); - assert(isInvocationNode(node), `Node ${fieldIdentifier.nodeId} not found`); - result.push({ - nodeId: fieldIdentifier.nodeId, - fieldName: fieldIdentifier.fieldName, - type: node.data.type, - label: node.data.inputs[fieldIdentifier.fieldName]?.label ?? 
null, - }); - } - - return result; - } -); - -export const getPublishInputs = (fieldIdentifiers: FieldIdentiferWithLabelAndType[], templates: Templates) => { - // Certain field types are not allowed to be input fields on a published workflow - const publishable: FieldIdentiferWithLabel[] = []; - const unpublishable: FieldIdentiferWithLabel[] = []; - for (const fieldIdentifier of fieldIdentifiers) { - const fieldTemplate = templates[fieldIdentifier.type]?.inputs[fieldIdentifier.fieldName]; - if (!fieldTemplate) { - unpublishable.push(fieldIdentifier); - continue; - } - if (isBoardFieldType(fieldTemplate.type)) { - unpublishable.push(fieldIdentifier); - continue; - } - publishable.push(fieldIdentifier); - } - return { publishable, unpublishable }; -}; - -export const usePublishInputs = () => { - const templates = useStore($templates); - const fieldIdentifiersWithInvocationTypes = useAppSelector(selectFieldIdentifiersWithInvocationTypes); - const fieldIdentifiers = useMemo( - () => getPublishInputs(fieldIdentifiersWithInvocationTypes, templates), - [fieldIdentifiersWithInvocationTypes, templates] - ); - - return fieldIdentifiers; -}; - -const queryOptions = { - selectFromResult: ({ currentData }) => { - if (!currentData) { - return { isPublished: false }; - } - return { isPublished: currentData.is_published }; - }, -} satisfies Parameters[1]; - -export const useIsWorkflowPublished = () => { - const workflowId = useAppSelector(selectWorkflowId); - const { isPublished } = useGetWorkflowQuery(workflowId ?? skipToken, queryOptions); - - return isPublished; -}; - -// These nodes are not allowed to be in published workflows because they dynamically generate model identifiers -const NODE_TYPE_PUBLISH_DENYLIST = [ - 'metadata_to_model', - 'metadata_to_sdxl_model', - 'metadata_to_vae', - 'metadata_to_lora_collection', - 'metadata_to_loras', - 'metadata_to_sdlx_loras', - 'metadata_to_controlnets', - 'metadata_to_ip_adapters', - 'metadata_to_t2i_adapters', - 'google_imagen3_generate_image', - 'google_imagen3_edit_image', - 'google_imagen4_generate_image', - 'chatgpt_4o_generate_image', - 'chatgpt_4o_edit_image', - 'flux_kontext_generate_image', - 'flux_kontext_edit_image', - 'claude_expand_prompt', - 'claude_analyze_image', -]; - -export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) => { - for (const node of nodes) { - if (!isInvocationNode(node)) { - return true; - } - if (isBatchNode(node) || isGeneratorNode(node)) { - return true; - } - if (NODE_TYPE_PUBLISH_DENYLIST.includes(node.data.type)) { - return true; - } - } - return false; -}); diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useIsWorkflowEditorLocked.ts b/invokeai/frontend/web/src/features/nodes/hooks/useIsWorkflowEditorLocked.ts deleted file mode 100644 index 2738dad04b4..00000000000 --- a/invokeai/frontend/web/src/features/nodes/hooks/useIsWorkflowEditorLocked.ts +++ /dev/null @@ -1,15 +0,0 @@ -import { useStore } from '@nanostores/react'; -import { - $isInPublishFlow, - useIsValidationRunInProgress, - useIsWorkflowPublished, -} from 'features/nodes/components/sidePanel/workflow/publish'; - -export const useIsWorkflowEditorLocked = () => { - const isInPublishFlow = useStore($isInPublishFlow); - const isPublished = useIsWorkflowPublished(); - const isValidationRunInProgress = useIsValidationRunInProgress(); - - const isLocked = isInPublishFlow || isPublished || isValidationRunInProgress; - return isLocked; -}; diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts 
b/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts index d3c63329ea2..b140295801b 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts @@ -1,16 +1,9 @@ import { useIsExecutableNode } from 'features/nodes/hooks/useIsBatchNode'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { useMemo } from 'react'; import { useNodeHasImageOutput } from './useNodeHasImageOutput'; export const useWithFooter = () => { const hasImageOutput = useNodeHasImageOutput(); const isExecutableNode = useIsExecutableNode(); - const isCacheEnabled = useFeatureStatus('invocationCache'); - const withFooter = useMemo( - () => isExecutableNode && (hasImageOutput || isCacheEnabled), - [hasImageOutput, isCacheEnabled, isExecutableNode] - ); - return withFooter; + return isExecutableNode && hasImageOutput; }; diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts index 20c27d2cd6e..98b41da3059 100644 --- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts +++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts @@ -565,7 +565,7 @@ const slice = createSlice({ state.formFieldInitialValues = formFieldInitialValues; }, workflowLoaded: (state, action: PayloadAction) => { - const { nodes, edges, is_published: _is_published, ...workflowExtra } = action.payload; + const { nodes, edges, ...workflowExtra } = action.payload; const formFieldInitialValues = getFormFieldInitialValues(workflowExtra.form, nodes); diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index c51defd79c5..97c7fff795d 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -1,6 +1,3 @@ -import type { S } from 'services/api/types'; -import type { Equals } from 'tsafe'; -import { assert } from 'tsafe'; import { z } from 'zod'; // #region Field data schemas @@ -14,13 +11,6 @@ type ImageFieldCollection = z.infer; export const isImageFieldCollection = (field: unknown): field is ImageFieldCollection => zImageFieldCollection.safeParse(field).success; -const zVideoField = z.object({ - video_id: z.string().trim().min(1), -}); -type VideoField = z.infer; -export const isVideoField = (field: unknown): field is VideoField => zVideoField.safeParse(field).success; -assert>(); - export const zBoardField = z.object({ board_id: z.string().trim().min(1), }); @@ -82,31 +72,10 @@ export const zBaseModelType = z.enum([ 'sdxl-refiner', 'flux', 'cogview4', - 'imagen3', - 'imagen4', - 'chatgpt-4o', - 'flux-kontext', - 'gemini-2.5', - 'veo3', - 'runway', 'unknown', ]); export type BaseModelType = z.infer; -export const zMainModelBase = z.enum([ - 'sd-1', - 'sd-2', - 'sd-3', - 'sdxl', - 'flux', - 'cogview4', - 'imagen3', - 'imagen4', - 'chatgpt-4o', - 'flux-kontext', - 'gemini-2.5', - 'veo3', - 'runway', -]); +export const zMainModelBase = z.enum(['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4']); type MainModelBase = z.infer; export const isMainModelBase = (base: unknown): base is MainModelBase => zMainModelBase.safeParse(base).success; export const zModelType = z.enum([ @@ -126,7 +95,6 @@ export const zModelType = z.enum([ 'clip_embed', 'siglip', 'flux_redux', - 'video', 'unknown', ]); export type ModelType = z.infer; @@ -165,7 +133,6 @@ export const zModelFormat = z.enum([ 
'bnb_quantized_int8b', 'bnb_quantized_nf4b', 'gguf_quantized', - 'api', 'unknown', ]); export type ModelFormat = z.infer; diff --git a/invokeai/frontend/web/src/features/nodes/types/workflow.ts b/invokeai/frontend/web/src/features/nodes/types/workflow.ts index a241a5e8fe8..d0ce39970a5 100644 --- a/invokeai/frontend/web/src/features/nodes/types/workflow.ts +++ b/invokeai/frontend/web/src/features/nodes/types/workflow.ts @@ -381,7 +381,6 @@ export const zWorkflowV3 = z.object({ }), // Use the validated form schema! form: zValidatedBuilderForm, - is_published: z.boolean().nullish(), }); export type WorkflowV3 = z.infer; // #endregion diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts index 3792b22206d..900573065ff 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts @@ -2,7 +2,6 @@ import type { RootState } from 'app/store/store'; import { generateSeeds } from 'common/util/generateSeeds'; import { range } from 'es-toolkit/compat'; import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; -import { API_BASE_MODELS, VIDEO_BASE_MODELS } from 'features/modelManagerV2/models'; import type { BaseModelType } from 'features/nodes/types/common'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; import type { components } from 'services/api/schema'; @@ -14,11 +13,8 @@ const getExtendedPrompts = (arg: { prompts: string[]; base: BaseModelType; }): string[] => { - const { seedBehaviour, iterations, prompts, base } = arg; - // Normally, the seed behaviour implicity determines the batch size. But when we use models without seeds (like - // ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify - // this, we need to create a batch of the right size by repeating the prompts. 
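// Sketch of the simplified prompt expansion after this hunk: with the API and
// video model special cases removed, only the PER_PROMPT seed behaviour repeats
// the prompt list once per iteration. Types here are trimmed stand-ins; the
// real code takes SeedBehaviour from the dynamic prompts slice and uses
// range() from es-toolkit.
type SeedBehaviourSketch = 'PER_PROMPT' | 'PER_ITERATION';

const getExtendedPromptsSketch = (arg: {
  seedBehaviour: SeedBehaviourSketch;
  iterations: number;
  prompts: string[];
}): string[] => {
  const { seedBehaviour, iterations, prompts } = arg;
  if (seedBehaviour === 'PER_PROMPT') {
    // Repeat the full prompt list once per iteration so the batch size still
    // reflects the requested iteration count.
    return Array.from({ length: iterations }, () => prompts).flat();
  }
  // Otherwise the iteration count is applied elsewhere in the batch config.
  return prompts;
};

// Example: 2 iterations x 2 prompts -> 4 queue items under PER_PROMPT.
// getExtendedPromptsSketch({ seedBehaviour: 'PER_PROMPT', iterations: 2, prompts: ['a cat', 'a dog'] })
// -> ['a cat', 'a dog', 'a cat', 'a dog']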
- if (seedBehaviour === 'PER_PROMPT' || API_BASE_MODELS.includes(base) || VIDEO_BASE_MODELS.includes(base)) { + const { seedBehaviour, iterations, prompts } = arg; + if (seedBehaviour === 'PER_PROMPT') { return range(iterations).flatMap(() => prompts); } return prompts; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts index 42d66f0fc81..f2a6dc19885 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts @@ -674,7 +674,6 @@ describe('Graph', () => { variant: 'inpaint', format: 'diffusers', repo_variant: 'fp16', - usage_info: null, }); expect(field).toEqual({ key: 'b00ee8df-523d-40d2-9578-597283b07cb2', diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts deleted file mode 100644 index c579fc05bc1..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts +++ /dev/null @@ -1,143 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'; -import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice'; -import { selectCanvasMetadata } from 'features/controlLayers/store/selectors'; -import { isChatGPT4oAspectRatioID, isChatGPT4oReferenceImageConfig } from 'features/controlLayers/store/types'; -import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators'; -import { type ImageField, zImageField, zModelIdentifierField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - getOriginalAndScaledSizesForOtherModes, - getOriginalAndScaledSizesForTextToImage, - selectCanvasOutputFields, -} from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { selectActiveTab } from 'features/ui/store/uiSelectors'; -import { t } from 'i18next'; -import type { Equals } from 'tsafe'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise => { - const { generationMode, state, manager } = arg; - - if (generationMode !== 'txt2img' && generationMode !== 'img2img') { - throw new UnsupportedGenerationModeError(t('toast.chatGPT4oIncompatibleGenerationMode')); - } - - log.debug({ generationMode, manager: manager?.id }, 'Building ChatGPT 4o graph'); - - const model = selectMainModelConfig(state); - - const refImages = selectRefImagesSlice(state); - - assert(model, 'No model selected'); - assert(model.base === 'chatgpt-4o', 'Selected model is not a ChatGPT 4o API model'); - - const validRefImages = refImages.entities - .filter((entity) => entity.isEnabled) - .filter((entity) => isChatGPT4oReferenceImageConfig(entity.config)) - .filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0) - .toReversed(); // sends them in order they are displayed in the list - - let reference_images: ImageField[] | undefined = undefined; - - if (validRefImages.length > 0) { - reference_images = []; - 
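// Sketch of how the deleted ChatGPT 4o builder assembles reference_images at
// this step: enabled, valid entities are reversed to match display order and
// the cropped image is preferred over the original. The entity shape below is
// a simplified stand-in for the app's reference-image config types.
type ImageRefSketch = { image_name: string };
type RefImageEntitySketch = {
  isEnabled: boolean;
  config: {
    image: {
      original: { image: ImageRefSketch };
      crop?: { image: ImageRefSketch };
    } | null;
  };
};

const collectReferenceImagesSketch = (entities: RefImageEntitySketch[]): ImageRefSketch[] | undefined => {
  const valid = entities.filter((e) => e.isEnabled && e.config.image !== null).toReversed();
  if (valid.length === 0) {
    // The graph field stays undefined when there is nothing to send.
    return undefined;
  }
  return valid.flatMap((e) => {
    const image = e.config.image;
    if (!image) {
      return [];
    }
    // Prefer the user's crop when one exists, otherwise send the original.
    return [image.crop?.image ?? image.original.image];
  });
};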
for (const entity of validRefImages) { - assert(entity.config.image, 'Image is required for reference image'); - reference_images.push(zImageField.parse(entity.config.image.crop?.image ?? entity.config.image.original.image)); - } - } - - if (generationMode === 'txt2img') { - const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state); - assert(isChatGPT4oAspectRatioID(aspectRatio.id), 'ChatGPT 4o does not support this aspect ratio'); - - const g = new Graph(getPrefixedId('chatgpt_4o_txt2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - const gptImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'chatgpt_4o_generate_image', - model: zModelIdentifierField.parse(model), - aspect_ratio: aspectRatio.id, - reference_images, - ...selectCanvasOutputFields(state), - }); - - g.addEdge( - positivePrompt, - 'value', - gptImage, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - width: originalSize.width, - height: originalSize.height, - }); - return { - g, - positivePrompt, - }; - } else if (generationMode === 'img2img') { - const { aspectRatio, rect } = getOriginalAndScaledSizesForOtherModes(state); - assert(isChatGPT4oAspectRatioID(aspectRatio.id), 'ChatGPT 4o does not support this aspect ratio'); - - assert(manager !== null); - const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer'); - const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, rect, { - is_intermediate: true, - silent: true, - }); - const g = new Graph(getPrefixedId('chatgpt_4o_img2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - const gptImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'chatgpt_4o_edit_image', - model: zModelIdentifierField.parse(model), - aspect_ratio: aspectRatio.id, - base_image: { image_name }, - reference_images, - ...selectCanvasOutputFields(state), - }); - - g.addEdge( - positivePrompt, - 'value', - gptImage, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - width: rect.width, - height: rect.height, - }); - - if (selectActiveTab(state) === 'canvas') { - g.upsertMetadata(selectCanvasMetadata(state)); - } - - g.setMetadataReceivingNode(gptImage); - - return { - g, - positivePrompt, - }; - } - - assert>(false, 'Invalid generation mode for ChatGPT '); -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts deleted file mode 100644 index 164664e63bb..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts +++ /dev/null @@ -1,124 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'; -import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice'; -import { isFluxKontextAspectRatioID, 
isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types'; -import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators'; -import { zImageField, zModelIdentifierField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - getOriginalAndScaledSizesForTextToImage, - selectCanvasOutputFields, -} from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - - const model = selectMainModelConfig(state); - assert(model, 'No model selected'); - assert(model.base === 'flux-kontext', 'Selected model is not a FLUX Kontext API model'); - - if (generationMode !== 'txt2img') { - throw new UnsupportedGenerationModeError(t('toast.fluxKontextIncompatibleGenerationMode')); - } - - log.debug({ generationMode, manager: manager?.id }, 'Building FLUX Kontext graph'); - - const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state); - assert(isFluxKontextAspectRatioID(aspectRatio.id), 'FLUX Kontext does not support this aspect ratio'); - - const refImages = selectRefImagesSlice(state); - - const validRefImages = refImages.entities - .filter((entity) => entity.isEnabled) - .filter((entity) => isFluxKontextReferenceImageConfig(entity.config)) - .filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0); - - const g = new Graph(getPrefixedId('flux_kontext_txt2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - - let fluxKontextImage; - - if (validRefImages.length > 0) { - if (validRefImages.length === 1) { - // Single reference image - use it directly - const firstImage = validRefImages[0]?.config.image; - assert(firstImage, 'First image should exist when validRefImages.length > 0'); - - fluxKontextImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'flux_kontext_edit_image', - model: zModelIdentifierField.parse(model), - aspect_ratio: aspectRatio.id, - prompt_upsampling: true, - input_image: zImageField.parse(firstImage.crop?.image ?? firstImage.original.image), - ...selectCanvasOutputFields(state), - }); - } else { - // Multiple reference images - use concatenation - const kontextConcatenator = g.addNode({ - id: getPrefixedId('flux_kontext_image_prep'), - type: 'flux_kontext_image_prep', - images: validRefImages.map(({ config }) => - zImageField.parse(config.image?.crop?.image ?? 
config.image?.original.image) - ), - }); - - fluxKontextImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'flux_kontext_edit_image', - model: zModelIdentifierField.parse(model), - aspect_ratio: aspectRatio.id, - prompt_upsampling: true, - - ...selectCanvasOutputFields(state), - }); - // @ts-expect-error: These nodes are not available in the OSS application - g.addEdge(kontextConcatenator, 'image', fluxKontextImage, 'input_image'); - } - } else { - fluxKontextImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'flux_kontext_generate_image', - model: zModelIdentifierField.parse(model), - aspect_ratio: aspectRatio.id, - prompt_upsampling: true, - ...selectCanvasOutputFields(state), - }); - } - - g.addEdge( - positivePrompt, - 'value', - fluxKontextImage, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - width: originalSize.width, - height: originalSize.height, - }); - - if (validRefImages.length > 0) { - g.upsertMetadata({ ref_images: [validRefImages] }, 'merge'); - } - - g.setMetadataReceivingNode(fluxKontextImage); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildGemini2_5Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildGemini2_5Graph.ts deleted file mode 100644 index d31e8bcf7bf..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildGemini2_5Graph.ts +++ /dev/null @@ -1,81 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'; -import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice'; -import { isGemini2_5ReferenceImageConfig } from 'features/controlLayers/store/types'; -import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators'; -import { type ImageField, zImageField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { selectCanvasOutputFields } from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildGemini2_5Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - - if (generationMode !== 'txt2img') { - throw new UnsupportedGenerationModeError( - t('toast.imagenIncompatibleGenerationMode', { model: 'Gemini 2.5 Flash Preview' }) - ); - } - - log.debug({ generationMode, manager: manager?.id }, 'Building Gemini 2.5 graph'); - - const model = selectMainModelConfig(state); - - const refImages = selectRefImagesSlice(state); - - assert(model, 'No model selected'); - assert(model.base === 'gemini-2.5', 'Selected model is not a Gemini 2.5 API model'); - - const validRefImages = refImages.entities - .filter((entity) => entity.isEnabled) - .filter((entity) => isGemini2_5ReferenceImageConfig(entity.config)) - .filter((entity) => getGlobalReferenceImageWarnings(entity, 
model).length === 0) - .toReversed(); // sends them in order they are displayed in the list - - let reference_images: ImageField[] | undefined = undefined; - - if (validRefImages.length > 0) { - reference_images = []; - for (const entity of validRefImages) { - assert(entity.config.image, 'Image is required for reference image'); - reference_images.push(zImageField.parse(entity.config.image.crop?.image ?? entity.config.image.original.image)); - } - } - - const g = new Graph(getPrefixedId('gemini_2_5_txt2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - const geminiImage = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'google_gemini_generate_image', - reference_images, - ...selectCanvasOutputFields(state), - }); - - g.addEdge( - positivePrompt, - 'value', - geminiImage, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - }); - - g.setMetadataReceivingNode(geminiImage); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts deleted file mode 100644 index 53ba5629e43..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'; -import { isImagenAspectRatioID } from 'features/controlLayers/store/types'; -import { zModelIdentifierField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - getOriginalAndScaledSizesForTextToImage, - selectCanvasOutputFields, - selectPresetModifiedPrompts, -} from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - log.debug({ generationMode, manager: manager?.id }, 'Building Imagen3 graph'); - - const model = selectMainModelConfig(state); - - assert(model, 'No model selected'); - assert(model.base === 'imagen3', 'Selected model is not an Imagen3 API model'); - - if (generationMode !== 'txt2img') { - throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen3' })); - } - - const prompts = selectPresetModifiedPrompts(state); - assert(prompts.positive.length > 0, 'Imagen3 requires positive prompt to have at least one character'); - - const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state); - assert(isImagenAspectRatioID(aspectRatio.id), 'Imagen3 does not support this aspect ratio'); - - const g = new Graph(getPrefixedId('imagen3_txt2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - const imagen3 = g.addNode({ - // @ts-expect-error: These 
nodes are not available in the OSS application - type: 'google_imagen3_generate_image', - model: zModelIdentifierField.parse(model), - negative_prompt: prompts.negative, - aspect_ratio: aspectRatio.id, - // When enhance_prompt is true, Imagen3 will return a new image every time, ignoring the seed. - enhance_prompt: true, - ...selectCanvasOutputFields(state), - }); - - g.addEdge( - positivePrompt, - 'value', - imagen3, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - - g.upsertMetadata({ - negative_prompt: prompts.negative, - width: originalSize.width, - height: originalSize.height, - model: Graph.getModelMetadataField(model), - }); - - g.setMetadataReceivingNode(imagen3); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts deleted file mode 100644 index 56ce02033c3..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice'; -import { isImagenAspectRatioID } from 'features/controlLayers/store/types'; -import { zModelIdentifierField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - getOriginalAndScaledSizesForTextToImage, - selectCanvasOutputFields, - selectPresetModifiedPrompts, -} from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - log.debug({ generationMode, manager: manager?.id }, 'Building Imagen4 graph'); - - const model = selectMainModelConfig(state); - assert(model, 'No model selected'); - assert(model.base === 'imagen4', 'Selected model is not a Imagen4 API model'); - - if (generationMode !== 'txt2img') { - throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen4' })); - } - - const prompts = selectPresetModifiedPrompts(state); - assert(prompts.positive.length > 0, 'Imagen4 requires positive prompt to have at least one character'); - - const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state); - assert(isImagenAspectRatioID(aspectRatio.id), 'Imagen4 does not support this aspect ratio'); - - const g = new Graph(getPrefixedId('imagen4_txt2img_graph')); - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - }); - const imagen4 = g.addNode({ - // @ts-expect-error: These nodes are not available in the OSS application - type: 'google_imagen4_generate_image', - model: zModelIdentifierField.parse(model), - negative_prompt: prompts.negative, - aspect_ratio: aspectRatio.id, - // When enhance_prompt is true, Imagen4 will return a new image every time, ignoring the seed. 
- enhance_prompt: true, - ...selectCanvasOutputFields(state), - }); - - g.addEdge( - positivePrompt, - 'value', - imagen4, - // @ts-expect-error: These nodes are not available in the OSS application - 'positive_prompt' - ); - g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); - - g.upsertMetadata({ - negative_prompt: prompts.negative, - width: originalSize.width, - height: originalSize.height, - model: Graph.getModelMetadataField(model), - }); - - g.setMetadataReceivingNode(imagen4); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildRunwayVideoGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildRunwayVideoGraph.ts deleted file mode 100644 index 29af7064005..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildRunwayVideoGraph.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; -import { zImageField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { - selectStartingFrameImage, - selectVideoModelConfig, - selectVideoSlice, -} from 'features/parameters/store/videoSlice'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildRunwayVideoGraph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - - log.debug({ generationMode, manager: manager?.id }, 'Building Runway video graph'); - - const supportedModes = ['txt2img']; - if (!supportedModes.includes(generationMode)) { - throw new UnsupportedGenerationModeError(t('toast.runwayIncompatibleGenerationMode')); - } - - const model = selectVideoModelConfig(state); - assert(model, 'No model selected'); - assert(model.base === 'runway', 'Selected model is not a Runway model'); - - const params = selectParamsSlice(state); - const videoParams = selectVideoSlice(state); - const prompts = selectPresetModifiedPrompts(state); - assert(prompts.positive.length > 0, 'Runway video requires positive prompt to have at least one character'); - - const startingFrameImage = selectStartingFrameImage(state); - - assert(startingFrameImage, 'Video starting frame is required for runway video generation'); - const firstFrameImageField = zImageField.parse(startingFrameImage.crop?.image ?? startingFrameImage.original); - - const { seed, shouldRandomizeSeed } = params; - const { videoDuration, videoAspectRatio, videoResolution } = videoParams; - - const finalSeed = shouldRandomizeSeed ? 
undefined : seed; - - const g = new Graph(getPrefixedId('runway_video_graph')); - - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - value: prompts.positive, - }); - - // Create the runway video generation node - const runwayVideoNode = g.addNode({ - id: getPrefixedId('runway_generate_video'), - // @ts-expect-error: This node is not available in the OSS application - type: 'runway_generate_video', - duration: parseInt(videoDuration || '0', 10), - aspect_ratio: videoAspectRatio, - seed: finalSeed, - first_frame_image: firstFrameImageField, - }); - - // @ts-expect-error: This node is not available in the OSS application - g.addEdge(positivePrompt, 'value', runwayVideoNode, 'prompt'); - - // Set up metadata - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - positive_prompt: prompts.positive, - duration: videoDuration, - aspect_ratio: videoAspectRatio, - resolution: videoResolution, - seed: finalSeed, - first_frame_image: startingFrameImage, - }); - - g.setMetadataReceivingNode(runwayVideoNode); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildVeo3VideoGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildVeo3VideoGraph.ts deleted file mode 100644 index b33c9cdde5c..00000000000 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildVeo3VideoGraph.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; -import { zImageField } from 'features/nodes/types/common'; -import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils'; -import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; -import { - selectStartingFrameImage, - selectVideoModelConfig, - selectVideoSlice, -} from 'features/parameters/store/videoSlice'; -import { t } from 'i18next'; -import { assert } from 'tsafe'; - -const log = logger('system'); - -export const buildVeo3VideoGraph = (arg: GraphBuilderArg): GraphBuilderReturn => { - const { generationMode, state, manager } = arg; - - log.debug({ generationMode, manager: manager?.id }, 'Building Veo3 video graph'); - - const supportedModes = ['txt2img']; - if (!supportedModes.includes(generationMode)) { - throw new UnsupportedGenerationModeError(t('toast.veo3IncompatibleGenerationMode')); - } - - const model = selectVideoModelConfig(state); - assert(model, 'No model selected'); - assert(model.base === 'veo3', 'Selected model is not a Veo3 model'); - - const params = selectParamsSlice(state); - const videoParams = selectVideoSlice(state); - const prompts = selectPresetModifiedPrompts(state); - assert(prompts.positive.length > 0, 'Veo3 video requires positive prompt to have at least one character'); - - const { seed, shouldRandomizeSeed } = params; - const { videoResolution, videoDuration, videoAspectRatio } = videoParams; - const finalSeed = shouldRandomizeSeed ? 
undefined : seed; - - const g = new Graph(getPrefixedId('veo3_video_graph')); - - const positivePrompt = g.addNode({ - id: getPrefixedId('positive_prompt'), - type: 'string', - value: prompts.positive, - }); - - // Create the veo3 video generation node - const veo3VideoNode = g.addNode({ - id: getPrefixedId('google_veo_3_generate_video'), - // @ts-expect-error: This node is not available in the OSS application - type: 'google_veo_3_generate_video', - model: model, - aspect_ratio: '16:9', - resolution: videoResolution, - seed: finalSeed, - }); - - const startingFrameImage = selectStartingFrameImage(state); - - if (startingFrameImage) { - const startingFrameImageField = zImageField.parse(startingFrameImage.crop?.image ?? startingFrameImage.original); - // @ts-expect-error: This node is not available in the OSS application - veo3VideoNode.starting_image = startingFrameImageField; - } - - // @ts-expect-error: This node is not available in the OSS application - g.addEdge(positivePrompt, 'value', veo3VideoNode, 'prompt'); - - // Set up metadata - g.upsertMetadata({ - model: Graph.getModelMetadataField(model), - positive_prompt: prompts.positive, - duration: videoDuration, - aspect_ratio: videoAspectRatio, - resolution: videoResolution, - seed: finalSeed, - first_frame_image: startingFrameImage, - }); - - g.setMetadataReceivingNode(veo3VideoNode); - - return { - g, - positivePrompt, - }; -}; diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamCFGRescaleMultiplier.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamCFGRescaleMultiplier.tsx index c601e3b9b60..56024b60a32 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamCFGRescaleMultiplier.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamCFGRescaleMultiplier.tsx @@ -2,13 +2,21 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectCFGRescaleMultiplier, setCfgRescaleMultiplier } from 'features/controlLayers/store/paramsSlice'; -import { selectCFGRescaleMultiplierConfig } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 0, + sliderMin: 0, + sliderMax: 0.99, + numberInputMin: 0, + numberInputMax: 0.99, + fineStep: 0.05, + coarseStep: 0.1, +}; + const ParamCFGRescaleMultiplier = () => { const cfgRescaleMultiplier = useAppSelector(selectCFGRescaleMultiplier); - const config = useAppSelector(selectCFGRescaleMultiplierConfig); const dispatch = useAppDispatch(); const { t } = useTranslation(); @@ -22,21 +30,21 @@ const ParamCFGRescaleMultiplier = () => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx index 1d5eacc669c..10cc8b0d984 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamClipSkip.tsx @@ -3,13 +3,19 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectCLIPSkip, selectModel, setClipSkip } from 
'features/controlLayers/store/paramsSlice'; import { CLIP_SKIP_MAP } from 'features/parameters/types/constants'; -import { selectCLIPSkipConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 0, + sliderMin: 0, + numberInputMin: 0, + fineStep: 1, + coarseStep: 1, +}; + const ParamClipSkip = () => { const clipSkip = useAppSelector(selectCLIPSkip); - const config = useAppSelector(selectCLIPSkipConfig); const model = useAppSelector(selectModel); const dispatch = useAppDispatch(); @@ -47,21 +53,21 @@ const ParamClipSkip = () => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx index 40145839085..7fa250caad4 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx @@ -3,20 +3,9 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { bboxAspectRatioIdChanged } from 'features/controlLayers/store/canvasSlice'; import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice'; -import { - selectIsChatGPT4o, - selectIsFluxKontext, - selectIsImagen3, - selectIsImagen4, -} from 'features/controlLayers/store/paramsSlice'; +import { selectIsFluxKontext } from 'features/controlLayers/store/paramsSlice'; import { selectAspectRatioID } from 'features/controlLayers/store/selectors'; -import { - isAspectRatioID, - zAspectRatioID, - zChatGPT4oAspectRatioID, - zFluxKontextAspectRatioID, - zImagen3AspectRatioID, -} from 'features/controlLayers/store/types'; +import { isAspectRatioID, zAspectRatioID, zFluxKontextAspectRatioID } from 'features/controlLayers/store/types'; import type { ChangeEventHandler } from 'react'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -27,24 +16,14 @@ export const BboxAspectRatioSelect = memo(() => { const dispatch = useAppDispatch(); const id = useAppSelector(selectAspectRatioID); const isStaging = useCanvasIsStaging(); - const isImagen3 = useAppSelector(selectIsImagen3); - const isChatGPT4o = useAppSelector(selectIsChatGPT4o); - const isImagen4 = useAppSelector(selectIsImagen4); const isFluxKontext = useAppSelector(selectIsFluxKontext); const options = useMemo(() => { - // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes - if (isImagen3 || isImagen4) { - return zImagen3AspectRatioID.options; - } - if (isChatGPT4o) { - return zChatGPT4oAspectRatioID.options; - } if (isFluxKontext) { return zFluxKontextAspectRatioID.options; } // All other models return zAspectRatioID.options; - }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext]); + }, [isFluxKontext]); const onChange = useCallback>( (e) => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx index dd8e319447d..cfaee3d0c95 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxHeight.tsx @@ -4,16 +4,24 @@ import { InformationalPopover } from 
'common/components/InformationalPopover/Inf import { bboxHeightChanged } from 'features/controlLayers/store/canvasSlice'; import { selectGridSize, selectHeight, selectOptimalDimension } from 'features/controlLayers/store/selectors'; import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked'; -import { selectHeightConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; + export const BboxHeight = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const optimalDimension = useAppSelector(selectOptimalDimension); const height = useAppSelector(selectHeight); - const config = useAppSelector(selectHeightConfig); const isBboxSizeLocked = useIsBboxSizeLocked(); const gridSize = useAppSelector(selectGridSize); @@ -24,10 +32,7 @@ export const BboxHeight = memo(() => { [dispatch] ); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMin, config.sliderMax, optimalDimension] - ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); return ( @@ -38,9 +43,9 @@ export const BboxHeight = memo(() => { value={height} defaultValue={optimalDimension} onChange={onChange} - min={config.sliderMin} - max={config.sliderMax} - step={config.coarseStep} + min={CONSTRAINTS.sliderMin} + max={CONSTRAINTS.sliderMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} marks={marks} /> @@ -48,9 +53,9 @@ export const BboxHeight = memo(() => { value={height} defaultValue={optimalDimension} onChange={onChange} - min={config.numberInputMin} - max={config.numberInputMax} - step={config.coarseStep} + min={CONSTRAINTS.numberInputMin} + max={CONSTRAINTS.numberInputMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} /> diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx index da7338e72e3..db9b53f6ece 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxScaledHeight.tsx @@ -4,16 +4,21 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { bboxScaledHeightChanged } from 'features/controlLayers/store/canvasSlice'; import { selectCanvasSlice, selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors'; import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked'; -import { selectConfigSlice } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; const selectIsManual = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaleMethod === 'manual'); const selectScaledHeight = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaledSize.height); -const selectScaledBoundingBoxHeightConfig = createSelector( - selectConfigSlice, - (config) => config.sd.scaledBoundingBoxHeight -); + +const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; const BboxScaledHeight = () => { const { t } = 
useTranslation(); @@ -22,7 +27,6 @@ const BboxScaledHeight = () => { const optimalDimension = useAppSelector(selectOptimalDimension); const isManual = useAppSelector(selectIsManual); const scaledHeight = useAppSelector(selectScaledHeight); - const config = useAppSelector(selectScaledBoundingBoxHeightConfig); const gridSize = useAppSelector(selectGridSize); const onChange = useCallback( @@ -36,9 +40,9 @@ const BboxScaledHeight = () => { {t('parameters.scaledHeight')} { defaultValue={optimalDimension} /> canvas.bbox.scaleMethod === 'manual'); const selectScaledWidth = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.scaledSize.width); -const selectScaledBoundingBoxWidthConfig = createSelector( - selectConfigSlice, - (config) => config.sd.scaledBoundingBoxWidth -); + +const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; const BboxScaledWidth = () => { const { t } = useTranslation(); @@ -22,7 +27,6 @@ const BboxScaledWidth = () => { const optimalDimension = useAppSelector(selectOptimalDimension); const isManual = useAppSelector(selectIsManual); const scaledWidth = useAppSelector(selectScaledWidth); - const config = useAppSelector(selectScaledBoundingBoxWidthConfig); const gridSize = useAppSelector(selectGridSize); const onChange = useCallback( @@ -36,9 +40,9 @@ const BboxScaledWidth = () => { {t('parameters.scaledWidth')} { marks /> { - const supportsAspectRatio = useAppSelector(selectModelSupportsAspectRatio); - const supportsPixelDimensions = useAppSelector(selectModelSupportsPixelDimensions); - - if (!supportsAspectRatio) { - return null; - } - return ( @@ -30,20 +17,11 @@ export const BboxSettings = memo(() => { - {supportsPixelDimensions && ( - <> - - - - )} + + - {supportsPixelDimensions && ( - <> - - - - )} - {!supportsPixelDimensions && } + + diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx index 8ad457da7ac..740df6cf218 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxWidth.tsx @@ -4,16 +4,24 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf import { bboxWidthChanged } from 'features/controlLayers/store/canvasSlice'; import { selectGridSize, selectOptimalDimension, selectWidth } from 'features/controlLayers/store/selectors'; import { useIsBboxSizeLocked } from 'features/parameters/components/Bbox/use-is-bbox-size-locked'; -import { selectWidthConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; + export const BboxWidth = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const width = useAppSelector(selectWidth); const optimalDimension = useAppSelector(selectOptimalDimension); - const config = useAppSelector(selectWidthConfig); const isBboxSizeLocked = useIsBboxSizeLocked(); const gridSize = useAppSelector(selectGridSize); @@ -24,10 +32,7 @@ export const BboxWidth = memo(() => { [dispatch] ); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMax, config.sliderMin, optimalDimension] 
- ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); return ( @@ -38,9 +43,9 @@ export const BboxWidth = memo(() => { value={width} onChange={onChange} defaultValue={optimalDimension} - min={config.sliderMin} - max={config.sliderMax} - step={config.coarseStep} + min={CONSTRAINTS.sliderMin} + max={CONSTRAINTS.sliderMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} marks={marks} /> @@ -48,9 +53,9 @@ export const BboxWidth = memo(() => { value={width} onChange={onChange} defaultValue={optimalDimension} - min={config.numberInputMin} - max={config.numberInputMax} - step={config.coarseStep} + min={CONSTRAINTS.numberInputMin} + max={CONSTRAINTS.numberInputMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} /> diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts b/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts index 57b55d8a21e..eaf13811088 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/use-is-bbox-size-locked.ts @@ -1,9 +1,6 @@ -import { useAppSelector } from 'app/store/storeHooks'; import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice'; -import { selectIsApiBaseModel } from 'features/controlLayers/store/paramsSlice'; export const useIsBboxSizeLocked = () => { const isStaging = useCanvasIsStaging(); - const isApiModel = useAppSelector(selectIsApiBaseModel); - return isApiModel || isStaging; + return isStaging; }; diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx index 007b2b04887..ed830413967 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/CoherencePass/ParamCanvasCoherenceEdgeSize.tsx @@ -2,14 +2,22 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectCanvasCoherenceEdgeSize, setCanvasCoherenceEdgeSize } from 'features/controlLayers/store/paramsSlice'; -import { selectCanvasCoherenceEdgeSizeConfig } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 16, + sliderMin: 0, + sliderMax: 128, + numberInputMin: 0, + numberInputMax: 1024, + fineStep: 8, + coarseStep: 16, +}; + const ParamCanvasCoherenceEdgeSize = () => { const dispatch = useAppDispatch(); const canvasCoherenceEdgeSize = useAppSelector(selectCanvasCoherenceEdgeSize); - const config = useAppSelector(selectCanvasCoherenceEdgeSizeConfig); const { t } = useTranslation(); @@ -26,22 +34,22 @@ const ParamCanvasCoherenceEdgeSize = () => { {t('parameters.coherenceEdgeSize')} diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx index 
a165388fdcd..082e9ee8097 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/Compositing/MaskAdjustment/ParamMaskBlur.tsx @@ -2,15 +2,23 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectMaskBlur, setMaskBlur } from 'features/controlLayers/store/paramsSlice'; -import { selectMaskBlurConfig } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 16, + sliderMin: 0, + sliderMax: 128, + numberInputMin: 0, + numberInputMax: 512, + fineStep: 1, + coarseStep: 1, +}; + const ParamMaskBlur = () => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const maskBlur = useAppSelector(selectMaskBlur); - const config = useAppSelector(selectMaskBlurConfig); const handleChange = useCallback( (v: number) => { @@ -27,21 +35,21 @@ const ParamMaskBlur = () => { ); diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx index f2998b9f84b..5b50bdbacd3 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillPatchmatchDownscaleSize.tsx @@ -6,15 +6,23 @@ import { selectInfillPatchmatchDownscaleSize, setInfillPatchmatchDownscaleSize, } from 'features/controlLayers/store/paramsSlice'; -import { selectInfillPatchmatchDownscaleSizeConfig } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 1, + sliderMin: 1, + sliderMax: 10, + numberInputMin: 1, + numberInputMax: 10, + fineStep: 1, + coarseStep: 1, +}; + const ParamInfillPatchmatchDownscaleSize = () => { const dispatch = useAppDispatch(); const infillMethod = useAppSelector(selectInfillMethod); const infillPatchmatchDownscaleSize = useAppSelector(selectInfillPatchmatchDownscaleSize); - const config = useAppSelector(selectInfillPatchmatchDownscaleSizeConfig); const { t } = useTranslation(); @@ -34,20 +42,20 @@ const ParamInfillPatchmatchDownscaleSize = () => { value={infillPatchmatchDownscaleSize} onChange={handleChange} marks - defaultValue={config.initial} - min={config.sliderMin} - max={config.sliderMax} - step={config.coarseStep} - fineStep={config.fineStep} + defaultValue={CONSTRAINTS.initial} + min={CONSTRAINTS.sliderMin} + max={CONSTRAINTS.sliderMax} + step={CONSTRAINTS.coarseStep} + fineStep={CONSTRAINTS.fineStep} /> ); diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx index 3df4b3e9282..dde61cc6271 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillTilesize.tsx @@ 
-1,14 +1,22 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { selectInfillMethod, selectInfillTileSize, setInfillTileSize } from 'features/controlLayers/store/paramsSlice'; -import { selectInfillTileSizeConfig } from 'features/system/store/configSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +const CONSTRAINTS = { + initial: 32, + sliderMin: 16, + sliderMax: 64, + numberInputMin: 16, + numberInputMax: 256, + fineStep: 1, + coarseStep: 1, +}; + const ParamInfillTileSize = () => { const dispatch = useAppDispatch(); const infillTileSize = useAppSelector(selectInfillTileSize); - const config = useAppSelector(selectInfillTileSizeConfig); const infillMethod = useAppSelector(selectInfillMethod); const { t } = useTranslation(); @@ -26,21 +34,21 @@ const ParamInfillTileSize = () => { ); diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx index 145ca6f2da7..c0e7a3c2a72 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamCFGScale.tsx @@ -2,19 +2,25 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectCFGScale, setCfgScale } from 'features/controlLayers/store/paramsSlice'; -import { selectCFGScaleConfig } from 'features/system/store/configSlice'; -import { memo, useCallback, useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 7, + sliderMin: 1, + sliderMax: 20, + numberInputMin: 1, + numberInputMax: 200, + fineStep: 0.1, + coarseStep: 0.5, +}; + +export const MARKS = [CONSTRAINTS.sliderMin, Math.floor(CONSTRAINTS.sliderMax / 2), CONSTRAINTS.sliderMax]; + const ParamCFGScale = () => { const cfgScale = useAppSelector(selectCFGScale); - const config = useAppSelector(selectCFGScaleConfig); const dispatch = useAppDispatch(); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback((v: number) => dispatch(setCfgScale(v)), [dispatch]); return ( @@ -24,21 +30,21 @@ const ParamCFGScale = () => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx index 86740e0846d..290f170b6ce 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamGuidance.tsx @@ -2,23 +2,29 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectGuidance, setGuidance } from 'features/controlLayers/store/paramsSlice'; -import { selectGuidanceConfig } from 'features/system/store/configSlice'; -import { memo, useCallback, 
useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 4, + sliderMin: 2, + sliderMax: 6, + numberInputMin: 1, + numberInputMax: 20, + fineStep: 0.1, + coarseStep: 0.5, +}; + +export const MARKS = [ + CONSTRAINTS.sliderMin, + Math.floor(CONSTRAINTS.sliderMax - (CONSTRAINTS.sliderMax - CONSTRAINTS.sliderMin) / 2), + CONSTRAINTS.sliderMax, +]; + const ParamGuidance = () => { const guidance = useAppSelector(selectGuidance); - const config = useAppSelector(selectGuidanceConfig); const dispatch = useAppDispatch(); const { t } = useTranslation(); - const marks = useMemo( - () => [ - config.sliderMin, - Math.floor(config.sliderMax - (config.sliderMax - config.sliderMin) / 2), - config.sliderMax, - ], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback((v: number) => dispatch(setGuidance(v)), [dispatch]); return ( @@ -28,21 +34,21 @@ const ParamGuidance = () => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx index 8001d81c9f2..73d22f0eac7 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamPositivePrompt.tsx @@ -1,5 +1,4 @@ import { Box, Flex, Textarea } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks'; import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize'; import { @@ -8,17 +7,12 @@ import { selectPositivePrompt, selectPositivePromptHistory, } from 'features/controlLayers/store/paramsSlice'; -import { promptGenerationFromImageDndTarget } from 'features/dnd/dnd'; -import { DndDropTarget } from 'features/dnd/DndDropTarget'; import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton'; import { NegativePromptToggleButton } from 'features/parameters/components/Core/NegativePromptToggleButton'; import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel'; import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper'; import { ViewModePrompt } from 'features/parameters/components/Prompts/ViewModePrompt'; import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton'; -import { PromptExpansionMenu } from 'features/prompt/PromptExpansion/PromptExpansionMenu'; -import { PromptExpansionOverlay } from 'features/prompt/PromptExpansion/PromptExpansionOverlay'; -import { promptExpansionApi } from 'features/prompt/PromptExpansion/state'; import { PromptPopover } from 'features/prompt/PromptPopover'; import { usePrompt } from 'features/prompt/usePrompt'; import { @@ -26,9 +20,7 @@ import { selectStylePresetViewMode, } from 'features/stylePresets/store/stylePresetSlice'; import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData'; -import { selectAllowPromptExpansion } from 'features/system/store/configSlice'; -import { selectActiveTab } from 'features/ui/store/uiSelectors'; -import React, { memo, useCallback, useMemo, useRef } from 'react'; +import React, { memo, useCallback, useRef } from 'react'; import type { HotkeyCallback } from 'react-hotkeys-hook'; import { useTranslation } from 'react-i18next'; import { useClickAway } 
from 'react-use'; @@ -120,9 +112,6 @@ export const ParamPositivePrompt = memo(() => { const viewMode = useAppSelector(selectStylePresetViewMode); const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId); const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt); - const { isPending: isPromptExpansionPending } = useStore(promptExpansionApi.$state); - const isPromptExpansionEnabled = useAppSelector(selectAllowPromptExpansion); - const activeTab = useAppSelector(selectActiveTab); const promptHistoryApi = usePromptHistory(); @@ -153,7 +142,6 @@ export const ParamPositivePrompt = memo(() => { prompt, textareaRef: textareaRef, onChange: handleChange, - isDisabled: isPromptExpansionPending, }); // When the user clicks away from the textarea, reset the prompt history state. @@ -204,8 +192,6 @@ export const ParamPositivePrompt = memo(() => { dependencies: [promptHistoryApi.next, isPromptFocused], }); - const dndTargetData = useMemo(() => promptGenerationFromImageDndTarget.getData(), []); - return ( @@ -224,17 +210,15 @@ export const ParamPositivePrompt = memo(() => { paddingTop={0} paddingBottom={3} resize="vertical" - minH={isPromptExpansionEnabled ? 44 : 32} - isDisabled={isPromptExpansionPending} + minH={32} /> - {activeTab !== 'video' && modelSupportsNegativePrompt && } + {modelSupportsNegativePrompt && } - {isPromptExpansionEnabled && } {viewMode && ( @@ -244,15 +228,6 @@ export const ParamPositivePrompt = memo(() => { label={`${t('parameters.positivePromptPlaceholder')} (${t('stylePresets.preview')})`} /> )} - {isPromptExpansionEnabled && ( - - )} - diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx index f7ef4660b58..31efe5d0a6f 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamSteps.tsx @@ -2,19 +2,25 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { selectSteps, setSteps } from 'features/controlLayers/store/paramsSlice'; -import { selectStepsConfig } from 'features/system/store/configSlice'; -import { memo, useCallback, useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 30, + sliderMin: 1, + sliderMax: 100, + numberInputMin: 1, + numberInputMax: 500, + fineStep: 1, + coarseStep: 1, +}; + +export const MARKS = [CONSTRAINTS.sliderMin, Math.floor(CONSTRAINTS.sliderMax / 2), CONSTRAINTS.sliderMax]; + const ParamSteps = () => { const steps = useAppSelector(selectSteps); - const config = useAppSelector(selectStepsConfig); const dispatch = useAppDispatch(); const { t } = useTranslation(); - const marks = useMemo( - () => [config.sliderMin, Math.floor(config.sliderMax / 2), config.sliderMax], - [config.sliderMax, config.sliderMin] - ); const onChange = useCallback( (v: number) => { dispatch(setSteps(v)); @@ -29,21 +35,21 @@ const ParamSteps = () => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx index 9bcac785b36..05653c6295e 100644 --- 
a/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/Dimensions.tsx @@ -1,11 +1,5 @@ import type { FormLabelProps } from '@invoke-ai/ui-library'; import { Flex, FormControlGroup } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { - selectModelSupportsAspectRatio, - selectModelSupportsPixelDimensions, -} from 'features/controlLayers/store/paramsSlice'; -import { PixelDimensionsUnsupportedAlert } from 'features/parameters/components/PixelDimensionsUnsupportedAlert'; import { memo } from 'react'; import { DimensionsAspectRatioSelect } from './DimensionsAspectRatioSelect'; @@ -17,13 +11,6 @@ import { DimensionsSwapButton } from './DimensionsSwapButton'; import { DimensionsWidth } from './DimensionsWidth'; export const Dimensions = memo(() => { - const supportsAspectRatio = useAppSelector(selectModelSupportsAspectRatio); - const supportsPixelDimensions = useAppSelector(selectModelSupportsPixelDimensions); - - if (!supportsAspectRatio) { - return null; - } - return ( @@ -31,20 +18,11 @@ export const Dimensions = memo(() => { - {supportsPixelDimensions && ( - <> - - - - )} + + - {supportsPixelDimensions && ( - <> - - - - )} - {!supportsPixelDimensions && } + + diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx index bd0c0d03a6b..7f7d9893dc0 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsAspectRatioSelect.tsx @@ -4,20 +4,9 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf import { aspectRatioIdChanged, selectAspectRatioID, - selectIsChatGPT4o, selectIsFluxKontext, - selectIsGemini2_5, - selectIsImagen3, - selectIsImagen4, } from 'features/controlLayers/store/paramsSlice'; -import { - isAspectRatioID, - zAspectRatioID, - zChatGPT4oAspectRatioID, - zFluxKontextAspectRatioID, - zGemini2_5AspectRatioID, - zImagen3AspectRatioID, -} from 'features/controlLayers/store/types'; +import { isAspectRatioID, zAspectRatioID, zFluxKontextAspectRatioID } from 'features/controlLayers/store/types'; import type { ChangeEventHandler } from 'react'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -27,29 +16,15 @@ export const DimensionsAspectRatioSelect = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const id = useAppSelector(selectAspectRatioID); - const isImagen3 = useAppSelector(selectIsImagen3); - const isChatGPT4o = useAppSelector(selectIsChatGPT4o); - const isImagen4 = useAppSelector(selectIsImagen4); const isFluxKontext = useAppSelector(selectIsFluxKontext); - const isGemini2_5 = useAppSelector(selectIsGemini2_5); const options = useMemo(() => { - // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes - if (isImagen3 || isImagen4) { - return zImagen3AspectRatioID.options; - } - if (isChatGPT4o) { - return zChatGPT4oAspectRatioID.options; - } if (isFluxKontext) { return zFluxKontextAspectRatioID.options; } - if (isGemini2_5) { - return zGemini2_5AspectRatioID.options; - } // All other models return zAspectRatioID.options; - }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext, isGemini2_5]); + 
}, [isFluxKontext]); const onChange = useCallback>( (e) => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx index a2f84f360a0..924187c1ed0 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsHeight.tsx @@ -1,20 +1,27 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; -import { heightChanged, selectHeight, selectIsApiBaseModel } from 'features/controlLayers/store/paramsSlice'; +import { heightChanged, selectHeight } from 'features/controlLayers/store/paramsSlice'; import { selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors'; -import { selectHeightConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; + export const DimensionsHeight = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const optimalDimension = useAppSelector(selectOptimalDimension); const height = useAppSelector(selectHeight); - const config = useAppSelector(selectHeightConfig); const gridSize = useAppSelector(selectGridSize); - const isApiModel = useAppSelector(selectIsApiBaseModel); const onChange = useCallback( (v: number) => { @@ -23,13 +30,10 @@ export const DimensionsHeight = memo(() => { [dispatch] ); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMin, config.sliderMax, optimalDimension] - ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); return ( - + {t('parameters.height')} @@ -37,9 +41,9 @@ export const DimensionsHeight = memo(() => { value={height} defaultValue={optimalDimension} onChange={onChange} - min={config.sliderMin} - max={config.sliderMax} - step={config.coarseStep} + min={CONSTRAINTS.sliderMin} + max={CONSTRAINTS.sliderMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} marks={marks} /> @@ -47,9 +51,9 @@ export const DimensionsHeight = memo(() => { value={height} defaultValue={optimalDimension} onChange={onChange} - min={config.numberInputMin} - max={config.numberInputMax} - step={config.coarseStep} + min={CONSTRAINTS.numberInputMin} + max={CONSTRAINTS.numberInputMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} /> diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx index 2de397cc784..6ab17147a74 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsLockAspectRatioButton.tsx @@ -1,10 +1,6 @@ import { IconButton } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { - 
aspectRatioLockToggled, - selectAspectRatioIsLocked, - selectIsApiBaseModel, -} from 'features/controlLayers/store/paramsSlice'; +import { aspectRatioLockToggled, selectAspectRatioIsLocked } from 'features/controlLayers/store/paramsSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiLockSimpleFill, PiLockSimpleOpenBold } from 'react-icons/pi'; @@ -13,7 +9,6 @@ export const DimensionsLockAspectRatioButton = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const isLocked = useAppSelector(selectAspectRatioIsLocked); - const isApiModel = useAppSelector(selectIsApiBaseModel); const onClick = useCallback(() => { dispatch(aspectRatioLockToggled()); @@ -27,7 +22,6 @@ export const DimensionsLockAspectRatioButton = memo(() => { variant={isLocked ? 'outline' : 'ghost'} size="sm" icon={isLocked ? : } - isDisabled={isApiModel} /> ); }); diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx index eda44ba925d..c1c43f0cec4 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsSetOptimalSizeButton.tsx @@ -1,11 +1,6 @@ import { IconButton } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { - selectHeight, - selectIsApiBaseModel, - selectWidth, - sizeOptimized, -} from 'features/controlLayers/store/paramsSlice'; +import { selectHeight, selectWidth, sizeOptimized } from 'features/controlLayers/store/paramsSlice'; import { selectOptimalDimension } from 'features/controlLayers/store/selectors'; import { getIsSizeTooLarge, getIsSizeTooSmall } from 'features/parameters/util/optimalDimension'; import { memo, useCallback, useMemo } from 'react'; @@ -15,7 +10,6 @@ import { PiSparkleFill } from 'react-icons/pi'; export const DimensionsSetOptimalSizeButton = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const isApiModel = useAppSelector(selectIsApiBaseModel); const width = useAppSelector(selectWidth); const height = useAppSelector(selectHeight); const optimalDimension = useAppSelector(selectOptimalDimension); @@ -49,7 +43,6 @@ export const DimensionsSetOptimalSizeButton = memo(() => { size="sm" icon={} colorScheme={isSizeTooSmall || isSizeTooLarge ? 
'warning' : 'base'} - isDisabled={isApiModel} /> ); }); diff --git a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx index eb2f96af96a..20a754c5c30 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Dimensions/DimensionsWidth.tsx @@ -1,22 +1,27 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; -import { selectIsApiBaseModel, selectWidth, widthChanged } from 'features/controlLayers/store/paramsSlice'; +import { selectWidth, widthChanged } from 'features/controlLayers/store/paramsSlice'; import { selectGridSize, selectOptimalDimension } from 'features/controlLayers/store/selectors'; -import { selectWidthConfig } from 'features/system/store/configSlice'; -import { selectActiveTab } from 'features/ui/store/uiSelectors'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +export const CONSTRAINTS = { + initial: 512, + sliderMin: 64, + sliderMax: 1536, + numberInputMin: 64, + numberInputMax: 4096, + fineStep: 8, + coarseStep: 64, +}; + export const DimensionsWidth = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const width = useAppSelector(selectWidth); const optimalDimension = useAppSelector(selectOptimalDimension); - const config = useAppSelector(selectWidthConfig); - const isApiModel = useAppSelector(selectIsApiBaseModel); const gridSize = useAppSelector(selectGridSize); - const activeTab = useAppSelector(selectActiveTab); const onChange = useCallback( (v: number) => { @@ -25,13 +30,10 @@ export const DimensionsWidth = memo(() => { [dispatch] ); - const marks = useMemo( - () => [config.sliderMin, optimalDimension, config.sliderMax], - [config.sliderMax, config.sliderMin, optimalDimension] - ); + const marks = useMemo(() => [CONSTRAINTS.sliderMin, optimalDimension, CONSTRAINTS.sliderMax], [optimalDimension]); return ( - + {t('parameters.width')} @@ -39,9 +41,9 @@ export const DimensionsWidth = memo(() => { value={width} onChange={onChange} defaultValue={optimalDimension} - min={config.sliderMin} - max={config.sliderMax} - step={config.coarseStep} + min={CONSTRAINTS.sliderMin} + max={CONSTRAINTS.sliderMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} marks={marks} /> @@ -49,9 +51,9 @@ export const DimensionsWidth = memo(() => { value={width} onChange={onChange} defaultValue={optimalDimension} - min={config.numberInputMin} - max={config.numberInputMax} - step={config.coarseStep} + min={CONSTRAINTS.numberInputMin} + max={CONSTRAINTS.numberInputMax} + step={CONSTRAINTS.coarseStep} fineStep={gridSize} /> diff --git a/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx b/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx deleted file mode 100644 index 87871387be2..00000000000 --- a/invokeai/frontend/web/src/features/parameters/components/MainModel/DisabledModelWarning.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import { Flex, Link, Text } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $accountSettingsLink } from 
'app/store/nanostores/accountSettingsLink'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectModel } from 'features/controlLayers/store/paramsSlice'; -import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled'; -import { Trans, useTranslation } from 'react-i18next'; - -export const DisabledModelWarning = () => { - const { t } = useTranslation(); - const model = useAppSelector(selectModel); - - const accountSettingsLink = useStore($accountSettingsLink); - const { isChatGPT4oHighModelDisabled } = useIsModelDisabled(); - - if (!model || !isChatGPT4oHighModelDisabled(model)) { - return null; - } - - return ( - - - - {t('parameters.invoke.accountSettings')} - - ), - }} - /> - - - ); -}; diff --git a/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx b/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx index 6d97b6bdd71..5a06bf8c514 100644 --- a/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/MainModel/NavigateToModelManagerButton.tsx @@ -1,34 +1,23 @@ import type { IconButtonProps } from '@invoke-ai/ui-library'; import { IconButton } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectIsModelsTabDisabled } from 'features/system/store/configSlice'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiCubeBold } from 'react-icons/pi'; export const NavigateToModelManagerButton = memo((props: Omit) => { - const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled); - const onClickGoToModelManager = useStore($onClickGoToModelManager); - const { t } = useTranslation(); const onClick = useCallback(() => { navigationApi.switchToTab('models'); }, []); - if (isModelsTabDisabled && !onClickGoToModelManager) { - return null; - } - return ( } tooltip={`${t('modelManager.manageModels')}`} aria-label={`${t('modelManager.manageModels')}`} - onClick={onClickGoToModelManager ?? 
onClick} + onClick={onClick} size="sm" variant="ghost" {...props} diff --git a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx index 14d19934510..48e30cc4af3 100644 --- a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx @@ -12,10 +12,8 @@ import { Spacer, Text, } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { EMPTY_ARRAY } from 'app/store/constants'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager'; import { useAppSelector } from 'app/store/storeHooks'; import type { Group, PickerContextState } from 'common/components/Picker/Picker'; import { buildGroup, getRegex, isGroup, Picker, usePickerContext } from 'common/components/Picker/Picker'; @@ -24,18 +22,11 @@ import { typedMemo } from 'common/util/typedMemo'; import { uniq } from 'es-toolkit/compat'; import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice'; import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; -import { - API_BASE_MODELS, - MODEL_BASE_TO_COLOR, - MODEL_BASE_TO_LONG_NAME, - MODEL_BASE_TO_SHORT_NAME, -} from 'features/modelManagerV2/models'; +import { MODEL_BASE_TO_COLOR, MODEL_BASE_TO_LONG_NAME, MODEL_BASE_TO_SHORT_NAME } from 'features/modelManagerV2/models'; import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore'; import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage'; import type { BaseModelType } from 'features/nodes/types/common'; import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { selectIsModelsTabDisabled } from 'features/system/store/configSlice'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { filesize } from 'filesize'; import { memo, useCallback, useMemo, useRef } from 'react'; @@ -76,22 +67,12 @@ type WithStarred = T & { starred?: boolean }; const getOptionId = (modelConfig: WithStarred) => modelConfig.key; const ModelManagerLink = memo((props: ButtonProps) => { - const onClickGoToModelManager = useStore($onClickGoToModelManager); const onClick = useCallback(() => { navigationApi.switchToTab('models'); setInstallModelsTabByName('launchpad'); }, []); - return ( - )} - {isFailed && isRetryEnabled && ( + {isFailed && ( - - - - - - - ); -}); -VideoLaunchpadPanel.displayName = 'VideoLaunchpadPanel'; diff --git a/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx b/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx deleted file mode 100644 index 0d99be4f963..00000000000 --- a/invokeai/frontend/web/src/features/ui/layouts/VideoTabLeftPanel.tsx +++ /dev/null @@ -1,16 +0,0 @@ -import { Box, Flex } from '@invoke-ai/ui-library'; -import QueueControls from 'features/queue/components/QueueControls'; -import { ParametersPanelVideo } from 'features/ui/components/ParametersPanels/ParametersPanelVideo'; -import { memo } from 'react'; - -export const VideoTabLeftPanel = memo(() => { - return ( - - - - - - - ); -}); -VideoTabLeftPanel.displayName = 'VideoTabLeftPanel'; diff --git a/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx 
b/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx deleted file mode 100644 index dbf9b44e4c7..00000000000 --- a/invokeai/frontend/web/src/features/ui/layouts/video-tab-auto-layout.tsx +++ /dev/null @@ -1,278 +0,0 @@ -import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview'; -import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview'; -import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent'; -import { GalleryPanel } from 'features/gallery/components/Gallery'; -import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel'; -import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons'; -import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons'; -import type { - AutoLayoutDockviewComponents, - AutoLayoutGridviewComponents, - DockviewPanelParameters, - GridviewPanelParameters, - RootLayoutGridviewComponents, -} from 'features/ui/layouts/auto-layout-context'; -import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context'; -import type { TabName } from 'features/ui/store/uiTypes'; -import { dockviewTheme } from 'features/ui/styles/theme'; -import { t } from 'i18next'; -import { memo, useCallback, useEffect } from 'react'; - -import { DockviewTab } from './DockviewTab'; -import { DockviewTabLaunchpad } from './DockviewTabLaunchpad'; -import { DockviewTabProgress } from './DockviewTabProgress'; -import { navigationApi } from './navigation-api'; -import { PanelHotkeysLogical } from './PanelHotkeysLogical'; -import { - BOARD_PANEL_DEFAULT_HEIGHT_PX, - BOARD_PANEL_MIN_HEIGHT_PX, - BOARDS_PANEL_ID, - DOCKVIEW_TAB_ID, - DOCKVIEW_TAB_LAUNCHPAD_ID, - DOCKVIEW_TAB_PROGRESS_ID, - GALLERY_PANEL_DEFAULT_HEIGHT_PX, - GALLERY_PANEL_ID, - GALLERY_PANEL_MIN_HEIGHT_PX, - LAUNCHPAD_PANEL_ID, - LEFT_PANEL_ID, - LEFT_PANEL_MIN_SIZE_PX, - MAIN_PANEL_ID, - RIGHT_PANEL_ID, - RIGHT_PANEL_MIN_SIZE_PX, - SETTINGS_PANEL_ID, - VIEWER_PANEL_ID, -} from './shared'; -import { VideoLaunchpadPanel } from './VideoLaunchpadPanel'; -import { VideoTabLeftPanel } from './VideoTabLeftPanel'; - -const tabComponents = { - [DOCKVIEW_TAB_ID]: DockviewTab, - [DOCKVIEW_TAB_PROGRESS_ID]: DockviewTabProgress, - [DOCKVIEW_TAB_LAUNCHPAD_ID]: DockviewTabLaunchpad, -}; - -const mainPanelComponents: AutoLayoutDockviewComponents = { - [LAUNCHPAD_PANEL_ID]: withPanelContainer(VideoLaunchpadPanel), - [VIEWER_PANEL_ID]: withPanelContainer(ImageViewerPanel), -}; - -const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => { - navigationApi.registerContainer(tab, 'main', api, () => { - const launchpad = api.addPanel({ - id: LAUNCHPAD_PANEL_ID, - component: LAUNCHPAD_PANEL_ID, - title: t('ui.panels.launchpad'), - tabComponent: DOCKVIEW_TAB_LAUNCHPAD_ID, - params: { - tab, - focusRegion: 'launchpad', - i18nKey: 'ui.panels.launchpad', - }, - }); - - api.addPanel({ - id: VIEWER_PANEL_ID, - component: VIEWER_PANEL_ID, - title: t('ui.panels.imageViewer'), - tabComponent: DOCKVIEW_TAB_PROGRESS_ID, - params: { - tab, - focusRegion: 'viewer', - i18nKey: 'ui.panels.imageViewer', - }, - position: { - direction: 'within', - referencePanel: launchpad.id, - }, - }); - - launchpad.api.setActive(); - }); -}; - -const MainPanel = memo(() => { - const { tab } = useAutoLayoutContext(); - - const onReady = useCallback( - ({ api }) => { - initializeMainPanelLayout(tab, api); - }, - [tab] - ); - 
return ( - <> - - - - - - ); -}); -MainPanel.displayName = 'MainPanel'; - -const rightPanelComponents: AutoLayoutGridviewComponents = { - [BOARDS_PANEL_ID]: withPanelContainer(BoardsPanel), - [GALLERY_PANEL_ID]: withPanelContainer(GalleryPanel), -}; - -const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => { - navigationApi.registerContainer(tab, 'right', api, () => { - const gallery = api.addPanel({ - id: GALLERY_PANEL_ID, - component: GALLERY_PANEL_ID, - minimumWidth: RIGHT_PANEL_MIN_SIZE_PX, - minimumHeight: GALLERY_PANEL_MIN_HEIGHT_PX, - params: { - tab, - focusRegion: 'gallery', - }, - }); - - const boards = api.addPanel({ - id: BOARDS_PANEL_ID, - component: BOARDS_PANEL_ID, - minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX, - params: { - tab, - focusRegion: 'boards', - }, - position: { - direction: 'above', - referencePanel: gallery.id, - }, - }); - - gallery.api.setSize({ height: GALLERY_PANEL_DEFAULT_HEIGHT_PX }); - boards.api.setSize({ height: BOARD_PANEL_DEFAULT_HEIGHT_PX }); - }); -}; - -const RightPanel = memo(() => { - const { tab } = useAutoLayoutContext(); - - const onReady = useCallback( - ({ api }) => { - initializeRightPanelLayout(tab, api); - }, - [tab] - ); - return ( - - ); -}); -RightPanel.displayName = 'RightPanel'; - -const leftPanelComponents: AutoLayoutGridviewComponents = { - [SETTINGS_PANEL_ID]: withPanelContainer(VideoTabLeftPanel), -}; - -const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => { - navigationApi.registerContainer(tab, 'left', api, () => { - api.addPanel({ - id: SETTINGS_PANEL_ID, - component: SETTINGS_PANEL_ID, - params: { - tab, - focusRegion: 'settings', - }, - }); - }); -}; - -const LeftPanel = memo(() => { - const { tab } = useAutoLayoutContext(); - - const onReady = useCallback( - ({ api }) => { - initializeLeftPanelLayout(tab, api); - }, - [tab] - ); - return ( - - ); -}); -LeftPanel.displayName = 'LeftPanel'; - -const rootPanelComponents: RootLayoutGridviewComponents = { - [LEFT_PANEL_ID]: LeftPanel, - [MAIN_PANEL_ID]: MainPanel, - [RIGHT_PANEL_ID]: RightPanel, -}; - -const initializeRootPanelLayout = (tab: TabName, api: GridviewApi) => { - navigationApi.registerContainer(tab, 'root', api, () => { - const main = api.addPanel({ - id: MAIN_PANEL_ID, - component: MAIN_PANEL_ID, - priority: LayoutPriority.High, - }); - - const left = api.addPanel({ - id: LEFT_PANEL_ID, - component: LEFT_PANEL_ID, - minimumWidth: LEFT_PANEL_MIN_SIZE_PX, - position: { - direction: 'left', - referencePanel: main.id, - }, - }); - - const right = api.addPanel({ - id: RIGHT_PANEL_ID, - component: RIGHT_PANEL_ID, - minimumWidth: RIGHT_PANEL_MIN_SIZE_PX, - position: { - direction: 'right', - referencePanel: main.id, - }, - }); - - left.api.setSize({ width: LEFT_PANEL_MIN_SIZE_PX }); - right.api.setSize({ width: RIGHT_PANEL_MIN_SIZE_PX }); - }); -}; - -export const VideoTabAutoLayout = memo(() => { - const onReady = useCallback(({ api }) => { - initializeRootPanelLayout('video', api); - }, []); - - useEffect( - () => () => { - navigationApi.unregisterTab('video'); - }, - [] - ); - - return ( - - - - ); -}); -VideoTabAutoLayout.displayName = 'VideoTabAutoLayout'; diff --git a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts index 95f7603821e..04c435053b7 100644 --- a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts +++ b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts @@ -1,7 +1,7 @@ import { isPlainObject } from 'es-toolkit'; import { z } from 'zod'; -export const 
zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue', 'video']); +export const zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue']); export type TabName = z.infer; const zPartialDimensions = z.object({ diff --git a/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx b/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx deleted file mode 100644 index 3247ab733e4..00000000000 --- a/invokeai/frontend/web/src/features/video/components/VideoPlayer.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import { Flex } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { useVideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu'; -import { useVideoViewerContext } from 'features/video/context/VideoViewerContext'; -import { MediaController } from 'media-chrome/react'; -import { memo, useRef } from 'react'; -import ReactPlayer from 'react-player'; -import type { VideoDTO } from 'services/api/types'; - -import { VideoPlayerControls } from './VideoPlayerControls'; - -interface VideoPlayerProps { - videoDTO: VideoDTO; -} - -export const VideoPlayer = memo(({ videoDTO }: VideoPlayerProps) => { - const ref = useRef(null); - useVideoContextMenu(videoDTO, ref); - const { videoRef } = useVideoViewerContext(); - const authToken = useStore($authToken); - - return ( - - - - - - - - ); -}); - -VideoPlayer.displayName = 'VideoPlayer'; diff --git a/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx b/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx deleted file mode 100644 index df642880e20..00000000000 --- a/invokeai/frontend/web/src/features/video/components/VideoPlayerControls.tsx +++ /dev/null @@ -1,66 +0,0 @@ -import { Icon, IconButton } from '@invoke-ai/ui-library'; -import { useVideoViewerContext } from 'features/video/context/VideoViewerContext'; -import { useCaptureVideoFrame } from 'features/video/hooks/useCaptureVideoFrame'; -import { - MediaControlBar, - MediaFullscreenButton, - MediaPlayButton, - MediaTimeDisplay, - MediaTimeRange, -} from 'media-chrome/react'; -import type { CSSProperties } from 'react'; -import { useCallback, useState } from 'react'; -import { PiArrowsOutBold, PiCameraBold, PiPauseFill, PiPlayFill, PiSpinnerBold } from 'react-icons/pi'; - -const NoHoverBackground = { - '--media-text-color': 'base.200', - '--media-font-size': '12px', -} as CSSProperties; - -export const VideoPlayerControls = () => { - const captureVideoFrame = useCaptureVideoFrame(); - const [capturing, setCapturing] = useState(false); - const { videoRef } = useVideoViewerContext(); - - const onClickSaveFrame = useCallback(async () => { - setCapturing(true); - await captureVideoFrame(videoRef.current); - setCapturing(false); - }, [captureVideoFrame, videoRef]); - - return ( - - - - - - - - - - - - - - - - - - } - size="lg" - variant="unstyled" - onClick={onClickSaveFrame} - aria-label="Save Current Frame" - isDisabled={capturing} - _disabled={{ - background: 'rgba(20, 20, 30, 0.7)', - }} - height="100%" - backgroundColor="rgba(20, 20, 30, 0.7)" - pb={3} - /> - - ); -}; diff --git a/invokeai/frontend/web/src/features/video/components/VideoView.tsx b/invokeai/frontend/web/src/features/video/components/VideoView.tsx deleted file mode 100644 index 4234c5b16b2..00000000000 --- a/invokeai/frontend/web/src/features/video/components/VideoView.tsx +++ /dev/null @@ -1,26 +0,0 
@@ -import { Flex } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { useFocusRegion } from 'common/hooks/focus'; -import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { useRef } from 'react'; -import { useVideoDTO } from 'services/api/endpoints/videos'; - -import { VideoPlayer } from './VideoPlayer'; - -export const VideoView = () => { - const ref = useRef(null); - const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const videoDTO = useVideoDTO(lastSelectedItem?.id); - - useFocusRegion('video', ref); - - if (!videoDTO) { - return null; - } - - return ( - - - - ); -}; diff --git a/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx b/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx deleted file mode 100644 index 96f0b3be452..00000000000 --- a/invokeai/frontend/web/src/features/video/context/VideoViewerContext.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import type { PropsWithChildren, RefObject } from 'react'; -import { createContext, memo, useContext, useMemo, useRef } from 'react'; -import { assert } from 'tsafe'; - -type VideoViewerContextValue = { - videoRef: RefObject; -}; - -const VideoViewerContext = createContext(null); - -export const VideoViewerContextProvider = memo((props: PropsWithChildren) => { - const videoRef = useRef(null); - - const value = useMemo(() => ({ videoRef }), [videoRef]); - - return {props.children}; -}); -VideoViewerContextProvider.displayName = 'VideoViewerContextProvider'; - -export const useVideoViewerContext = () => { - const value = useContext(VideoViewerContext); - assert(value !== null, 'useVideoViewerContext must be used within a VideoViewerContextProvider'); - return value; -}; diff --git a/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts b/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts deleted file mode 100644 index 481bcc30a2c..00000000000 --- a/invokeai/frontend/web/src/features/video/hooks/useCaptureVideoFrame.ts +++ /dev/null @@ -1,90 +0,0 @@ -import { logger } from 'app/logging/logger'; -import { toast } from 'features/toast/toast'; -import { useCallback } from 'react'; -import { serializeError } from 'serialize-error'; -import { uploadImage } from 'services/api/endpoints/images'; - -const log = logger('video'); - -const captureFrame = (video: HTMLVideoElement): File => { - // Validate video element - if (video.videoWidth === 0 || video.videoHeight === 0) { - throw new Error('Invalid video element or video not loaded'); - } - - // Check if video is ready for capture - // https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/readyState - // 2 == HAVE_CURRENT_DATA - if (video.readyState < 2) { - throw new Error('Video is not ready for frame capture'); - } - - const canvas = document.createElement('canvas'); - canvas.width = video.videoWidth || 0; - canvas.height = video.videoHeight || 0; - - const context = canvas.getContext('2d'); - if (!context) { - throw new Error('Failed to get canvas 2D context'); - } - - // Draw the current video frame to canvas - context.drawImage(video, 0, 0); - - // Convert to data URL with proper format - const dataUri = canvas.toDataURL('image/png', 0.92); - const data = dataUri.split(',')[1]; - const mimeType = dataUri.split(';')[0]?.slice(5); - - if (!data || !mimeType) { - throw new Error('Failed to extract image data from canvas'); - } - - // Convert to blob - const bytes = window.atob(data); - const buf = new 
ArrayBuffer(bytes.length); - const arr = new Uint8Array(buf); - - for (let i = 0; i < bytes.length; i++) { - arr[i] = bytes.charCodeAt(i); - } - - const blob = new Blob([arr], { type: mimeType }); - const file = new File([blob], 'frame.png', { type: mimeType }); - return file; -}; - -export const useCaptureVideoFrame = () => { - /* - * Capture the current frame of the video uploading it as an asset. - * - * Toasts on success or failure. For convenience, accepts null but immediately creates a toast. - */ - const captureVideoFrame = useCallback(async (video: HTMLVideoElement | null) => { - try { - if (!video) { - toast({ - status: 'error', - title: 'Video not ready', - description: 'Please wait for the video to load before capturing a frame.', - }); - return; - } - const file = captureFrame(video); - await uploadImage({ file, image_category: 'user', is_intermediate: false, silent: true }); - toast({ - status: 'success', - title: 'Frame saved to assets tab', - }); - } catch (error) { - log.error({ error: serializeError(error as Error) }, 'Failed to capture frame'); - toast({ - status: 'error', - title: 'Failed to capture frame', - description: 'There was an error capturing the current video frame.', - }); - } - }, []); - - return captureVideoFrame; -}; diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx index f7d5ec40ece..1bdee73d7de 100644 --- a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx +++ b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx @@ -115,7 +115,6 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef workflow.id = undefined; workflow.name = name; workflow.meta.category = shouldSaveToProject ? 'project' : 'user'; - workflow.is_published = false; // We've just made the workflow a draft, but TS doesn't know that. We need to assert it. 
assert(isDraftWorkflow(workflow)); diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx index 26ce0302960..6f5acc431ed 100644 --- a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx +++ b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx @@ -1,6 +1,5 @@ import { MenuItem } from '@invoke-ai/ui-library'; import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher'; -import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish'; import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; @@ -10,12 +9,11 @@ const SaveWorkflowMenuItem = () => { const { t } = useTranslation(); const saveOrSaveAsWorkflow = useSaveOrSaveAsWorkflow(); const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges(); - const isPublished = useIsWorkflowPublished(); return ( } onClick={saveOrSaveAsWorkflow} > diff --git a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts index b9d65bca897..40c186b44d4 100644 --- a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts +++ b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow.ts @@ -1,4 +1,3 @@ -import { useIsWorkflowPublished } from 'features/nodes/components/sidePanel/workflow/publish'; import { useBuildWorkflowFast } from 'features/nodes/util/workflow/buildWorkflow'; import { saveWorkflowAs } from 'features/workflowLibrary/components/SaveWorkflowAsDialog'; import { isLibraryWorkflow, useSaveLibraryWorkflow } from 'features/workflowLibrary/hooks/useSaveLibraryWorkflow'; @@ -11,18 +10,17 @@ import { useCallback } from 'react'; */ export const useSaveOrSaveAsWorkflow = () => { const buildWorkflow = useBuildWorkflowFast(); - const isPublished = useIsWorkflowPublished(); const { saveWorkflow } = useSaveLibraryWorkflow(); const saveOrSaveAsWorkflow = useCallback(() => { const workflow = buildWorkflow(); - if (isLibraryWorkflow(workflow) && !isPublished) { + if (isLibraryWorkflow(workflow)) { saveWorkflow(workflow); } else { saveWorkflowAs(workflow); } - }, [buildWorkflow, isPublished, saveWorkflow]); + }, [buildWorkflow, saveWorkflow]); return saveOrSaveAsWorkflow; }; diff --git a/invokeai/frontend/web/src/index.ts b/invokeai/frontend/web/src/index.ts deleted file mode 100644 index 01688756989..00000000000 --- a/invokeai/frontend/web/src/index.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { enqueueRequestedVideos } from 'features/queue/hooks/useEnqueueVideo'; - -import { adHocPostProcessingRequested } from './app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener'; -import { socketConnected } from './app/store/middleware/listenerMiddleware/listeners/socketConnected'; -import { - controlLayerAdded, - inpaintMaskAdded, - rasterLayerAdded, - rgAdded, -} from './features/controlLayers/store/canvasSlice'; -import { refImageAdded } from './features/controlLayers/store/refImagesSlice'; -import { - imageCopiedToClipboard, - imageDownloaded, - imageOpenedInNewTab, - 
imageUploadedClientSide, - sentImageToCanvas, -} from './features/gallery/store/actions'; -import { boardIdSelected } from './features/gallery/store/gallerySlice'; -import { workflowLoaded } from './features/nodes/store/nodesSlice'; -import { enqueueRequestedCanvas } from './features/queue/hooks/useEnqueueCanvas'; -import { enqueueRequestedGenerate } from './features/queue/hooks/useEnqueueGenerate'; -import { enqueueRequestedUpscaling } from './features/queue/hooks/useEnqueueUpscaling'; -import { enqueueRequestedWorkflows } from './features/queue/hooks/useEnqueueWorkflows'; -import { videoModalLinkClicked, videoModalOpened } from './features/system/store/actions'; -import { accordionStateChanged, expanderStateChanged } from './features/ui/store/uiSlice'; -import { - newWorkflowSaved, - workflowDownloaded, - workflowLoadedFromFile, - workflowUpdated, -} from './features/workflowLibrary/store/actions'; -export { default as InvokeAIUI } from './app/components/InvokeAIUI'; -export type { StudioInitAction } from './app/hooks/useStudioInitAction'; -export type { LoggingOverrides } from './app/logging/logger'; -export type { NumericalParameterConfig, PartialAppConfig } from './app/types/invokeai'; -export { default as Loading } from './common/components/Loading/Loading'; -export { default as HotkeysModal } from './features/system/components/HotkeysModal/HotkeysModal'; -export { default as InvokeAiLogoComponent } from './features/system/components/InvokeAILogoComponent'; -export { default as SettingsModal } from './features/system/components/SettingsModal/SettingsModal'; -export { default as StatusIndicator } from './features/system/components/StatusIndicator'; -export { boardsApi } from './services/api/endpoints/boards'; -export { imagesApi } from './services/api/endpoints/images'; -export { queueApi } from './services/api/endpoints/queue'; -export { stylePresetsApi } from './services/api/endpoints/stylePresets'; -export { workflowsApi } from './services/api/endpoints/workflows'; - -export const reduxActions = { - videoModalLinkClicked, - videoModalOpened, - socketConnected, - workflowDownloaded, - workflowLoadedFromFile, - newWorkflowSaved, - workflowUpdated, - workflowLoaded, - sentImageToCanvas, - imageDownloaded, - imageCopiedToClipboard, - imageOpenedInNewTab, - imageUploadedClientSide, - accordionStateChanged, - expanderStateChanged, - enqueueRequestedGenerate, - enqueueRequestedCanvas, - enqueueRequestedWorkflows, - enqueueRequestedUpscaling, - enqueueRequestedVideos, - adHocPostProcessingRequested, - boardIdSelected, - rasterLayerAdded, - controlLayerAdded, - rgAdded, - inpaintMaskAdded, - refImageAdded, -} as const; diff --git a/invokeai/frontend/web/src/services/api/authToastMiddleware.ts b/invokeai/frontend/web/src/services/api/authToastMiddleware.ts deleted file mode 100644 index 94dfaca2da2..00000000000 --- a/invokeai/frontend/web/src/services/api/authToastMiddleware.ts +++ /dev/null @@ -1,81 +0,0 @@ -import type { Middleware } from '@reduxjs/toolkit'; -import { isRejectedWithValue } from '@reduxjs/toolkit'; -import { $toastMap } from 'app/store/nanostores/toastMap'; -import { toast } from 'features/toast/toast'; -import { t } from 'i18next'; -import { z } from 'zod'; - -const trialUsageErrorSubstring = 'usage allotment for the free trial'; -const trialUsageErrorCode = 'USAGE_LIMIT_TRIAL'; - -const orgUsageErrorSubstring = 'organization has reached its predefined usage allotment'; -const orgUsageErrorCode = 'USAGE_LIMIT_ORG'; - -const indieUsageErrorSubstring = 'usage allotment'; 
-const indieUsageErrorCode = 'USAGE_LIMIT_INDIE'; - -//TODO make this dynamic with returned error codes instead of substring check -const getErrorCode = (errorString?: string) => { - if (!errorString) { - return undefined; - } - if (errorString.includes(trialUsageErrorSubstring)) { - return trialUsageErrorCode; - } - if (errorString.includes(orgUsageErrorSubstring)) { - return orgUsageErrorCode; - } - if (errorString.includes(indieUsageErrorSubstring)) { - return indieUsageErrorCode; - } -}; - -const zRejectedForbiddenAction = z.object({ - payload: z.object({ - status: z.literal(403), - data: z.object({ - detail: z.string(), - }), - }), - meta: z - .object({ - arg: z - .object({ - endpointName: z.string().optional(), - }) - .optional(), - }) - .optional(), -}); - -export const authToastMiddleware: Middleware = () => (next) => (action) => { - if (isRejectedWithValue(action)) { - try { - const parsed = zRejectedForbiddenAction.parse(action); - const endpointName = parsed.meta?.arg?.endpointName; - if (endpointName === 'getImageDTO') { - // do not show toast if problem is image access - return next(action); - } - const toastMap = $toastMap.get(); - const customMessage = parsed.payload.data.detail !== 'Forbidden' ? parsed.payload.data.detail : undefined; - const errorCode = getErrorCode(customMessage); - const customToastConfig = errorCode ? toastMap?.[errorCode] : undefined; - - if (customToastConfig) { - toast(customToastConfig); - } else { - toast({ - id: `auth-error-toast-${endpointName}`, - title: t('toast.somethingWentWrong'), - status: 'error', - description: customMessage, - }); - } - } catch { - // no-op - } - } - - return next(action); -}; diff --git a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts index d4257742189..2388dd0c089 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts @@ -1,4 +1,3 @@ -import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl'; import type { OpenAPIV3_1 } from 'openapi-types'; import type { stringify } from 'querystring'; import type { paths } from 'services/api/schema'; @@ -82,11 +81,7 @@ export const appInfoApi = api.injectEndpoints({ invalidatesTags: ['InvocationCacheStatus'], }), getOpenAPISchema: build.query({ - query: () => { - const openAPISchemaUrl = $openAPISchemaUrl.get(); - const url = openAPISchemaUrl ? 
openAPISchemaUrl : `${window.location.href.replace(/\/$/, '')}/openapi.json`; - return url; - }, + query: () => `${window.location.href.replace(/\/$/, '')}/openapi.json`, providesTags: ['Schema'], }), }), diff --git a/invokeai/frontend/web/src/services/api/endpoints/boards.ts b/invokeai/frontend/web/src/services/api/endpoints/boards.ts index 81d8d6db09b..59211439a45 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/boards.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/boards.ts @@ -3,7 +3,6 @@ import queryString from 'query-string'; import type { BoardDTO, CreateBoardArg, - GetVideoIdsResult, ImageCategory, ListBoardsArgs, OffsetPaginatedResults_ImageDTO_, @@ -13,7 +12,6 @@ import { getListImagesUrl } from 'services/api/util'; import type { ApiTagDescription } from '..'; import { api, buildV1Url, LIST_TAG } from '..'; -import { buildVideosUrl } from './videos'; /** * Builds an endpoint URL for the boards router @@ -97,17 +95,6 @@ export const boardsApi = api.injectEndpoints({ }, }), - getBoardVideosTotal: build.query<{ total: number }, string | undefined>({ - query: (board_id) => ({ - url: buildVideosUrl('ids', { board_id: board_id ?? 'none' }), - method: 'GET', - }), - providesTags: (result, error, arg) => [{ type: 'BoardVideosTotal', id: arg ?? 'none' }, 'FetchOnReconnect'], - transformResponse: (response: GetVideoIdsResult) => { - return { total: response.total_count }; - }, - }), - /** * Boards Mutations */ @@ -145,7 +132,6 @@ export const { useListAllBoardsQuery, useGetBoardImagesTotalQuery, useGetBoardAssetsTotalQuery, - useGetBoardVideosTotalQuery, useCreateBoardMutation, useUpdateBoardMutation, useListAllImageNamesForBoardQuery, diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index b5b2827ee73..1c24f32fc69 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -1,5 +1,4 @@ import { skipToken } from '@reduxjs/toolkit/query'; -import { $authToken } from 'app/store/nanostores/authToken'; import { getStore } from 'app/store/nanostores/store'; import type { CroppableImageWithDims } from 'features/controlLayers/store/types'; import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types'; @@ -579,10 +578,6 @@ export const uploadImages = async (args: UploadImageArg[]): Promise */ export const imageDTOToFile = async (imageDTO: ImageDTO): Promise => { const init: RequestInit = {}; - const authToken = $authToken.get(); - if (authToken) { - init.headers = { Authorization: `Bearer ${authToken}` }; - } const res = await fetch(imageDTO.image_url, init); const blob = await res.blob(); // Create a new file with the same name, which we will upload diff --git a/invokeai/frontend/web/src/services/api/endpoints/queue.ts b/invokeai/frontend/web/src/services/api/endpoints/queue.ts index 81027d4f2b0..c246bc30beb 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/queue.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/queue.ts @@ -1,4 +1,3 @@ -import { $queueId } from 'app/store/nanostores/queueId'; import queryString from 'query-string'; import type { components, paths } from 'services/api/schema'; import type { @@ -19,7 +18,7 @@ import { api, buildV1Url, LIST_ALL_TAG, LIST_TAG } from '..'; * buildQueueUrl('some-path') * // '/api/v1/queue/queue_id/some-path' */ -const buildQueueUrl = (path: string = '') => buildV1Url(`queue/${$queueId.get()}/${path}`); +const buildQueueUrl = 
(path: string = '') => buildV1Url(`queue/default/${path}`); export type SessionQueueItemStatus = NonNullable; diff --git a/invokeai/frontend/web/src/services/api/endpoints/videos.ts b/invokeai/frontend/web/src/services/api/endpoints/videos.ts deleted file mode 100644 index e6f6bd7f6d9..00000000000 --- a/invokeai/frontend/web/src/services/api/endpoints/videos.ts +++ /dev/null @@ -1,239 +0,0 @@ -import { skipToken } from '@reduxjs/toolkit/query'; -import { getStore } from 'app/store/nanostores/store'; -import type { paths } from 'services/api/schema'; -import type { GetVideoIdsArgs, GetVideoIdsResult, VideoDTO } from 'services/api/types'; -import { - getTagsToInvalidateForBoardAffectingMutation, - getTagsToInvalidateForVideoMutation, -} from 'services/api/util/tagInvalidation'; -import stableHash from 'stable-hash'; -import type { Param0 } from 'tsafe'; -import type { JsonObject } from 'type-fest'; - -import { api, buildV1Url, LIST_TAG } from '..'; - -/** - * Builds an endpoint URL for the videos router - * @example - * buildVideosUrl('some-path') - * // '/api/v1/videos/some-path' - */ -export const buildVideosUrl = (path: string = '', query?: Parameters[1]) => - buildV1Url(`videos/${path}`, query); - -const buildBoardVideosUrl = (path: string = '') => buildV1Url(`board_videos/${path}`); - -export const videosApi = api.injectEndpoints({ - endpoints: (build) => ({ - /** - * Video Queries - */ - - getVideoDTO: build.query({ - query: (video_id) => ({ url: buildVideosUrl(`i/${video_id}`) }), - providesTags: (result, error, video_id) => [{ type: 'Video', id: video_id }], - }), - - getVideoMetadata: build.query({ - query: (video_id) => ({ url: buildVideosUrl(`i/${video_id}/metadata`) }), - providesTags: (result, error, video_id) => [{ type: 'VideoMetadata', id: video_id }], - }), - - /** - * Get ordered list of image names for selection operations - */ - getVideoIds: build.query({ - query: (queryArgs) => ({ - url: buildVideosUrl('ids', queryArgs), - method: 'GET', - }), - providesTags: (result, error, queryArgs) => [ - 'VideoIdList', - 'FetchOnReconnect', - { type: 'VideoIdList', id: stableHash(queryArgs) }, - ], - }), - /** - * Get image DTOs for the specified image names. Maintains order of input names. - */ - getVideoDTOsByNames: build.mutation< - paths['/api/v1/videos/videos_by_ids']['post']['responses']['200']['content']['application/json'], - paths['/api/v1/videos/videos_by_ids']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildVideosUrl('videos_by_ids'), - method: 'POST', - body, - }), - // Don't provide cache tags - we'll manually upsert into individual getImageDTO caches - async onQueryStarted(_, { dispatch, queryFulfilled }) { - try { - const { data: videoDTOs } = await queryFulfilled; - - // Upsert each DTO into the individual image cache - const updates: Param0 = []; - for (const videoDTO of videoDTOs) { - updates.push({ - endpointName: 'getVideoDTO', - arg: videoDTO.video_id, - value: videoDTO, - }); - } - dispatch(videosApi.util.upsertQueryEntries(updates)); - } catch { - // Handle error if needed - } - }, - }), - /** - * Star a list of videos. 
- */ - starVideos: build.mutation< - paths['/api/v1/videos/star']['post']['responses']['200']['content']['application/json'], - paths['/api/v1/videos/star']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildVideosUrl('star'), - method: 'POST', - body, - }), - invalidatesTags: (result) => { - if (!result) { - return []; - } - return [ - ...getTagsToInvalidateForVideoMutation(result.starred_videos), - ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards), - 'VideoCollectionCounts', - { type: 'VideoCollection', id: 'starred' }, - { type: 'VideoCollection', id: 'unstarred' }, - ]; - }, - }), - /** - * Unstar a list of videos. - */ - unstarVideos: build.mutation< - paths['/api/v1/videos/unstar']['post']['responses']['200']['content']['application/json'], - paths['/api/v1/videos/unstar']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildVideosUrl('unstar'), - method: 'POST', - body, - }), - invalidatesTags: (result) => { - if (!result) { - return []; - } - return [ - ...getTagsToInvalidateForVideoMutation(result.unstarred_videos), - ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards), - 'VideoCollectionCounts', - { type: 'VideoCollection', id: 'starred' }, - { type: 'VideoCollection', id: 'unstarred' }, - ]; - }, - }), - deleteVideos: build.mutation< - paths['/api/v1/videos/delete']['post']['responses']['200']['content']['application/json'], - paths['/api/v1/videos/delete']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildVideosUrl('delete'), - method: 'POST', - body, - }), - invalidatesTags: (result) => { - if (!result) { - return []; - } - // We ignore the deleted images when getting tags to invalidate. If we did not, we will invalidate the queries - // that fetch image DTOs, metadata, and workflows. But we have just deleted those images! Invalidating the tags - // will force those queries to re-fetch, and the requests will of course 404. 
- return [ - ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards), - 'VideoCollectionCounts', - { type: 'VideoCollection', id: LIST_TAG }, - ]; - }, - }), - addVideosToBoard: build.mutation< - paths['/api/v1/board_videos/batch']['post']['responses']['201']['content']['application/json'], - paths['/api/v1/board_videos/batch']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildBoardVideosUrl('batch'), - method: 'POST', - body, - }), - invalidatesTags: (result) => { - if (!result) { - return []; - } - return [ - ...getTagsToInvalidateForVideoMutation(result.added_videos), - ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards), - ]; - }, - }), - removeVideosFromBoard: build.mutation< - paths['/api/v1/board_videos/batch/delete']['post']['responses']['201']['content']['application/json'], - paths['/api/v1/board_videos/batch/delete']['post']['requestBody']['content']['application/json'] - >({ - query: (body) => ({ - url: buildBoardVideosUrl('batch/delete'), - method: 'POST', - body, - }), - invalidatesTags: (result) => { - if (!result) { - return []; - } - return [ - ...getTagsToInvalidateForVideoMutation(result.removed_videos), - ...getTagsToInvalidateForBoardAffectingMutation(result.affected_boards), - ]; - }, - }), - }), -}); - -export const { - useGetVideoDTOQuery, - useGetVideoIdsQuery, - useGetVideoDTOsByNamesMutation, - useStarVideosMutation, - useUnstarVideosMutation, - useDeleteVideosMutation, - useAddVideosToBoardMutation, - useRemoveVideosFromBoardMutation, - useGetVideoMetadataQuery, -} = videosApi; - -/** - * Imperative RTKQ helper to fetch an VideoDTO. - * @param id The id of the video to fetch - * @param options The options for the query. By default, the query will not subscribe to the store. - * @returns The ImageDTO if found, otherwise null - */ -export const getVideoDTOSafe = async ( - id: string, - options?: Parameters[1] -): Promise => { - const _options = { - subscribe: false, - ...options, - }; - const req = getStore().dispatch(videosApi.endpoints.getVideoDTOsByNames.initiate({ video_ids: [id] }, _options)); - try { - return (await req.unwrap())[0] ?? null; - } catch { - return null; - } -}; - -export const useVideoDTO = (video_id: string | null | undefined) => { - const { currentData: videoDTO } = useGetVideoDTOQuery(video_id ?? skipToken); - return videoDTO ?? 
null; -}; diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts index 20d2a1e0c7c..aaeb84a4cc2 100644 --- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts +++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts @@ -9,15 +9,12 @@ import { } from 'services/api/endpoints/models'; import type { AnyModelConfig } from 'services/api/types'; import { - isChatGPT4oModelConfig, isCLIPEmbedModelConfigOrSubmodel, isControlLayerModelConfig, isControlNetModelConfig, - isFluxKontextApiModelConfig, isFluxKontextModelConfig, isFluxReduxModelConfig, isFluxVAEModelConfig, - isGemini2_5ModelConfig, isIPAdapterModelConfig, isLoRAModelConfig, isNonRefinerMainModelConfig, @@ -26,7 +23,6 @@ import { isT5EncoderModelConfigOrSubmodel, isTIModelConfig, isVAEModelConfigOrSubmodel, - isVideoModelConfig, } from 'services/api/types'; const buildModelsHook = @@ -58,18 +54,11 @@ export const useEmbeddingModels = buildModelsHook(isTIModelConfig); export const useVAEModels = () => buildModelsHook(isVAEModelConfigOrSubmodel)(); export const useFluxVAEModels = () => buildModelsHook(isFluxVAEModelConfig)(); export const useGlobalReferenceImageModels = buildModelsHook( - (config) => - isIPAdapterModelConfig(config) || - isFluxReduxModelConfig(config) || - isChatGPT4oModelConfig(config) || - isFluxKontextApiModelConfig(config) || - isFluxKontextModelConfig(config) || - isGemini2_5ModelConfig(config) + (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isFluxKontextModelConfig(config) ); export const useRegionalReferenceImageModels = buildModelsHook( (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) ); -export const useVideoModels = buildModelsHook(isVideoModelConfig); const buildModelsSelector = (typeGuard: (config: AnyModelConfig) => config is T): Selector => @@ -82,13 +71,7 @@ const buildModelsSelector = }; export const selectIPAdapterModels = buildModelsSelector(isIPAdapterModelConfig); export const selectGlobalRefImageModels = buildModelsSelector( - (config) => - isIPAdapterModelConfig(config) || - isFluxReduxModelConfig(config) || - isChatGPT4oModelConfig(config) || - isFluxKontextApiModelConfig(config) || - isFluxKontextModelConfig(config) || - isGemini2_5ModelConfig(config) + (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isFluxKontextModelConfig(config) ); export const selectRegionalRefImageModels = buildModelsSelector( (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts index df178225ac0..3769e683618 100644 --- a/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedImageWorkflow.ts @@ -1,14 +1,10 @@ import { skipToken } from '@reduxjs/toolkit/query'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectWorkflowFetchDebounce } from 'features/system/store/configSlice'; import { useGetImageWorkflowQuery } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; import { useDebounce } from 'use-debounce'; export const useDebouncedImageWorkflow = (imageDTO?: ImageDTO | null) => { - const workflowFetchDebounce = useAppSelector(selectWorkflowFetchDebounce); - - const [debouncedImageName] = 
useDebounce(imageDTO?.has_workflow ? imageDTO.image_name : null, workflowFetchDebounce); + const [debouncedImageName] = useDebounce(imageDTO?.has_workflow ? imageDTO.image_name : null, 300); const result = useGetImageWorkflowQuery(debouncedImageName ?? skipToken); diff --git a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts index 4a96867bf5a..7cd41043dbd 100644 --- a/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts +++ b/invokeai/frontend/web/src/services/api/hooks/useDebouncedMetadata.ts @@ -1,14 +1,9 @@ import { skipToken } from '@reduxjs/toolkit/query'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectMetadataFetchDebounce } from 'features/system/store/configSlice'; import { imagesApi, useGetImageMetadataQuery } from 'services/api/endpoints/images'; -import { useGetVideoMetadataQuery, videosApi } from 'services/api/endpoints/videos'; import { useDebounce } from 'use-debounce'; export const useDebouncedMetadata = (imageName?: string | null) => { - const metadataFetchDebounce = useAppSelector(selectMetadataFetchDebounce); - - const [debouncedImageName] = useDebounce(imageName, metadataFetchDebounce); + const [debouncedImageName] = useDebounce(imageName, 300); const { currentData: cachedData } = imagesApi.endpoints.getImageMetadata.useQueryState(imageName ?? skipToken); const { currentData: data, isFetching } = useGetImageMetadataQuery(debouncedImageName ?? skipToken); @@ -17,16 +12,3 @@ export const useDebouncedMetadata = (imageName?: string | null) => { isLoading: cachedData ? false : isFetching || imageName !== debouncedImageName, }; }; - -export const useDebouncedVideoMetadata = (videoId?: string | null) => { - const metadataFetchDebounce = useAppSelector(selectMetadataFetchDebounce); - - const [debouncedVideoId] = useDebounce(videoId, metadataFetchDebounce); - const { currentData: cachedData } = videosApi.endpoints.getVideoMetadata.useQueryState(videoId ?? skipToken); - const { currentData: data, isFetching } = useGetVideoMetadataQuery(debouncedVideoId ?? skipToken); - - return { - metadata: cachedData ?? data, - isLoading: cachedData ? false : isFetching || videoId !== debouncedVideoId, - }; -}; diff --git a/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts b/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts index 5e1f15e7900..adf197b74a9 100644 --- a/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts +++ b/invokeai/frontend/web/src/services/api/hooks/useSelectedModelConfig.ts @@ -1,9 +1,7 @@ import { skipToken } from '@reduxjs/toolkit/query'; import { useAppSelector } from 'app/store/storeHooks'; import { selectModelKey } from 'features/controlLayers/store/paramsSlice'; -import { selectVideoModelKey } from 'features/parameters/store/videoSlice'; import { useGetModelConfigQuery } from 'services/api/endpoints/models'; -import type { VideoApiModelConfig } from 'services/api/types'; export const useSelectedModelConfig = () => { const key = useAppSelector(selectModelKey); @@ -11,10 +9,3 @@ export const useSelectedModelConfig = () => { return modelConfig; }; - -export const useSelectedVideoModelConfig = () => { - const key = useAppSelector(selectVideoModelKey); - const { data: modelConfig } = useGetModelConfigQuery(key ?? 
skipToken); - - return modelConfig as VideoApiModelConfig | undefined; -}; diff --git a/invokeai/frontend/web/src/services/api/index.ts b/invokeai/frontend/web/src/services/api/index.ts index c8876bbffa4..3254330d81d 100644 --- a/invokeai/frontend/web/src/services/api/index.ts +++ b/invokeai/frontend/web/src/services/api/index.ts @@ -7,9 +7,6 @@ import type { TagDescription, } from '@reduxjs/toolkit/query/react'; import { buildCreateApi, coreModule, fetchBaseQuery, reactHooksModule } from '@reduxjs/toolkit/query/react'; -import { $authToken } from 'app/store/nanostores/authToken'; -import { $baseUrl } from 'app/store/nanostores/baseUrl'; -import { $projectId } from 'app/store/nanostores/projectId'; import queryString from 'query-string'; import stableHash from 'stable-hash'; @@ -72,13 +69,10 @@ export const LIST_TAG = 'LIST'; export const LIST_ALL_TAG = 'LIST_ALL'; export const getBaseUrl = (): string => { - const baseUrl = $baseUrl.get(); - return baseUrl || window.location.href.replace(/\/$/, ''); + return window.location.href.replace(/\/$/, ''); }; const dynamicBaseQuery: BaseQueryFn = (args, api, extraOptions) => { - const authToken = $authToken.get(); - const projectId = $projectId.get(); const isOpenAPIRequest = (args instanceof Object && args.url.includes('openapi.json')) || (typeof args === 'string' && args.includes('openapi.json')); @@ -92,20 +86,6 @@ const dynamicBaseQuery: BaseQueryFn { - if (authToken) { - headers.set('Authorization', `Bearer ${authToken}`); - } - if (projectId) { - headers.set('project-id', projectId); - } - - return headers; - }; - } - const rawBaseQuery = fetchBaseQuery(fetchBaseQueryArgs); return rawBaseQuery(args, api, extraOptions); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 3be604b7d75..c06c58c577c 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -792,161 +792,6 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v1/videos/i/{video_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Video Dto - * @description Gets a video's DTO - */ - get: operations["get_video_dto"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - /** - * Update Video - * @description Updates a video - */ - patch: operations["update_video"]; - trace?: never; - }; - "/api/v1/videos/delete": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Delete Videos From List */ - post: operations["delete_videos_from_list"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/star": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Star Videos In List */ - post: operations["star_videos_in_list"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/unstar": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Unstar Videos In List */ - post: operations["unstar_videos_in_list"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/uncategorized": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; 
- put?: never; - post?: never; - /** - * Delete Uncategorized Videos - * @description Deletes all videos that are uncategorized - */ - delete: operations["delete_uncategorized_videos"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * List Video Dtos - * @description Lists video DTOs - */ - get: operations["list_video_dtos"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/ids": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Video Ids - * @description Gets ordered list of video ids with metadata for optimistic updates - */ - get: operations["get_video_ids"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/videos/videos_by_ids": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Get Videos By Ids - * @description Gets video DTOs for the specified video ids. Maintains order of input ids. - */ - post: operations["get_videos_by_ids"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; "/api/v1/boards/": { parameters: { query?: never; @@ -1083,46 +928,6 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v1/board_videos/batch": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Add Videos To Board - * @description Adds a list of videos to a board - */ - post: operations["add_videos_to_board"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/board_videos/batch/delete": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Remove Videos From Board - * @description Removes a list of videos from their board, if they had one - */ - post: operations["remove_videos_from_board"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; "/api/v1/model_relationships/i/{model_key}": { parameters: { query?: never; @@ -2147,19 +1952,6 @@ export type components = { */ type: "add"; }; - /** AddVideosToBoardResult */ - AddVideosToBoardResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Added Videos - * @description The video ids that were added to the board - */ - added_videos: string[]; - }; /** * Alpha Mask to Tensor * @description Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0. 
@@ -2200,7 +1992,7 @@ export type components = { */ type: "alpha_mask_to_tensor"; }; - AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | 
components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; + AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | 
components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * AppConfig * @description App Config Response @@ -2237,11 +2029,6 @@ export type components = { * @description App version */ version: string; - /** - * Highlights - * @description Highlights of release - */ - highlights?: string[] | null; }; /** * Apply Tensor Mask to Image @@ -2383,7 +2170,7 @@ export type components = { * fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional. * @enum {string} */ - BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "gemini-2.5" | "chatgpt-4o" | "flux-kontext" | "veo3" | "runway" | "unknown"; + BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "unknown"; /** Batch */ Batch: { /** @@ -2722,11 +2509,6 @@ export type components = { * @description The number of assets in the board. */ asset_count: number; - /** - * Video Count - * @description The number of videos in the board. - */ - video_count: number; }; /** * BoardField @@ -2771,19 +2553,6 @@ export type components = { */ image_names: string[]; }; - /** Body_add_videos_to_board */ - Body_add_videos_to_board: { - /** - * Board Id - * @description The id of the board to add to - */ - board_id: string; - /** - * Video Ids - * @description The ids of the videos to add - */ - video_ids: string[]; - }; /** Body_cancel_by_batch_ids */ Body_cancel_by_batch_ids: { /** @@ -2836,14 +2605,6 @@ export type components = { */ image_names: string[]; }; - /** Body_delete_videos_from_list */ - Body_delete_videos_from_list: { - /** - * Video Ids - * @description The list of ids of videos to delete - */ - video_ids: string[]; - }; /** Body_do_hf_login */ Body_do_hf_login: { /** @@ -2919,14 +2680,6 @@ export type components = { */ item_ids: number[]; }; - /** Body_get_videos_by_ids */ - Body_get_videos_by_ids: { - /** - * Video Ids - * @description Object containing list of video ids to fetch DTOs for - */ - video_ids: string[]; - }; /** Body_import_style_presets */ Body_import_style_presets: { /** @@ -2977,14 +2730,6 @@ export type components = { */ image_names: string[]; }; - /** Body_remove_videos_from_board */ - Body_remove_videos_from_board: { - /** - * Video Ids - * @description The ids of the videos to remove - */ - video_ids: string[]; - }; /** Body_set_workflow_thumbnail */ Body_set_workflow_thumbnail: { /** @@ -3002,14 +2747,6 @@ export type components = { */ image_names: string[]; }; - /** Body_star_videos_in_list */ - Body_star_videos_in_list: { - /** - * Video Ids - * @description The list of ids of videos to star - */ - video_ids: string[]; - }; /** Body_unstar_images_in_list */ Body_unstar_images_in_list: { /** @@ -3018,14 +2755,6 @@ export type components = { */ image_names: string[]; }; - /** Body_unstar_videos_in_list */ - Body_unstar_videos_in_list: { - /** - * Video Ids - * @description The list of ids of videos to unstar - */ - video_ids: string[]; - }; /** Body_update_model_image */ Body_update_model_image: { /** @@ -3422,11 +3151,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * 
@description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -3503,11 +3227,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -3680,11 +3399,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -5186,11 +4900,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** * Base @@ -5377,11 +5086,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -5456,11 +5160,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -5535,11 +5234,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -5614,11 +5308,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. 
@@ -5693,11 +5382,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -5769,11 +5453,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -5845,11 +5524,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -5921,11 +5595,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -6601,19 +6270,6 @@ export type components = { */ deleted_images: string[]; }; - /** DeleteVideosResult */ - DeleteVideosResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Deleted Videos - * @description The ids of the videos that were deleted - */ - deleted_videos: string[]; - }; /** * Denoise - SD1.5, SDXL * @description Denoises noisy latents to decodable images @@ -7204,7 +6860,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** @@ -7517,11 +7173,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default flux_redux @@ -9644,7 +9295,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | 
components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; + [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | 
components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"]; }; /** * Errors @@ -10127,11 +9778,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10200,11 +9846,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10273,11 +9914,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10346,11 +9982,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10419,11 +10050,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10494,11 +10120,6 @@ export type components = { * @description Url for 
image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -10569,11 +10190,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default ip_adapter @@ -12807,7 +12423,7 @@ export type components = { * Result * @description The result of the invocation */ - result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | 
components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; + result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | 
components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"]; }; /** * InvocationErrorEvent @@ -14718,11 +14334,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -14998,11 +14609,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15078,11 +14684,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15158,11 +14759,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15238,11 +14834,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15318,11 +14909,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15398,11 +14984,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15478,11 +15059,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15558,11 +15134,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15638,11 +15209,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ -15718,11 +15284,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default lora @@ 
-16012,11 +15573,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16101,11 +15657,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16187,11 +15738,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16274,11 +15820,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16361,11 +15902,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16448,11 +15984,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16535,11 +16066,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16617,11 +16143,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16701,11 +16222,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16785,11 +16301,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16874,11 +16385,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16958,11 +16464,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -16993,8 +16494,11 @@ export type components = { */ base: "sdxl"; }; - /** Main_ExternalAPI_ChatGPT4o_Config */ - Main_ExternalAPI_ChatGPT4o_Config: { + /** + * Main_GGUF_FLUX_Config + * @description Model config for main checkpoint models. + */ + Main_GGUF_FLUX_Config: { /** * Key * @description A unique key for this model. 
@@ -17042,11 +16546,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default main @@ -17061,442 +16560,481 @@ export type components = { /** @description Default settings for this model */ default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Format - * @default api - * @constant + * Config Path + * @description Path to the config for this model, if any. */ - format: "api"; + config_path: string | null; /** * Base - * @default chatgpt-4o + * @default flux * @constant */ - base: "chatgpt-4o"; - }; - /** Main_ExternalAPI_FluxKontext_Config */ - Main_ExternalAPI_FluxKontext_Config: { + base: "flux"; /** - * Key - * @description A unique key for this model. + * Format + * @default gguf_quantized + * @constant */ - key: string; + format: "gguf_quantized"; + variant: components["schemas"]["FluxVariantType"]; + }; + /** + * Combine Masks + * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. + */ + MaskCombineInvocation: { /** - * Hash - * @description The hash of the model file(s). + * @description The board to save the image to + * @default null */ - hash: string; + board?: components["schemas"]["BoardField"] | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * @description Optional metadata to be saved with the image + * @default null */ - path: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * File Size - * @description The size of the model in bytes. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - file_size: number; + id: string; /** - * Name - * @description Name of the model. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - name: string; + is_intermediate?: boolean; /** - * Description - * @description Model description + * Use Cache + * @description Whether or not to use the cache + * @default true */ - description: string | null; + use_cache?: boolean; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * @description The first mask to combine + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + mask1?: components["schemas"]["ImageField"] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
+ * @description The second image to combine + * @default null */ - source_api_response: string | null; + mask2?: components["schemas"]["ImageField"] | null; /** - * Cover Image - * @description Url for image to preview model + * type + * @default mask_combine + * @constant */ - cover_image: string | null; + type: "mask_combine"; + }; + /** + * Mask Edge + * @description Applies an edge mask to an image + */ + MaskEdgeInvocation: { /** - * Usage Info - * @description Usage information for this model + * @description The board to save the image to + * @default null */ - usage_info: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Type - * @default main - * @constant + * @description Optional metadata to be saved with the image + * @default null */ - type: "main"; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + id: string; /** - * Format - * @default api - * @constant + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - format: "api"; + is_intermediate?: boolean; /** - * Base - * @default flux-kontext - * @constant + * Use Cache + * @description Whether or not to use the cache + * @default true */ - base: "flux-kontext"; - }; - /** Main_ExternalAPI_Gemini2_5_Config */ - Main_ExternalAPI_Gemini2_5_Config: { + use_cache?: boolean; /** - * Key - * @description A unique key for this model. + * @description The image to apply the mask to + * @default null */ - key: string; + image?: components["schemas"]["ImageField"] | null; /** - * Hash - * @description The hash of the model file(s). + * Edge Size + * @description The size of the edge + * @default null */ - hash: string; + edge_size?: number | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Edge Blur + * @description The amount of blur on the edge + * @default null */ - path: string; + edge_blur?: number | null; /** - * File Size - * @description The size of the model in bytes. + * Low Threshold + * @description First threshold for the hysteresis procedure in Canny edge detection + * @default null */ - file_size: number; + low_threshold?: number | null; /** - * Name - * @description Name of the model. + * High Threshold + * @description Second threshold for the hysteresis procedure in Canny edge detection + * @default null */ - name: string; + high_threshold?: number | null; /** - * Description - * @description Model description + * type + * @default mask_edge + * @constant */ - description: string | null; + type: "mask_edge"; + }; + /** + * Mask from Alpha + * @description Extracts the alpha channel of an image as a mask. + */ + MaskFromAlphaInvocation: { /** - * Source - * @description The original source of the model (path, URL or repo_id). + * @description The board to save the image to + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + board?: components["schemas"]["BoardField"] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
+ * @description Optional metadata to be saved with the image + * @default null */ - source_api_response: string | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Cover Image - * @description Url for image to preview model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - cover_image: string | null; + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info: string | null; + is_intermediate?: boolean; /** - * Type - * @default main - * @constant + * Use Cache + * @description Whether or not to use the cache + * @default true */ - type: "main"; + use_cache?: boolean; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * @description The image to create the mask from + * @default null */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Format - * @default api - * @constant + * Invert + * @description Whether or not to invert the mask + * @default false */ - format: "api"; + invert?: boolean; /** - * Base - * @default gemini-2.5 + * type + * @default tomask * @constant */ - base: "gemini-2.5"; + type: "tomask"; }; - /** Main_ExternalAPI_Imagen3_Config */ - Main_ExternalAPI_Imagen3_Config: { + /** + * Mask from Segmented Image + * @description Generate a mask for a particular color in an ID Map + */ + MaskFromIDInvocation: { /** - * Key - * @description A unique key for this model. + * @description The board to save the image to + * @default null */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; + board?: components["schemas"]["BoardField"] | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * @description Optional metadata to be saved with the image + * @default null */ - path: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * File Size - * @description The size of the model in bytes. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - file_size: number; + id: string; /** - * Name - * @description Name of the model. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - name: string; + is_intermediate?: boolean; /** - * Description - * @description Model description + * Use Cache + * @description Whether or not to use the cache + * @default true */ - description: string | null; + use_cache?: boolean; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * @description The image to create the mask from + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + image?: components["schemas"]["ImageField"] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
+ * @description ID color to mask + * @default null */ - source_api_response: string | null; + color?: components["schemas"]["ColorField"] | null; /** - * Cover Image - * @description Url for image to preview model + * Threshold + * @description Threshold for color detection + * @default 100 */ - cover_image: string | null; + threshold?: number; /** - * Usage Info - * @description Usage information for this model + * Invert + * @description Whether or not to invert the mask + * @default false */ - usage_info: string | null; + invert?: boolean; /** - * Type - * @default main + * type + * @default mask_from_id * @constant */ - type: "main"; + type: "mask_from_id"; + }; + /** + * MaskOutput + * @description A torch mask tensor. + */ + MaskOutput: { + /** @description The mask. */ + mask: components["schemas"]["TensorField"]; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Width + * @description The width of the mask in pixels. */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + width: number; /** - * Format - * @default api - * @constant + * Height + * @description The height of the mask in pixels. */ - format: "api"; + height: number; /** - * Base - * @default imagen3 + * type + * @default mask_output * @constant */ - base: "imagen3"; + type: "mask_output"; }; - /** Main_ExternalAPI_Imagen4_Config */ - Main_ExternalAPI_Imagen4_Config: { + /** + * Tensor Mask to Image + * @description Convert a mask tensor to an image. + */ + MaskTensorToImageInvocation: { /** - * Key - * @description A unique key for this model. + * @description The board to save the image to + * @default null */ - key: string; + board?: components["schemas"]["BoardField"] | null; /** - * Hash - * @description The hash of the model file(s). + * @description Optional metadata to be saved with the image + * @default null */ - hash: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - path: string; + id: string; /** - * File Size - * @description The size of the model in bytes. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - file_size: number; + is_intermediate?: boolean; /** - * Name - * @description Name of the model. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - name: string; + use_cache?: boolean; /** - * Description - * @description Model description + * @description The mask tensor to convert. + * @default null */ - description: string | null; + mask?: components["schemas"]["TensorField"] | null; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * type + * @default tensor_mask_to_image + * @constant */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + type: "tensor_mask_to_image"; + }; + /** + * MediaPipe Face Detection + * @description Detects faces using MediaPipe. + */ + MediaPipeFaceDetectionInvocation: { /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
+ * @description The board to save the image to + * @default null */ - source_api_response: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Cover Image - * @description Url for image to preview model + * @description Optional metadata to be saved with the image + * @default null */ - cover_image: string | null; + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info: string | null; + is_intermediate?: boolean; /** - * Type - * @default main - * @constant + * Use Cache + * @description Whether or not to use the cache + * @default true */ - type: "main"; + use_cache?: boolean; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * @description The image to process + * @default null */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Format - * @default api - * @constant + * Max Faces + * @description Maximum number of faces to detect + * @default 1 */ - format: "api"; + max_faces?: number; /** - * Base - * @default imagen4 + * Min Confidence + * @description Minimum confidence for face detection + * @default 0.5 + */ + min_confidence?: number; + /** + * type + * @default mediapipe_face_detection * @constant */ - base: "imagen4"; + type: "mediapipe_face_detection"; }; /** - * Main_GGUF_FLUX_Config - * @description Model config for main checkpoint models. + * Metadata Merge + * @description Merged a collection of MetadataDict into a single MetadataDict. */ - Main_GGUF_FLUX_Config: { + MergeMetadataInvocation: { /** - * Key - * @description A unique key for this model. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - key: string; + id: string; /** - * Hash - * @description The hash of the model file(s). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - hash: string; + is_intermediate?: boolean; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - path: string; + use_cache?: boolean; /** - * File Size - * @description The size of the model in bytes. + * Collection + * @description Collection of Metadata + * @default null */ - file_size: number; + collection?: components["schemas"]["MetadataField"][] | null; /** - * Name - * @description Name of the model. + * type + * @default merge_metadata + * @constant */ - name: string; + type: "merge_metadata"; + }; + /** + * Merge Tiles to Image + * @description Merge multiple tile images into a single image. + */ + MergeTilesToImageInvocation: { /** - * Description - * @description Model description + * @description The board to save the image to + * @default null */ - description: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Source - * @description The original source of the model (path, URL or repo_id). 
+ * @description Optional metadata to be saved with the image + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. - */ - source_api_response: string | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Cover Image - * @description Url for image to preview model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - cover_image: string | null; + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info: string | null; + is_intermediate?: boolean; /** - * Type - * @default main - * @constant + * Use Cache + * @description Whether or not to use the cache + * @default true */ - type: "main"; + use_cache?: boolean; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Tiles With Images + * @description A list of tile images with tile properties. + * @default null */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + tiles_with_images?: components["schemas"]["TileWithImage"][] | null; /** - * Config Path - * @description Path to the config for this model, if any. + * Blend Mode + * @description blending type Linear or Seam + * @default Seam + * @enum {string} */ - config_path: string | null; + blend_mode?: "Linear" | "Seam"; /** - * Base - * @default flux - * @constant + * Blend Amount + * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles. + * @default 32 */ - base: "flux"; + blend_amount?: number; /** - * Format - * @default gguf_quantized + * type + * @default merge_tiles_to_image * @constant */ - format: "gguf_quantized"; - variant: components["schemas"]["FluxVariantType"]; + type: "merge_tiles_to_image"; }; /** - * Combine Masks - * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. + * MetadataField + * @description Pydantic model for metadata with custom root of type dict[str, Any]. + * Metadata is stored without a strict schema. */ - MaskCombineInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + MetadataField: Record; + /** + * Metadata Field Extractor + * @description Extracts the text value from an image's metadata given a key. + * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted). + */ + MetadataFieldExtractorInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -17515,37 +17053,28 @@ export type components = { */ use_cache?: boolean; /** - * @description The first mask to combine + * @description The image to extract metadata from * @default null */ - mask1?: components["schemas"]["ImageField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * @description The second image to combine + * Key + * @description The key in the image's metadata to extract the value from * @default null */ - mask2?: components["schemas"]["ImageField"] | null; + key?: string | null; /** * type - * @default mask_combine + * @default metadata_field_extractor * @constant */ - type: "mask_combine"; + type: "metadata_field_extractor"; }; /** - * Mask Edge - * @description Applies an edge mask to an image + * Metadata From Image + * @description Used to create a core metadata item then Add/Update it to the provided metadata */ - MaskEdgeInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + MetadataFromImageInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17564,56 +17093,70 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to apply the mask to + * @description The image to process * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Edge Size - * @description The size of the edge - * @default null + * type + * @default metadata_from_image + * @constant */ - edge_size?: number | null; + type: "metadata_from_image"; + }; + /** + * Metadata + * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict. + */ + MetadataInvocation: { /** - * Edge Blur - * @description The amount of blur on the edge - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - edge_blur?: number | null; + id: string; /** - * Low Threshold - * @description First threshold for the hysteresis procedure in Canny edge detection - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - low_threshold?: number | null; + is_intermediate?: boolean; /** - * High Threshold - * @description Second threshold for the hysteresis procedure in Canny edge detection + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Items + * @description A single metadata item or collection of metadata items * @default null */ - high_threshold?: number | null; + items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null; /** * type - * @default mask_edge + * @default metadata * @constant */ - type: "mask_edge"; + type: "metadata"; }; - /** - * Mask from Alpha - * @description Extracts the alpha channel of an image as a mask. 
- */ - MaskFromAlphaInvocation: { + /** MetadataItemField */ + MetadataItemField: { /** - * @description The board to save the image to - * @default null + * Label + * @description Label for this metadata item */ - board?: components["schemas"]["BoardField"] | null; + label: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Value + * @description The value for this metadata item (may be any type) */ - metadata?: components["schemas"]["MetadataField"] | null; + value: unknown; + }; + /** + * Metadata Item + * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value. + */ + MetadataItemInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17632,33 +17175,29 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to create the mask from + * Label + * @description Label for this metadata item * @default null */ - image?: components["schemas"]["ImageField"] | null; + label?: string | null; /** - * Invert - * @description Whether or not to invert the mask - * @default false + * Value + * @description The value for this metadata item (may be any type) + * @default null */ - invert?: boolean; + value?: unknown | null; /** * type - * @default tomask + * @default metadata_item * @constant */ - type: "tomask"; + type: "metadata_item"; }; /** - * Mask from Segmented Image - * @description Generate a mask for a particular color in an ID Map + * Metadata Item Linked + * @description Used to Create/Add/Update a value into a metadata label */ - MaskFromIDInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataItemLinkedInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -17682,68 +17221,61 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to create the mask from - * @default null + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - image?: components["schemas"]["ImageField"] | null; + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step"; /** - * @description ID color to mask + * Custom Label + * @description Label for this metadata item * @default null */ - color?: components["schemas"]["ColorField"] | null; - /** - * Threshold - * @description Threshold for color detection - * @default 100 - */ - threshold?: number; + custom_label?: string | null; /** - * Invert - * @description Whether or not to invert the mask - * @default false + * Value + * @description The value for this metadata item (may be any type) + * @default null */ - invert?: boolean; + value?: unknown | null; /** * type - * @default mask_from_id + * @default metadata_item_linked * @constant */ - type: "mask_from_id"; + type: "metadata_item_linked"; }; /** - * MaskOutput - * @description A torch mask tensor. + * MetadataItemOutput + * @description Metadata Item Output */ - MaskOutput: { - /** @description The mask. 
*/ - mask: components["schemas"]["TensorField"]; + MetadataItemOutput: { + /** @description Metadata Item */ + item: components["schemas"]["MetadataItemField"]; /** - * Width - * @description The width of the mask in pixels. + * type + * @default metadata_item_output + * @constant */ - width: number; - /** - * Height - * @description The height of the mask in pixels. - */ - height: number; + type: "metadata_item_output"; + }; + /** MetadataOutput */ + MetadataOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; /** * type - * @default mask_output + * @default metadata_output * @constant */ - type: "mask_output"; + type: "metadata_output"; }; /** - * Tensor Mask to Image - * @description Convert a mask tensor to an image. + * Metadata To Bool Collection + * @description Extracts a Boolean value Collection of a label from metadata */ - MaskTensorToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToBoolCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -17767,27 +17299,36 @@ export type components = { */ use_cache?: boolean; /** - * @description The mask tensor to convert. + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - mask?: components["schemas"]["TensorField"] | null; + custom_label?: string | null; + /** + * Default Value + * @description The default bool to use if not found in the metadata + * @default null + */ + default_value?: boolean[] | null; /** * type - * @default tensor_mask_to_image + * @default metadata_to_bool_collection * @constant */ - type: "tensor_mask_to_image"; + type: "metadata_to_bool_collection"; }; /** - * MediaPipe Face Detection - * @description Detects faces using MediaPipe. + * Metadata To Bool + * @description Extracts a Boolean value of a label from metadata */ - MediaPipeFaceDetectionInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToBoolInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -17811,34 +17352,41 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process - * @default null + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - image?: components["schemas"]["ImageField"] | null; + label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; /** - * Max Faces - * @description Maximum number of faces to detect - * @default 1 + * Custom Label + * @description Label for this metadata item + * @default null */ - max_faces?: number; + custom_label?: string | null; /** - * Min Confidence - * @description Minimum confidence for face detection - * @default 0.5 + * Default Value + * @description The default bool to use if not found in the metadata + * @default null */ - min_confidence?: number; + default_value?: boolean | null; /** * type - * @default mediapipe_face_detection + * @default metadata_to_bool * @constant */ - type: "mediapipe_face_detection"; + type: "metadata_to_bool"; }; /** - * Metadata Merge - * @description Merged a collection of MetadataDict into a single MetadataDict. 
+ * Metadata To ControlNets + * @description Extracts a Controlnets value of a label from metadata */ - MergeMetadataInvocation: { + MetadataToControlnetsInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17857,28 +17405,22 @@ export type components = { */ use_cache?: boolean; /** - * Collection - * @description Collection of Metadata + * ControlNet-List * @default null */ - collection?: components["schemas"]["MetadataField"][] | null; + control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** * type - * @default merge_metadata + * @default metadata_to_controlnets * @constant */ - type: "merge_metadata"; + type: "metadata_to_controlnets"; }; /** - * Merge Tiles to Image - * @description Merge multiple tile images into a single image. + * Metadata To Float Collection + * @description Extracts a Float value Collection of a label from metadata */ - MergeTilesToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToFloatCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -17902,43 +17444,41 @@ export type components = { */ use_cache?: boolean; /** - * Tiles With Images - * @description A list of tile images with tile properties. - * @default null + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - tiles_with_images?: components["schemas"]["TileWithImage"][] | null; + label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; /** - * Blend Mode - * @description blending type Linear or Seam - * @default Seam - * @enum {string} + * Custom Label + * @description Label for this metadata item + * @default null */ - blend_mode?: "Linear" | "Seam"; + custom_label?: string | null; /** - * Blend Amount - * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles. - * @default 32 + * Default Value + * @description The default float to use if not found in the metadata + * @default null */ - blend_amount?: number; + default_value?: number[] | null; /** * type - * @default merge_tiles_to_image + * @default metadata_to_float_collection * @constant */ - type: "merge_tiles_to_image"; + type: "metadata_to_float_collection"; }; /** - * MetadataField - * @description Pydantic model for metadata with custom root of type dict[str, Any]. - * Metadata is stored without a strict schema. - */ - MetadataField: Record; - /** - * Metadata Field Extractor - * @description Extracts the text value from an image's metadata given a key. - * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted). + * Metadata To Float + * @description Extracts a Float value of a label from metadata */ - MetadataFieldExtractorInvocation: { + MetadataToFloatInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -17957,28 +17497,41 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to extract metadata from + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - image?: components["schemas"]["ImageField"] | null; + custom_label?: string | null; /** - * Key - * @description The key in the image's metadata to extract the value from + * Default Value + * @description The default float to use if not found in the metadata * @default null */ - key?: string | null; + default_value?: number | null; /** * type - * @default metadata_field_extractor + * @default metadata_to_float * @constant */ - type: "metadata_field_extractor"; + type: "metadata_to_float"; }; /** - * Metadata From Image - * @description Used to create a core metadata item then Add/Update it to the provided metadata + * Metadata To IP-Adapters + * @description Extracts a IP-Adapters value of a label from metadata */ - MetadataFromImageInvocation: { + MetadataToIPAdaptersInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17997,22 +17550,28 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process + * IP-Adapter-List + * @description IP-Adapter to apply * @default null */ - image?: components["schemas"]["ImageField"] | null; + ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** * type - * @default metadata_from_image + * @default metadata_to_ip_adapters * @constant */ - type: "metadata_from_image"; + type: "metadata_to_ip_adapters"; }; /** - * Metadata - * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict. + * Metadata To Integer Collection + * @description Extracts an integer value Collection of a label from metadata */ - MetadataInvocation: { + MetadataToIntegerCollectionInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -18031,36 +17590,41 @@ export type components = { */ use_cache?: boolean; /** - * Items - * @description A single metadata item or collection of metadata items + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null; + custom_label?: string | null; + /** + * Default Value + * @description The default integer to use if not found in the metadata + * @default null + */ + default_value?: number[] | null; /** * type - * @default metadata + * @default metadata_to_integer_collection * @constant */ - type: "metadata"; + type: "metadata_to_integer_collection"; }; - /** MetadataItemField */ - MetadataItemField: { + /** + * Metadata To Integer + * @description Extracts an integer value of a label from metadata + */ + MetadataToIntegerInvocation: { /** - * Label - * @description Label for this metadata item + * @description Optional metadata to be saved with the image + * @default null */ - label: string; - /** - * Value - * @description The value for this metadata item (may be any type) - */ - value: unknown; - }; - /** - * Metadata Item - * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value. - */ - MetadataItemInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -18081,27 +17645,34 @@ export type components = { /** * Label * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - label?: string | null; + custom_label?: string | null; /** - * Value - * @description The value for this metadata item (may be any type) + * Default Value + * @description The default integer to use if not found in the metadata * @default null */ - value?: unknown | null; + default_value?: number | null; /** * type - * @default metadata_item + * @default metadata_to_integer * @constant */ - type: "metadata_item"; + type: "metadata_to_integer"; }; /** - * Metadata Item Linked - * @description Used to Create/Add/Update a value into a metadata label + * Metadata To LoRA Collection + * @description Extracts Lora(s) from metadata into a collection */ - MetadataItemLinkedInvocation: { + MetadataToLorasCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18124,62 +17695,47 @@ export type components = { * @default true */ use_cache?: boolean; - /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} - */ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step"; /** * Custom Label * @description Label for this metadata item - * @default null + * @default loras */ - custom_label?: string | null; + custom_label?: string; /** - * Value - * @description The value for this metadata item (may be any type) - * @default null + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. 
+ * @default [] */ - value?: unknown | null; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** * type - * @default metadata_item_linked + * @default metadata_to_lora_collection * @constant */ - type: "metadata_item_linked"; + type: "metadata_to_lora_collection"; }; /** - * MetadataItemOutput - * @description Metadata Item Output + * MetadataToLorasCollectionOutput + * @description Model loader output */ - MetadataItemOutput: { - /** @description Metadata Item */ - item: components["schemas"]["MetadataItemField"]; + MetadataToLorasCollectionOutput: { /** - * type - * @default metadata_item_output - * @constant + * LoRAs + * @description Collection of LoRA model and weights */ - type: "metadata_item_output"; - }; - /** MetadataOutput */ - MetadataOutput: { - /** @description Metadata Dict */ - metadata: components["schemas"]["MetadataField"]; + lora: components["schemas"]["LoRAField"][]; /** * type - * @default metadata_output + * @default metadata_to_lora_collection_output * @constant */ - type: "metadata_output"; + type: "metadata_to_lora_collection_output"; }; /** - * Metadata To Bool Collection - * @description Extracts a Boolean value Collection of a label from metadata + * Metadata To LoRAs + * @description Extracts a Loras value of a label from metadata */ - MetadataToBoolCollectionInvocation: { + MetadataToLorasInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18203,36 +17759,29 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} - */ - label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; - /** - * Custom Label - * @description Label for this metadata item + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - custom_label?: string | null; + unet?: components["schemas"]["UNetField"] | null; /** - * Default Value - * @description The default bool to use if not found in the metadata + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - default_value?: boolean[] | null; + clip?: components["schemas"]["CLIPField"] | null; /** * type - * @default metadata_to_bool_collection + * @default metadata_to_loras * @constant */ - type: "metadata_to_bool_collection"; + type: "metadata_to_loras"; }; /** - * Metadata To Bool - * @description Extracts a Boolean value of a label from metadata + * Metadata To Model + * @description Extracts a Model value of a label from metadata */ - MetadataToBoolInvocation: { + MetadataToModelInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18258,10 +17807,10 @@ export type components = { /** * Label * @description Label for this metadata item - * @default * CUSTOM LABEL * + * @default model * @enum {string} */ - label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; + label?: "* CUSTOM LABEL *" | "model"; /** * Custom Label * @description Label for this metadata item @@ -18269,62 +17818,59 @@ export type components = { */ custom_label?: string | null; /** - * Default Value - * @description The default bool to use if not found in the metadata + * @description The default model to use if not found in the metadata * @default null */ - default_value?: boolean | null; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default metadata_to_bool + * @default metadata_to_model * @constant */ - type: 
"metadata_to_bool"; + type: "metadata_to_model"; }; /** - * Metadata To ControlNets - * @description Extracts a Controlnets value of a label from metadata + * MetadataToModelOutput + * @description String to main model output */ - MetadataToControlnetsInvocation: { + MetadataToModelOutput: { /** - * @description Optional metadata to be saved with the image - * @default null + * Model + * @description Main model (UNet, VAE, CLIP) to load */ - metadata?: components["schemas"]["MetadataField"] | null; + model: components["schemas"]["ModelIdentifierField"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Name + * @description Model Name */ - id: string; + name: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * UNet + * @description UNet (scheduler, LoRAs) */ - is_intermediate?: boolean; + unet: components["schemas"]["UNetField"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * VAE + * @description VAE */ - use_cache?: boolean; + vae: components["schemas"]["VAEField"]; /** - * ControlNet-List - * @default null + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + clip: components["schemas"]["CLIPField"]; /** * type - * @default metadata_to_controlnets + * @default metadata_to_model_output * @constant */ - type: "metadata_to_controlnets"; + type: "metadata_to_model_output"; }; /** - * Metadata To Float Collection - * @description Extracts a Float value Collection of a label from metadata + * Metadata To SDXL LoRAs + * @description Extracts a SDXL Loras value of a label from metadata */ - MetadataToFloatCollectionInvocation: { + MetadataToSDXLLorasInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18348,36 +17894,35 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + unet?: components["schemas"]["UNetField"] | null; /** - * Custom Label - * @description Label for this metadata item + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - custom_label?: string | null; + clip?: components["schemas"]["CLIPField"] | null; /** - * Default Value - * @description The default float to use if not found in the metadata + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - default_value?: number[] | null; + clip2?: components["schemas"]["CLIPField"] | null; /** * type - * @default metadata_to_float_collection + * @default metadata_to_sdlx_loras * @constant */ - type: "metadata_to_float_collection"; + type: "metadata_to_sdlx_loras"; }; /** - * Metadata To Float - * @description Extracts a Float value of a label from metadata + * Metadata To SDXL Model + * @description Extracts a SDXL Model value of a label from metadata */ - MetadataToFloatInvocation: { + MetadataToSDXLModelInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18403,10 +17948,10 @@ export type components = { /** * Label * @description Label for this 
metadata item - * @default * CUSTOM LABEL * + * @default model * @enum {string} */ - label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + label?: "* CUSTOM LABEL *" | "model"; /** * Custom Label * @description Label for this metadata item @@ -18414,63 +17959,64 @@ export type components = { */ custom_label?: string | null; /** - * Default Value - * @description The default float to use if not found in the metadata + * @description The default SDXL Model to use if not found in the metadata * @default null */ - default_value?: number | null; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default metadata_to_float + * @default metadata_to_sdxl_model * @constant */ - type: "metadata_to_float"; + type: "metadata_to_sdxl_model"; }; /** - * Metadata To IP-Adapters - * @description Extracts a IP-Adapters value of a label from metadata + * MetadataToSDXLModelOutput + * @description String to SDXL main model output */ - MetadataToIPAdaptersInvocation: { + MetadataToSDXLModelOutput: { /** - * @description Optional metadata to be saved with the image - * @default null + * Model + * @description Main model (UNet, VAE, CLIP) to load */ - metadata?: components["schemas"]["MetadataField"] | null; + model: components["schemas"]["ModelIdentifierField"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Name + * @description Model Name */ - id: string; + name: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * UNet + * @description UNet (scheduler, LoRAs) */ - is_intermediate?: boolean; + unet: components["schemas"]["UNetField"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - use_cache?: boolean; + clip: components["schemas"]["CLIPField"]; /** - * IP-Adapter-List - * @description IP-Adapter to apply - * @default null + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; /** * type - * @default metadata_to_ip_adapters + * @default metadata_to_sdxl_model_output * @constant */ - type: "metadata_to_ip_adapters"; + type: "metadata_to_sdxl_model_output"; }; /** - * Metadata To Integer Collection - * @description Extracts an integer value Collection of a label from metadata + * Metadata To Scheduler + * @description Extracts a Scheduler value of a label from metadata */ - MetadataToIntegerCollectionInvocation: { + MetadataToSchedulerInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18496,10 +18042,10 @@ export type components = { /** * Label * @description Label for this metadata item - * @default * CUSTOM LABEL * + * @default scheduler * @enum {string} */ - label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + label?: "* CUSTOM LABEL *" | "scheduler"; /** * Custom Label * @description Label for this metadata item @@ -18508,22 +18054,23 @@ export type components = { custom_label?: string | null; /** * Default Value - * @description The default integer to use if not found in 
the metadata - * @default null + * @description The default scheduler to use if not found in the metadata + * @default euler + * @enum {string} */ - default_value?: number[] | null; + default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type - * @default metadata_to_integer_collection + * @default metadata_to_scheduler * @constant */ - type: "metadata_to_integer_collection"; + type: "metadata_to_scheduler"; }; /** - * Metadata To Integer - * @description Extracts an integer value of a label from metadata + * Metadata To String Collection + * @description Extracts a string collection value of a label from metadata */ - MetadataToIntegerInvocation: { + MetadataToStringCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18552,7 +18099,7 @@ export type components = { * @default * CUSTOM LABEL * * @enum {string} */ - label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; /** * Custom Label * @description Label for this metadata item @@ -18561,22 +18108,22 @@ export type components = { custom_label?: string | null; /** * Default Value - * @description The default integer to use if not found in the metadata + * @description The default string collection to use if not found in the metadata * @default null */ - default_value?: number | null; + default_value?: string[] | null; /** * type - * @default metadata_to_integer + * @default metadata_to_string_collection * @constant */ - type: "metadata_to_integer"; + type: "metadata_to_string_collection"; }; /** - * Metadata To LoRA Collection - * @description Extracts Lora(s) from metadata into a collection + * Metadata To String + * @description Extracts a string value of a label from metadata */ - MetadataToLorasCollectionInvocation: { + MetadataToStringInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18600,46 +18147,36 @@ export type components = { */ use_cache?: boolean; /** - * Custom Label + * Label * @description Label for this metadata item - * @default loras - */ - custom_label?: string; - /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. 
- * @default [] + * @default * CUSTOM LABEL * + * @enum {string} */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; /** - * type - * @default metadata_to_lora_collection - * @constant + * Custom Label + * @description Label for this metadata item + * @default null */ - type: "metadata_to_lora_collection"; - }; - /** - * MetadataToLorasCollectionOutput - * @description Model loader output - */ - MetadataToLorasCollectionOutput: { + custom_label?: string | null; /** - * LoRAs - * @description Collection of LoRA model and weights + * Default Value + * @description The default string to use if not found in the metadata + * @default null */ - lora: components["schemas"]["LoRAField"][]; + default_value?: string | null; /** * type - * @default metadata_to_lora_collection_output + * @default metadata_to_string * @constant */ - type: "metadata_to_lora_collection_output"; + type: "metadata_to_string"; }; /** - * Metadata To LoRAs - * @description Extracts a Loras value of a label from metadata + * Metadata To T2I-Adapters + * @description Extracts a T2I-Adapters value of a label from metadata */ - MetadataToLorasInvocation: { + MetadataToT2IAdaptersInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18663,29 +18200,23 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet?: components["schemas"]["UNetField"] | null; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * T2I-Adapter + * @description IP-Adapter to apply * @default null */ - clip?: components["schemas"]["CLIPField"] | null; + t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; /** * type - * @default metadata_to_loras + * @default metadata_to_t2i_adapters * @constant */ - type: "metadata_to_loras"; + type: "metadata_to_t2i_adapters"; }; /** - * Metadata To Model - * @description Extracts a Model value of a label from metadata + * Metadata To VAE + * @description Extracts a VAE value of a label from metadata */ - MetadataToModelInvocation: { + MetadataToVAEInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -18711,10 +18242,10 @@ export type components = { /** * Label * @description Label for this metadata item - * @default model + * @default vae * @enum {string} */ - label?: "* CUSTOM LABEL *" | "model"; + label?: "* CUSTOM LABEL *" | "vae"; /** * Custom Label * @description Label for this metadata item @@ -18722,64 +18253,57 @@ export type components = { */ custom_label?: string | null; /** - * @description The default model to use if not found in the metadata + * @description The default VAE to use if not found in the metadata * @default null */ - default_value?: components["schemas"]["ModelIdentifierField"] | null; + default_value?: components["schemas"]["VAEField"] | null; /** * type - * @default metadata_to_model + * @default metadata_to_vae * @constant */ - type: "metadata_to_model"; + type: "metadata_to_vae"; }; /** - * MetadataToModelOutput - * @description String to main model output + * ModelFormat + * @description Storage format of model. 
+ * @enum {string} */ - MetadataToModelOutput: { - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * Name - * @description Model Name - */ - name: string; + ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "unknown"; + /** ModelIdentifierField */ + ModelIdentifierField: { /** - * UNet - * @description UNet (scheduler, LoRAs) + * Key + * @description The model's unique key */ - unet: components["schemas"]["UNetField"]; + key: string; /** - * VAE - * @description VAE + * Hash + * @description The model's BLAKE3 hash */ - vae: components["schemas"]["VAEField"]; + hash: string; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Name + * @description The model's name */ - clip: components["schemas"]["CLIPField"]; + name: string; + /** @description The model's base model type */ + base: components["schemas"]["BaseModelType"]; + /** @description The model's type */ + type: components["schemas"]["ModelType"]; /** - * type - * @default metadata_to_model_output - * @constant + * @description The submodel to load, if this is a main model + * @default null */ - type: "metadata_to_model_output"; + submodel_type?: components["schemas"]["SubModelType"] | null; }; /** - * Metadata To SDXL LoRAs - * @description Extracts a SDXL Loras value of a label from metadata + * Any Model + * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as + * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an + * error. */ - MetadataToSDXLLorasInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + ModelIdentifierInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -18798,967 +18322,929 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) + * Model + * @description The model to select * @default null */ - unet?: components["schemas"]["UNetField"] | null; + model?: components["schemas"]["ModelIdentifierField"] | null; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * type + * @default model_identifier + * @constant */ - clip?: components["schemas"]["CLIPField"] | null; + type: "model_identifier"; + }; + /** + * ModelIdentifierOutput + * @description Model identifier output + */ + ModelIdentifierOutput: { /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Model + * @description Model identifier */ - clip2?: components["schemas"]["CLIPField"] | null; + model: components["schemas"]["ModelIdentifierField"]; /** * type - * @default metadata_to_sdlx_loras + * @default model_identifier_output * @constant */ - type: "metadata_to_sdlx_loras"; + type: "model_identifier_output"; }; /** - * Metadata To SDXL Model - * @description Extracts a SDXL Model value of a label from metadata + * ModelInstallCancelledEvent + * @description Event model for model_install_cancelled */ - MetadataToSDXLModelInvocation: { + ModelInstallCancelledEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The ID of the install job */ - id: string; + id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Source + * @description Source of the model; local path, repo_id or url */ - is_intermediate?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + }; + /** + * ModelInstallCompleteEvent + * @description Event model for model_install_complete + */ + ModelInstallCompleteEvent: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Timestamp + * @description The timestamp of the event */ - use_cache?: boolean; + timestamp: number; /** - * Label - * @description Label for this metadata item - * @default model - * @enum {string} + * Id + * @description The ID of the install job */ - label?: "* CUSTOM LABEL *" | "model"; + id: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Source + * @description Source of the model; local path, repo_id or url */ - custom_label?: string | null; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * @description The default SDXL Model to use if not found in the metadata - * @default null + * Key + * @description Model config record key */ - default_value?: components["schemas"]["ModelIdentifierField"] | null; + key: string; /** - * type - * @default metadata_to_sdxl_model - * @constant + * Total Bytes + * @description Size of the model (may be None for installation of a local path) */ - type: "metadata_to_sdxl_model"; + total_bytes: number | null; + /** + * Config + * @description The installed model's config + */ + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | 
components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; /** - * MetadataToSDXLModelOutput - * @description String to SDXL main model output + * ModelInstallDownloadProgressEvent + * @description Event model for model_install_download_progress */ - MetadataToSDXLModelOutput: { + ModelInstallDownloadProgressEvent: { /** - * Model - * @description Main model (UNet, VAE, CLIP) to load + * Timestamp + * @description The timestamp of the event */ - model: components["schemas"]["ModelIdentifierField"]; + timestamp: number; /** - * Name - * @description Model Name + * Id + * @description The ID of the install job */ - name: string; + id: number; /** - * UNet - * @description UNet (scheduler, LoRAs) + * Source + * @description Source of the model; local path, repo_id or url */ - unet: components["schemas"]["UNetField"]; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Local Path + * @description Where model is downloading to */ - clip: components["schemas"]["CLIPField"]; + local_path: string; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Bytes + * @description Number of bytes downloaded so far */ - clip2: components["schemas"]["CLIPField"]; + bytes: number; /** - * VAE - * @description VAE + * Total Bytes + * @description Total size of download, including all files */ - vae: components["schemas"]["VAEField"]; + total_bytes: number; /** - * type - * @default metadata_to_sdxl_model_output - * @constant + * Parts + * @description Progress of downloading URLs that comprise the model, if any */ - type: "metadata_to_sdxl_model_output"; + parts: { + [key: string]: number | string; + }[]; }; /** - * Metadata To Scheduler - * @description Extracts a Scheduler value of a label from metadata + * ModelInstallDownloadStartedEvent + * @description Event model for model_install_download_started 
*/ - MetadataToSchedulerInvocation: { + ModelInstallDownloadStartedEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * @description The ID of the install job */ - is_intermediate?: boolean; + id: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Source + * @description Source of the model; local path, repo_id or url */ - use_cache?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * Label - * @description Label for this metadata item - * @default scheduler - * @enum {string} + * Local Path + * @description Where model is downloading to */ - label?: "* CUSTOM LABEL *" | "scheduler"; + local_path: string; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Bytes + * @description Number of bytes downloaded so far */ - custom_label?: string | null; + bytes: number; /** - * Default Value - * @description The default scheduler to use if not found in the metadata - * @default euler - * @enum {string} + * Total Bytes + * @description Total size of download, including all files */ - default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + total_bytes: number; /** - * type - * @default metadata_to_scheduler - * @constant + * Parts + * @description Progress of downloading URLs that comprise the model, if any */ - type: "metadata_to_scheduler"; + parts: { + [key: string]: number | string; + }[]; }; /** - * Metadata To String Collection - * @description Extracts a string collection value of a label from metadata + * ModelInstallDownloadsCompleteEvent + * @description Emitted once when an install job becomes active. */ - MetadataToStringCollectionInvocation: { + ModelInstallDownloadsCompleteEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The ID of the install job */ - id: string; + id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Source + * @description Source of the model; local path, repo_id or url */ - is_intermediate?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + }; + /** + * ModelInstallErrorEvent + * @description Event model for model_install_error + */ + ModelInstallErrorEvent: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Timestamp + * @description The timestamp of the event */ - use_cache?: boolean; + timestamp: number; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Id + * @description The ID of the install job */ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; + id: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Source + * @description Source of the model; local path, repo_id or url */ - custom_label?: string | null; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * Default Value - * @description The default string collection to use if not found in the metadata - * @default null + * Error Type + * @description The name of the exception */ - default_value?: string[] | null; + error_type: string; /** - * type - * @default metadata_to_string_collection - * @constant + * Error + * @description A text description of the exception */ - type: "metadata_to_string_collection"; + error: string; }; /** - * Metadata To String - * @description Extracts a string value of a label from metadata + * ModelInstallJob + * @description Object that tracks the current status of an install request. */ - MetadataToStringInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + ModelInstallJob: { /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description Unique ID for this job */ - id: string; + id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * @description Current status of install process + * @default waiting */ - is_intermediate?: boolean; + status?: components["schemas"]["InstallStatus"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Error Reason + * @description Information about why the job failed */ - use_cache?: boolean; + error_reason?: string | null; + /** @description Configuration information (e.g. 'description') to apply to model. */ + config_in?: components["schemas"]["ModelRecordChanges"]; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Config Out + * @description After successful installation, this will hold the configuration object. 
*/ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; + config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | 
components["schemas"]["Unknown_Config"]) | null; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Inplace + * @description Leave model in its current location; otherwise install under models directory + * @default false */ - custom_label?: string | null; + inplace?: boolean; /** - * Default Value - * @description The default string to use if not found in the metadata - * @default null + * Source + * @description Source (URL, repo_id, or local path) of model */ - default_value?: string | null; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * type - * @default metadata_to_string - * @constant + * Local Path + * Format: path + * @description Path to locally-downloaded model; may be the same as the source */ - type: "metadata_to_string"; - }; - /** - * Metadata To T2I-Adapters - * @description Extracts a T2I-Adapters value of a label from metadata - */ - MetadataToT2IAdaptersInvocation: { + local_path: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Bytes + * @description For a remote model, the number of bytes downloaded so far (may not be available) + * @default 0 */ - metadata?: components["schemas"]["MetadataField"] | null; + bytes?: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Total Bytes + * @description Total size of the model to be installed + * @default 0 */ - id: string; + total_bytes?: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source Metadata + * @description Metadata provided by the model source */ - is_intermediate?: boolean; + source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Download Parts + * @description Download jobs contributing to this install */ - use_cache?: boolean; + download_parts?: components["schemas"]["DownloadJob"][]; /** - * T2I-Adapter - * @description IP-Adapter to apply - * @default null + * Error + * @description On an error condition, this field will contain the text of the exception */ - t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + error?: string | null; /** - * type - * @default metadata_to_t2i_adapters - * @constant + * Error Traceback + * @description On an error condition, this field will contain the exception traceback */ - type: "metadata_to_t2i_adapters"; + error_traceback?: string | null; }; /** - * Metadata To VAE - * @description Extracts a VAE value of a label from metadata + * ModelInstallStartedEvent + * @description Event model for model_install_started */ - MetadataToVAEInvocation: { + ModelInstallStartedEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Label - * @description Label for this metadata item - * @default vae - * @enum {string} - */ - label?: "* CUSTOM LABEL *" | "vae"; - /** - * Custom Label - * @description Label for this metadata item - * @default null - */ - custom_label?: string | null; - /** - * @description The default VAE to use if not found in the metadata - * @default null + * @description The ID of the install job */ - default_value?: components["schemas"]["VAEField"] | null; + id: number; /** - * type - * @default metadata_to_vae - * @constant + * Source + * @description Source of the model; local path, repo_id or url */ - type: "metadata_to_vae"; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; }; /** - * ModelFormat - * @description Storage format of model. - * @enum {string} + * ModelLoadCompleteEvent + * @description Event model for model_load_complete */ - ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown"; - /** ModelIdentifierField */ - ModelIdentifierField: { - /** - * Key - * @description The model's unique key - */ - key: string; + ModelLoadCompleteEvent: { /** - * Hash - * @description The model's BLAKE3 hash + * Timestamp + * @description The timestamp of the event */ - hash: string; + timestamp: number; /** - * Name - * @description The model's name + * Config + * @description The model's config */ - name: string; - /** @description The model's base model type */ - base: components["schemas"]["BaseModelType"]; - /** @description The model's type */ - type: components["schemas"]["ModelType"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | 
components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** - * @description The submodel to load, if this is a main model + * @description The submodel type, if any * @default null */ - submodel_type?: components["schemas"]["SubModelType"] | null; + submodel_type: components["schemas"]["SubModelType"] | null; }; /** - * Any Model - * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as - * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an - * error. + * ModelLoadStartedEvent + * @description Event model for model_load_started */ - ModelIdentifierInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + ModelLoadStartedEvent: { /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Timestamp + * @description The timestamp of the event */ - is_intermediate?: boolean; + timestamp: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Config + * @description The model's config */ - use_cache?: boolean; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | 
components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** - * Model - * @description The model to select + * @description The submodel type, if any * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * type - * @default model_identifier - * @constant - */ - type: "model_identifier"; + submodel_type: components["schemas"]["SubModelType"] | null; }; /** - * ModelIdentifierOutput - * @description Model identifier output + * ModelLoaderOutput + * @description Model loader output */ - ModelIdentifierOutput: { + ModelLoaderOutput: { /** - * Model - * @description Model identifier + * VAE + * @description VAE */ - model: components["schemas"]["ModelIdentifierField"]; + vae: components["schemas"]["VAEField"]; /** * type - * @default model_identifier_output + * @default model_loader_output * @constant */ - type: "model_identifier_output"; - }; - /** - * ModelInstallCancelledEvent - * @description Event model for model_install_cancelled - */ - ModelInstallCancelledEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + type: "model_loader_output"; /** - * Id - * @description The ID of the install job + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - id: number; + clip: components["schemas"]["CLIPField"]; /** - * Source - * @description Source of the model; local path, repo_id or url + * UNet + * @description UNet (scheduler, LoRAs) */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + unet: components["schemas"]["UNetField"]; }; /** - * ModelInstallCompleteEvent - * @description Event model for model_install_complete + * ModelRecordChanges + * @description A set of changes to apply to a model. */ - ModelInstallCompleteEvent: { + ModelRecordChanges: { /** - * Timestamp - * @description The timestamp of the event + * Source + * @description original source of the model */ - timestamp: number; + source?: string | null; + /** @description type of model source */ + source_type?: components["schemas"]["ModelSourceType"] | null; /** - * Id - * @description The ID of the install job + * Source Api Response + * @description metadata from remote source */ - id: number; + source_api_response?: string | null; /** - * Source - * @description Source of the model; local path, repo_id or url + * Name + * @description Name of the model. */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + name?: string | null; + /** + * Path + * @description Path to the model. + */ + path?: string | null; + /** + * Description + * @description Model description + */ + description?: string | null; + /** @description The base model. 
*/ + base?: components["schemas"]["BaseModelType"] | null; + /** @description Type of model */ + type?: components["schemas"]["ModelType"] | null; /** * Key - * @description Model config record key + * @description Database ID for this model */ - key: string; + key?: string | null; /** - * Total Bytes - * @description Size of the model (may be None for installation of a local path) + * Hash + * @description hash of model file */ - total_bytes: number | null; + hash?: string | null; /** - * Config - * @description The installed model's config + * File Size + * @description Size of model file */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; - }; - /** - * ModelInstallDownloadProgressEvent - * @description Event model for model_install_download_progress - */ - ModelInstallDownloadProgressEvent: { + file_size?: number | null; /** - * Timestamp - * @description The timestamp of the event + * Format + * @description format of model file */ - timestamp: number; + format?: string | null; /** - * Id - * @description The ID of the install job + * Trigger Phrases + * @description Set of trigger phrases for this model */ - id: number; + trigger_phrases?: string[] | null; /** - * Source - * @description Source of the model; local path, repo_id or url + * Default Settings + * @description Default settings for this model */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Local Path - * @description Where model is downloading to + * Variant + * @description The variant of the model. */ - local_path: string; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null; + /** @description The prediction type of the model. */ + prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; /** - * Bytes - * @description Number of bytes downloaded so far + * Upcast Attention + * @description Whether to upcast attention. 
*/ - bytes: number; + upcast_attention?: boolean | null; /** - * Total Bytes - * @description Total size of download, including all files + * Config Path + * @description Path to config file for model */ - total_bytes: number; + config_path?: string | null; + }; + /** ModelRelationshipBatchRequest */ + ModelRelationshipBatchRequest: { /** - * Parts - * @description Progress of downloading URLs that comprise the model, if any + * Model Keys + * @description List of model keys to fetch related models for */ - parts: { - [key: string]: number | string; - }[]; + model_keys: string[]; }; - /** - * ModelInstallDownloadStartedEvent - * @description Event model for model_install_download_started - */ - ModelInstallDownloadStartedEvent: { + /** ModelRelationshipCreateRequest */ + ModelRelationshipCreateRequest: { /** - * Timestamp - * @description The timestamp of the event + * Model Key 1 + * @description The key of the first model in the relationship */ - timestamp: number; + model_key_1: string; + /** + * Model Key 2 + * @description The key of the second model in the relationship + */ + model_key_2: string; + }; + /** + * ModelRepoVariant + * @description Various hugging face variants on the diffusers format. + * @enum {string} + */ + ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax"; + /** + * ModelSourceType + * @description Model source type. + * @enum {string} + */ + ModelSourceType: "path" | "url" | "hf_repo_id"; + /** + * ModelType + * @description Model type. + * @enum {string} + */ + ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "unknown"; + /** + * ModelVariantType + * @description Variant type. + * @enum {string} + */ + ModelVariantType: "normal" | "inpaint" | "depth"; + /** + * ModelsList + * @description Return list of configs. 
+ */ + ModelsList: { + /** Models */ + models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; + }; + /** + * Multiply Integers + * @description Multiplies two 
numbers + */ + MultiplyInvocation: { /** * Id - * @description The ID of the install job + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + is_intermediate?: boolean; /** - * Local Path - * @description Where model is downloading to + * Use Cache + * @description Whether or not to use the cache + * @default true */ - local_path: string; + use_cache?: boolean; /** - * Bytes - * @description Number of bytes downloaded so far + * A + * @description The first number + * @default 0 */ - bytes: number; + a?: number; /** - * Total Bytes - * @description Total size of download, including all files + * B + * @description The second number + * @default 0 */ - total_bytes: number; + b?: number; /** - * Parts - * @description Progress of downloading URLs that comprise the model, if any + * type + * @default mul + * @constant */ - parts: { - [key: string]: number | string; - }[]; + type: "mul"; }; - /** - * ModelInstallDownloadsCompleteEvent - * @description Emitted once when an install job becomes active. - */ - ModelInstallDownloadsCompleteEvent: { + /** NodeFieldValue */ + NodeFieldValue: { /** - * Timestamp - * @description The timestamp of the event + * Node Path + * @description The node into which this batch data item will be substituted. */ - timestamp: number; + node_path: string; /** - * Id - * @description The ID of the install job + * Field Name + * @description The field into which this batch data item will be substituted. */ - id: number; + field_name: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Value + * @description The value to substitute into the node/field. */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + value: string | number | components["schemas"]["ImageField"]; }; /** - * ModelInstallErrorEvent - * @description Event model for model_install_error + * Create Latent Noise + * @description Generates latent noise. */ - ModelInstallErrorEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + NoiseInvocation: { /** * Id - * @description The ID of the install job + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + is_intermediate?: boolean; /** - * Error Type - * @description The name of the exception + * Use Cache + * @description Whether or not to use the cache + * @default true */ - error_type: string; + use_cache?: boolean; /** - * Error - * @description A text description of the exception + * Seed + * @description Seed for random number generation + * @default 0 */ - error: string; - }; - /** - * ModelInstallJob - * @description Object that tracks the current status of an install request. - */ - ModelInstallJob: { + seed?: number; /** - * Id - * @description Unique ID for this job + * Width + * @description Width of output (px) + * @default 512 */ - id: number; + width?: number; /** - * @description Current status of install process - * @default waiting - */ - status?: components["schemas"]["InstallStatus"]; + * Height + * @description Height of output (px) + * @default 512 + */ + height?: number; /** - * Error Reason - * @description Information about why the job failed + * Use Cpu + * @description Use CPU for noise generation (for reproducible results across platforms) + * @default true */ - error_reason?: string | null; - /** @description Configuration information (e.g. 'description') to apply to model. */ - config_in?: components["schemas"]["ModelRecordChanges"]; + use_cpu?: boolean; /** - * Config Out - * @description After successful installation, this will hold the configuration object. + * type + * @default noise + * @constant */ - config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | 
components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]) | null; + type: "noise"; + }; + /** + * NoiseOutput + * @description Invocation noise output + */ + NoiseOutput: { + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; /** - * Inplace - * @description Leave model in its current location; otherwise install under models directory - * @default false + * Width + * @description Width of output (px) */ - inplace?: boolean; + width: number; /** - * Source - * @description Source (URL, repo_id, or local path) of model + * Height + * @description Height of output (px) */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + height: number; /** - * Local Path - * Format: path - * @description Path to locally-downloaded model; may be the same as the source + * type + * @default noise_output + * @constant */ - local_path: string; + type: "noise_output"; + }; + /** + * Normal Map + * @description Generates a normal map. 
+ */ + NormalMapInvocation: { /** - * Bytes - * @description For a remote model, the number of bytes downloaded so far (may not be available) - * @default 0 + * @description The board to save the image to + * @default null */ - bytes?: number; + board?: components["schemas"]["BoardField"] | null; /** - * Total Bytes - * @description Total size of the model to be installed - * @default 0 + * @description Optional metadata to be saved with the image + * @default null */ - total_bytes?: number; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Source Metadata - * @description Metadata provided by the model source + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null; + id: string; /** - * Download Parts - * @description Download jobs contributing to this install + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - download_parts?: components["schemas"]["DownloadJob"][]; + is_intermediate?: boolean; /** - * Error - * @description On an error condition, this field will contain the text of the exception + * Use Cache + * @description Whether or not to use the cache + * @default true */ - error?: string | null; + use_cache?: boolean; /** - * Error Traceback - * @description On an error condition, this field will contain the exception traceback + * @description The image to process + * @default null */ - error_traceback?: string | null; + image?: components["schemas"]["ImageField"] | null; + /** + * type + * @default normal_map + * @constant + */ + type: "normal_map"; }; - /** - * ModelInstallStartedEvent - * @description Event model for model_install_started - */ - ModelInstallStartedEvent: { + /** OffsetPaginatedResults[BoardDTO] */ + OffsetPaginatedResults_BoardDTO_: { /** - * Timestamp - * @description The timestamp of the event + * Limit + * @description Limit of items to get */ - timestamp: number; + limit: number; /** - * Id - * @description The ID of the install job + * Offset + * @description Offset from which to retrieve items */ - id: number; + offset: number; /** - * Source - * @description Source of the model; local path, repo_id or url + * Total + * @description Total number of items in result */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + total: number; + /** + * Items + * @description Items + */ + items: components["schemas"]["BoardDTO"][]; }; - /** - * ModelLoadCompleteEvent - * @description Event model for model_load_complete - */ - ModelLoadCompleteEvent: { + /** OffsetPaginatedResults[ImageDTO] */ + OffsetPaginatedResults_ImageDTO_: { /** - * Timestamp - * @description The timestamp of the event + * Limit + * @description Limit of items to get */ - timestamp: number; + limit: number; /** - * Config - * @description The model's config + * Offset + * @description Offset from which to retrieve items */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | 
components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; + offset: number; /** - * @description The submodel type, if any - * @default null + * 
Total + * @description Total number of items in result */ - submodel_type: components["schemas"]["SubModelType"] | null; + total: number; + /** + * Items + * @description Items + */ + items: components["schemas"]["ImageDTO"][]; }; /** - * ModelLoadStartedEvent - * @description Event model for model_load_started + * OutputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor + * during schema parsing and UI rendering. */ - ModelLoadStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + OutputFieldJSONSchemaExtra: { + field_kind: components["schemas"]["FieldKind"]; /** - * Config - * @description The model's config + * Ui Hidden + * @default false */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; + ui_hidden: boolean; /** - * @description The submodel type, if any + * Ui Order * @default null */ - submodel_type: components["schemas"]["SubModelType"] | null; + ui_order: number | null; + /** @default null */ + ui_type: components["schemas"]["UIType"] | null; }; - /** - * ModelLoaderOutput - * @description Model loader output - */ - ModelLoaderOutput: { + /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */ + PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: { /** - * VAE - * @description VAE + * Page + * @description Current Page */ - vae: components["schemas"]["VAEField"]; + page: number; /** - * type - * @default model_loader_output - * @constant + * Pages + * @description Total number of pages */ - type: "model_loader_output"; + pages: number; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Per Page + * @description Number of items per page */ - clip: components["schemas"]["CLIPField"]; + per_page: number; /** - * UNet - * @description UNet (scheduler, LoRAs) + * Total + * @description Total number of items in result */ - unet: components["schemas"]["UNetField"]; + total: number; + /** + * Items + * @description Items + */ + items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][]; }; /** - * ModelRecordChanges - * @description A set of changes to apply to a model. + * Pair Tile with Image + * @description Pair an image with its tile properties. */ - ModelRecordChanges: { + PairTileImageInvocation: { /** - * Source - * @description original source of the model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source?: string | null; - /** @description type of model source */ - source_type?: components["schemas"]["ModelSourceType"] | null; + id: string; /** - * Source Api Response - * @description metadata from remote source + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source_api_response?: string | null; + is_intermediate?: boolean; /** - * Name - * @description Name of the model. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - name?: string | null; + use_cache?: boolean; /** - * Path - * @description Path to the model. + * @description The tile image. 
+ * @default null */ - path?: string | null; + image?: components["schemas"]["ImageField"] | null; /** - * Description - * @description Model description - */ - description?: string | null; - /** @description The base model. */ - base?: components["schemas"]["BaseModelType"] | null; - /** @description Type of model */ - type?: components["schemas"]["ModelType"] | null; - /** - * Key - * @description Database ID for this model + * @description The tile properties. + * @default null */ - key?: string | null; + tile?: components["schemas"]["Tile"] | null; /** - * Hash - * @description hash of model file + * type + * @default pair_tile_image + * @constant */ - hash?: string | null; + type: "pair_tile_image"; + }; + /** PairTileImageOutput */ + PairTileImageOutput: { + /** @description A tile description with its corresponding image. */ + tile_with_image: components["schemas"]["TileWithImage"]; /** - * File Size - * @description Size of model file + * type + * @default pair_tile_image_output + * @constant */ - file_size?: number | null; + type: "pair_tile_image_output"; + }; + /** + * Paste Image into Bounding Box + * @description Paste the source image into the target image at the given bounding box. + * + * The source image must be the same size as the bounding box, and the bounding box must fit within the target image. + */ + PasteImageIntoBoundingBoxInvocation: { /** - * Format - * @description format of model file + * @description The board to save the image to + * @default null */ - format?: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * @description Optional metadata to be saved with the image + * @default null */ - trigger_phrases?: string[] | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Default Settings - * @description Default settings for this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null; + id: string; /** - * Variant - * @description The variant of the model. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null; - /** @description The prediction type of the model. */ - prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; + is_intermediate?: boolean; /** - * Upcast Attention - * @description Whether to upcast attention. 
+ * Use Cache + * @description Whether or not to use the cache + * @default true */ - upcast_attention?: boolean | null; + use_cache?: boolean; /** - * Config Path - * @description Path to config file for model + * @description The image to paste + * @default null */ - config_path?: string | null; - }; - /** ModelRelationshipBatchRequest */ - ModelRelationshipBatchRequest: { + source_image?: components["schemas"]["ImageField"] | null; /** - * Model Keys - * @description List of model keys to fetch related models for + * @description The image to paste into + * @default null */ - model_keys: string[]; - }; - /** ModelRelationshipCreateRequest */ - ModelRelationshipCreateRequest: { + target_image?: components["schemas"]["ImageField"] | null; /** - * Model Key 1 - * @description The key of the first model in the relationship + * @description The bounding box to paste the image into + * @default null */ - model_key_1: string; + bounding_box?: components["schemas"]["BoundingBoxField"] | null; /** - * Model Key 2 - * @description The key of the second model in the relationship + * type + * @default paste_image_into_bounding_box + * @constant */ - model_key_2: string; - }; - /** - * ModelRepoVariant - * @description Various hugging face variants on the diffusers format. - * @enum {string} - */ - ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax"; - /** - * ModelSourceType - * @description Model source type. - * @enum {string} - */ - ModelSourceType: "path" | "url" | "hf_repo_id"; - /** - * ModelType - * @description Model type. - * @enum {string} - */ - ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown"; - /** - * ModelVariantType - * @description Variant type. - * @enum {string} - */ - ModelVariantType: "normal" | "inpaint" | "depth"; - /** - * ModelsList - * @description Return list of configs. 
- */ - ModelsList: { - /** Models */ - models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | 
components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"])[]; + type: "paste_image_into_bounding_box"; }; /** - * Multiply Integers - * @description Multiplies two numbers + * PiDiNet Edge Detection + * @description Generates an edge map using PiDiNet. */ - MultiplyInvocation: { + PiDiNetEdgeDetectionInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19777,47 +19263,73 @@ export type components = { */ use_cache?: boolean; /** - * A - * @description The first number - * @default 0 + * @description The image to process + * @default null */ - a?: number; + image?: components["schemas"]["ImageField"] | null; /** - * B - * @description The second number - * @default 0 + * Quantize Edges + * @description Whether or not to use safe mode + * @default false */ - b?: number; + quantize_edges?: boolean; + /** + * Scribble + * @description Whether or not to use scribble mode + * @default false + */ + scribble?: boolean; /** * type - * @default mul + * @default pidi_edge_detection * @constant */ - type: "mul"; + type: "pidi_edge_detection"; }; - /** NodeFieldValue */ - NodeFieldValue: { + /** PresetData */ + PresetData: { /** - * Node Path - * @description The node into which this batch data item will be substituted. + * Positive Prompt + * @description Positive prompt */ - node_path: string; + positive_prompt: string; /** - * Field Name - * @description The field into which this batch data item will be substituted. + * Negative Prompt + * @description Negative prompt */ - field_name: string; + negative_prompt: string; + }; + /** + * PresetType + * @enum {string} + */ + PresetType: "user" | "default" | "project"; + /** + * ProgressImage + * @description The progress image sent intermittently during processing + */ + ProgressImage: { /** - * Value - * @description The value to substitute into the node/field. + * Width + * @description The effective width of the image in pixels */ - value: string | number | components["schemas"]["ImageField"]; + width: number; + /** + * Height + * @description The effective height of the image in pixels + */ + height: number; + /** + * Dataurl + * @description The image data as a b64 data URL + */ + dataURL: string; }; /** - * Create Latent Noise - * @description Generates latent noise. + * Prompts from File + * @description Loads prompts from a text file */ - NoiseInvocation: { + PromptsFromFileInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -19836,226 +19348,194 @@ export type components = { */ use_cache?: boolean; /** - * Seed - * @description Seed for random number generation - * @default 0 + * File Path + * @description Path to prompt text file + * @default null */ - seed?: number; + file_path?: string | null; /** - * Width - * @description Width of output (px) - * @default 512 + * Pre Prompt + * @description String to prepend to each prompt + * @default null */ - width?: number; + pre_prompt?: string | null; /** - * Height - * @description Height of output (px) - * @default 512 + * Post Prompt + * @description String to append to each prompt + * @default null */ - height?: number; + post_prompt?: string | null; /** - * Use Cpu - * @description Use CPU for noise generation (for reproducible results across platforms) - * @default true - */ - use_cpu?: boolean; + * Start Line + * @description Line in the file to start start from + * @default 1 + */ + start_line?: number; + /** + * Max Prompts + * @description Max lines to read from file (0=all) + * @default 1 + */ + max_prompts?: number; /** * type - * @default noise + * @default prompt_from_file * @constant */ - type: "noise"; + type: "prompt_from_file"; }; /** - * NoiseOutput - * @description Invocation noise output + * PruneResult + * @description Result of pruning the session queue */ - NoiseOutput: { - /** @description Noise tensor */ - noise: components["schemas"]["LatentsField"]; + PruneResult: { /** - * Width - * @description Width of output (px) + * Deleted + * @description Number of queue items deleted */ - width: number; + deleted: number; + }; + /** + * QueueClearedEvent + * @description Event model for queue_cleared + */ + QueueClearedEvent: { /** - * Height - * @description Height of output (px) + * Timestamp + * @description The timestamp of the event */ - height: number; + timestamp: number; /** - * type - * @default noise_output - * @constant + * Queue Id + * @description The ID of the queue */ - type: "noise_output"; + queue_id: string; }; /** - * Normal Map - * @description Generates a normal map. + * QueueItemStatusChangedEvent + * @description Event model for queue_item_status_changed */ - NormalMapInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + QueueItemStatusChangedEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Queue Id + * @description The ID of the queue */ - id: string; + queue_id: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Item Id + * @description The ID of the queue item */ - is_intermediate?: boolean; + item_id: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Batch Id + * @description The ID of the queue batch */ - use_cache?: boolean; + batch_id: string; /** - * @description The image to process + * Origin + * @description The origin of the queue item * @default null */ - image?: components["schemas"]["ImageField"] | null; - /** - * type - * @default normal_map - * @constant - */ - type: "normal_map"; - }; - /** OffsetPaginatedResults[BoardDTO] */ - OffsetPaginatedResults_BoardDTO_: { - /** - * Limit - * @description Limit of items to get - */ - limit: number; + origin: string | null; /** - * Offset - * @description Offset from which to retrieve items + * Destination + * @description The destination of the queue item + * @default null */ - offset: number; + destination: string | null; /** - * Total - * @description Total number of items in result + * Status + * @description The new status of the queue item + * @enum {string} */ - total: number; + status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; /** - * Items - * @description Items + * Error Type + * @description The error type, if any + * @default null */ - items: components["schemas"]["BoardDTO"][]; - }; - /** OffsetPaginatedResults[ImageDTO] */ - OffsetPaginatedResults_ImageDTO_: { + error_type: string | null; /** - * Limit - * @description Limit of items to get + * Error Message + * @description The error message, if any + * @default null */ - limit: number; + error_message: string | null; /** - * Offset - * @description Offset from which to retrieve items + * Error Traceback + * @description The error traceback, if any + * @default null */ - offset: number; + error_traceback: string | null; /** - * Total - * @description Total number of items in result + * Created At + * @description The timestamp when the queue item was created */ - total: number; + created_at: string; /** - * Items - * @description Items + * Updated At + * @description The timestamp when the queue item was last updated */ - items: components["schemas"]["ImageDTO"][]; - }; - /** OffsetPaginatedResults[VideoDTO] */ - OffsetPaginatedResults_VideoDTO_: { + updated_at: string; /** - * Limit - * @description Limit of items to get + * Started At + * @description The timestamp when the queue item was started + * @default null */ - limit: number; + started_at: string | null; /** - * Offset - * @description Offset from which to retrieve items + * Completed At + * @description The timestamp when the queue item was completed + * @default null */ - offset: number; + completed_at: string | null; + /** @description The status of the batch */ + batch_status: components["schemas"]["BatchStatus"]; + /** @description The status of the queue */ + queue_status: components["schemas"]["SessionQueueStatus"]; /** - * Total - * @description Total number of items in result + * Session Id + * @description The ID of the session (aka graph execution state) */ - total: number; + session_id: string; /** - * Items - * @description Items + * Credits + * @description The total credits used for this queue item + * @default null */ - items: components["schemas"]["VideoDTO"][]; + credits: number | null; }; /** - * OutputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor - * during schema parsing and UI rendering. 
+ * QueueItemsRetriedEvent + * @description Event model for queue_items_retried */ - OutputFieldJSONSchemaExtra: { - field_kind: components["schemas"]["FieldKind"]; - /** - * Ui Hidden - * @default false - */ - ui_hidden: boolean; - /** - * Ui Order - * @default null - */ - ui_order: number | null; - /** @default null */ - ui_type: components["schemas"]["UIType"] | null; - }; - /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */ - PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: { - /** - * Page - * @description Current Page - */ - page: number; - /** - * Pages - * @description Total number of pages - */ - pages: number; + QueueItemsRetriedEvent: { /** - * Per Page - * @description Number of items per page + * Timestamp + * @description The timestamp of the event */ - per_page: number; + timestamp: number; /** - * Total - * @description Total number of items in result + * Queue Id + * @description The ID of the queue */ - total: number; + queue_id: string; /** - * Items - * @description Items + * Retried Item Ids + * @description The IDs of the queue items that were retried */ - items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][]; + retried_item_ids: number[]; }; /** - * Pair Tile with Image - * @description Pair an image with its tile properties. + * Random Float + * @description Outputs a single random float */ - PairTileImageInvocation: { + RandomFloatInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20074,50 +19554,35 @@ export type components = { */ use_cache?: boolean; /** - * @description The tile image. - * @default null + * Low + * @description The inclusive low value + * @default 0 */ - image?: components["schemas"]["ImageField"] | null; + low?: number; /** - * @description The tile properties. - * @default null + * High + * @description The exclusive high value + * @default 1 */ - tile?: components["schemas"]["Tile"] | null; + high?: number; /** - * type - * @default pair_tile_image - * @constant + * Decimals + * @description The number of decimal places to round to + * @default 2 */ - type: "pair_tile_image"; - }; - /** PairTileImageOutput */ - PairTileImageOutput: { - /** @description A tile description with its corresponding image. */ - tile_with_image: components["schemas"]["TileWithImage"]; + decimals?: number; /** * type - * @default pair_tile_image_output + * @default rand_float * @constant */ - type: "pair_tile_image_output"; + type: "rand_float"; }; /** - * Paste Image into Bounding Box - * @description Paste the source image into the target image at the given bounding box. - * - * The source image must be the same size as the bounding box, and the bounding box must fit within the target image. + * Random Integer + * @description Outputs a single random integer. */ - PasteImageIntoBoundingBoxInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + RandomIntInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -20136,42 +19601,29 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to paste - * @default null - */ - source_image?: components["schemas"]["ImageField"] | null; - /** - * @description The image to paste into - * @default null + * Low + * @description The inclusive low value + * @default 0 */ - target_image?: components["schemas"]["ImageField"] | null; + low?: number; /** - * @description The bounding box to paste the image into - * @default null + * High + * @description The exclusive high value + * @default 2147483647 */ - bounding_box?: components["schemas"]["BoundingBoxField"] | null; + high?: number; /** * type - * @default paste_image_into_bounding_box + * @default rand_int * @constant */ - type: "paste_image_into_bounding_box"; + type: "rand_int"; }; /** - * PiDiNet Edge Detection - * @description Generates an edge map using PiDiNet. + * Random Range + * @description Creates a collection of random numbers */ - PiDiNetEdgeDetectionInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + RandomRangeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20190,73 +19642,88 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process - * @default null + * Low + * @description The inclusive low value + * @default 0 */ - image?: components["schemas"]["ImageField"] | null; + low?: number; /** - * Quantize Edges - * @description Whether or not to use safe mode - * @default false + * High + * @description The exclusive high value + * @default 2147483647 */ - quantize_edges?: boolean; + high?: number; /** - * Scribble - * @description Whether or not to use scribble mode - * @default false + * Size + * @description The number of values to generate + * @default 1 */ - scribble?: boolean; + size?: number; + /** + * Seed + * @description The seed for the RNG (omit for random) + * @default 0 + */ + seed?: number; /** * type - * @default pidi_edge_detection + * @default random_range * @constant */ - type: "pidi_edge_detection"; + type: "random_range"; }; - /** PresetData */ - PresetData: { + /** + * Integer Range + * @description Creates a range of numbers from start to stop with step + */ + RangeInvocation: { /** - * Positive Prompt - * @description Positive prompt + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - positive_prompt: string; + id: string; /** - * Negative Prompt - * @description Negative prompt + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - negative_prompt: string; - }; - /** - * PresetType - * @enum {string} - */ - PresetType: "user" | "default" | "project"; - /** - * ProgressImage - * @description The progress image sent intermittently during processing - */ - ProgressImage: { + is_intermediate?: boolean; /** - * Width - * @description The effective width of the image in pixels + * Use Cache + * @description Whether or not to use the cache + * @default true */ - width: number; + use_cache?: boolean; /** - * Height - * @description The effective height of the image in pixels + * Start + * @description The start of the range + * @default 0 */ - height: number; + start?: number; /** - * Dataurl - * @description The image data as a b64 data URL + * Stop + * @description The stop of the range + * @default 10 */ - dataURL: string; + stop?: number; + /** + * Step + * @description The step of the range + * @default 1 + */ + step?: number; + /** + * type + * @default range + * @constant + */ + type: "range"; }; /** - * Prompts from File - * @description Loads prompts from a text file + * Integer Range of Size + * @description Creates a range from start to start + (size * step) incremented by step */ - PromptsFromFileInvocation: { + RangeOfSizeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20275,194 +19742,147 @@ export type components = { */ use_cache?: boolean; /** - * File Path - * @description Path to prompt text file - * @default null - */ - file_path?: string | null; - /** - * Pre Prompt - * @description String to prepend to each prompt - * @default null - */ - pre_prompt?: string | null; - /** - * Post Prompt - * @description String to append to each prompt - * @default null + * Start + * @description The start of the range + * @default 0 */ - post_prompt?: string | null; + start?: number; /** - * Start Line - * @description Line in the file to start start from + * Size + * @description The number of values * @default 1 */ - start_line?: number; + size?: number; /** - * Max Prompts - * @description Max lines to read from file (0=all) + * Step + * @description The step of the range * @default 1 */ - max_prompts?: number; + step?: number; /** * type - * @default prompt_from_file + * @default range_of_size * @constant */ - type: "prompt_from_file"; + type: "range_of_size"; }; /** - * PruneResult - * @description Result of pruning the session queue + * Create Rectangle Mask + * @description Create a rectangular mask. */ - PruneResult: { + RectangleMaskInvocation: { /** - * Deleted - * @description Number of queue items deleted + * @description Optional metadata to be saved with the image + * @default null */ - deleted: number; - }; - /** - * QueueClearedEvent - * @description Event model for queue_cleared - */ - QueueClearedEvent: { + metadata?: components["schemas"]["MetadataField"] | null; /** - * Timestamp - * @description The timestamp of the event + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - timestamp: number; + id: string; /** - * Queue Id - * @description The ID of the queue + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - queue_id: string; - }; - /** - * QueueItemStatusChangedEvent - * @description Event model for queue_item_status_changed - */ - QueueItemStatusChangedEvent: { + is_intermediate?: boolean; /** - * Timestamp - * @description The timestamp of the event + * Use Cache + * @description Whether or not to use the cache + * @default true */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; + use_cache?: boolean; /** - * Origin - * @description The origin of the queue item + * Width + * @description The width of the entire mask. * @default null */ - origin: string | null; + width?: number | null; /** - * Destination - * @description The destination of the queue item + * Height + * @description The height of the entire mask. * @default null */ - destination: string | null; - /** - * Status - * @description The new status of the queue item - * @enum {string} - */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + height?: number | null; /** - * Error Type - * @description The error type, if any + * X Left + * @description The left x-coordinate of the rectangular masked region (inclusive). * @default null */ - error_type: string | null; + x_left?: number | null; /** - * Error Message - * @description The error message, if any + * Y Top + * @description The top y-coordinate of the rectangular masked region (inclusive). * @default null */ - error_message: string | null; + y_top?: number | null; /** - * Error Traceback - * @description The error traceback, if any + * Rectangle Width + * @description The width of the rectangular masked region. * @default null */ - error_traceback: string | null; + rectangle_width?: number | null; /** - * Created At - * @description The timestamp when the queue item was created + * Rectangle Height + * @description The height of the rectangular masked region. + * @default null */ - created_at: string; + rectangle_height?: number | null; /** - * Updated At - * @description The timestamp when the queue item was last updated + * type + * @default rectangle_mask + * @constant */ - updated_at: string; + type: "rectangle_mask"; + }; + /** + * RemoteModelFile + * @description Information about a downloadable file that forms part of a model. 
+ */ + RemoteModelFile: { /** - * Started At - * @description The timestamp when the queue item was started - * @default null + * Url + * Format: uri + * @description The url to download this model file */ - started_at: string | null; + url: string; /** - * Completed At - * @description The timestamp when the queue item was completed - * @default null + * Path + * Format: path + * @description The path to the file, relative to the model root */ - completed_at: string | null; - /** @description The status of the batch */ - batch_status: components["schemas"]["BatchStatus"]; - /** @description The status of the queue */ - queue_status: components["schemas"]["SessionQueueStatus"]; + path: string; /** - * Session Id - * @description The ID of the session (aka graph execution state) + * Size + * @description The size of this file, in bytes + * @default 0 */ - session_id: string; + size?: number | null; /** - * Credits - * @description The total credits used for this queue item - * @default null + * Sha256 + * @description SHA256 hash of this model (not always available) */ - credits: number | null; + sha256?: string | null; }; - /** - * QueueItemsRetriedEvent - * @description Event model for queue_items_retried - */ - QueueItemsRetriedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + /** RemoveImagesFromBoardResult */ + RemoveImagesFromBoardResult: { /** - * Queue Id - * @description The ID of the queue + * Affected Boards + * @description The ids of boards affected by the delete operation */ - queue_id: string; + affected_boards: string[]; /** - * Retried Item Ids - * @description The IDs of the queue items that were retried + * Removed Images + * @description The image names that were removed from their board */ - retried_item_ids: number[]; + removed_images: string[]; }; /** - * Random Float - * @description Outputs a single random float + * Resize Latents + * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. */ - RandomFloatInvocation: { + ResizeLatentsInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20477,80 +19897,74 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * Low - * @description The inclusive low value - * @default 0 - */ - low?: number; - /** - * High - * @description The exclusive high value - * @default 1 - */ - high?: number; - /** - * Decimals - * @description The number of decimal places to round to - * @default 2 + * @description Latents tensor + * @default null */ - decimals?: number; + latents?: components["schemas"]["LatentsField"] | null; /** - * type - * @default rand_float - * @constant + * Width + * @description Width of output (px) + * @default null */ - type: "rand_float"; - }; - /** - * Random Integer - * @description Outputs a single random integer. - */ - RandomIntInvocation: { + width?: number | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Height + * @description Width of output (px) + * @default null */ - id: string; + height?: number | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Mode + * @description Interpolation mode + * @default bilinear + * @enum {string} */ - is_intermediate?: boolean; + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; /** - * Use Cache - * @description Whether or not to use the cache + * Antialias + * @description Whether or not to apply antialiasing (bilinear or bicubic only) * @default false */ - use_cache?: boolean; + antialias?: boolean; /** - * Low - * @description The inclusive low value - * @default 0 + * type + * @default lresize + * @constant */ - low?: number; + type: "lresize"; + }; + /** + * ResourceOrigin + * @description The origin of a resource (eg image). + * + * - INTERNAL: The resource was created by the application. + * - EXTERNAL: The resource was not created by the application. + * This may be a user-initiated upload, or an internal application upload (eg Canvas init image). + * @enum {string} + */ + ResourceOrigin: "internal" | "external"; + /** RetryItemsResult */ + RetryItemsResult: { /** - * High - * @description The exclusive high value - * @default 2147483647 + * Queue Id + * @description The ID of the queue */ - high?: number; + queue_id: string; /** - * type - * @default rand_int - * @constant + * Retried Item Ids + * @description The IDs of the queue items that were retried */ - type: "rand_int"; + retried_item_ids: number[]; }; /** - * Random Range - * @description Creates a collection of random numbers + * Round Float + * @description Rounds a float to a specified number of decimal places. */ - RandomRangeInvocation: { + RoundInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20565,45 +19979,96 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * Low - * @description The inclusive low value + * Value + * @description The float value * @default 0 */ - low?: number; + value?: number; /** - * High - * @description The exclusive high value - * @default 2147483647 + * Decimals + * @description The number of decimal places + * @default 0 */ - high?: number; + decimals?: number; /** - * Size - * @description The number of values to generate - * @default 1 + * type + * @default round_float + * @constant */ - size?: number; + type: "round_float"; + }; + /** SAMPoint */ + SAMPoint: { /** - * Seed - * @description The seed for the RNG (omit for random) - * @default 0 + * X + * @description The x-coordinate of the point */ - seed?: number; + x: number; + /** + * Y + * @description The y-coordinate of the point + */ + y: number; + /** @description The label of the point */ + label: components["schemas"]["SAMPointLabel"]; + }; + /** + * SAMPointLabel + * @enum {integer} + */ + SAMPointLabel: -1 | 0 | 1; + /** SAMPointsField */ + SAMPointsField: { + /** + * Points + * @description The points of the object + */ + points: components["schemas"]["SAMPoint"][]; + }; + /** + * SD3ConditioningField + * @description A conditioning tensor primitive value + */ + SD3ConditioningField: { + /** + * Conditioning Name + * @description The name of conditioning tensor + */ + conditioning_name: string; + }; + /** + * SD3ConditioningOutput + * @description Base class for nodes that output a single SD3 conditioning tensor + */ + SD3ConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["SD3ConditioningField"]; /** * type - * @default 
random_range + * @default sd3_conditioning_output * @constant */ - type: "random_range"; + type: "sd3_conditioning_output"; }; /** - * Integer Range - * @description Creates a range of numbers from start to stop with step + * Denoise - SD3 + * @description Run denoising process with a SD3 model. */ - RangeInvocation: { + SD3DenoiseInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20622,35 +20087,95 @@ export type components = { */ use_cache?: boolean; /** - * Start - * @description The start of the range + * @description Latents tensor + * @default null + */ + latents?: components["schemas"]["LatentsField"] | null; + /** + * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @default null + */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * Denoising Start + * @description When to start denoising, expressed a percentage of total steps * @default 0 */ - start?: number; + denoising_start?: number; /** - * Stop - * @description The stop of the range + * Denoising End + * @description When to stop denoising, expressed a percentage of total steps + * @default 1 + */ + denoising_end?: number; + /** + * Transformer + * @description SD3 model (MMDiTX) to load + * @default null + */ + transformer?: components["schemas"]["TransformerField"] | null; + /** + * @description Positive conditioning tensor + * @default null + */ + positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + /** + * @description Negative conditioning tensor + * @default null + */ + negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + /** + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 3.5 + */ + cfg_scale?: number | number[]; + /** + * Width + * @description Width of the generated image. + * @default 1024 + */ + width?: number; + /** + * Height + * @description Height of the generated image. + * @default 1024 + */ + height?: number; + /** + * Steps + * @description Number of steps to run * @default 10 */ - stop?: number; + steps?: number; /** - * Step - * @description The step of the range - * @default 1 + * Seed + * @description Randomness seed for reproducibility. + * @default 0 */ - step?: number; + seed?: number; /** * type - * @default range + * @default sd3_denoise * @constant */ - type: "range"; + type: "sd3_denoise"; }; /** - * Integer Range of Size - * @description Creates a range from start to start + (size * step) incremented by step + * Image to Latents - SD3 + * @description Generates latents from an image. */ - RangeOfSizeInvocation: { + SD3ImageToLatentsInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -20669,35 +20194,32 @@ export type components = { */ use_cache?: boolean; /** - * Start - * @description The start of the range - * @default 0 - */ - start?: number; - /** - * Size - * @description The number of values - * @default 1 + * @description The image to encode + * @default null */ - size?: number; + image?: components["schemas"]["ImageField"] | null; /** - * Step - * @description The step of the range - * @default 1 + * @description VAE + * @default null */ - step?: number; + vae?: components["schemas"]["VAEField"] | null; /** * type - * @default range_of_size + * @default sd3_i2l * @constant */ - type: "range_of_size"; + type: "sd3_i2l"; }; /** - * Create Rectangle Mask - * @description Create a rectangular mask. + * Latents to Image - SD3 + * @description Generates an image from latents. */ - RectangleMaskInvocation: { + SD3LatentsToImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; /** * @description Optional metadata to be saved with the image * @default null @@ -20721,190 +20243,115 @@ export type components = { */ use_cache?: boolean; /** - * Width - * @description The width of the entire mask. - * @default null - */ - width?: number | null; - /** - * Height - * @description The height of the entire mask. - * @default null - */ - height?: number | null; - /** - * X Left - * @description The left x-coordinate of the rectangular masked region (inclusive). - * @default null - */ - x_left?: number | null; - /** - * Y Top - * @description The top y-coordinate of the rectangular masked region (inclusive). - * @default null - */ - y_top?: number | null; - /** - * Rectangle Width - * @description The width of the rectangular masked region. + * @description Latents tensor * @default null */ - rectangle_width?: number | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * Rectangle Height - * @description The height of the rectangular masked region. + * @description VAE * @default null */ - rectangle_height?: number | null; + vae?: components["schemas"]["VAEField"] | null; /** * type - * @default rectangle_mask + * @default sd3_l2i * @constant */ - type: "rectangle_mask"; + type: "sd3_l2i"; }; /** - * RemoteModelFile - * @description Information about a downloadable file that forms part of a model. + * Prompt - SDXL + * @description Parse prompt using compel package to conditioning. */ - RemoteModelFile: { + SDXLCompelPromptInvocation: { /** - * Url - * Format: uri - * @description The url to download this model file + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - url: string; + id: string; /** - * Path - * Format: path - * @description The path to the file, relative to the model root + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - path: string; + is_intermediate?: boolean; /** - * Size - * @description The size of this file, in bytes - * @default 0 + * Use Cache + * @description Whether or not to use the cache + * @default true */ - size?: number | null; + use_cache?: boolean; /** - * Sha256 - * @description SHA256 hash of this model (not always available) + * Prompt + * @description Prompt to be parsed by Compel to create a conditioning tensor + * @default */ - sha256?: string | null; - }; - /** RemoveImagesFromBoardResult */ - RemoveImagesFromBoardResult: { + prompt?: string; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Style + * @description Prompt to be parsed by Compel to create a conditioning tensor + * @default */ - affected_boards: string[]; + style?: string; /** - * Removed Images - * @description The image names that were removed from their board + * Original Width + * @default 1024 */ - removed_images: string[]; - }; - /** RemoveVideosFromBoardResult */ - RemoveVideosFromBoardResult: { + original_width?: number; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Original Height + * @default 1024 */ - affected_boards: string[]; + original_height?: number; /** - * Removed Videos - * @description The video ids that were removed from their board + * Crop Top + * @default 0 */ - removed_videos: string[]; - }; - /** - * Resize Latents - * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. - */ - ResizeLatentsInvocation: { + crop_top?: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Crop Left + * @default 0 */ - id: string; + crop_left?: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Target Width + * @default 1024 */ - is_intermediate?: boolean; + target_width?: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Target Height + * @default 1024 */ - use_cache?: boolean; + target_height?: number; /** - * @description Latents tensor + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - latents?: components["schemas"]["LatentsField"] | null; + clip?: components["schemas"]["CLIPField"] | null; /** - * Width - * @description Width of output (px) + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - width?: number | null; + clip2?: components["schemas"]["CLIPField"] | null; /** - * Height - * @description Width of output (px) + * @description A mask defining the region that this conditioning prompt applies to. * @default null */ - height?: number | null; - /** - * Mode - * @description Interpolation mode - * @default bilinear - * @enum {string} - */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; - /** - * Antialias - * @description Whether or not to apply antialiasing (bilinear or bicubic only) - * @default false - */ - antialias?: boolean; + mask?: components["schemas"]["TensorField"] | null; /** * type - * @default lresize + * @default sdxl_compel_prompt * @constant */ - type: "lresize"; - }; - /** - * ResourceOrigin - * @description The origin of a resource (eg image). - * - * - INTERNAL: The resource was created by the application. 
- * - EXTERNAL: The resource was not created by the application. - * This may be a user-initiated upload, or an internal application upload (eg Canvas init image). - * @enum {string} - */ - ResourceOrigin: "internal" | "external"; - /** RetryItemsResult */ - RetryItemsResult: { - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Retried Item Ids - * @description The IDs of the queue items that were retried - */ - retried_item_ids: number[]; + type: "sdxl_compel_prompt"; }; /** - * Round Float - * @description Rounds a float to a specified number of decimal places. + * Apply LoRA Collection - SDXL + * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models. */ - RoundInvocation: { + SDXLLoRACollectionLoader: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20923,92 +20370,41 @@ export type components = { */ use_cache?: boolean; /** - * Value - * @description The float value - * @default 0 - */ - value?: number; - /** - * Decimals - * @description The number of decimal places - * @default 0 - */ - decimals?: number; - /** - * type - * @default round_float - * @constant - */ - type: "round_float"; - }; - /** SAMPoint */ - SAMPoint: { - /** - * X - * @description The x-coordinate of the point + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. + * @default null */ - x: number; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** - * Y - * @description The y-coordinate of the point + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - y: number; - /** @description The label of the point */ - label: components["schemas"]["SAMPointLabel"]; - }; - /** - * SAMPointLabel - * @enum {integer} - */ - SAMPointLabel: -1 | 0 | 1; - /** SAMPointsField */ - SAMPointsField: { + unet?: components["schemas"]["UNetField"] | null; /** - * Points - * @description The points of the object + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - points: components["schemas"]["SAMPoint"][]; - }; - /** - * SD3ConditioningField - * @description A conditioning tensor primitive value - */ - SD3ConditioningField: { + clip?: components["schemas"]["CLIPField"] | null; /** - * Conditioning Name - * @description The name of conditioning tensor + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - conditioning_name: string; - }; - /** - * SD3ConditioningOutput - * @description Base class for nodes that output a single SD3 conditioning tensor - */ - SD3ConditioningOutput: { - /** @description Conditioning tensor */ - conditioning: components["schemas"]["SD3ConditioningField"]; + clip2?: components["schemas"]["CLIPField"] | null; /** * type - * @default sd3_conditioning_output + * @default sdxl_lora_collection_loader * @constant */ - type: "sd3_conditioning_output"; + type: "sdxl_lora_collection_loader"; }; /** - * Denoise - SD3 - * @description Run denoising process with a SD3 model. + * Apply LoRA - SDXL + * @description Apply selected lora to unet and text_encoder. 
*/ - SD3DenoiseInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SDXLLoRALoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21027,95 +20423,77 @@ export type components = { */ use_cache?: boolean; /** - * @description Latents tensor - * @default null - */ - latents?: components["schemas"]["LatentsField"] | null; - /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * LoRA + * @description LoRA model to load * @default null */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 */ - denoising_start?: number; + weight?: number; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - denoising_end?: number; + unet?: components["schemas"]["UNetField"] | null; /** - * Transformer - * @description SD3 model (MMDiTX) to load + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - transformer?: components["schemas"]["TransformerField"] | null; + clip?: components["schemas"]["CLIPField"] | null; /** - * @description Positive conditioning tensor + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null; - /** - * @description Negative conditioning tensor - * @default null - */ - negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null; - /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 3.5 - */ - cfg_scale?: number | number[]; + clip2?: components["schemas"]["CLIPField"] | null; /** - * Width - * @description Width of the generated image. - * @default 1024 + * type + * @default sdxl_lora_loader + * @constant */ - width?: number; + type: "sdxl_lora_loader"; + }; + /** + * SDXLLoRALoaderOutput + * @description SDXL LoRA Loader Output + */ + SDXLLoRALoaderOutput: { /** - * Height - * @description Height of the generated image. - * @default 1024 + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - height?: number; + unet: components["schemas"]["UNetField"] | null; /** - * Steps - * @description Number of steps to run - * @default 10 + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - steps?: number; + clip: components["schemas"]["CLIPField"] | null; /** - * Seed - * @description Randomness seed for reproducibility. 
- * @default 0 + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - seed?: number; + clip2: components["schemas"]["CLIPField"] | null; /** * type - * @default sd3_denoise + * @default sdxl_lora_loader_output * @constant */ - type: "sd3_denoise"; + type: "sdxl_lora_loader_output"; }; /** - * Image to Latents - SD3 - * @description Generates latents from an image. + * Main Model - SDXL + * @description Loads an sdxl base model, outputting its submodels. */ - SD3ImageToLatentsInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SDXLModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21134,76 +20512,54 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to encode - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * @description VAE + * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load * @default null */ - vae?: components["schemas"]["VAEField"] | null; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default sd3_i2l + * @default sdxl_model_loader * @constant */ - type: "sd3_i2l"; + type: "sdxl_model_loader"; }; /** - * Latents to Image - SD3 - * @description Generates an image from latents. + * SDXLModelLoaderOutput + * @description SDXL base model loader output */ - SD3LatentsToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + SDXLModelLoaderOutput: { /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * UNet + * @description UNet (scheduler, LoRAs) */ - is_intermediate?: boolean; + unet: components["schemas"]["UNetField"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - use_cache?: boolean; + clip: components["schemas"]["CLIPField"]; /** - * @description Latents tensor - * @default null + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - latents?: components["schemas"]["LatentsField"] | null; + clip2: components["schemas"]["CLIPField"]; /** + * VAE * @description VAE - * @default null */ - vae?: components["schemas"]["VAEField"] | null; + vae: components["schemas"]["VAEField"]; /** * type - * @default sd3_l2i + * @default sdxl_model_loader_output * @constant */ - type: "sd3_l2i"; + type: "sdxl_model_loader_output"; }; /** - * Prompt - SDXL + * Prompt - SDXL Refiner * @description Parse prompt using compel package to conditioning. */ - SDXLCompelPromptInvocation: { + SDXLRefinerCompelPromptInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -21221,12 +20577,6 @@ export type components = { * @default true */ use_cache?: boolean; - /** - * Prompt - * @description Prompt to be parsed by Compel to create a conditioning tensor - * @default - */ - prompt?: string; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -21254,44 +20604,28 @@ export type components = { */ crop_left?: number; /** - * Target Width - * @default 1024 - */ - target_width?: number; - /** - * Target Height - * @default 1024 - */ - target_height?: number; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Aesthetic Score + * @description The aesthetic score to apply to the conditioning tensor + * @default 6 */ - clip?: components["schemas"]["CLIPField"] | null; + aesthetic_score?: number; /** - * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ clip2?: components["schemas"]["CLIPField"] | null; - /** - * @description A mask defining the region that this conditioning prompt applies to. - * @default null - */ - mask?: components["schemas"]["TensorField"] | null; /** * type - * @default sdxl_compel_prompt + * @default sdxl_refiner_compel_prompt * @constant */ - type: "sdxl_compel_prompt"; + type: "sdxl_refiner_compel_prompt"; }; /** - * Apply LoRA Collection - SDXL - * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models. + * Refiner Model - SDXL + * @description Loads an sdxl refiner model, outputting its submodels. */ - SDXLLoRACollectionLoader: { + SDXLRefinerModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21310,41 +20644,64 @@ export type components = { */ use_cache?: boolean; /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. + * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load * @default null */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default sdxl_refiner_model_loader + * @constant + */ + type: "sdxl_refiner_model_loader"; + }; + /** + * SDXLRefinerModelLoaderOutput + * @description SDXL refiner model loader output + */ + SDXLRefinerModelLoaderOutput: { /** * UNet * @description UNet (scheduler, LoRAs) - * @default null */ - unet?: components["schemas"]["UNetField"] | null; + unet: components["schemas"]["UNetField"]; /** - * CLIP + * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null */ - clip?: components["schemas"]["CLIPField"] | null; + clip2: components["schemas"]["CLIPField"]; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * VAE + * @description VAE */ - clip2?: components["schemas"]["CLIPField"] | null; + vae: components["schemas"]["VAEField"]; /** * type - * @default sdxl_lora_collection_loader + * @default sdxl_refiner_model_loader_output * @constant */ - type: "sdxl_lora_collection_loader"; + type: "sdxl_refiner_model_loader_output"; }; /** - * Apply LoRA - SDXL - * @description Apply selected lora to unet and text_encoder. + * SQLiteDirection + * @enum {string} */ - SDXLLoRALoaderInvocation: { + SQLiteDirection: "ASC" | "DESC"; + /** + * Save Image + * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. 
+ */ + SaveImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21363,77 +20720,132 @@ export type components = { */ use_cache?: boolean; /** - * LoRA - * @description LoRA model to load + * @description The image to process * @default null */ - lora?: components["schemas"]["ModelIdentifierField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 + * type + * @default save_image + * @constant */ - weight?: number; + type: "save_image"; + }; + /** + * Scale Latents + * @description Scales latents by a given factor. + */ + ScaleLatentsInvocation: { /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - unet?: components["schemas"]["UNetField"] | null; + id: string; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description Latents tensor * @default null */ - clip?: components["schemas"]["CLIPField"] | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Scale Factor + * @description The factor by which to scale * @default null */ - clip2?: components["schemas"]["CLIPField"] | null; + scale_factor?: number | null; + /** + * Mode + * @description Interpolation mode + * @default bilinear + * @enum {string} + */ + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + /** + * Antialias + * @description Whether or not to apply antialiasing (bilinear or bicubic only) + * @default false + */ + antialias?: boolean; /** * type - * @default sdxl_lora_loader + * @default lscale * @constant */ - type: "sdxl_lora_loader"; + type: "lscale"; }; /** - * SDXLLoRALoaderOutput - * @description SDXL LoRA Loader Output + * Scheduler + * @description Selects a scheduler. */ - SDXLLoRALoaderOutput: { + SchedulerInvocation: { /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - unet: components["schemas"]["UNetField"] | null; + id: string; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - clip: components["schemas"]["CLIPField"] | null; + is_intermediate?: boolean; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - clip2: components["schemas"]["CLIPField"] | null; + use_cache?: boolean; + /** + * Scheduler + * @description Scheduler to use during inference + * @default euler + * @enum {string} + */ + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type - * @default sdxl_lora_loader_output + * @default scheduler * @constant */ - type: "sdxl_lora_loader_output"; + type: "scheduler"; + }; + /** SchedulerOutput */ + SchedulerOutput: { + /** + * Scheduler + * @description Scheduler to use during inference + * @enum {string} + */ + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + /** + * type + * @default scheduler_output + * @constant + */ + type: "scheduler_output"; }; /** - * Main Model - SDXL - * @description Loads an sdxl base model, outputting its submodels. + * SchedulerPredictionType + * @description Scheduler prediction type. + * @enum {string} */ - SDXLModelLoaderInvocation: { + SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; + /** + * Main Model - SD3 + * @description Loads a SD3 base model, outputting its submodels. + */ + Sd3ModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21451,38 +20863,64 @@ export type components = { * @default true */ use_cache?: boolean; + /** @description SD3 model (MMDiTX) to load */ + model: components["schemas"]["ModelIdentifierField"]; /** - * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load + * T5 Encoder + * @description T5 tokenizer and text encoder * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; + t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * CLIP L Encoder + * @description CLIP Embed loader + * @default null + */ + clip_l_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * CLIP G Encoder + * @description CLIP-G Embed loader + * @default null + */ + clip_g_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * VAE + * @description VAE model to load + * @default null + */ + vae_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default sdxl_model_loader + * @default sd3_model_loader * @constant */ - type: "sdxl_model_loader"; + type: "sd3_model_loader"; }; /** - * SDXLModelLoaderOutput - * @description SDXL base model loader output + * Sd3ModelLoaderOutput + * @description SD3 base model loader output. 
*/ - SDXLModelLoaderOutput: { + Sd3ModelLoaderOutput: { /** - * UNet - * @description UNet (scheduler, LoRAs) + * Transformer + * @description Transformer */ - unet: components["schemas"]["UNetField"]; + transformer: components["schemas"]["TransformerField"]; /** - * CLIP 1 + * CLIP L * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip: components["schemas"]["CLIPField"]; + clip_l: components["schemas"]["CLIPField"]; /** - * CLIP 2 + * CLIP G * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2: components["schemas"]["CLIPField"]; + clip_g: components["schemas"]["CLIPField"]; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + */ + t5_encoder: components["schemas"]["T5EncoderField"]; /** * VAE * @description VAE @@ -21490,16 +20928,16 @@ export type components = { vae: components["schemas"]["VAEField"]; /** * type - * @default sdxl_model_loader_output + * @default sd3_model_loader_output * @constant */ - type: "sdxl_model_loader_output"; + type: "sd3_model_loader_output"; }; /** - * Prompt - SDXL Refiner - * @description Parse prompt using compel package to conditioning. + * Prompt - SD3 + * @description Encodes and preps a prompt for a SD3 image. */ - SDXLRefinerCompelPromptInvocation: { + Sd3TextEncoderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21518,54 +20956,41 @@ export type components = { */ use_cache?: boolean; /** - * Style - * @description Prompt to be parsed by Compel to create a conditioning tensor - * @default - */ - style?: string; - /** - * Original Width - * @default 1024 - */ - original_width?: number; - /** - * Original Height - * @default 1024 - */ - original_height?: number; - /** - * Crop Top - * @default 0 + * CLIP L + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - crop_top?: number; + clip_l?: components["schemas"]["CLIPField"] | null; /** - * Crop Left - * @default 0 + * CLIP G + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - crop_left?: number; + clip_g?: components["schemas"]["CLIPField"] | null; /** - * Aesthetic Score - * @description The aesthetic score to apply to the conditioning tensor - * @default 6 + * T5Encoder + * @description T5 tokenizer and text encoder + * @default null */ - aesthetic_score?: number; + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Prompt + * @description Text prompt to encode. * @default null */ - clip2?: components["schemas"]["CLIPField"] | null; + prompt?: string | null; /** * type - * @default sdxl_refiner_compel_prompt + * @default sd3_text_encoder * @constant */ - type: "sdxl_refiner_compel_prompt"; + type: "sd3_text_encoder"; }; /** - * Refiner Model - SDXL - * @description Loads an sdxl refiner model, outputting its submodels. + * Apply Seamless - SD1.5, SDXL + * @description Applies the seamless transformation to the Model UNet and VAE. */ - SDXLRefinerModelLoaderInvocation: { + SeamlessModeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -21584,64 +21009,65 @@ export type components = { */ use_cache?: boolean; /** - * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; + unet?: components["schemas"]["UNetField"] | null; + /** + * VAE + * @description VAE model to load + * @default null + */ + vae?: components["schemas"]["VAEField"] | null; + /** + * Seamless Y + * @description Specify whether Y axis is seamless + * @default true + */ + seamless_y?: boolean; + /** + * Seamless X + * @description Specify whether X axis is seamless + * @default true + */ + seamless_x?: boolean; /** * type - * @default sdxl_refiner_model_loader + * @default seamless * @constant */ - type: "sdxl_refiner_model_loader"; + type: "seamless"; }; /** - * SDXLRefinerModelLoaderOutput - * @description SDXL refiner model loader output + * SeamlessModeOutput + * @description Modified Seamless Model output */ - SDXLRefinerModelLoaderOutput: { + SeamlessModeOutput: { /** * UNet * @description UNet (scheduler, LoRAs) + * @default null */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip2: components["schemas"]["CLIPField"]; + unet: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE + * @default null */ - vae: components["schemas"]["VAEField"]; + vae: components["schemas"]["VAEField"] | null; /** * type - * @default sdxl_refiner_model_loader_output + * @default seamless_output * @constant */ - type: "sdxl_refiner_model_loader_output"; + type: "seamless_output"; }; /** - * SQLiteDirection - * @enum {string} - */ - SQLiteDirection: "ASC" | "DESC"; - /** - * Save Image - * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. + * Segment Anything + * @description Runs a Segment Anything Model (SAM or SAM2). */ - SaveImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SegmentAnythingInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21656,358 +21082,288 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * @description The image to process + * Model + * @description The Segment Anything model to use (SAM or SAM2). * @default null */ - image?: components["schemas"]["ImageField"] | null; + model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null; /** - * type - * @default save_image - * @constant + * @description The image to segment. + * @default null */ - type: "save_image"; - }; - /** - * Scale Latents - * @description Scales latents by a given factor. - */ - ScaleLatentsInvocation: { + image?: components["schemas"]["ImageField"] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Bounding Boxes + * @description The bounding boxes to prompt the model with. 
+ * @default null */ - id: string; + bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Point Lists + * @description The list of point lists to prompt the model with. Each list of points represents a single object. + * @default null */ - is_intermediate?: boolean; + point_lists?: components["schemas"]["SAMPointsField"][] | null; /** - * Use Cache - * @description Whether or not to use the cache + * Apply Polygon Refinement + * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging). * @default true */ - use_cache?: boolean; - /** - * @description Latents tensor - * @default null - */ - latents?: components["schemas"]["LatentsField"] | null; + apply_polygon_refinement?: boolean; /** - * Scale Factor - * @description The factor by which to scale - * @default null + * Mask Filter + * @description The filtering to apply to the detected masks before merging them into a final output. + * @default all + * @enum {string} */ - scale_factor?: number | null; + mask_filter?: "all" | "largest" | "highest_box_score"; /** - * Mode - * @description Interpolation mode - * @default bilinear - * @enum {string} + * type + * @default segment_anything + * @constant */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + type: "segment_anything"; + }; + /** SessionProcessorStatus */ + SessionProcessorStatus: { /** - * Antialias - * @description Whether or not to apply antialiasing (bilinear or bicubic only) - * @default false + * Is Started + * @description Whether the session processor is started */ - antialias?: boolean; + is_started: boolean; /** - * type - * @default lscale - * @constant + * Is Processing + * @description Whether a session is being processed */ - type: "lscale"; + is_processing: boolean; }; /** - * Scheduler - * @description Selects a scheduler. + * SessionQueueAndProcessorStatus + * @description The overall status of session queue and processor */ - SchedulerInvocation: { + SessionQueueAndProcessorStatus: { + queue: components["schemas"]["SessionQueueStatus"]; + processor: components["schemas"]["SessionProcessorStatus"]; + }; + /** SessionQueueCountsByDestination */ + SessionQueueCountsByDestination: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Queue Id + * @description The ID of the queue */ - id: string; + queue_id: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Destination + * @description The destination of queue items included in this status */ - is_intermediate?: boolean; + destination: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Pending + * @description Number of queue items with status 'pending' for the destination */ - use_cache?: boolean; + pending: number; /** - * Scheduler - * @description Scheduler to use during inference - * @default euler - * @enum {string} + * In Progress + * @description Number of queue items with status 'in_progress' for the destination */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + in_progress: number; /** - * type - * @default scheduler - * @constant + * Completed + * @description Number of queue items with status 'complete' for the destination */ - type: "scheduler"; - }; - /** SchedulerOutput */ - SchedulerOutput: { + completed: number; /** - * Scheduler - * @description Scheduler to use during inference - * @enum {string} + * Failed + * @description Number of queue items with status 'error' for the destination */ - scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + failed: number; /** - * type - * @default scheduler_output - * @constant + * Canceled + * @description Number of queue items with status 'canceled' for the destination */ - type: "scheduler_output"; + canceled: number; + /** + * Total + * @description Total number of queue items for the destination + */ + total: number; }; /** - * SchedulerPredictionType - * @description Scheduler prediction type. - * @enum {string} - */ - SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; - /** - * Main Model - SD3 - * @description Loads a SD3 base model, outputting its submodels. + * SessionQueueItem + * @description Session queue item without the full graph. Used for serialization. */ - Sd3ModelLoaderInvocation: { + SessionQueueItem: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Item Id + * @description The identifier of the session queue item */ - id: string; + item_id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Status + * @description The status of this queue item + * @default pending + * @enum {string} */ - is_intermediate?: boolean; + status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Priority + * @description The priority of this queue item + * @default 0 */ - use_cache?: boolean; - /** @description SD3 model (MMDiTX) to load */ - model: components["schemas"]["ModelIdentifierField"]; + priority: number; /** - * T5 Encoder - * @description T5 tokenizer and text encoder - * @default null + * Batch Id + * @description The ID of the batch associated with this queue item */ - t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + batch_id: string; /** - * CLIP L Encoder - * @description CLIP Embed loader - * @default null + * Origin + * @description The origin of this queue item. This data is used by the frontend to determine how to handle results. */ - clip_l_model?: components["schemas"]["ModelIdentifierField"] | null; + origin?: string | null; /** - * CLIP G Encoder - * @description CLIP-G Embed loader - * @default null + * Destination + * @description The origin of this queue item. This data is used by the frontend to determine how to handle results */ - clip_g_model?: components["schemas"]["ModelIdentifierField"] | null; + destination?: string | null; /** - * VAE - * @description VAE model to load - * @default null + * Session Id + * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed. */ - vae_model?: components["schemas"]["ModelIdentifierField"] | null; + session_id: string; /** - * type - * @default sd3_model_loader - * @constant - */ - type: "sd3_model_loader"; - }; - /** - * Sd3ModelLoaderOutput - * @description SD3 base model loader output. - */ - Sd3ModelLoaderOutput: { - /** - * Transformer - * @description Transformer - */ - transformer: components["schemas"]["TransformerField"]; - /** - * CLIP L - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Error Type + * @description The error type if this queue item errored */ - clip_l: components["schemas"]["CLIPField"]; + error_type?: string | null; /** - * CLIP G - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Error Message + * @description The error message if this queue item errored */ - clip_g: components["schemas"]["CLIPField"]; + error_message?: string | null; /** - * T5 Encoder - * @description T5 tokenizer and text encoder + * Error Traceback + * @description The error traceback if this queue item errored */ - t5_encoder: components["schemas"]["T5EncoderField"]; + error_traceback?: string | null; /** - * VAE - * @description VAE + * Created At + * @description When this queue item was created */ - vae: components["schemas"]["VAEField"]; + created_at: string; /** - * type - * @default sd3_model_loader_output - * @constant + * Updated At + * @description When this queue item was updated */ - type: "sd3_model_loader_output"; - }; - /** - * Prompt - SD3 - * @description Encodes and preps a prompt for a SD3 image. - */ - Sd3TextEncoderInvocation: { + updated_at: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
+ * Started At + * @description When this queue item was started */ - id: string; + started_at?: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Completed At + * @description When this queue item was completed */ - is_intermediate?: boolean; + completed_at?: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Queue Id + * @description The id of the queue with which this item is associated */ - use_cache?: boolean; + queue_id: string; /** - * CLIP L - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Field Values + * @description The field values that were used for this queue item */ - clip_l?: components["schemas"]["CLIPField"] | null; + field_values?: components["schemas"]["NodeFieldValue"][] | null; /** - * CLIP G - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Retried From Item Id + * @description The item_id of the queue item that this item was retried from */ - clip_g?: components["schemas"]["CLIPField"] | null; + retried_from_item_id?: number | null; /** - * T5Encoder - * @description T5 tokenizer and text encoder - * @default null + * Is Api Validation Run + * @description Whether this queue item is an API validation run. + * @default false */ - t5_encoder?: components["schemas"]["T5EncoderField"] | null; + is_api_validation_run?: boolean; /** - * Prompt - * @description Text prompt to encode. - * @default null + * Published Workflow Id + * @description The ID of the published workflow associated with this queue item */ - prompt?: string | null; + published_workflow_id?: string | null; /** - * type - * @default sd3_text_encoder - * @constant + * Credits + * @description The total credits used for this queue item */ - type: "sd3_text_encoder"; + credits?: number | null; + /** @description The fully-populated session to be executed */ + session: components["schemas"]["GraphExecutionState"]; + /** @description The workflow associated with this queue item */ + workflow?: components["schemas"]["WorkflowWithoutID"] | null; }; - /** - * Apply Seamless - SD1.5, SDXL - * @description Applies the seamless transformation to the Model UNet and VAE. - */ - SeamlessModeInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + /** SessionQueueStatus */ + SessionQueueStatus: { /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Queue Id + * @description The ID of the queue */ - is_intermediate?: boolean; + queue_id: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Item Id + * @description The current queue item id */ - use_cache?: boolean; + item_id: number | null; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Batch Id + * @description The current queue item's batch id */ - unet?: components["schemas"]["UNetField"] | null; + batch_id: string | null; /** - * VAE - * @description VAE model to load - * @default null + * Session Id + * @description The current queue item's session id */ - vae?: components["schemas"]["VAEField"] | null; + session_id: string | null; /** - * Seamless Y - * @description Specify whether Y axis is seamless - * @default true + * Pending + * @description Number of queue items with status 'pending' */ - seamless_y?: boolean; + pending: number; /** - * Seamless X - * @description Specify whether X axis is seamless - * @default true + * In Progress + * @description Number of queue items with status 'in_progress' */ - seamless_x?: boolean; + in_progress: number; /** - * type - * @default seamless - * @constant + * Completed + * @description Number of queue items with status 'complete' */ - type: "seamless"; - }; - /** - * SeamlessModeOutput - * @description Modified Seamless Model output - */ - SeamlessModeOutput: { + completed: number; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Failed + * @description Number of queue items with status 'error' */ - unet: components["schemas"]["UNetField"] | null; + failed: number; /** - * VAE - * @description VAE - * @default null + * Canceled + * @description Number of queue items with status 'canceled' */ - vae: components["schemas"]["VAEField"] | null; + canceled: number; /** - * type - * @default seamless_output - * @constant + * Total + * @description Total number of queue items */ - type: "seamless_output"; + total: number; }; /** - * Segment Anything - * @description Runs a Segment Anything Model (SAM or SAM2). + * Show Image + * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline. */ - SegmentAnythingInvocation: { + ShowImageInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -22026,331 +21382,232 @@ export type components = { */ use_cache?: boolean; /** - * Model - * @description The Segment Anything model to use (SAM or SAM2). - * @default null - */ - model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null; - /** - * @description The image to segment. + * @description The image to show * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Bounding Boxes - * @description The bounding boxes to prompt the model with. - * @default null + * type + * @default show_image + * @constant */ - bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null; + type: "show_image"; + }; + /** + * SigLIP_Diffusers_Config + * @description Model config for SigLIP. + */ + SigLIP_Diffusers_Config: { /** - * Point Lists - * @description The list of point lists to prompt the model with. Each list of points represents a single object. - * @default null + * Key + * @description A unique key for this model. 
*/ - point_lists?: components["schemas"]["SAMPointsField"][] | null; + key: string; /** - * Apply Polygon Refinement - * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging). - * @default true + * Hash + * @description The hash of the model file(s). */ - apply_polygon_refinement?: boolean; + hash: string; /** - * Mask Filter - * @description The filtering to apply to the detected masks before merging them into a final output. - * @default all - * @enum {string} + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - mask_filter?: "all" | "largest" | "highest_box_score"; + path: string; /** - * type - * @default segment_anything - * @constant + * File Size + * @description The size of the model in bytes. */ - type: "segment_anything"; - }; - /** SessionProcessorStatus */ - SessionProcessorStatus: { - /** - * Is Started - * @description Whether the session processor is started - */ - is_started: boolean; - /** - * Is Processing - * @description Whether a session is being processed - */ - is_processing: boolean; - }; - /** - * SessionQueueAndProcessorStatus - * @description The overall status of session queue and processor - */ - SessionQueueAndProcessorStatus: { - queue: components["schemas"]["SessionQueueStatus"]; - processor: components["schemas"]["SessionProcessorStatus"]; - }; - /** SessionQueueCountsByDestination */ - SessionQueueCountsByDestination: { + file_size: number; /** - * Queue Id - * @description The ID of the queue + * Name + * @description Name of the model. */ - queue_id: string; + name: string; /** - * Destination - * @description The destination of queue items included in this status + * Description + * @description Model description */ - destination: string; + description: string | null; /** - * Pending - * @description Number of queue items with status 'pending' for the destination + * Source + * @description The original source of the model (path, URL or repo_id). */ - pending: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * In Progress - * @description Number of queue items with status 'in_progress' for the destination + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - in_progress: number; + source_api_response: string | null; /** - * Completed - * @description Number of queue items with status 'complete' for the destination + * Cover Image + * @description Url for image to preview model */ - completed: number; + cover_image: string | null; /** - * Failed - * @description Number of queue items with status 'error' for the destination + * Format + * @default diffusers + * @constant */ - failed: number; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Canceled - * @description Number of queue items with status 'canceled' for the destination + * Type + * @default siglip + * @constant */ - canceled: number; + type: "siglip"; /** - * Total - * @description Total number of queue items for the destination + * Base + * @default any + * @constant */ - total: number; + base: "any"; }; /** - * SessionQueueItem - * @description Session queue item without the full graph. Used for serialization. 
+ * Image-to-Image (Autoscale) + * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached. */ - SessionQueueItem: { + SpandrelImageToImageAutoscaleInvocation: { /** - * Item Id - * @description The identifier of the session queue item + * @description The board to save the image to + * @default null */ - item_id: number; + board?: components["schemas"]["BoardField"] | null; /** - * Status - * @description The status of this queue item - * @default pending - * @enum {string} + * @description Optional metadata to be saved with the image + * @default null */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Priority - * @description The priority of this queue item - * @default 0 + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - priority: number; + id: string; /** - * Batch Id - * @description The ID of the batch associated with this queue item + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - batch_id: string; + is_intermediate?: boolean; /** - * Origin - * @description The origin of this queue item. This data is used by the frontend to determine how to handle results. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - origin?: string | null; + use_cache?: boolean; /** - * Destination - * @description The origin of this queue item. This data is used by the frontend to determine how to handle results + * @description The input image + * @default null */ - destination?: string | null; + image?: components["schemas"]["ImageField"] | null; /** - * Session Id - * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed. + * Image-to-Image Model + * @description Image-to-Image model + * @default null */ - session_id: string; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * Error Type - * @description The error type if this queue item errored + * Tile Size + * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. + * @default 512 */ - error_type?: string | null; + tile_size?: number; /** - * Error Message - * @description The error message if this queue item errored + * type + * @default spandrel_image_to_image_autoscale + * @constant */ - error_message?: string | null; + type: "spandrel_image_to_image_autoscale"; /** - * Error Traceback - * @description The error traceback if this queue item errored + * Scale + * @description The final scale of the output image. If the model does not upscale the image, this will be ignored. + * @default 4 */ - error_traceback?: string | null; + scale?: number; /** - * Created At - * @description When this queue item was created + * Fit To Multiple Of 8 + * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions. + * @default false */ - created_at: string; + fit_to_multiple_of_8?: boolean; + }; + /** + * Image-to-Image + * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel). 
+ */ + SpandrelImageToImageInvocation: { /** - * Updated At - * @description When this queue item was updated + * @description The board to save the image to + * @default null */ - updated_at: string; + board?: components["schemas"]["BoardField"] | null; /** - * Started At - * @description When this queue item was started + * @description Optional metadata to be saved with the image + * @default null */ - started_at?: string | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Completed At - * @description When this queue item was completed + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - completed_at?: string | null; + id: string; /** - * Queue Id - * @description The id of the queue with which this item is associated + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - queue_id: string; + is_intermediate?: boolean; /** - * Field Values - * @description The field values that were used for this queue item + * Use Cache + * @description Whether or not to use the cache + * @default true */ - field_values?: components["schemas"]["NodeFieldValue"][] | null; + use_cache?: boolean; /** - * Retried From Item Id - * @description The item_id of the queue item that this item was retried from + * @description The input image + * @default null */ - retried_from_item_id?: number | null; + image?: components["schemas"]["ImageField"] | null; /** - * Is Api Validation Run - * @description Whether this queue item is an API validation run. - * @default false + * Image-to-Image Model + * @description Image-to-Image model + * @default null */ - is_api_validation_run?: boolean; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * Published Workflow Id - * @description The ID of the published workflow associated with this queue item + * Tile Size + * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. + * @default 512 */ - published_workflow_id?: string | null; + tile_size?: number; /** - * Credits - * @description The total credits used for this queue item + * type + * @default spandrel_image_to_image + * @constant */ - credits?: number | null; - /** @description The fully-populated session to be executed */ - session: components["schemas"]["GraphExecutionState"]; - /** @description The workflow associated with this queue item */ - workflow?: components["schemas"]["WorkflowWithoutID"] | null; + type: "spandrel_image_to_image"; }; - /** SessionQueueStatus */ - SessionQueueStatus: { + /** + * Spandrel_Checkpoint_Config + * @description Model config for Spandrel Image to Image models. + */ + Spandrel_Checkpoint_Config: { /** - * Queue Id - * @description The ID of the queue + * Key + * @description A unique key for this model. */ - queue_id: string; + key: string; /** - * Item Id - * @description The current queue item id + * Hash + * @description The hash of the model file(s). 
*/ - item_id: number | null; + hash: string; /** - * Batch Id - * @description The current queue item's batch id - */ - batch_id: string | null; - /** - * Session Id - * @description The current queue item's session id - */ - session_id: string | null; - /** - * Pending - * @description Number of queue items with status 'pending' - */ - pending: number; - /** - * In Progress - * @description Number of queue items with status 'in_progress' - */ - in_progress: number; - /** - * Completed - * @description Number of queue items with status 'complete' - */ - completed: number; - /** - * Failed - * @description Number of queue items with status 'error' - */ - failed: number; - /** - * Canceled - * @description Number of queue items with status 'canceled' - */ - canceled: number; - /** - * Total - * @description Total number of queue items - */ - total: number; - }; - /** - * Show Image - * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline. - */ - ShowImageInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * @description The image to show - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * type - * @default show_image - * @constant - */ - type: "show_image"; - }; - /** - * SigLIP_Diffusers_Config - * @description Model config for SigLIP. - */ - SigLIP_Diffusers_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ path: string; /** @@ -22386,114 +21643,126 @@ export type components = { */ cover_image: string | null; /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; - /** - * Format - * @default diffusers + * Base + * @default any * @constant */ - format: "diffusers"; - /** @default */ - repo_variant: components["schemas"]["ModelRepoVariant"]; + base: "any"; /** * Type - * @default siglip + * @default spandrel_image_to_image * @constant */ - type: "siglip"; + type: "spandrel_image_to_image"; /** - * Base - * @default any + * Format + * @default checkpoint * @constant */ - base: "any"; + format: "checkpoint"; }; - /** - * Image-to-Image (Autoscale) - * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached. 
- */ - SpandrelImageToImageAutoscaleInvocation: { + /** StarredImagesResult */ + StarredImagesResult: { /** - * @description The board to save the image to - * @default null + * Affected Boards + * @description The ids of boards affected by the delete operation */ - board?: components["schemas"]["BoardField"] | null; + affected_boards: string[]; /** - * @description Optional metadata to be saved with the image - * @default null + * Starred Images + * @description The names of the images that were starred */ - metadata?: components["schemas"]["MetadataField"] | null; + starred_images: string[]; + }; + /** StarterModel */ + StarterModel: { + /** Description */ + description: string; + /** Source */ + source: string; + /** Name */ + name: string; + base: components["schemas"]["BaseModelType"]; + type: components["schemas"]["ModelType"]; + format?: components["schemas"]["ModelFormat"] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Is Installed + * @default false */ - id: string; + is_installed?: boolean; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Previous Names + * @default [] */ - is_intermediate?: boolean; + previous_names?: string[]; + /** Dependencies */ + dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null; + }; + /** StarterModelBundle */ + StarterModelBundle: { + /** Name */ + name: string; + /** Models */ + models: components["schemas"]["StarterModel"][]; + }; + /** StarterModelResponse */ + StarterModelResponse: { + /** Starter Models */ + starter_models: components["schemas"]["StarterModel"][]; + /** Starter Bundles */ + starter_bundles: { + [key: string]: components["schemas"]["StarterModelBundle"]; + }; + }; + /** StarterModelWithoutDependencies */ + StarterModelWithoutDependencies: { + /** Description */ + description: string; + /** Source */ + source: string; + /** Name */ + name: string; + base: components["schemas"]["BaseModelType"]; + type: components["schemas"]["ModelType"]; + format?: components["schemas"]["ModelFormat"] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Is Installed + * @default false */ - use_cache?: boolean; + is_installed?: boolean; /** - * @description The input image - * @default null + * Previous Names + * @default [] */ - image?: components["schemas"]["ImageField"] | null; + previous_names?: string[]; + }; + /** + * String2Output + * @description Base class for invocations that output two strings + */ + String2Output: { /** - * Image-to-Image Model - * @description Image-to-Image model - * @default null + * String 1 + * @description string 1 */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; + string_1: string; /** - * Tile Size - * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. - * @default 512 + * String 2 + * @description string 2 */ - tile_size?: number; + string_2: string; /** * type - * @default spandrel_image_to_image_autoscale + * @default string_2_output * @constant */ - type: "spandrel_image_to_image_autoscale"; - /** - * Scale - * @description The final scale of the output image. If the model does not upscale the image, this will be ignored. - * @default 4 - */ - scale?: number; - /** - * Fit To Multiple Of 8 - * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions. 
- * @default false - */ - fit_to_multiple_of_8?: boolean; + type: "string_2_output"; }; /** - * Image-to-Image - * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel). + * String Batch + * @description Create a batched generation, where the workflow is executed once for each string in the batch. */ - SpandrelImageToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + StringBatchInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -22512,242 +21781,10 @@ export type components = { */ use_cache?: boolean; /** - * @description The input image - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Image-to-Image Model - * @description Image-to-Image model - * @default null - */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Tile Size - * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. - * @default 512 - */ - tile_size?: number; - /** - * type - * @default spandrel_image_to_image - * @constant - */ - type: "spandrel_image_to_image"; - }; - /** - * Spandrel_Checkpoint_Config - * @description Model config for Spandrel Image to Image models. - */ - Spandrel_Checkpoint_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Description - * @description Model description - */ - description: string | null; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
- */ - source_api_response: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; - /** - * Base - * @default any - * @constant - */ - base: "any"; - /** - * Type - * @default spandrel_image_to_image - * @constant - */ - type: "spandrel_image_to_image"; - /** - * Format - * @default checkpoint - * @constant - */ - format: "checkpoint"; - }; - /** StarredImagesResult */ - StarredImagesResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Starred Images - * @description The names of the images that were starred - */ - starred_images: string[]; - }; - /** StarredVideosResult */ - StarredVideosResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Starred Videos - * @description The ids of the videos that were starred - */ - starred_videos: string[]; - }; - /** StarterModel */ - StarterModel: { - /** Description */ - description: string; - /** Source */ - source: string; - /** Name */ - name: string; - base: components["schemas"]["BaseModelType"]; - type: components["schemas"]["ModelType"]; - format?: components["schemas"]["ModelFormat"] | null; - /** - * Is Installed - * @default false - */ - is_installed?: boolean; - /** - * Previous Names - * @default [] - */ - previous_names?: string[]; - /** Dependencies */ - dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null; - }; - /** StarterModelBundle */ - StarterModelBundle: { - /** Name */ - name: string; - /** Models */ - models: components["schemas"]["StarterModel"][]; - }; - /** StarterModelResponse */ - StarterModelResponse: { - /** Starter Models */ - starter_models: components["schemas"]["StarterModel"][]; - /** Starter Bundles */ - starter_bundles: { - [key: string]: components["schemas"]["StarterModelBundle"]; - }; - }; - /** StarterModelWithoutDependencies */ - StarterModelWithoutDependencies: { - /** Description */ - description: string; - /** Source */ - source: string; - /** Name */ - name: string; - base: components["schemas"]["BaseModelType"]; - type: components["schemas"]["ModelType"]; - format?: components["schemas"]["ModelFormat"] | null; - /** - * Is Installed - * @default false - */ - is_installed?: boolean; - /** - * Previous Names - * @default [] - */ - previous_names?: string[]; - }; - /** - * String2Output - * @description Base class for invocations that output two strings - */ - String2Output: { - /** - * String 1 - * @description string 1 - */ - string_1: string; - /** - * String 2 - * @description string 2 - */ - string_2: string; - /** - * type - * @default string_2_output - * @constant - */ - type: "string_2_output"; - }; - /** - * String Batch - * @description Create a batched generation, where the workflow is executed once for each string in the batch. - */ - StringBatchInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Batch Group - * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. - * @default None - * @enum {string} + * Batch Group + * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. + * @default None + * @enum {string} */ batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; /** @@ -23433,11 +22470,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -23509,11 +22541,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -23600,11 +22627,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Base * @default any @@ -23677,11 +22699,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Base * @default any @@ -23761,11 +22778,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -23834,11 +22846,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -23907,11 +22914,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -23980,11 +22982,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -24053,11 +23050,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -24126,11 +23118,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Type * @default embedding @@ -24595,11 +23582,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - 
* @description Usage information for this model - */ - usage_info: string | null; /** * Base * @default unknown @@ -24688,19 +23670,6 @@ export type components = { */ unstarred_images: string[]; }; - /** UnstarredVideosResult */ - UnstarredVideosResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Unstarred Videos - * @description The ids of the videos that were unstarred - */ - unstarred_videos: string[]; - }; /** Upscaler */ Upscaler: { /** @@ -24825,11 +23794,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -24903,11 +23867,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -24981,11 +23940,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -25059,11 +24013,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Config Path * @description Path to the config for this model, if any. @@ -25137,11 +24086,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -25212,11 +24156,6 @@ export type components = { * @description Url for image to preview model */ cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; /** * Format * @default diffusers @@ -25265,324 +24204,6 @@ export type components = { */ output_fields: components["schemas"]["FieldIdentifier"][]; }; - /** - * VideoDTO - * @description Deserialized video record, enriched for the frontend. - */ - VideoDTO: { - /** - * Video Id - * @description The unique id of the video. - */ - video_id: string; - /** - * Video Url - * @description The URL of the video. - */ - video_url: string; - /** - * Thumbnail Url - * @description The URL of the video's thumbnail. - */ - thumbnail_url: string; - /** - * Width - * @description The width of the video in px. - */ - width: number; - /** - * Height - * @description The height of the video in px. - */ - height: number; - /** - * Created At - * @description The created timestamp of the video. - */ - created_at: string; - /** - * Updated At - * @description The updated timestamp of the video. - */ - updated_at: string; - /** - * Deleted At - * @description The deleted timestamp of the video. - */ - deleted_at?: string | null; - /** - * Is Intermediate - * @description Whether this is an intermediate video. - */ - is_intermediate: boolean; - /** - * Session Id - * @description The session ID that generated this video, if it is a generated video. 
- */ - session_id?: string | null; - /** - * Node Id - * @description The node ID that generated this video, if it is a generated video. - */ - node_id?: string | null; - /** - * Starred - * @description Whether this video is starred. - */ - starred: boolean; - /** - * Board Id - * @description The id of the board the image belongs to, if one exists. - */ - board_id?: string | null; - }; - /** - * VideoField - * @description A video primitive field - */ - VideoField: { - /** - * Video Id - * @description The id of the video - */ - video_id: string; - }; - /** - * VideoIdsResult - * @description Response containing ordered video ids with metadata for optimistic updates. - */ - VideoIdsResult: { - /** - * Video Ids - * @description Ordered list of video ids - */ - video_ids: string[]; - /** - * Starred Count - * @description Number of starred videos (when starred_first=True) - */ - starred_count: number; - /** - * Total Count - * @description Total number of videos matching the query - */ - total_count: number; - }; - /** - * VideoOutput - * @description Base class for nodes that output a video - */ - VideoOutput: { - /** @description The output video */ - video: components["schemas"]["VideoField"]; - /** - * Width - * @description The width of the video in pixels - */ - width: number; - /** - * Height - * @description The height of the video in pixels - */ - height: number; - /** - * Duration Seconds - * @description The duration of the video in seconds - */ - duration_seconds: number; - /** - * type - * @default video_output - * @constant - */ - type: "video_output"; - }; - /** - * VideoRecordChanges - * @description A set of changes to apply to a video record. - * - * Only limited changes are valid: - * - `session_id`: change the session associated with a video - * - `is_intermediate`: change the video's `is_intermediate` flag - * - `starred`: change whether the video is starred - */ - VideoRecordChanges: { - /** - * Session Id - * @description The video's new session ID. - */ - session_id?: string | null; - /** - * Is Intermediate - * @description The video's new `is_intermediate` flag. - */ - is_intermediate?: boolean | null; - /** - * Starred - * @description The video's new `starred` state - */ - starred?: boolean | null; - }; - /** Video_ExternalAPI_Runway_Config */ - Video_ExternalAPI_Runway_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Description - * @description Model description - */ - description: string | null; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
- */ - source_api_response: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; - /** - * Type - * @default video - * @constant - */ - type: "video"; - /** - * Trigger Phrases - * @description Set of trigger phrases for this model - */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; - /** - * Format - * @default api - * @constant - */ - format: "api"; - /** - * Base - * @default runway - * @constant - */ - base: "runway"; - }; - /** Video_ExternalAPI_Veo3_Config */ - Video_ExternalAPI_Veo3_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Description - * @description Model description - */ - description: string | null; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. - */ - source_api_response: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image: string | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info: string | null; - /** - * Type - * @default video - * @constant - */ - type: "video"; - /** - * Trigger Phrases - * @description Set of trigger phrases for this model - */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["MainModelDefaultSettings"] | null; - /** - * Format - * @default api - * @constant - */ - format: "api"; - /** - * Base - * @default veo3 - * @constant - */ - base: "veo3"; - }; /** Workflow */ Workflow: { /** @@ -25648,11 +24269,6 @@ export type components = { form?: { [key: string]: components["schemas"]["JsonValue"]; } | null; - /** - * Is Published - * @description Whether the workflow is published or not. - */ - is_published?: boolean | null; /** * Id * @description The id of the workflow. @@ -25714,11 +24330,6 @@ export type components = { * @description The opened timestamp of the workflow. */ opened_at?: string | null; - /** - * Is Published - * @description Whether the workflow is published or not. - */ - is_published?: boolean | null; /** @description The workflow. */ workflow: components["schemas"]["Workflow"]; }; @@ -25749,11 +24360,6 @@ export type components = { * @description The opened timestamp of the workflow. */ opened_at?: string | null; - /** - * Is Published - * @description Whether the workflow is published or not. - */ - is_published?: boolean | null; /** * Description * @description The description of the workflow. @@ -25805,574 +24411,110 @@ export type components = { * @description The opened timestamp of the workflow. 
*/ opened_at?: string | null; - /** - * Is Published - * @description Whether the workflow is published or not. - */ - is_published?: boolean | null; /** @description The workflow. */ workflow: components["schemas"]["Workflow"]; /** * Thumbnail Url - * @description The URL of the workflow thumbnail. - */ - thumbnail_url?: string | null; - }; - /** WorkflowWithoutID */ - WorkflowWithoutID: { - /** - * Name - * @description The name of the workflow. - */ - name: string; - /** - * Author - * @description The author of the workflow. - */ - author: string; - /** - * Description - * @description The description of the workflow. - */ - description: string; - /** - * Version - * @description The version of the workflow. - */ - version: string; - /** - * Contact - * @description The contact of the workflow. - */ - contact: string; - /** - * Tags - * @description The tags of the workflow. - */ - tags: string; - /** - * Notes - * @description The notes of the workflow. - */ - notes: string; - /** - * Exposedfields - * @description The exposed fields of the workflow. - */ - exposedFields: components["schemas"]["ExposedField"][]; - /** @description The meta of the workflow. */ - meta: components["schemas"]["WorkflowMeta"]; - /** - * Nodes - * @description The nodes of the workflow. - */ - nodes: { - [key: string]: components["schemas"]["JsonValue"]; - }[]; - /** - * Edges - * @description The edges of the workflow. - */ - edges: { - [key: string]: components["schemas"]["JsonValue"]; - }[]; - /** - * Form - * @description The form of the workflow. - */ - form?: { - [key: string]: components["schemas"]["JsonValue"]; - } | null; - /** - * Is Published - * @description Whether the workflow is published or not. - */ - is_published?: boolean | null; - }; - }; - responses: never; - parameters: never; - requestBodies: never; - headers: never; - pathItems: never; -}; -export type $defs = Record; -export interface operations { - parse_dynamicprompts: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_parse_dynamicprompts"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["DynamicPromptsResponse"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - list_model_records: { - parameters: { - query?: { - /** @description Base models to include */ - base_models?: components["schemas"]["BaseModelType"][] | null; - /** @description The type of model to get */ - model_type?: components["schemas"]["ModelType"] | null; - /** @description Exact match on the name of the model */ - model_name?: string | null; - /** @description Exact match on the format of the model (e.g. 
'diffusers') */ - model_format?: components["schemas"]["ModelFormat"] | null; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelsList"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_model_records_by_attrs: { - parameters: { - query: { - /** @description The name of the model */ - name: string; - /** @description The type of the model */ - type: components["schemas"]["ModelType"]; - /** @description The base model of the model */ - base: components["schemas"]["BaseModelType"]; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | 
components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_model_record: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Key of the model record to fetch. */ - key: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description The model configuration was retrieved successfully */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - /** @example { - * "path": "string", - * "name": "string", - * "base": "sd-1", - * "type": "main", - * "format": "checkpoint", - * "config_path": "string", - * "key": "string", - * "hash": "string", - * "file_size": 1, - * "description": "string", - * "source": "string", - * "converted_at": 0, - * "variant": "normal", - * "prediction_type": "epsilon", - * "repo_variant": "fp16", - * "upcast_attention": false - * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | 
components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; - }; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description The model could not be found */ - 404: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - delete_model: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Unique key of model to remove from model registry. 
*/ - key: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Model deleted successfully */ - 204: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Model not found */ - 404: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - update_model_record: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Unique key of model */ - key: string; - }; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["ModelRecordChanges"]; - }; - }; - responses: { - /** @description The model was updated successfully */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - /** @example { - * "path": "string", - * "name": "string", - * "base": "sd-1", - * "type": "main", - * "format": "checkpoint", - * "config_path": "string", - * "key": "string", - * "hash": "string", - * "file_size": 1, - * "description": "string", - * "source": "string", - * "converted_at": 0, - * "variant": "normal", - * "prediction_type": "epsilon", - * "repo_variant": "fp16", - * "upcast_attention": false - * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | 
components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; - }; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description The model could not be found */ - 404: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description There is already a model corresponding to the new name */ - 409: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - scan_for_models: { - parameters: { - query?: { - /** @description Directory path to search for models */ - scan_path?: string; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Directory scanned successfully */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["FoundModel"][]; - }; - }; - /** @description Invalid directory path */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_hugging_face_models: { - parameters: { - query?: { - /** @description Hugging face repo to search for models */ - hugging_face_repo?: string; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Hugging Face repo scanned successfully */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HuggingFaceModels"]; - }; - }; - /** @description Invalid hugging face repo */ - 
400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_model_image: { - parameters: { - query?: never; - header?: never; - path: { - /** @description The name of model image file to get */ - key: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description The model image was fetched successfully */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": unknown; - }; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description The model image could not be found */ - 404: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - delete_model_image: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Unique key of model image to remove from model_images directory. */ - key: string; - }; - cookie?: never; + * @description The URL of the workflow thumbnail. + */ + thumbnail_url?: string | null; }; - requestBody?: never; - responses: { - /** @description Model image deleted successfully */ - 204: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Model image not found */ - 404: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; + /** WorkflowWithoutID */ + WorkflowWithoutID: { + /** + * Name + * @description The name of the workflow. + */ + name: string; + /** + * Author + * @description The author of the workflow. + */ + author: string; + /** + * Description + * @description The description of the workflow. + */ + description: string; + /** + * Version + * @description The version of the workflow. + */ + version: string; + /** + * Contact + * @description The contact of the workflow. + */ + contact: string; + /** + * Tags + * @description The tags of the workflow. + */ + tags: string; + /** + * Notes + * @description The notes of the workflow. + */ + notes: string; + /** + * Exposedfields + * @description The exposed fields of the workflow. + */ + exposedFields: components["schemas"]["ExposedField"][]; + /** @description The meta of the workflow. */ + meta: components["schemas"]["WorkflowMeta"]; + /** + * Nodes + * @description The nodes of the workflow. + */ + nodes: { + [key: string]: components["schemas"]["JsonValue"]; + }[]; + /** + * Edges + * @description The edges of the workflow. + */ + edges: { + [key: string]: components["schemas"]["JsonValue"]; + }[]; + /** + * Form + * @description The form of the workflow. 
+ */ + form?: { + [key: string]: components["schemas"]["JsonValue"]; + } | null; }; }; - update_model_image: { + responses: never; + parameters: never; + requestBodies: never; + headers: never; + pathItems: never; +}; +export type $defs = Record; +export interface operations { + parse_dynamicprompts: { parameters: { query?: never; header?: never; - path: { - /** @description Unique key of model */ - key: string; - }; + path?: never; cookie?: never; }; requestBody: { content: { - "multipart/form-data": components["schemas"]["Body_update_model_image"]; + "application/json": components["schemas"]["Body_parse_dynamicprompts"]; }; }; responses: { - /** @description The model image was updated successfully */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": unknown; - }; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["DynamicPromptsResponse"]; }; - content?: never; }; /** @description Validation Error */ 422: { @@ -26385,9 +24527,18 @@ export interface operations { }; }; }; - list_model_installs: { + list_model_records: { parameters: { - query?: never; + query?: { + /** @description Base models to include */ + base_models?: components["schemas"]["BaseModelType"][] | null; + /** @description The type of model to get */ + model_type?: components["schemas"]["ModelType"] | null; + /** @description Exact match on the name of the model */ + model_name?: string | null; + /** @description Exact match on the format of the model (e.g. 'diffusers') */ + model_format?: components["schemas"]["ModelFormat"] | null; + }; header?: never; path?: never; cookie?: never; @@ -26400,53 +24551,8 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ModelInstallJob"][]; - }; - }; - }; - }; - install_model: { - parameters: { - query: { - /** @description Model source to install, can be a local path, repo_id, or remote URL */ - source: string; - /** @description Whether or not to install a local model in place */ - inplace?: boolean | null; - /** @description access token for the remote resource */ - access_token?: string | null; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["ModelRecordChanges"]; - }; - }; - responses: { - /** @description The model imported successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelInstallJob"]; - }; - }; - /** @description There is already a model corresponding to this path or repo_id */ - 409: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Unrecognized file/folder format */ - 415: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["ModelsList"]; }; - content?: never; }; /** @description Validation Error */ 422: { @@ -26457,54 +24563,17 @@ export interface operations { "application/json": components["schemas"]["HTTPValidationError"]; }; }; - /** @description The model appeared to import successfully, but could not be found in the model manager */ - 424: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - }; - }; - prune_model_install_jobs: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 
200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": unknown; - }; - }; - /** @description All completed and errored jobs have been pruned */ - 204: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; }; }; - install_hugging_face_model: { + get_model_records_by_attrs: { parameters: { query: { - /** @description HuggingFace repo_id to install */ - source: string; + /** @description The name of the model */ + name: string; + /** @description The type of the model */ + type: components["schemas"]["ModelType"]; + /** @description The base model of the model */ + base: components["schemas"]["BaseModelType"]; }; header?: never; path?: never; @@ -26512,28 +24581,14 @@ export interface operations { }; requestBody?: never; responses: { - /** @description The model is being installed */ - 201: { + /** @description Successful Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "text/html": string; - }; - }; - /** @description Bad request */ - 400: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description There is already a model corresponding to this path or repo_id */ - 409: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | 
components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; - content?: never; }; /** @description Validation Error */ 422: { @@ -26546,28 +24601,53 @@ export interface operations { }; }; }; - get_model_install_job: { + get_model_record: { parameters: { query?: never; header?: never; - path: { - /** @description Model install id */ - id: number; + path: { + /** @description Key of the model record to fetch. */ + key: string; }; cookie?: never; }; requestBody?: never; responses: { - /** @description Success */ + /** @description The model configuration was retrieved successfully */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ModelInstallJob"]; + /** @example { + * "path": "string", + * "name": "string", + * "base": "sd-1", + * "type": "main", + * "format": "checkpoint", + * "config_path": "string", + * "key": "string", + * "hash": "string", + * "file_size": 1, + * "description": "string", + * "source": "string", + * "converted_at": 0, + * "variant": "normal", + * "prediction_type": "epsilon", + * "repo_variant": "fp16", + * "upcast_attention": false + * } */ + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | 
components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; - /** @description No such job */ + /** @description Bad request */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description The model could not be found */ 404: { headers: { [name: string]: unknown; @@ -26585,29 +24665,27 @@ export interface operations { }; }; }; - cancel_model_install_job: { + delete_model: { parameters: { query?: never; header?: never; path: { - /** @description Model install job ID */ - id: number; + /** @description Unique key of model to remove from model registry. */ + key: string; }; cookie?: never; }; requestBody?: never; responses: { - /** @description The job was cancelled successfully */ - 201: { + /** @description Model deleted successfully */ + 204: { headers: { [name: string]: unknown; }; - content: { - "application/json": unknown; - }; + content?: never; }; - /** @description No such job */ - 415: { + /** @description Model not found */ + 404: { headers: { [name: string]: unknown; }; @@ -26624,19 +24702,23 @@ export interface operations { }; }; }; - convert_model: { + update_model_record: { parameters: { query?: never; header?: never; path: { - /** @description Unique key of the safetensors main model to convert to diffusers format. 
*/ + /** @description Unique key of model */ key: string; }; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["ModelRecordChanges"]; + }; + }; responses: { - /** @description Model converted successfully */ + /** @description The model was updated successfully */ 200: { headers: { [name: string]: unknown; @@ -26660,7 +24742,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | 
components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -26670,14 +24752,14 @@ export interface operations { }; content?: never; }; - /** @description Model not found */ + /** @description The model could not be found */ 404: { headers: { [name: string]: unknown; }; content?: never; }; - /** @description There is already a model registered at this location */ + /** @description There is already a model corresponding to the new name */ 409: { headers: { [name: string]: unknown; @@ -26695,56 +24777,97 @@ export interface operations { }; }; }; - get_starter_models: { + scan_for_models: { parameters: { - query?: never; + query?: { + /** @description Directory path to search for models */ + scan_path?: string; + }; header?: never; path?: never; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Directory scanned successfully */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["StarterModelResponse"]; + "application/json": components["schemas"]["FoundModel"][]; + }; + }; + /** @description Invalid directory path */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; }; - get_stats: { + get_hugging_face_models: { parameters: { - query?: never; + query?: { + /** @description Hugging face repo to search for models */ + hugging_face_repo?: string; + }; header?: never; path?: never; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Hugging Face repo scanned successfully */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["CacheStats"] | null; + "application/json": components["schemas"]["HuggingFaceModels"]; + }; + }; + /** @description Invalid hugging face repo */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; }; - empty_model_cache: { + get_model_image: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description The name of model image file to get */ + key: string; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description The model image was fetched successfully */ 200: { headers: { [name: string]: unknown; @@ -26753,49 +24876,99 @@ export interface operations { "application/json": unknown; }; }; + /** 
@description Bad request */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description The model image could not be found */ + 404: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; }; }; - get_hf_login_status: { + delete_model_image: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description Unique key of model image to remove from model_images directory. */ + key: string; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ - 200: { + /** @description Model image deleted successfully */ + 204: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Model image not found */ + 404: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HFTokenStatus"]; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; }; - do_hf_login: { + update_model_image: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description Unique key of model */ + key: string; + }; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["Body_do_hf_login"]; + "multipart/form-data": components["schemas"]["Body_update_model_image"]; }; }; responses: { - /** @description Successful Response */ + /** @description The model image was updated successfully */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HFTokenStatus"]; + "application/json": unknown; + }; + }; + /** @description Bad request */ + 400: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -26808,7 +24981,7 @@ export interface operations { }; }; }; - reset_hf_token: { + list_model_installs: { parameters: { query?: never; header?: never; @@ -26823,32 +24996,73 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HFTokenStatus"]; + "application/json": components["schemas"]["ModelInstallJob"][]; }; }; }; }; - list_downloads: { + install_model: { parameters: { - query?: never; + query: { + /** @description Model source to install, can be a local path, repo_id, or remote URL */ + source: string; + /** @description Whether or not to install a local model in place */ + inplace?: boolean | null; + /** @description access token for the remote resource */ + access_token?: string | null; + }; header?: never; path?: never; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "application/json": components["schemas"]["ModelRecordChanges"]; + }; + }; responses: { - /** @description Successful Response */ - 200: { + /** @description The model imported successfully */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description There is already a model corresponding to this path or repo_id */ + 409: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Unrecognized file/folder format */ + 415: { + headers: { + [name: string]: unknown; + }; + content?: never; + 
}; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + /** @description The model appeared to import successfully, but could not be found in the model manager */ + 424: { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["DownloadJob"][]; - }; + content?: never; }; }; }; - prune_downloads: { + prune_model_install_jobs: { parameters: { query?: never; header?: never; @@ -26866,7 +25080,7 @@ export interface operations { "application/json": unknown; }; }; - /** @description All completed jobs have been pruned */ + /** @description All completed and errored jobs have been pruned */ 204: { headers: { [name: string]: unknown; @@ -26882,27 +25096,40 @@ export interface operations { }; }; }; - download: { + install_hugging_face_model: { parameters: { - query?: never; + query: { + /** @description HuggingFace repo_id to install */ + source: string; + }; header?: never; path?: never; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_download"]; - }; - }; + requestBody?: never; responses: { - /** @description Successful Response */ - 200: { + /** @description The model is being installed */ + 201: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DownloadJob"]; + "text/html": string; + }; + }; + /** @description Bad request */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description There is already a model corresponding to this path or repo_id */ + 409: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -26915,12 +25142,12 @@ export interface operations { }; }; }; - get_download_job: { + get_model_install_job: { parameters: { query?: never; header?: never; path: { - /** @description ID of the download job to fetch. */ + /** @description Model install id */ id: number; }; cookie?: never; @@ -26933,10 +25160,10 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DownloadJob"]; + "application/json": components["schemas"]["ModelInstallJob"]; }; }; - /** @description The requested download JobID could not be found */ + /** @description No such job */ 404: { headers: { [name: string]: unknown; @@ -26954,20 +25181,20 @@ export interface operations { }; }; }; - cancel_download_job: { + cancel_model_install_job: { parameters: { query?: never; header?: never; path: { - /** @description ID of the download job to cancel. 
*/ + /** @description Model install job ID */ id: number; }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ - 200: { + /** @description The job was cancelled successfully */ + 201: { headers: { [name: string]: unknown; }; @@ -26975,15 +25202,8 @@ export interface operations { "application/json": unknown; }; }; - /** @description Job has been cancelled */ - 204: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description The requested download JobID could not be found */ - 404: { + /** @description No such job */ + 415: { headers: { [name: string]: unknown; }; @@ -27000,106 +25220,120 @@ export interface operations { }; }; }; - cancel_all_download_jobs: { + convert_model: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description Unique key of the safetensors main model to convert to diffusers format. */ + key: string; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Model converted successfully */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": unknown; + /** @example { + * "path": "string", + * "name": "string", + * "base": "sd-1", + * "type": "main", + * "format": "checkpoint", + * "config_path": "string", + * "key": "string", + * "hash": "string", + * "file_size": 1, + * "description": "string", + * "source": "string", + * "converted_at": 0, + * "variant": "normal", + * "prediction_type": "epsilon", + * "repo_variant": "fp16", + * "upcast_attention": false + * } */ + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | 
components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; - /** @description Download jobs have been cancelled */ - 204: { + /** @description Bad request */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Model not found */ + 404: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description There is already a model registered at this location */ + 409: { headers: { [name: string]: unknown; }; content?: never; }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; }; }; - upload_image: { + get_starter_models: { parameters: { - query: { - /** @description The category of the image */ - image_category: components["schemas"]["ImageCategory"]; - /** @description Whether this is an intermediate image */ - is_intermediate: boolean; - /** @description The board to add this image to, if any */ - board_id?: string | null; - /** @description The session ID associated with this upload, if any */ - session_id?: string | null; - /** @description Whether to crop the image */ - crop_visible?: boolean | null; - }; + query?: never; header?: never; path?: never; cookie?: never; }; - requestBody: { - content: { - "multipart/form-data": components["schemas"]["Body_upload_image"]; - }; - }; + requestBody?: never; responses: { - /** @description The image was uploaded successfully */ - 201: { + /** @description Successful Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageDTO"]; - }; - }; - /** @description Image upload failed */ - 415: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["StarterModelResponse"]; }; - content?: never; }; - /** @description Validation Error */ - 422: { + }; + }; + get_stats: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful 
Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HTTPValidationError"]; + "application/json": components["schemas"]["CacheStats"] | null; }; }; }; }; - list_image_dtos: { + empty_model_cache: { parameters: { - query?: { - /** @description The origin of images to list. */ - image_origin?: components["schemas"]["ResourceOrigin"] | null; - /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][] | null; - /** @description Whether to list intermediate images. */ - is_intermediate?: boolean | null; - /** @description The board id to filter by. Use 'none' to find images without a board. */ - board_id?: string | null; - /** @description The page offset */ - offset?: number; - /** @description The number of images per page */ - limit?: number; - /** @description The order of sort */ - order_dir?: components["schemas"]["SQLiteDirection"]; - /** @description Whether to sort by starred images first */ - starred_first?: boolean; - /** @description The term to search for */ - search_term?: string | null; - }; + query?: never; header?: never; path?: never; cookie?: never; @@ -27112,21 +25346,32 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"]; + "application/json": unknown; }; }; - /** @description Validation Error */ - 422: { + }; + }; + get_hf_login_status: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HTTPValidationError"]; + "application/json": components["schemas"]["HFTokenStatus"]; }; }; }; }; - create_image_upload_entry: { + do_hf_login: { parameters: { query?: never; header?: never; @@ -27135,7 +25380,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["Body_create_image_upload_entry"]; + "application/json": components["schemas"]["Body_do_hf_login"]; }; }; responses: { @@ -27145,7 +25390,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageUploadEntry"]; + "application/json": components["schemas"]["HFTokenStatus"]; }; }; /** @description Validation Error */ @@ -27159,14 +25404,11 @@ export interface operations { }; }; }; - get_image_dto: { + reset_hf_token: { parameters: { query?: never; header?: never; - path: { - /** @description The name of image to get */ - image_name: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -27177,28 +25419,36 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageDTO"]; + "application/json": components["schemas"]["HFTokenStatus"]; }; }; - /** @description Validation Error */ - 422: { + }; + }; + list_downloads: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HTTPValidationError"]; + "application/json": components["schemas"]["DownloadJob"][]; }; }; }; }; - delete_image: { + prune_downloads: { parameters: { query?: never; header?: never; - path: { - /** @description The name of the 
image to delete */ - image_name: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -27209,33 +25459,35 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DeleteImagesResult"]; + "application/json": unknown; }; }; - /** @description Validation Error */ - 422: { + /** @description All completed jobs have been pruned */ + 204: { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; + content?: never; + }; + /** @description Bad request */ + 400: { + headers: { + [name: string]: unknown; }; + content?: never; }; }; }; - update_image: { + download: { parameters: { query?: never; header?: never; - path: { - /** @description The name of the image to update */ - image_name: string; - }; + path?: never; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["ImageRecordChanges"]; + "application/json": components["schemas"]["Body_download"]; }; }; responses: { @@ -27245,7 +25497,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageDTO"]; + "application/json": components["schemas"]["DownloadJob"]; }; }; /** @description Validation Error */ @@ -27259,53 +25511,52 @@ export interface operations { }; }; }; - get_intermediates_count: { + get_download_job: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description ID of the download job to fetch. */ + id: number; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Success */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": number; + "application/json": components["schemas"]["DownloadJob"]; }; }; - }; - }; - clear_intermediates: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { + /** @description The requested download JobID could not be found */ + 404: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { headers: { [name: string]: unknown; }; content: { - "application/json": number; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; }; - get_image_metadata: { + cancel_download_job: { parameters: { query?: never; header?: never; path: { - /** @description The name of image to get */ - image_name: string; + /** @description ID of the download job to cancel. 
*/ + id: number; }; cookie?: never; }; @@ -27317,8 +25568,22 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["MetadataField"] | null; + "application/json": unknown; + }; + }; + /** @description Job has been cancelled */ + 204: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description The requested download JobID could not be found */ + 404: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -27331,14 +25596,11 @@ export interface operations { }; }; }; - get_image_workflow: { + cancel_all_download_jobs: { parameters: { query?: never; header?: never; - path: { - /** @description The name of image whose workflow to get */ - image_name: string; - }; + path?: never; cookie?: never; }; requestBody?: never; @@ -27349,43 +25611,53 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["WorkflowAndGraphResponse"]; + "application/json": unknown; }; }; - /** @description Validation Error */ - 422: { + /** @description Download jobs have been cancelled */ + 204: { headers: { [name: string]: unknown; }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; + content?: never; }; }; }; - get_image_full: { + upload_image: { parameters: { - query?: never; - header?: never; - path: { - /** @description The name of full-resolution image file to get */ - image_name: string; + query: { + /** @description The category of the image */ + image_category: components["schemas"]["ImageCategory"]; + /** @description Whether this is an intermediate image */ + is_intermediate: boolean; + /** @description The board to add this image to, if any */ + board_id?: string | null; + /** @description The session ID associated with this upload, if any */ + session_id?: string | null; + /** @description Whether to crop the image */ + crop_visible?: boolean | null; }; + header?: never; + path?: never; cookie?: never; }; - requestBody?: never; + requestBody: { + content: { + "multipart/form-data": components["schemas"]["Body_upload_image"]; + }; + }; responses: { - /** @description Return the full-resolution image */ - 200: { + /** @description The image was uploaded successfully */ + 201: { headers: { [name: string]: unknown; }; content: { - "image/png": unknown; + "application/json": components["schemas"]["ImageDTO"]; }; }; - /** @description Image not found */ - 404: { + /** @description Image upload failed */ + 415: { headers: { [name: string]: unknown; }; @@ -27402,33 +25674,75 @@ export interface operations { }; }; }; - get_image_full_head: { + list_image_dtos: { parameters: { - query?: never; - header?: never; - path: { - /** @description The name of full-resolution image file to get */ - image_name: string; + query?: { + /** @description The origin of images to list. */ + image_origin?: components["schemas"]["ResourceOrigin"] | null; + /** @description The categories of image to include. */ + categories?: components["schemas"]["ImageCategory"][] | null; + /** @description Whether to list intermediate images. */ + is_intermediate?: boolean | null; + /** @description The board id to filter by. Use 'none' to find images without a board. 
*/ + board_id?: string | null; + /** @description The page offset */ + offset?: number; + /** @description The number of images per page */ + limit?: number; + /** @description The order of sort */ + order_dir?: components["schemas"]["SQLiteDirection"]; + /** @description Whether to sort by starred images first */ + starred_first?: boolean; + /** @description The term to search for */ + search_term?: string | null; }; + header?: never; + path?: never; cookie?: never; }; requestBody?: never; responses: { - /** @description Return the full-resolution image */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; content: { - "image/png": unknown; + "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"]; }; }; - /** @description Image not found */ - 404: { + /** @description Validation Error */ + 422: { headers: { [name: string]: unknown; }; - content?: never; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + create_image_upload_entry: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["Body_create_image_upload_entry"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ImageUploadEntry"]; + }; }; /** @description Validation Error */ 422: { @@ -27441,33 +25755,26 @@ export interface operations { }; }; }; - get_image_thumbnail: { + get_image_dto: { parameters: { query?: never; header?: never; path: { - /** @description The name of thumbnail image file to get */ + /** @description The name of image to get */ image_name: string; }; cookie?: never; }; requestBody?: never; responses: { - /** @description Return the image thumbnail */ + /** @description Successful Response */ 200: { headers: { [name: string]: unknown; }; content: { - "image/webp": unknown; - }; - }; - /** @description Image not found */ - 404: { - headers: { - [name: string]: unknown; + "application/json": components["schemas"]["ImageDTO"]; }; - content?: never; }; /** @description Validation Error */ 422: { @@ -27480,12 +25787,12 @@ export interface operations { }; }; }; - get_image_urls: { + delete_image: { parameters: { query?: never; header?: never; path: { - /** @description The name of the image whose URL to get */ + /** @description The name of the image to delete */ image_name: string; }; cookie?: never; @@ -27498,7 +25805,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageUrlsDTO"]; + "application/json": components["schemas"]["DeleteImagesResult"]; }; }; /** @description Validation Error */ @@ -27512,16 +25819,19 @@ export interface operations { }; }; }; - delete_images_from_list: { + update_image: { parameters: { query?: never; header?: never; - path?: never; + path: { + /** @description The name of the image to update */ + image_name: string; + }; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["Body_delete_images_from_list"]; + "application/json": components["schemas"]["ImageRecordChanges"]; }; }; responses: { @@ -27531,7 +25841,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DeleteImagesResult"]; + "application/json": components["schemas"]["ImageDTO"]; }; }; /** @description Validation Error 
*/ @@ -27545,7 +25855,7 @@ export interface operations { }; }; }; - delete_uncategorized_images: { + get_intermediates_count: { parameters: { query?: never; header?: never; @@ -27560,56 +25870,42 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DeleteImagesResult"]; + "application/json": number; }; }; }; }; - star_images_in_list: { + clear_intermediates: { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_star_images_in_list"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["StarredImagesResult"]; - }; - }; - /** @description Validation Error */ - 422: { + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["HTTPValidationError"]; + "application/json": number; }; }; }; }; - unstar_images_in_list: { + get_image_metadata: { parameters: { query?: never; header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_unstar_images_in_list"]; + path: { + /** @description The name of image to get */ + image_name: string; }; + cookie?: never; }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -27617,7 +25913,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["UnstarredImagesResult"]; + "application/json": components["schemas"]["MetadataField"] | null; }; }; /** @description Validation Error */ @@ -27631,26 +25927,25 @@ export interface operations { }; }; }; - download_images_from_list: { + get_image_workflow: { parameters: { query?: never; header?: never; - path?: never; - cookie?: never; - }; - requestBody?: { - content: { - "application/json": components["schemas"]["Body_download_images_from_list"]; + path: { + /** @description The name of image whose workflow to get */ + image_name: string; }; + cookie?: never; }; + requestBody?: never; responses: { /** @description Successful Response */ - 202: { + 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImagesDownloaded"]; + "application/json": components["schemas"]["WorkflowAndGraphResponse"]; }; }; /** @description Validation Error */ @@ -27664,25 +25959,25 @@ export interface operations { }; }; }; - get_bulk_download_item: { + get_image_full: { parameters: { query?: never; header?: never; path: { - /** @description The bulk_download_item_name of the bulk download item to get */ - bulk_download_item_name: string; + /** @description The name of full-resolution image file to get */ + image_name: string; }; cookie?: never; }; requestBody?: never; responses: { - /** @description Return the complete bulk download item */ + /** @description Return the full-resolution image */ 200: { headers: { [name: string]: unknown; }; content: { - "application/zip": unknown; + "image/png": unknown; }; }; /** @description Image not found */ @@ -27703,38 +25998,33 @@ export interface operations { }; }; }; - get_image_names: { + get_image_full_head: { parameters: { - query?: { - /** @description The origin of images to list. 
*/ - image_origin?: components["schemas"]["ResourceOrigin"] | null; - /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][] | null; - /** @description Whether to list intermediate images. */ - is_intermediate?: boolean | null; - /** @description The board id to filter by. Use 'none' to find images without a board. */ - board_id?: string | null; - /** @description The order of sort */ - order_dir?: components["schemas"]["SQLiteDirection"]; - /** @description Whether to sort by starred images first */ - starred_first?: boolean; - /** @description The term to search for */ - search_term?: string | null; - }; + query?: never; header?: never; - path?: never; + path: { + /** @description The name of full-resolution image file to get */ + image_name: string; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Return the full-resolution image */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageNamesResult"]; + "image/png": unknown; + }; + }; + /** @description Image not found */ + 404: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -27747,27 +26037,33 @@ export interface operations { }; }; }; - get_images_by_names: { + get_image_thumbnail: { parameters: { query?: never; header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_get_images_by_names"]; + path: { + /** @description The name of thumbnail image file to get */ + image_name: string; }; + cookie?: never; }; + requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Return the image thumbnail */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["ImageDTO"][]; + "image/webp": unknown; + }; + }; + /** @description Image not found */ + 404: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -27780,13 +26076,13 @@ export interface operations { }; }; }; - get_video_dto: { + get_image_urls: { parameters: { query?: never; header?: never; path: { - /** @description The id of the video to get */ - video_id: string; + /** @description The name of the image whose URL to get */ + image_name: string; }; cookie?: never; }; @@ -27798,7 +26094,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["VideoDTO"]; + "application/json": components["schemas"]["ImageUrlsDTO"]; }; }; /** @description Validation Error */ @@ -27812,19 +26108,16 @@ export interface operations { }; }; }; - update_video: { + delete_images_from_list: { parameters: { query?: never; header?: never; - path: { - /** @description The id of the video to update */ - video_id: string; - }; + path?: never; cookie?: never; }; requestBody: { content: { - "application/json": components["schemas"]["VideoRecordChanges"]; + "application/json": components["schemas"]["Body_delete_images_from_list"]; }; }; responses: { @@ -27834,7 +26127,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["VideoDTO"]; + "application/json": components["schemas"]["DeleteImagesResult"]; }; }; /** @description Validation Error */ @@ -27848,18 +26141,14 @@ export interface operations { }; }; }; - delete_videos_from_list: { + 
delete_uncategorized_images: { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_delete_videos_from_list"]; - }; - }; + requestBody?: never; responses: { /** @description Successful Response */ 200: { @@ -27867,21 +26156,12 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DeleteVideosResult"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; + "application/json": components["schemas"]["DeleteImagesResult"]; }; }; }; }; - star_videos_in_list: { + star_images_in_list: { parameters: { query?: never; header?: never; @@ -27890,7 +26170,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["Body_star_videos_in_list"]; + "application/json": components["schemas"]["Body_star_images_in_list"]; }; }; responses: { @@ -27900,7 +26180,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["StarredVideosResult"]; + "application/json": components["schemas"]["StarredImagesResult"]; }; }; /** @description Validation Error */ @@ -27914,7 +26194,7 @@ export interface operations { }; }; }; - unstar_videos_in_list: { + unstar_images_in_list: { parameters: { query?: never; header?: never; @@ -27923,7 +26203,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["Body_unstar_videos_in_list"]; + "application/json": components["schemas"]["Body_unstar_images_in_list"]; }; }; responses: { @@ -27933,7 +26213,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["UnstarredVideosResult"]; + "application/json": components["schemas"]["UnstarredImagesResult"]; }; }; /** @description Validation Error */ @@ -27947,58 +26227,66 @@ export interface operations { }; }; }; - delete_uncategorized_videos: { + download_images_from_list: { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - requestBody?: never; + requestBody?: { + content: { + "application/json": components["schemas"]["Body_download_images_from_list"]; + }; + }; responses: { /** @description Successful Response */ - 200: { + 202: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ImagesDownloaded"]; + }; + }; + /** @description Validation Error */ + 422: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["DeleteVideosResult"]; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; }; - list_video_dtos: { + get_bulk_download_item: { parameters: { - query?: { - /** @description Whether to list intermediate videos. */ - is_intermediate?: boolean | null; - /** @description The board id to filter by. Use 'none' to find videos without a board. 
*/ - board_id?: string | null; - /** @description The page offset */ - offset?: number; - /** @description The number of videos per page */ - limit?: number; - /** @description The order of sort */ - order_dir?: components["schemas"]["SQLiteDirection"]; - /** @description Whether to sort by starred videos first */ - starred_first?: boolean; - /** @description The term to search for */ - search_term?: string | null; - }; + query?: never; header?: never; - path?: never; + path: { + /** @description The bulk_download_item_name of the bulk download item to get */ + bulk_download_item_name: string; + }; cookie?: never; }; requestBody?: never; responses: { - /** @description Successful Response */ + /** @description Return the complete bulk download item */ 200: { headers: { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["OffsetPaginatedResults_VideoDTO_"]; + "application/zip": unknown; + }; + }; + /** @description Image not found */ + 404: { + headers: { + [name: string]: unknown; }; + content?: never; }; /** @description Validation Error */ 422: { @@ -28011,16 +26299,20 @@ export interface operations { }; }; }; - get_video_ids: { + get_image_names: { parameters: { query?: { - /** @description Whether to list intermediate videos. */ + /** @description The origin of images to list. */ + image_origin?: components["schemas"]["ResourceOrigin"] | null; + /** @description The categories of image to include. */ + categories?: components["schemas"]["ImageCategory"][] | null; + /** @description Whether to list intermediate images. */ is_intermediate?: boolean | null; - /** @description The board id to filter by. Use 'none' to find videos without a board. */ + /** @description The board id to filter by. Use 'none' to find images without a board. 
*/ board_id?: string | null; /** @description The order of sort */ order_dir?: components["schemas"]["SQLiteDirection"]; - /** @description Whether to sort by starred videos first */ + /** @description Whether to sort by starred images first */ starred_first?: boolean; /** @description The term to search for */ search_term?: string | null; @@ -28037,7 +26329,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["VideoIdsResult"]; + "application/json": components["schemas"]["ImageNamesResult"]; }; }; /** @description Validation Error */ @@ -28051,7 +26343,7 @@ export interface operations { }; }; }; - get_videos_by_ids: { + get_images_by_names: { parameters: { query?: never; header?: never; @@ -28060,7 +26352,7 @@ export interface operations { }; requestBody: { content: { - "application/json": components["schemas"]["Body_get_videos_by_ids"]; + "application/json": components["schemas"]["Body_get_images_by_names"]; }; }; responses: { @@ -28070,7 +26362,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["VideoDTO"][]; + "application/json": components["schemas"]["ImageDTO"][]; }; }; /** @description Validation Error */ @@ -28432,72 +26724,6 @@ export interface operations { }; }; }; - add_videos_to_board: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_add_videos_to_board"]; - }; - }; - responses: { - /** @description Videos were added to board successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["AddVideosToBoardResult"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - remove_videos_from_board: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_remove_videos_from_board"]; - }; - }; - responses: { - /** @description Videos were removed from board successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["RemoveVideosFromBoardResult"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; get_related_models: { parameters: { query?: never; @@ -29769,8 +27995,6 @@ export interface operations { query?: string | null; /** @description Whether to include/exclude recent workflows */ has_been_opened?: boolean | null; - /** @description Whether to include/exclude published workflows */ - is_published?: boolean | null; }; header?: never; path?: never; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 8b0cafe46b3..96d15409b85 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -14,9 +14,6 @@ export type GetImageNamesResult = paths['/api/v1/images/names']['get']['responses']['200']['content']['application/json']; export type GetImageNamesArgs = NonNullable; -export type GetVideoIdsResult = 
paths['/api/v1/videos/ids']['get']['responses']['200']['content']['application/json']; -export type GetVideoIdsArgs = NonNullable; - export type ListBoardsArgs = NonNullable; export type CreateBoardArg = paths['/api/v1/boards/']['post']['parameters']['query']; @@ -76,35 +73,10 @@ const _zImageDTO = z.object({ }); export type ImageDTO = z.infer; assert>(); -export const isImageDTO = (dto: ImageDTO | VideoDTO): dto is ImageDTO => { - return 'image_name' in dto; -}; export type BoardDTO = S['BoardDTO']; export type OffsetPaginatedResults_ImageDTO_ = S['OffsetPaginatedResults_ImageDTO_']; -// Videos -const _zVideoDTO = z.object({ - video_id: z.string(), - video_url: z.string(), - thumbnail_url: z.string(), - width: z.number().int().gt(0), - height: z.number().int().gt(0), - created_at: z.string(), - updated_at: z.string(), - deleted_at: z.string().nullish(), - starred: z.boolean(), - board_id: z.string().nullish(), - is_intermediate: z.boolean(), - session_id: z.string().nullish(), - node_id: z.string().nullish(), -}); -export type VideoDTO = z.infer; -assert>(); -export const isVideoDTO = (dto: ImageDTO | VideoDTO): dto is VideoDTO => { - return 'video_id' in dto; -}; - // Model Configs export type AnyModelConfig = S['AnyModelConfig']; export type MainModelConfig = Extract; @@ -298,30 +270,14 @@ export const isFluxReduxModelConfig = (config: AnyModelConfig): config is FLUXRe return config.type === 'flux_redux'; }; -export const isChatGPT4oModelConfig = (config: AnyModelConfig): config is ChatGPT4oModelConfig => { - return config.type === 'main' && config.base === 'chatgpt-4o'; -}; - -export const isVideoModelConfig = (config: AnyModelConfig): config is VideoApiModelConfig => { - return config.type === 'video'; -}; - export const isUnknownModelConfig = (config: AnyModelConfig): config is UnknownModelConfig => { return config.type === 'unknown'; }; -export const isFluxKontextApiModelConfig = (config: AnyModelConfig): config is ApiModelConfig => { - return config.type === 'main' && config.base === 'flux-kontext'; -}; - export const isFluxKontextModelConfig = (config: AnyModelConfig): config is FLUXKontextModelConfig => { return config.type === 'main' && config.base === 'flux' && config.name.toLowerCase().includes('kontext'); }; -export const isGemini2_5ModelConfig = (config: AnyModelConfig): config is ApiModelConfig => { - return config.type === 'main' && config.base === 'gemini-2.5'; -}; - export const isNonRefinerMainModelConfig = (config: AnyModelConfig): config is MainModelConfig => { return config.type === 'main' && config.base !== 'sdxl-refiner'; }; diff --git a/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts b/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts index ca79a99ca49..772dc077fb5 100644 --- a/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts +++ b/invokeai/frontend/web/src/services/api/util/optimisticUpdates.ts @@ -1,5 +1,5 @@ import type { OrderDir } from 'features/gallery/store/types'; -import type { GetImageNamesResult, GetVideoIdsResult, ImageDTO, VideoDTO } from 'services/api/types'; +import type { GetImageNamesResult, ImageDTO } from 'services/api/types'; /** * Calculates the optimal insertion position for a new image in the names list. @@ -57,60 +57,3 @@ export function insertImageIntoNamesResult( total_count: currentResult.total_count + 1, }; } - -/** - * Calculates the optimal insertion position for a new image in the names list. 
- * For starred_first=true: starred images go to position 0, unstarred go after all starred images - * For starred_first=false: all new images go to position 0 (newest first) - */ -function calculateVideoInsertionPosition( - videoDTO: VideoDTO, - starredFirst: boolean, - starredCount: number, - orderDir: OrderDir = 'DESC' -): number { - if (!starredFirst) { - // When starred_first is false, insertion depends on order direction - return orderDir === 'DESC' ? 0 : Number.MAX_SAFE_INTEGER; - } - - // When starred_first is true - if (videoDTO.starred) { - // Starred images: beginning for desc, after existing starred for asc - return orderDir === 'DESC' ? 0 : starredCount; - } - - // Unstarred images go after all starred images - return orderDir === 'DESC' ? starredCount : Number.MAX_SAFE_INTEGER; -} - -/** - * Optimistically inserts a new image into the ImageNamesResult at the correct position - */ -export function insertVideoIntoGetVideoIdsResult( - currentResult: GetVideoIdsResult, - videoDTO: VideoDTO, - starredFirst: boolean, - orderDir: OrderDir = 'DESC' -): GetVideoIdsResult { - // Don't insert if the image is already in the list - if (currentResult.video_ids.includes(videoDTO.video_id)) { - return currentResult; - } - - const insertPosition = calculateVideoInsertionPosition(videoDTO, starredFirst, currentResult.starred_count, orderDir); - - const newVideoIds = [...currentResult.video_ids]; - // Handle MAX_SAFE_INTEGER by pushing to end - if (insertPosition >= newVideoIds.length) { - newVideoIds.push(videoDTO.video_id); - } else { - newVideoIds.splice(insertPosition, 0, videoDTO.video_id); - } - - return { - video_ids: newVideoIds, - starred_count: starredFirst && videoDTO.starred ? currentResult.starred_count + 1 : currentResult.starred_count, - total_count: currentResult.total_count + 1, - }; -} diff --git a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx index 57c8ab7b7b1..f9fafbbcdcd 100644 --- a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx +++ b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx @@ -5,21 +5,19 @@ import { selectAutoSwitch, selectGalleryView, selectGetImageNamesQueryArgs, - selectGetVideoIdsQueryArgs, selectListBoardsQueryArgs, selectSelectedBoardId, } from 'features/gallery/store/gallerySelectors'; import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useNodeExecutionState'; -import { isImageField, isImageFieldCollection, isVideoField } from 'features/nodes/types/common'; +import { isImageField, isImageFieldCollection } from 'features/nodes/types/common'; import { zNodeStatus } from 'features/nodes/types/invocation'; import type { LRUCache } from 'lru-cache'; import { boardsApi } from 'services/api/endpoints/boards'; import { getImageDTOSafe, imagesApi } from 'services/api/endpoints/images'; -import { getVideoDTOSafe, videosApi } from 'services/api/endpoints/videos'; -import type { ImageDTO, S, VideoDTO } from 'services/api/types'; +import type { ImageDTO, S } from 'services/api/types'; import { getCategories } from 'services/api/util'; -import { insertImageIntoNamesResult, insertVideoIntoGetVideoIdsResult } from 'services/api/util/optimisticUpdates'; +import { insertImageIntoNamesResult } from 'services/api/util/optimisticUpdates'; import { $lastProgressEvent } from 'services/events/stores'; import stableHash 
from 'stable-hash'; import type { Param0 } from 'tsafe'; @@ -188,157 +186,6 @@ export const buildOnInvocationComplete = ( } }; - const addVideosToGallery = async (data: S['InvocationCompleteEvent']) => { - if (nodeTypeDenylist.includes(data.invocation.type)) { - log.trace(`Skipping denylisted node type (${data.invocation.type})`); - return; - } - - const videoDTOs = await getResultVideoDTOs(data); - if (videoDTOs.length === 0) { - return; - } - - // For efficiency's sake, we want to minimize the number of dispatches and invalidations we do. - // We'll keep track of each change we need to make and do them all at once. - const boardTotalAdditions: Record = {}; - const getVideoIdsArg = selectGetVideoIdsQueryArgs(getState()); - - for (const videoDTO of videoDTOs) { - if (videoDTO.is_intermediate) { - return; - } - - const board_id = videoDTO.board_id ?? 'none'; - - boardTotalAdditions[board_id] = (boardTotalAdditions[board_id] || 0) + 1; - } - - // Update all the board image totals at once - const entries: Param0 = []; - for (const [boardId, amountToAdd] of objectEntries(boardTotalAdditions)) { - // upsertQueryEntries doesn't provide a "recipe" function for the update - we must provide the new value - // directly. So we need to select the board totals first. - const total = boardsApi.endpoints.getBoardImagesTotal.select(boardId)(getState()).data?.total; - if (total === undefined) { - // No cache exists for this board, so we can't update it. - continue; - } - entries.push({ - endpointName: 'getBoardImagesTotal', - arg: boardId, - value: { total: total + amountToAdd }, - }); - } - dispatch(boardsApi.util.upsertQueryEntries(entries)); - - dispatch( - boardsApi.util.updateQueryData('listAllBoards', selectListBoardsQueryArgs(getState()), (draft) => { - for (const board of draft) { - board.image_count = board.image_count + (boardTotalAdditions[board.board_id] ?? 0); - } - }) - ); - - /** - * Optimistic update and cache invalidation for image names queries that match this image's board and categories. - * - Optimistic update for the cache that does not have a search term (we cannot derive the correct insertion - * position when a search term is present). - * - Cache invalidation for the query that has a search term, so it will be refetched. - * - * Note: The image DTO objects are already implicitly cached by the getResultImageDTOs function. We do not need - * to explicitly cache them again here. - */ - for (const videoDTO of videoDTOs) { - // Override board_id and categories for this specific image to build the "expected" args for the query. - const videoSpecificArgs = { - board_id: videoDTO.board_id ?? 'none', - }; - - const expectedQueryArgs = { - ...getVideoIdsArg, - ...videoSpecificArgs, - search_term: '', - }; - - // If the cache for the query args provided here does not exist, RTK Query will ignore the update. - dispatch( - videosApi.util.updateQueryData( - 'getVideoIds', - { - ...getVideoIdsArg, - ...videoSpecificArgs, - search_term: '', - }, - (draft) => { - const updatedResult = insertVideoIntoGetVideoIdsResult( - draft, - videoDTO, - expectedQueryArgs.starred_first ?? true, - expectedQueryArgs.order_dir - ); - - draft.video_ids = updatedResult.video_ids; - draft.starred_count = updatedResult.starred_count; - draft.total_count = updatedResult.total_count; - } - ) - ); - - // If there is a search term present, we need to invalidate that query to ensure the search results are updated. 
- if (getVideoIdsArg.search_term) { - const expectedQueryArgs = { - ...getVideoIdsArg, - ...videoSpecificArgs, - }; - dispatch(videosApi.util.invalidateTags([{ type: 'VideoList', id: stableHash(expectedQueryArgs) }])); - } - } - - // No need to invalidate tags since we're doing optimistic updates - // Board totals are already updated above via upsertQueryEntries - - const autoSwitch = selectAutoSwitch(getState()); - - if (!autoSwitch) { - return; - } - - // Finally, we may need to autoswitch to the new video. We'll only do it for the last video in the list. - const lastVideoDTO = videoDTOs.at(-1); - - if (!lastVideoDTO) { - return; - } - - const { video_id } = lastVideoDTO; - const board_id = lastVideoDTO.board_id ?? 'none'; - - // With optimistic updates, we can immediately switch to the new image - const selectedBoardId = selectSelectedBoardId(getState()); - - // If the video is from a different board, switch to that board & select the video - otherwise just select the - // video. This implicitly changes the view to 'videos' if it was not already. - if (board_id !== selectedBoardId) { - dispatch( - boardIdSelected({ - boardId: board_id, - select: { - selection: [{ type: 'video', id: video_id }], - galleryView: 'videos', - }, - }) - ); - } else { - // Ensure we are on the 'videos' gallery view - that's where this video will be displayed - const galleryView = selectGalleryView(getState()); - if (galleryView !== 'videos') { - dispatch(galleryViewChanged('videos')); - } - // Select the video immediately since we've optimistically updated the cache - dispatch(itemSelected({ type: 'video', id: lastVideoDTO.video_id })); - } - }; const getResultImageDTOs = async (data: S['InvocationCompleteEvent']): Promise => { const { result } = data; const imageDTOs: ImageDTO[] = []; @@ -360,22 +207,6 @@ export const buildOnInvocationComplete = ( return imageDTOs; }; - const getResultVideoDTOs = async (data: S['InvocationCompleteEvent']): Promise => { - const { result } = data; - const videoDTOs: VideoDTO[] = []; - - for (const [_name, value] of objectEntries(result)) { - if (isVideoField(value)) { - const videoDTO = await getVideoDTOSafe(value.video_id); - if (videoDTO) { - videoDTOs.push(videoDTO); - } - } - } - - return videoDTOs; - }; - return async (data: S['InvocationCompleteEvent']) => { if (finishedQueueItemIds.has(data.item_id)) { log.trace({ data } as JsonObject, `Received event for already-finished queue item ${data.item_id}`); @@ -396,7 +227,6 @@ export const buildOnInvocationComplete = ( } await addImagesToGallery(data); - await addVideosToGallery(data); $lastProgressEvent.set(null); }; diff --git a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx index 4b57381f196..ba7a3ed19f3 100644 --- a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx +++ b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx @@ -1,9 +1,7 @@ import { Button, ExternalLink, Spinner, Text } from '@invoke-ai/ui-library'; -import { skipToken } from '@reduxjs/toolkit/query'; import { logger } from 'app/logging/logger'; import type { AppDispatch, AppGetState } from 'app/store/store'; import { getPrefixedId } from 'features/controlLayers/konva/util'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { discordLink, githubIssuesLink } from 'features/system/store/constants'; import { toast, toastApi } from 'features/toast/toast'; import { navigationApi } from 
'features/ui/layouts/navigation-api'; @@ -145,8 +143,7 @@ export const buildOnModelInstallError = (getState: AppGetState, dispatch: AppDis }; const HFUnauthorizedToastDescription = () => { - const isEnabled = useFeatureStatus('hfToken'); - const { data } = useGetHFTokenStatusQuery(isEnabled ? undefined : skipToken); + const { data } = useGetHFTokenStatusQuery(); const { t } = useTranslation(); diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx index 5804399f1f7..49876cee93b 100644 --- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx +++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx @@ -1,19 +1,9 @@ import { ExternalLink, Flex, Text } from '@invoke-ai/ui-library'; -import { isAnyOf } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; import { socketConnected } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected'; -import { $baseUrl } from 'app/store/nanostores/baseUrl'; -import { $bulkDownloadId } from 'app/store/nanostores/bulkDownloadId'; -import { $queueId } from 'app/store/nanostores/queueId'; import type { AppStore } from 'app/store/store'; -import { listenerMiddleware } from 'app/store/store'; import { deepClone } from 'common/util/deepClone'; import { forEach, isNil, round } from 'es-toolkit/compat'; -import { - $isInPublishFlow, - $outputNodeId, - $validationRunData, -} from 'features/nodes/components/sidePanel/workflow/publish'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useNodeExecutionState'; import { zNodeStatus } from 'features/nodes/types/invocation'; import ErrorToastDescription, { getTitle } from 'features/toast/ErrorToastDescription'; @@ -25,7 +15,6 @@ import type { ApiTagDescription } from 'services/api'; import { api, LIST_ALL_TAG, LIST_TAG } from 'services/api'; import { modelsApi } from 'services/api/endpoints/models'; import { queueApi } from 'services/api/endpoints/queue'; -import { workflowsApi } from 'services/api/endpoints/workflows'; import { buildOnInvocationComplete } from 'services/events/onInvocationComplete'; import { buildOnModelInstallError, DiscordLink, GitHubIssuesLink } from 'services/events/onModelInstallError'; import type { ClientToServerEvents, ServerToClientEvents } from 'services/events/types'; @@ -55,12 +44,8 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis log.debug('Connected'); setIsConnected(true); dispatch(socketConnected()); - const queue_id = $queueId.get(); - socket.emit('subscribe_queue', { queue_id }); - if (!$baseUrl.get()) { - const bulk_download_id = $bulkDownloadId.get(); - socket.emit('subscribe_bulk_download', { bulk_download_id }); - } + socket.emit('subscribe_queue', { queue_id: 'default' }); + socket.emit('subscribe_bulk_download', { bulk_download_id: 'default' }); $lastProgressEvent.set(null); }); @@ -371,7 +356,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis // we've got new status for the queue item, batch and queue const { item_id, - session_id, status, batch_status, error_type, @@ -441,67 +425,17 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis } else if (status === 'completed' || status === 'failed' || status === 'canceled') { finishedQueueItemIds.set(item_id, true); if (status === 'failed' && error_type) { - const isLocal = getState().config.isLocal ?? 
true; - const sessionId = session_id; - toast({ id: `INVOCATION_ERROR_${error_type}`, title: getTitle(error_type), status: 'error', duration: null, - updateDescription: isLocal, - description: ( - - ), + updateDescription: true, + description: , }); } // If the queue item is completed, failed, or cancelled, we want to clear the last progress event $lastProgressEvent.set(null); - // $progressImages.setKey(session_id, undefined); - - // When a validation run is completed, we want to clear the validation run batch ID & set the workflow as published - const validationRunData = $validationRunData.get(); - if (!validationRunData || batch_status.batch_id !== validationRunData.batchId || status !== 'completed') { - return; - } - - // The published status of a workflow is server state, provided to the client in by the getWorkflow query. - // After successfully publishing a workflow, we need to invalidate the query cache so that the published status is - // seen throughout the app. We also need to reset the publish flow state. - // - // But, there is a race condition! If we invalidate the query cache and then immediately clear the publish flow state, - // between the time when the publish state is cleared and the query is re-fetched, we will render the wrong UI. - // - // So, we really need to wait for the query re-fetch to complete before clearing the publish flow state. This isn't - // possible using the `invalidateTags()` API. But we can fudge it by adding a once-off listener for that query. - - listenerMiddleware.startListening({ - matcher: isAnyOf( - workflowsApi.endpoints.getWorkflow.matchFulfilled, - workflowsApi.endpoints.getWorkflow.matchRejected - ), - effect: (action, listenerApi) => { - if (workflowsApi.endpoints.getWorkflow.matchFulfilled(action)) { - // If this query was re-fetching the workflow that was just published, we can clear the publish flow state and - // unsubscribe from the listener - if (action.payload.workflow_id === validationRunData.workflowId) { - listenerApi.unsubscribe(); - $validationRunData.set(null); - $isInPublishFlow.set(false); - $outputNodeId.set(null); - } - } else if (workflowsApi.endpoints.getWorkflow.matchRejected(action)) { - // If the query failed, we can unsubscribe from the listener - listenerApi.unsubscribe(); - } - }, - }); - dispatch(workflowsApi.util.invalidateTags([{ type: 'Workflow', id: validationRunData.workflowId }])); } }); diff --git a/invokeai/frontend/web/src/services/events/stores.ts b/invokeai/frontend/web/src/services/events/stores.ts index 6d9b89c88a9..720ba920cf2 100644 --- a/invokeai/frontend/web/src/services/events/stores.ts +++ b/invokeai/frontend/web/src/services/events/stores.ts @@ -1,11 +1,9 @@ import { round } from 'es-toolkit/compat'; -import { atom, computed, map } from 'nanostores'; +import { atom, computed } from 'nanostores'; import type { S } from 'services/api/types'; import type { AppSocket } from 'services/events/types'; -import type { ManagerOptions, SocketOptions } from 'socket.io-client'; export const $socket = atom(null); -export const $socketOptions = map>({}); export const $isConnected = atom(false); export const $lastProgressEvent = atom(null); diff --git a/invokeai/frontend/web/src/services/events/useSocketIO.ts b/invokeai/frontend/web/src/services/events/useSocketIO.ts index 255bcee2fd4..cdbfb882247 100644 --- a/invokeai/frontend/web/src/services/events/useSocketIO.ts +++ b/invokeai/frontend/web/src/services/events/useSocketIO.ts @@ -1,7 +1,3 @@ -import { useStore } from '@nanostores/react'; -import { 
$authToken } from 'app/store/nanostores/authToken'; -import { $baseUrl } from 'app/store/nanostores/baseUrl'; -import { $isDebugging } from 'app/store/nanostores/isDebugging'; import { useAppStore } from 'app/store/storeHooks'; import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; import type { MapStore } from 'nanostores'; @@ -12,7 +8,7 @@ import type { AppSocket } from 'services/events/types'; import type { ManagerOptions, SocketOptions } from 'socket.io-client'; import { io } from 'socket.io-client'; -import { $isConnected, $lastProgressEvent, $socket, $socketOptions } from './stores'; +import { $isConnected, $lastProgressEvent, $socket } from './stores'; // Inject socket options and url into window for debugging declare global { @@ -27,34 +23,22 @@ declare global { export const useSocketIO = () => { useAssertSingleton('useSocketIO'); const store = useAppStore(); - const baseUrl = useStore($baseUrl); - const authToken = useStore($authToken); - const addlSocketOptions = useStore($socketOptions); const socketUrl = useMemo(() => { const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws'; - if (baseUrl) { - return baseUrl.replace(/^https?:\/\//i, ''); - } - return `${wsProtocol}://${window.location.host}`; - }, [baseUrl]); + }, []); const socketOptions = useMemo(() => { const options: Partial = { timeout: 60000, - path: baseUrl ? '/ws/socket.io' : `${window.location.pathname}ws/socket.io`, + path: `${window.location.pathname}ws/socket.io`, autoConnect: false, // achtung! removing this breaks the dynamic middleware forceNew: true, }; - if (authToken) { - options.auth = { token: authToken }; - options.transports = ['websocket', 'polling']; - } - - return { ...options, ...addlSocketOptions }; - }, [authToken, addlSocketOptions, baseUrl]); + return options; + }, []); useEffect(() => { const socket: AppSocket = io(socketUrl, socketOptions); @@ -64,8 +48,7 @@ export const useSocketIO = () => { socket.connect(); - if ($isDebugging.get() || import.meta.env.MODE === 'development') { - window.$socketOptions = $socketOptions; + if (import.meta.env.MODE === 'development') { // This is only enabled manually for debugging, console is allowed. /* eslint-disable-next-line no-console */ console.log('Socket initialized', socket); @@ -79,7 +62,7 @@ export const useSocketIO = () => { }); return () => { - if ($isDebugging.get() || import.meta.env.MODE === 'development') { + if (import.meta.env.MODE === 'development') { window.$socketOptions = undefined; // This is only enabled manually for debugging, console is allowed. 
/* eslint-disable-next-line no-console */ diff --git a/invokeai/frontend/web/vite.config.mts b/invokeai/frontend/web/vite.config.mts index a697148322d..d15c35d6bce 100644 --- a/invokeai/frontend/web/vite.config.mts +++ b/invokeai/frontend/web/vite.config.mts @@ -1,71 +1,12 @@ /// import react from '@vitejs/plugin-react-swc'; -import path from 'path'; import { visualizer } from 'rollup-plugin-visualizer'; -import type { PluginOption } from 'vite'; import { defineConfig } from 'vite'; -import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js'; -import dts from 'vite-plugin-dts'; import eslint from 'vite-plugin-eslint'; import tsconfigPaths from 'vite-tsconfig-paths'; import { loggerContextPlugin } from './vite-plugin-logger-context'; export default defineConfig(({ mode }) => { - if (mode === 'package') { - return { - base: './', - plugins: [ - react(), - eslint(), - tsconfigPaths(), - loggerContextPlugin(), - visualizer(), - dts({ - insertTypesEntry: true, - }), - cssInjectedByJsPlugin(), - ], - build: { - /** - * zone.js (via faro) requires max ES2015 to prevent spamming unhandled promise rejections. - * - * See: - * - https://github.com/grafana/faro-web-sdk/issues/566 - * - https://github.com/angular/angular/issues/51328 - * - https://github.com/open-telemetry/opentelemetry-js/issues/3030 - */ - target: 'ES2015', - cssCodeSplit: true, - lib: { - entry: path.resolve(__dirname, './src/index.ts'), - name: 'InvokeAIUI', - fileName: (format) => `invoke-ai-ui.${format}.js`, - }, - rollupOptions: { - external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'], - output: { - globals: { - react: 'React', - 'react-dom': 'ReactDOM', - '@emotion/react': 'EmotionReact', - '@invoke-ai/ui-library': 'UiLibrary', - }, - }, - }, - }, - resolve: { - alias: { - app: path.resolve(__dirname, './src/app'), - assets: path.resolve(__dirname, './src/assets'), - common: path.resolve(__dirname, './src/common'), - features: path.resolve(__dirname, './src/features'), - services: path.resolve(__dirname, './src/services'), - theme: path.resolve(__dirname, './src/theme'), - }, - }, - }; - } - return { base: './', plugins: [ diff --git a/invokeai/invocation_api/__init__.py b/invokeai/invocation_api/__init__.py index 6094b28c5dc..9069f6d4a7a 100644 --- a/invokeai/invocation_api/__init__.py +++ b/invokeai/invocation_api/__init__.py @@ -29,7 +29,6 @@ OutputField, UIComponent, UIType, - VideoField, WithBoard, WithMetadata, WithWorkflow, @@ -68,7 +67,6 @@ LatentsOutput, StringCollectionOutput, StringOutput, - VideoOutput, ) from invokeai.app.invocations.scheduler import SchedulerOutput from invokeai.app.services.boards.boards_common import BoardDTO @@ -115,7 +113,6 @@ "OutputField", "UIComponent", "UIType", - "VideoField", "WithBoard", "WithMetadata", "WithWorkflow", @@ -157,7 +154,6 @@ "LatentsOutput", "StringCollectionOutput", "StringOutput", - "VideoOutput", # invokeai.app.services.image_records.image_records_common "ImageCategory", # invokeai.app.services.boards.boards_common From 1958cb5f7615ccb6ec8a7db2e9b8632c5b2afacb Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:07:00 +1100 Subject: [PATCH 02/20] tidy: removing unused code paths 2 --- .../workflow_records_common.py | 1 - invokeai/frontend/web/package.json | 5 - invokeai/frontend/web/pnpm-lock.yaml | 803 ------------------ .../frontend/web/src/app/logging/logger.ts | 14 - .../frontend/web/src/app/logging/useLogger.ts | 10 - 
.../src/app/logging/useSyncLoggingConfig.ts | 20 +- .../app/store/nanostores/bulkDownloadId.ts | 9 - invokeai/frontend/web/src/app/store/store.ts | 2 +- .../frontend/web/src/app/types/invokeai.ts | 312 ------- .../components/ChangeBoardModal.tsx | 22 +- .../features/changeBoardModal/store/slice.ts | 6 +- .../features/controlLayers/store/filters.ts | 2 +- .../controlLayers/store/lorasSlice.ts | 3 +- .../src/features/controlLayers/store/types.ts | 24 +- .../src/features/controlLayers/store/util.ts | 15 - .../web/src/features/dnd/DndImageIcon.tsx | 2 +- .../MenuItems/ContextMenuItemChangeBoard.tsx | 7 +- .../MenuItems/ContextMenuItemCopy.tsx | 2 +- .../MenuItems/ContextMenuItemDeleteImage.tsx | 2 +- .../MenuItems/ContextMenuItemLoadWorkflow.tsx | 4 +- .../ContextMenuItemLocateInGalery.tsx | 22 +- ...etadataRecallActionsCanvasGenerateTabs.tsx | 12 +- .../PublishedWorkflowPanelContent.tsx | 50 -- .../builder/NodeFieldElementEditMode.tsx | 2 +- .../LockedWorkflowIcon.tsx | 23 - .../WorkflowLibrary/WorkflowLibraryModal.tsx | 63 +- .../WorkflowLibrarySideNav.tsx | 53 +- .../workflow/WorkflowLibrary/WorkflowList.tsx | 8 +- .../WorkflowLibrary/WorkflowListItem.tsx | 3 +- .../WorkflowLibrary/WorkflowSortControl.tsx | 17 +- .../hooks/useInputFieldUserTitleOrThrow.ts | 20 - .../hooks/useNodeTemplateTitleOrThrow.ts | 10 - .../nodes/hooks/useNodeUserTitleOrThrow.ts | 10 - .../web/src/features/nodes/store/selectors.ts | 10 +- .../nodes/store/workflowLibraryModal.ts | 2 +- .../nodes/store/workflowLibrarySlice.ts | 26 +- .../web/src/features/nodes/types/field.ts | 2 - .../src/features/nodes/types/invocation.ts | 4 +- .../web/src/features/nodes/types/workflow.ts | 2 +- .../components/Bbox/BboxAspectRatioSelect.tsx | 15 +- .../DimensionsAspectRatioSelect.tsx | 21 +- .../CancelAllExceptCurrentButton.tsx | 27 - .../features/queue/hooks/useEnqueueCanvas.ts | 4 - .../queue/hooks/useEnqueueGenerate.ts | 5 - .../queue/hooks/useEnqueueUpscaling.ts | 5 - .../queue/hooks/useEnqueueWorkflows.ts | 4 - .../StylePresetForm/StylePresetTypeField.tsx | 46 - .../frontend/web/src/features/toast/toast.ts | 2 +- .../web/src/features/ui/store/uiTypes.ts | 2 +- .../components/SaveWorkflowAsDialog.tsx | 20 +- .../web/src/services/api/endpoints/images.ts | 20 - .../services/api/endpoints/stylePresets.ts | 19 - .../src/services/api/endpoints/workflows.ts | 8 - .../frontend/web/src/services/api/schema.ts | 2 +- .../frontend/web/src/services/api/types.ts | 1 - .../src/services/api/util/tagInvalidation.ts | 17 - 56 files changed, 99 insertions(+), 1723 deletions(-) delete mode 100644 invokeai/frontend/web/src/app/logging/useLogger.ts delete mode 100644 invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts delete mode 100644 invokeai/frontend/web/src/app/types/invokeai.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/hooks/useInputFieldUserTitleOrThrow.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateTitleOrThrow.ts delete mode 100644 invokeai/frontend/web/src/features/nodes/hooks/useNodeUserTitleOrThrow.ts delete mode 100644 invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx delete mode 100644 
invokeai/frontend/web/src/features/stylePresets/components/StylePresetForm/StylePresetTypeField.tsx diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py index 909ed3b463b..e0cea37468d 100644 --- a/invokeai/app/services/workflow_records/workflow_records_common.py +++ b/invokeai/app/services/workflow_records/workflow_records_common.py @@ -31,7 +31,6 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum): class WorkflowCategory(str, Enum, metaclass=MetaEnum): User = "user" Default = "default" - Project = "project" class WorkflowMeta(BaseModel): diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index e7a533e2ae8..d6e220fd2df 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -69,7 +69,6 @@ "linkify-react": "^4.3.1", "linkifyjs": "^4.3.1", "lru-cache": "^11.1.0", - "media-chrome": "^4.13.0", "mtwist": "^1.0.2", "nanoid": "^5.1.5", "nanostores": "^1.0.1", @@ -88,13 +87,11 @@ "react-hotkeys-hook": "4.5.0", "react-i18next": "^15.5.3", "react-icons": "^5.5.0", - "react-player": "^3.3.1", "react-redux": "9.2.0", "react-resizable-panels": "^3.0.3", "react-textarea-autosize": "^8.5.9", "react-use": "^17.6.0", "react-virtuoso": "^4.13.0", - "redux-dynamic-middlewares": "^2.2.0", "redux-remember": "^5.2.0", "redux-undo": "^1.1.0", "rfdc": "^1.4.1", @@ -151,8 +148,6 @@ "type-fest": "^4.40.0", "typescript": "^5.8.3", "vite": "^7.0.5", - "vite-plugin-css-injected-by-js": "^3.5.2", - "vite-plugin-dts": "^4.5.3", "vite-plugin-eslint": "^1.8.1", "vite-tsconfig-paths": "^5.1.4", "vitest": "^3.1.2" diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index e80b7011165..04ba19ebce9 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -98,9 +98,6 @@ importers: lru-cache: specifier: ^11.1.0 version: 11.1.0 - media-chrome: - specifier: ^4.13.0 - version: 4.13.0(react@18.3.1) mtwist: specifier: ^1.0.2 version: 1.0.2 @@ -155,9 +152,6 @@ importers: react-icons: specifier: ^5.5.0 version: 5.5.0(react@18.3.1) - react-player: - specifier: ^3.3.1 - version: 3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react-redux: specifier: 9.2.0 version: 9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1) @@ -173,9 +167,6 @@ importers: react-virtuoso: specifier: ^4.13.0 version: 4.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - redux-dynamic-middlewares: - specifier: ^2.2.0 - version: 2.2.0 redux-remember: specifier: ^5.2.0 version: 5.2.0(redux@5.0.1) @@ -327,12 +318,6 @@ importers: vite: specifier: ^7.0.5 version: 7.0.5(@types/node@22.16.0)(jiti@2.4.2) - vite-plugin-css-injected-by-js: - specifier: ^3.5.2 - version: 3.5.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)) - vite-plugin-dts: - specifier: ^4.5.3 - version: 4.5.4(@types/node@22.16.0)(rollup@4.45.1)(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)) vite-plugin-eslint: specifier: ^1.8.1 version: 1.8.1(eslint@9.31.0(jiti@2.4.2))(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)) @@ -930,44 +915,6 @@ packages: '@types/react': '>=16' react: '>=16' - '@microsoft/api-extractor-model@7.30.6': - resolution: {integrity: sha512-znmFn69wf/AIrwHya3fxX6uB5etSIn6vg4Q4RB/tb5VDDs1rqREc+AvMC/p19MUN13CZ7+V/8pkYPTj7q8tftg==} - - '@microsoft/api-extractor@7.52.8': - resolution: {integrity: 
sha512-cszYIcjiNscDoMB1CIKZ3My61+JOhpERGlGr54i6bocvGLrcL/wo9o+RNXMBrb7XgLtKaizZWUpqRduQuHQLdg==} - hasBin: true - - '@microsoft/tsdoc-config@0.17.1': - resolution: {integrity: sha512-UtjIFe0C6oYgTnad4q1QP4qXwLhe6tIpNTRStJ2RZEPIkqQPREAwE5spzVxsdn9UaEMUqhh0AqSx3X4nWAKXWw==} - - '@microsoft/tsdoc@0.15.1': - resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==} - - '@mux/mux-data-google-ima@0.2.8': - resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==} - - '@mux/mux-player-react@3.5.3': - resolution: {integrity: sha512-f0McZbIXYDkzecFwhhkf0JgEInPnsOClgBqBhkdhRlLRdrAzMATib+D3Di3rPkRHNH7rc/WWORvSxgJz6m6zkA==} - peerDependencies: - '@types/react': ^17.0.0 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0 - '@types/react-dom': '*' - react: ^17.0.2 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0 - react-dom: ^17.0.2 || ^17.0.2-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0 - peerDependenciesMeta: - '@types/react': - optional: true - '@types/react-dom': - optional: true - - '@mux/mux-player@3.5.3': - resolution: {integrity: sha512-uXKFXbdtioAi+clSVfD60Rw4r7OvA62u2jV6aar9loW9qMsmKv8LU+8uaIaWQjyAORp6E0S37GOVjo72T6O2eQ==} - - '@mux/mux-video@0.26.1': - resolution: {integrity: sha512-gkMdBAgNlB4+krANZHkQFzYWjWeNsJz69y1/hnPtmNQnpvW+O7oc71OffcZrbblyibSxWMQ6MQpYmBVjXlp6sA==} - - '@mux/playback-core@0.30.1': - resolution: {integrity: sha512-rnO1NE9xHDyzbAkmE6ygJYcD7cyyMt7xXqWTykxlceaoSXLjUqgp42HDio7Lcidto4x/O4FIa7ztjV2aCBCXgQ==} - '@nanostores/react@1.0.0': resolution: {integrity: sha512-eDduyNy+lbQJMg6XxZ/YssQqF6b4OXMFEZMYKPJCCmBevp1lg0g+4ZRi94qGHirMtsNfAWKNwsjOhC+q1gvC+A==} engines: {node: ^20.0.0 || >=22.0.0} @@ -1383,28 +1330,6 @@ packages: '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} - '@rushstack/node-core-library@5.13.1': - resolution: {integrity: sha512-5yXhzPFGEkVc9Fu92wsNJ9jlvdwz4RNb2bMso+/+TH0nMm1jDDDsOIf4l8GAkPxGuwPw5DH24RliWVfSPhlW/Q==} - peerDependencies: - '@types/node': '*' - peerDependenciesMeta: - '@types/node': - optional: true - - '@rushstack/rig-package@0.5.3': - resolution: {integrity: sha512-olzSSjYrvCNxUFZowevC3uz8gvKr3WTpHQ7BkpjtRpA3wK+T0ybep/SRUMfr195gBzJm5gaXw0ZMgjIyHqJUow==} - - '@rushstack/terminal@0.15.3': - resolution: {integrity: sha512-DGJ0B2Vm69468kZCJkPj3AH5nN+nR9SPmC0rFHtzsS4lBQ7/dgOwtwVxYP7W9JPDMuRBkJ4KHmWKr036eJsj9g==} - peerDependencies: - '@types/node': '*' - peerDependenciesMeta: - '@types/node': - optional: true - - '@rushstack/ts-command-line@5.0.1': - resolution: {integrity: sha512-bsbUucn41UXrQK7wgM8CNM/jagBytEyJqXw/umtI8d68vFm1Jwxh1OtLrlW7uGZgjCWiiPH6ooUNa1aVsuVr3Q==} - '@socket.io/component-emitter@3.1.2': resolution: {integrity: sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==} @@ -1477,9 +1402,6 @@ packages: typescript: optional: true - '@svta/common-media-library@0.12.4': - resolution: {integrity: sha512-9EuOoaNmz7JrfGwjsrD9SxF9otU5TNMnbLu1yU4BeLK0W5cDxVXXR58Z89q9u2AnHjIctscjMTYdlqQ1gojTuw==} - '@swc/core-darwin-arm64@1.12.9': resolution: {integrity: sha512-GACFEp4nD6V+TZNR2JwbMZRHB+Yyvp14FrcmB6UCUYmhuNWjkxi+CLnEvdbuiKyQYv0zA+TRpCHZ+whEs6gwfA==} engines: {node: '>=10'} @@ -1572,9 +1494,6 @@ packages: '@tybys/wasm-util@0.9.0': resolution: {integrity: sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==} - '@types/argparse@1.0.38': - 
resolution: {integrity: sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==} - '@types/aria-query@5.0.4': resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} @@ -1734,12 +1653,6 @@ packages: resolution: {integrity: sha512-YzfhzcTnZVPiLfP/oeKtDp2evwvHLMe0LOy7oe+hb9KKIumLNohYS9Hgp1ifwpu42YWxhZE8yieggz6JpqO/1w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@vercel/edge@1.2.2': - resolution: {integrity: sha512-1+y+f6rk0Yc9ss9bRDgz/gdpLimwoRteKHhrcgHvEpjbP1nyT3ByqEMWm2BTcpIO5UtDmIFXc8zdq4LR190PDA==} - - '@vimeo/player@2.29.0': - resolution: {integrity: sha512-9JjvjeqUndb9otCCFd0/+2ESsLk7VkDE6sxOBy9iy2ukezuQbplVRi+g9g59yAurKofbmTi/KcKxBGO/22zWRw==} - '@vitejs/plugin-react-swc@3.10.2': resolution: {integrity: sha512-xD3Rdvrt5LgANug7WekBn1KhcvLn1H3jNBfJRL3reeOIua/WnZOEV5qi5qIBq5T8R0jUDmRtxuvk4bPhzGHDWw==} peerDependencies: @@ -1788,35 +1701,6 @@ packages: '@vitest/utils@3.2.4': resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} - '@volar/language-core@2.4.17': - resolution: {integrity: sha512-chmRZMbKmcGpKMoO7Reb70uiLrzo0KWC2CkFttKUuKvrE+VYgi+fL9vWMJ07Fv5ulX0V1TAyyacN9q3nc5/ecA==} - - '@volar/source-map@2.4.17': - resolution: {integrity: sha512-QDybtQyO3Ms/NjFqNHTC5tbDN2oK5VH7ZaKrcubtfHBDj63n2pizHC3wlMQ+iT55kQXZUUAbmBX5L1C8CHFeBw==} - - '@volar/typescript@2.4.17': - resolution: {integrity: sha512-3paEFNh4P5DkgNUB2YkTRrfUekN4brAXxd3Ow1syMqdIPtCZHbUy4AW99S5RO/7mzyTWPMdDSo3mqTpB/LPObQ==} - - '@vue/compiler-core@3.5.17': - resolution: {integrity: sha512-Xe+AittLbAyV0pabcN7cP7/BenRBNcteM4aSDCtRvGw0d9OL+HG1u/XHLY/kt1q4fyMeZYXyIYrsHuPSiDPosA==} - - '@vue/compiler-dom@3.5.17': - resolution: {integrity: sha512-+2UgfLKoaNLhgfhV5Ihnk6wB4ljyW1/7wUIog2puUqajiC29Lp5R/IKDdkebh9jTbTogTbsgB+OY9cEWzG95JQ==} - - '@vue/compiler-vue2@2.7.16': - resolution: {integrity: sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==} - - '@vue/language-core@2.2.0': - resolution: {integrity: sha512-O1ZZFaaBGkKbsRfnVH1ifOK1/1BUkyK+3SQsfnh6PmMmD4qJcTU8godCeA96jjDRTL6zgnK7YzCHfaUlH2r0Mw==} - peerDependencies: - typescript: '*' - peerDependenciesMeta: - typescript: - optional: true - - '@vue/shared@3.5.17': - resolution: {integrity: sha512-CabR+UN630VnsJO/jHWYBC1YVXyMq94KKp6iF5MQgZJs5I8cmjw6oVMO1oDbtBkENSHSSn/UadWlW/OAgdmKrg==} - '@xobotyi/scrollbar-width@1.9.5': resolution: {integrity: sha512-N8tkAACJx2ww8vFMneJmaAgmjAG1tnVBZJRLRcx061tmsLRZHSEZSLuGWnwPtunsSLvSqXQ2wfp7Mgqg1I+2dQ==} @@ -1855,34 +1739,9 @@ packages: resolution: {integrity: sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==} engines: {node: '>= 14'} - ajv-draft-04@1.0.0: - resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} - peerDependencies: - ajv: ^8.5.0 - peerDependenciesMeta: - ajv: - optional: true - - ajv-formats@3.0.1: - resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} - peerDependencies: - ajv: ^8.0.0 - peerDependenciesMeta: - ajv: - optional: true - ajv@6.12.6: resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} - ajv@8.12.0: - resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} - - ajv@8.13.0: - resolution: 
{integrity: sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==} - - alien-signals@0.4.14: - resolution: {integrity: sha512-itUAVzhczTmP2U5yX67xVpsbbOiquusbWVyA9N+sy6+r6YVbFkahXvNCeEPWEOMhwDYwbVbGHFkVL03N9I5g+Q==} - ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -1907,9 +1766,6 @@ packages: resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} engines: {node: '>=12'} - argparse@1.0.10: - resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} - argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} @@ -1992,15 +1848,6 @@ packages: base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} - bcp-47-match@2.0.3: - resolution: {integrity: sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==} - - bcp-47-normalize@2.3.0: - resolution: {integrity: sha512-8I/wfzqQvttUFz7HVJgIZ7+dj3vUaIyIxYXaTRP1YWoSDfzt6TUmxaKZeuXR62qBmYr+nvuWINFRl6pZ5DlN4Q==} - - bcp-47@2.1.0: - resolution: {integrity: sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w==} - better-opn@3.0.2: resolution: {integrity: sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==} engines: {node: '>=12.0.0'} @@ -2056,14 +1903,6 @@ packages: caniuse-lite@1.0.30001727: resolution: {integrity: sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q==} - castable-video@1.1.10: - resolution: {integrity: sha512-/T1I0A4VG769wTEZ8gWuy1Crn9saAfRTd1UYTb8xbOPlN78+zOi/1nU2dD5koNkfE5VWvgabkIqrGKmyNXOjSQ==} - - ce-la-react@0.3.1: - resolution: {integrity: sha512-g0YwpZDPIwTwFumGTzNHcgJA6VhFfFCJkSNdUdC04br2UfU+56JDrJrJva3FZ7MToB4NDHAFBiPE/PZdNl1mQA==} - peerDependencies: - react: '>=17.0.0' - chai@5.2.0: resolution: {integrity: sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==} engines: {node: '>=12'} @@ -2114,18 +1953,12 @@ packages: resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==} engines: {node: '>=0.8'} - cloudflare-video-element@1.3.3: - resolution: {integrity: sha512-qrHzwLmUhisoIuEoKc7iBbdzBNj2Pi7ThHslU/9U/6PY9DEvo4mh/U+w7OVuzXT9ks7ZXfARvDBfPAaMGF/hIg==} - cmdk@1.1.1: resolution: {integrity: sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==} peerDependencies: react: ^18 || ^19 || ^19.0.0-rc react-dom: ^18 || ^19 || ^19.0.0-rc - codem-isoboxer@0.3.10: - resolution: {integrity: sha512-eNk3TRV+xQMJ1PEj0FQGY8KD4m0GPxT487XJ+Iftm7mVa9WpPFDMWqPt+46buiP5j5Wzqe5oMIhqBcAeKfygSA==} - color-convert@2.0.1: resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} engines: {node: '>=7.0.0'} @@ -2153,12 +1986,6 @@ packages: engines: {node: '>=18'} hasBin: true - confbox@0.1.8: - resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} - - confbox@0.2.2: - resolution: {integrity: 
sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==} - convert-source-map@1.9.0: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} @@ -2195,9 +2022,6 @@ packages: csstype@3.1.3: resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} - custom-media-element@1.4.5: - resolution: {integrity: sha512-cjrsQufETwxjvwZbYbKBCJNvmQ2++G9AvT45zDi7NXL9k2PdVcs2h0jQz96J6G4TMKRCcEsoJ+QTgQD00Igtjw==} - d3-color@3.1.0: resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} engines: {node: '>=12'} @@ -2236,12 +2060,6 @@ packages: resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} engines: {node: '>=12'} - dash-video-element@0.1.6: - resolution: {integrity: sha512-4gHShaQjcFv6diX5EzB6qAdUGKlIUGGZY8J8yp2pQkWqR0jX4c6plYy0cFraN7mr0DZINe8ujDN1fssDYxJjcg==} - - dashjs@5.0.3: - resolution: {integrity: sha512-TXndNnCUjFjF2nYBxDVba+hWRpVkadkQ8flLp7kHkem+5+wZTfRShJCnVkPUosmjS0YPE9fVNLbYPJxHBeQZvA==} - data-view-buffer@1.0.2: resolution: {integrity: sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==} engines: {node: '>= 0.4'} @@ -2254,9 +2072,6 @@ packages: resolution: {integrity: sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==} engines: {node: '>= 0.4'} - de-indent@1.0.2: - resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==} - debug@3.2.7: resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} peerDependencies: @@ -2371,10 +2186,6 @@ packages: resolution: {integrity: sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==} engines: {node: '>=10.0.0'} - entities@4.5.0: - resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} - engines: {node: '>=0.12'} - error-ex@1.3.2: resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} @@ -2579,9 +2390,6 @@ packages: resolution: {integrity: sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==} engines: {node: '>=12.0.0'} - exsolve@1.0.7: - resolution: {integrity: sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==} - fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} @@ -2815,22 +2623,9 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} - he@1.2.0: - resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} - hasBin: true - - hls-video-element@1.5.6: - resolution: {integrity: sha512-KPdvSR+oBJPiCVb+m6pd2mn3rJEjNbaK8pGhSkxFI2pmyvZIeTVQrPbEO9PT/juwXHwhvCoKJnNxAuFwJG9H5A==} - - hls.js@1.6.9: - resolution: {integrity: sha512-q7qPrri6GRwjcNd7EkFCmhiJ6PBIxeUsdxKbquBkQZpg9jAnp6zSAeN9eEWFlOB09J8JfzAQGoXL5ZEAltjO9g==} - hoist-non-react-statics@3.3.2: resolution: {integrity: 
sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} - html-entities@2.6.0: - resolution: {integrity: sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==} - html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} @@ -2869,9 +2664,6 @@ packages: resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} engines: {node: '>= 4'} - immediate@3.0.6: - resolution: {integrity: sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==} - immer@10.1.1: resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==} @@ -2879,13 +2671,6 @@ packages: resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} engines: {node: '>=6'} - import-lazy@4.0.0: - resolution: {integrity: sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==} - engines: {node: '>=8'} - - imsc@1.1.5: - resolution: {integrity: sha512-V8je+CGkcvGhgl2C1GlhqFFiUOIEdwXbXLiu1Fcubvvbo+g9inauqT3l0pNYXGoLPBj3jxtZz9t+wCopMkwadQ==} - imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} @@ -2908,12 +2693,6 @@ packages: resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} engines: {node: '>= 0.4'} - is-alphabetical@2.0.1: - resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} - - is-alphanumerical@2.0.1: - resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} - is-array-buffer@3.0.5: resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==} engines: {node: '>= 0.4'} @@ -2949,9 +2728,6 @@ packages: resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==} engines: {node: '>= 0.4'} - is-decimal@2.0.1: - resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} - is-docker@2.2.1: resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==} engines: {node: '>=8'} @@ -3074,9 +2850,6 @@ packages: resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} hasBin: true - jju@1.4.0: - resolution: {integrity: sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==} - js-cookie@2.2.1: resolution: {integrity: sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==} @@ -3146,9 +2919,6 @@ packages: '@types/node': '>=18' typescript: '>=5.0.4' - kolorist@1.8.0: - resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} - konva@9.3.22: resolution: {integrity: sha512-yQI5d1bmELlD/fowuyfOp9ff+oamg26WOCkyqUyc+nczD/lhRa3EvD2MZOoc4c1293TAubW9n34fSQLgSeEgSw==} @@ -3156,9 +2926,6 @@ packages: resolution: {integrity: 
sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} - lie@3.1.1: - resolution: {integrity: sha512-RiNhHysUjhrDQntfYSfY4MU24coXXdEOgw9WGcKHNeEwffDYbF//u87M1EWaMGzuFoSbqW0C9C6lEEhDOAswfw==} - lines-and-columns@1.2.4: resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} @@ -3179,13 +2946,6 @@ packages: resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - local-pkg@1.1.1: - resolution: {integrity: sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg==} - engines: {node: '>=14'} - - localforage@1.10.0: - resolution: {integrity: sha512-14/H1aX7hzBBmmh7sGPd+AOMkkIrHM3Z1PAyGgZigA1H1p5O5ANnMyWzvpAETtG68/dC4pC0ncy3+PPGzXZHPg==} - locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} @@ -3224,10 +2984,6 @@ packages: lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} - lru-cache@6.0.0: - resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} - engines: {node: '>=10'} - lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true @@ -3252,15 +3008,6 @@ packages: mdn-data@2.0.14: resolution: {integrity: sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==} - media-chrome@4.11.1: - resolution: {integrity: sha512-+2niDc4qOwlpFAjwxg1OaizK/zKV6y7QqGm4nBFEVlSaG0ZBgOmfc4IXAPiirZqAlZGaFFUaMqCl1SpGU0/naA==} - - media-chrome@4.13.0: - resolution: {integrity: sha512-DfX/Hwxjae/tEHjr1tVnV/6XDFHriMXI1ev8Ji4Z/YwXnqMhNfRtvNsMjefnQK5pkMS/9hC+jmdS+VDWZfsSIw==} - - media-tracks@0.3.3: - resolution: {integrity: sha512-9P2FuUHnZZ3iji+2RQk7Zkh5AmZTnOG5fODACnjhCVveX1McY3jmCRHofIEI+yTBqplz7LXy48c7fQ3Uigp88w==} - memoize-one@6.0.0: resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} @@ -3280,9 +3027,6 @@ packages: resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} engines: {node: '>=4'} - minimatch@3.0.8: - resolution: {integrity: sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==} - minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} @@ -3301,9 +3045,6 @@ packages: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} - mlly@1.7.4: - resolution: {integrity: sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw==} - moo@0.5.2: resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==} @@ -3323,15 +3064,6 @@ packages: mtwist@1.0.2: resolution: {integrity: sha512-eRsSga5jkLg7nNERPOV8vDNxgSwuEcj5upQfJcT0gXfJwXo3pMc7xOga0fu8rXHyrxzl7GFVWWDuaPQgpKDvgw==} - muggle-string@0.4.1: - resolution: {integrity: 
sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==} - - mux-embed@5.11.0: - resolution: {integrity: sha512-uczzXVraqMRmyYmpGh2zthTmBKvvc5D5yaVKQRgGhFOnF7E4nkhqNkdkQc4C0WTPzdqdPl5OtCelNWMF4tg5RQ==} - - mux-embed@5.9.0: - resolution: {integrity: sha512-wmunL3uoPhma/tWy8PrDPZkvJpXvSFBwbD3KkC4PG8Ztjfb1X3hRJwGUAQyRz7z99b/ovLm2UTTitrkvStjH4w==} - nano-css@5.6.2: resolution: {integrity: sha512-+6bHaC8dSDGALM1HJjOHVXpuastdu2xFoZlC77Jh4cg+33Zcgm+Gxd+1xsnpZK14eyHObSp82+ll5y3SX75liw==} peerDependencies: @@ -3352,9 +3084,6 @@ packages: resolution: {integrity: sha512-kNZ9xnoJYKg/AfxjrVL4SS0fKX++4awQReGqWnwTRHxeHGZ1FJFVgTqr/eMrNQdp0Tz7M7tG/TDaX8QfHDwVCw==} engines: {node: ^20.0.0 || >=22.0.0} - native-promise-only@0.8.1: - resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==} - natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} @@ -3488,9 +3217,6 @@ packages: resolution: {integrity: sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==} engines: {node: '>=18'} - path-browserify@1.0.1: - resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} - path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} @@ -3539,15 +3265,6 @@ packages: resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} - pkg-types@1.3.1: - resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} - - pkg-types@2.2.0: - resolution: {integrity: sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ==} - - player.style@0.1.9: - resolution: {integrity: sha512-aFmIhHMrnAP8YliFYFMnRw+5AlHqBvnqWy4vHGo2kFxlC+XjmTXqgg62qSxlE8ubAY83c0ViEZGYglSJi6mGCA==} - pluralize@8.0.0: resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==} engines: {node: '>=4'} @@ -3580,9 +3297,6 @@ packages: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} - quansync@0.2.10: - resolution: {integrity: sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==} - query-string@9.2.2: resolution: {integrity: sha512-pDSIZJ9sFuOp6VnD+5IkakSVf+rICAuuU88Hcsr6AKL0QtxSIfVuKiVP2oahFI7tk3CRSexwV+Ya6MOoTxzg9g==} engines: {node: '>=18'} @@ -3706,13 +3420,6 @@ packages: react-is@17.0.2: resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} - react-player@3.3.1: - resolution: {integrity: sha512-wE/xLloneXZ1keelFCaNeIFVNUp4/7YoUjfHjwF945aQzsbDKiIB0LQuCchGL+la0Y1IybxnR0R6Cm3AiqInMw==} - peerDependencies: - '@types/react': ^17.0.0 || ^18 || ^19 - react: ^17.0.2 || ^18 || ^19 - react-dom: ^17.0.2 || ^18 || ^19 - react-redux@9.2.0: resolution: {integrity: sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==} peerDependencies: @@ -3819,9 +3526,6 @@ packages: resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} engines: {node: 
'>=8'} - redux-dynamic-middlewares@2.2.0: - resolution: {integrity: sha512-GHESQC+Y0PV98ZBoaC6br6cDOsNiM1Cu4UleGMqMWCXX03jIr3BoozYVrRkLVVAl4sC216chakMnZOu6SwNdGA==} - redux-remember@5.2.0: resolution: {integrity: sha512-HqXx9V+DKzgBzpiIT5dyiXZgiiSB6zaMs4sIscwQ+Z0zVwUvJh20mqPEQWo4wbthuo5+5jGrS7Yfvv4HyOuAFw==} peerDependencies: @@ -3947,9 +3651,6 @@ packages: resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==} engines: {node: '>=10'} - sax@1.2.1: - resolution: {integrity: sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==} - scheduler@0.23.2: resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==} @@ -3964,11 +3665,6 @@ packages: resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} hasBin: true - semver@7.5.4: - resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==} - engines: {node: '>=10'} - hasBin: true - semver@7.7.2: resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} engines: {node: '>=10'} @@ -4072,12 +3768,6 @@ packages: resolution: {integrity: sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==} engines: {node: '>=12'} - spotify-audio-element@1.0.2: - resolution: {integrity: sha512-YEovyyeJTJMzdSVqFw/Fx19e1gdcD4bmZZ/fWS0Ji58KTpvAT2rophgK87ocqpy6eJNSmIHikhgbRjGWumgZew==} - - sprintf-js@1.0.3: - resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - stable-hash@0.0.6: resolution: {integrity: sha512-0afH4mobqTybYZsXImQRLOjHV4gvOW+92HdUIax9t7a8d9v54KWykEuMVIcXhD9BCi+w3kS4x7O6fmZQ3JlG/g==} @@ -4112,10 +3802,6 @@ packages: prettier: optional: true - string-argv@0.3.2: - resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==} - engines: {node: '>=0.6.19'} - string-width@4.2.3: resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} engines: {node: '>=8'} @@ -4183,9 +3869,6 @@ packages: stylis@4.3.6: resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} - super-media-element@1.4.2: - resolution: {integrity: sha512-9pP/CVNp4NF2MNlRzLwQkjiTgKKe9WYXrLh9+8QokWmMxz+zt2mf1utkWLco26IuA3AfVcTb//qtlTIjY3VHxA==} - supports-color@10.0.0: resolution: {integrity: sha512-HRVVSbCCMbj7/kdWF9Q+bbckjBHLtHMEoJWlkmYzzdwhYMkjkOwubLM6t7NbWKjgKamGDrWL1++KrjUO1t9oAQ==} engines: {node: '>=18'} @@ -4210,9 +3893,6 @@ packages: resolution: {integrity: sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg==} engines: {node: '>=10'} - tiktok-video-element@0.1.0: - resolution: {integrity: sha512-PVWUlpDdQ/LPXi7x4/furfD7Xh1L72CgkGCaMsZBIjvxucMGm1DDPJdM9IhWBFfo6tuR4cYVO/v596r6GG/lvQ==} - tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} @@ -4298,9 +3978,6 @@ packages: tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} - twitch-video-element@0.1.2: - resolution: {integrity: 
sha512-/up4KiWiTYiav+CUo+/DbV8JhP4COwEKSo8h1H/Zft/5NzZ/ZiIQ48h7erFKvwzalN0GfkEGGIfwIzAO0h7FHQ==} - type-check@0.4.0: resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} engines: {node: '>= 0.8.0'} @@ -4325,23 +4002,11 @@ packages: resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==} engines: {node: '>= 0.4'} - typescript@5.8.2: - resolution: {integrity: sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==} - engines: {node: '>=14.17'} - hasBin: true - typescript@5.8.3: resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} engines: {node: '>=14.17'} hasBin: true - ua-parser-js@1.0.40: - resolution: {integrity: sha512-z6PJ8Lml+v3ichVojCiB8toQJBuwR42ySM4ezjXIqXK3M0HczmKQ3LF4rhU55PfD99KEEXQG6yb7iOMyvYuHew==} - hasBin: true - - ufo@1.6.1: - resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==} - unbox-primitive@1.1.0: resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==} engines: {node: '>= 0.4'} @@ -4443,28 +4108,11 @@ packages: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true - vimeo-video-element@1.5.3: - resolution: {integrity: sha512-OQWyGS9nTouMqfRJyvmAm/n6IRhZ7x3EfPAef+Q+inGBeHa3SylDbtyeB/rEBd4B/T/lcYBW3rjaD9W2DRYkiQ==} - vite-node@3.2.4: resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} hasBin: true - vite-plugin-css-injected-by-js@3.5.2: - resolution: {integrity: sha512-2MpU/Y+SCZyWUB6ua3HbJCrgnF0KACAsmzOQt1UvRVJCGF6S8xdA3ZUhWcWdM9ivG4I5az8PnQmwwrkC2CAQrQ==} - peerDependencies: - vite: '>2.0.0-0' - - vite-plugin-dts@4.5.4: - resolution: {integrity: sha512-d4sOM8M/8z7vRXHHq/ebbblfaxENjogAAekcfcDCCwAyvGqnPrc7f4NZbvItS+g4WTgerW0xDwSz5qz11JT3vg==} - peerDependencies: - typescript: '*' - vite: '*' - peerDependenciesMeta: - vite: - optional: true - vite-plugin-eslint@1.8.1: resolution: {integrity: sha512-PqdMf3Y2fLO9FsNPmMX+//2BF5SF8nEWspZdgl4kSt7UvHDRHVVfHvxsD7ULYzZrJDGRxR81Nq7TOFgwMnUang==} peerDependencies: @@ -4551,9 +4199,6 @@ packages: resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} engines: {node: '>=0.10.0'} - vscode-uri@3.1.0: - resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} - walk-up-path@4.0.0: resolution: {integrity: sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A==} engines: {node: 20 || >=22} @@ -4561,10 +4206,6 @@ packages: wcwidth@1.0.1: resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==} - weakmap-polyfill@2.0.4: - resolution: {integrity: sha512-ZzxBf288iALJseijWelmECm/1x7ZwQn3sMYIkDr2VvZp7r6SEKuT8D0O9Wiq6L9Nl5mazrOMcmiZE/2NCenaxw==} - engines: {node: '>=8.10.0'} - webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} @@ -4600,9 +4241,6 @@ packages: engines: {node: '>=8'} hasBin: true - wistia-video-element@1.3.3: - resolution: {integrity: 
sha512-ZVC8HH8uV3mQGcSz10MACLDalao/0YdVverNN4GNFsOXiumfqSiZnRVc8WZEywgVckBkR7+yerQYESYPDzvTfQ==} - word-wrap@1.2.5: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} @@ -4650,9 +4288,6 @@ packages: yallist@3.1.1: resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} - yallist@4.0.0: - resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} - yaml-ast-parser@0.0.43: resolution: {integrity: sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==} @@ -4676,9 +4311,6 @@ packages: resolution: {integrity: sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==} engines: {node: '>=12.20'} - youtube-video-element@1.6.1: - resolution: {integrity: sha512-FDRgXlPxpe1bh6HlhL6GfJVcvVNaZKCcLEZ90X1G3Iu+z2g2cIhm2OWj9abPZq1Zqit6SY7Gwh13H9g7acoBnQ==} - zod-validation-error@3.5.3: resolution: {integrity: sha512-OT5Y8lbUadqVZCsnyFaTQ4/O2mys4tj7PqhdbBCp7McPwvIEKfPtdA6QfPeFQK2/Rz5LgwmAXRJTugBNBi0btw==} engines: {node: '>=18.0.0'} @@ -5371,78 +5003,6 @@ snapshots: '@types/react': 18.3.23 react: 18.3.1 - '@microsoft/api-extractor-model@7.30.6(@types/node@22.16.0)': - dependencies: - '@microsoft/tsdoc': 0.15.1 - '@microsoft/tsdoc-config': 0.17.1 - '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0) - transitivePeerDependencies: - - '@types/node' - - '@microsoft/api-extractor@7.52.8(@types/node@22.16.0)': - dependencies: - '@microsoft/api-extractor-model': 7.30.6(@types/node@22.16.0) - '@microsoft/tsdoc': 0.15.1 - '@microsoft/tsdoc-config': 0.17.1 - '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0) - '@rushstack/rig-package': 0.5.3 - '@rushstack/terminal': 0.15.3(@types/node@22.16.0) - '@rushstack/ts-command-line': 5.0.1(@types/node@22.16.0) - lodash: 4.17.21 - minimatch: 3.0.8 - resolve: 1.22.10 - semver: 7.5.4 - source-map: 0.6.1 - typescript: 5.8.2 - transitivePeerDependencies: - - '@types/node' - - '@microsoft/tsdoc-config@0.17.1': - dependencies: - '@microsoft/tsdoc': 0.15.1 - ajv: 8.12.0 - jju: 1.4.0 - resolve: 1.22.10 - - '@microsoft/tsdoc@0.15.1': {} - - '@mux/mux-data-google-ima@0.2.8': - dependencies: - mux-embed: 5.9.0 - - '@mux/mux-player-react@3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@mux/mux-player': 3.5.3(react@18.3.1) - '@mux/playback-core': 0.30.1 - prop-types: 15.8.1 - react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) - optionalDependencies: - '@types/react': 18.3.23 - '@types/react-dom': 18.3.7(@types/react@18.3.23) - - '@mux/mux-player@3.5.3(react@18.3.1)': - dependencies: - '@mux/mux-video': 0.26.1 - '@mux/playback-core': 0.30.1 - media-chrome: 4.11.1(react@18.3.1) - player.style: 0.1.9(react@18.3.1) - transitivePeerDependencies: - - react - - '@mux/mux-video@0.26.1': - dependencies: - '@mux/mux-data-google-ima': 0.2.8 - '@mux/playback-core': 0.30.1 - castable-video: 1.1.10 - custom-media-element: 1.4.5 - media-tracks: 0.3.3 - - '@mux/playback-core@0.30.1': - dependencies: - hls.js: 1.6.9 - mux-embed: 5.11.0 - '@nanostores/react@1.0.0(nanostores@1.0.1)(react@18.3.1)': dependencies: nanostores: 1.0.1 @@ -5778,40 +5338,6 @@ snapshots: '@rtsao/scc@1.1.0': {} - '@rushstack/node-core-library@5.13.1(@types/node@22.16.0)': - dependencies: - ajv: 8.13.0 - ajv-draft-04: 
1.0.0(ajv@8.13.0) - ajv-formats: 3.0.1(ajv@8.13.0) - fs-extra: 11.3.0 - import-lazy: 4.0.0 - jju: 1.4.0 - resolve: 1.22.10 - semver: 7.5.4 - optionalDependencies: - '@types/node': 22.16.0 - - '@rushstack/rig-package@0.5.3': - dependencies: - resolve: 1.22.10 - strip-json-comments: 3.1.1 - - '@rushstack/terminal@0.15.3(@types/node@22.16.0)': - dependencies: - '@rushstack/node-core-library': 5.13.1(@types/node@22.16.0) - supports-color: 8.1.1 - optionalDependencies: - '@types/node': 22.16.0 - - '@rushstack/ts-command-line@5.0.1(@types/node@22.16.0)': - dependencies: - '@rushstack/terminal': 0.15.3(@types/node@22.16.0) - '@types/argparse': 1.0.38 - argparse: 1.0.10 - string-argv: 0.3.2 - transitivePeerDependencies: - - '@types/node' - '@socket.io/component-emitter@3.1.2': {} '@standard-schema/spec@1.0.0': {} @@ -5893,8 +5419,6 @@ snapshots: optionalDependencies: typescript: 5.8.3 - '@svta/common-media-library@0.12.4': {} - '@swc/core-darwin-arm64@1.12.9': optional: true @@ -5977,8 +5501,6 @@ snapshots: tslib: 2.8.1 optional: true - '@types/argparse@1.0.38': {} - '@types/aria-query@5.0.4': {} '@types/babel__core@7.20.5': @@ -6176,13 +5698,6 @@ snapshots: '@typescript-eslint/types': 8.37.0 eslint-visitor-keys: 4.2.1 - '@vercel/edge@1.2.2': {} - - '@vimeo/player@2.29.0': - dependencies: - native-promise-only: 0.8.1 - weakmap-polyfill: 2.0.4 - '@vitejs/plugin-react-swc@3.10.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))': dependencies: '@rolldown/pluginutils': 1.0.0-beta.11 @@ -6263,51 +5778,6 @@ snapshots: loupe: 3.1.4 tinyrainbow: 2.0.0 - '@volar/language-core@2.4.17': - dependencies: - '@volar/source-map': 2.4.17 - - '@volar/source-map@2.4.17': {} - - '@volar/typescript@2.4.17': - dependencies: - '@volar/language-core': 2.4.17 - path-browserify: 1.0.1 - vscode-uri: 3.1.0 - - '@vue/compiler-core@3.5.17': - dependencies: - '@babel/parser': 7.28.0 - '@vue/shared': 3.5.17 - entities: 4.5.0 - estree-walker: 2.0.2 - source-map-js: 1.2.1 - - '@vue/compiler-dom@3.5.17': - dependencies: - '@vue/compiler-core': 3.5.17 - '@vue/shared': 3.5.17 - - '@vue/compiler-vue2@2.7.16': - dependencies: - de-indent: 1.0.2 - he: 1.2.0 - - '@vue/language-core@2.2.0(typescript@5.8.3)': - dependencies: - '@volar/language-core': 2.4.17 - '@vue/compiler-dom': 3.5.17 - '@vue/compiler-vue2': 2.7.16 - '@vue/shared': 3.5.17 - alien-signals: 0.4.14 - minimatch: 9.0.5 - muggle-string: 0.4.1 - path-browserify: 1.0.1 - optionalDependencies: - typescript: 5.8.3 - - '@vue/shared@3.5.17': {} - '@xobotyi/scrollbar-width@1.9.5': {} '@xyflow/react@12.8.2(@types/react@18.3.23)(immer@10.1.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': @@ -6354,14 +5824,6 @@ snapshots: agent-base@7.1.3: {} - ajv-draft-04@1.0.0(ajv@8.13.0): - optionalDependencies: - ajv: 8.13.0 - - ajv-formats@3.0.1(ajv@8.13.0): - optionalDependencies: - ajv: 8.13.0 - ajv@6.12.6: dependencies: fast-deep-equal: 3.1.3 @@ -6369,22 +5831,6 @@ snapshots: json-schema-traverse: 0.4.1 uri-js: 4.4.1 - ajv@8.12.0: - dependencies: - fast-deep-equal: 3.1.3 - json-schema-traverse: 1.0.0 - require-from-string: 2.0.2 - uri-js: 4.4.1 - - ajv@8.13.0: - dependencies: - fast-deep-equal: 3.1.3 - json-schema-traverse: 1.0.0 - require-from-string: 2.0.2 - uri-js: 4.4.1 - - alien-signals@0.4.14: {} - ansi-colors@4.1.3: {} ansi-regex@5.0.1: {} @@ -6399,10 +5845,6 @@ snapshots: ansi-styles@6.2.1: {} - argparse@1.0.10: - dependencies: - sprintf-js: 1.0.3 - argparse@2.0.1: {} aria-hidden@1.2.6: @@ -6516,19 +5958,6 @@ snapshots: base64-js@1.5.1: {} - bcp-47-match@2.0.3: {} - - 
bcp-47-normalize@2.3.0: - dependencies: - bcp-47: 2.1.0 - bcp-47-match: 2.0.3 - - bcp-47@2.1.0: - dependencies: - is-alphabetical: 2.0.1 - is-alphanumerical: 2.0.1 - is-decimal: 2.0.1 - better-opn@3.0.2: dependencies: open: 8.4.2 @@ -6591,14 +6020,6 @@ snapshots: caniuse-lite@1.0.30001727: {} - castable-video@1.1.10: - dependencies: - custom-media-element: 1.4.5 - - ce-la-react@0.3.1(react@18.3.1): - dependencies: - react: 18.3.1 - chai@5.2.0: dependencies: assertion-error: 2.0.1 @@ -6656,8 +6077,6 @@ snapshots: clone@1.0.4: {} - cloudflare-video-element@1.3.3: {} - cmdk@1.1.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.23)(react@18.3.1) @@ -6670,8 +6089,6 @@ snapshots: - '@types/react' - '@types/react-dom' - codem-isoboxer@0.3.10: {} - color-convert@2.0.1: dependencies: color-name: 1.1.4 @@ -6698,10 +6115,6 @@ snapshots: tree-kill: 1.2.2 yargs: 17.7.2 - confbox@0.1.8: {} - - confbox@0.2.2: {} - convert-source-map@1.9.0: {} convert-source-map@2.0.0: {} @@ -6747,8 +6160,6 @@ snapshots: csstype@3.1.3: {} - custom-media-element@1.4.5: {} - d3-color@3.1.0: {} d3-dispatch@3.0.1: {} @@ -6785,24 +6196,6 @@ snapshots: d3-selection: 3.0.0 d3-transition: 3.0.1(d3-selection@3.0.0) - dash-video-element@0.1.6: - dependencies: - custom-media-element: 1.4.5 - dashjs: 5.0.3 - - dashjs@5.0.3: - dependencies: - '@svta/common-media-library': 0.12.4 - bcp-47-match: 2.0.3 - bcp-47-normalize: 2.3.0 - codem-isoboxer: 0.3.10 - fast-deep-equal: 3.1.3 - html-entities: 2.6.0 - imsc: 1.1.5 - localforage: 1.10.0 - path-browserify: 1.0.1 - ua-parser-js: 1.0.40 - data-view-buffer@1.0.2: dependencies: call-bound: 1.0.4 @@ -6821,8 +6214,6 @@ snapshots: es-errors: 1.3.0 is-data-view: 1.0.2 - de-indent@1.0.2: {} - debug@3.2.7: dependencies: ms: 2.1.3 @@ -6929,8 +6320,6 @@ snapshots: engine.io-parser@5.2.3: {} - entities@4.5.0: {} - error-ex@1.3.2: dependencies: is-arrayish: 0.2.1 @@ -7276,8 +6665,6 @@ snapshots: expect-type@1.2.1: {} - exsolve@1.0.7: {} - fast-deep-equal@3.1.3: {} fast-glob@3.3.3: @@ -7501,22 +6888,10 @@ snapshots: dependencies: function-bind: 1.1.2 - he@1.2.0: {} - - hls-video-element@1.5.6: - dependencies: - custom-media-element: 1.4.5 - hls.js: 1.6.9 - media-tracks: 0.3.3 - - hls.js@1.6.9: {} - hoist-non-react-statics@3.3.2: dependencies: react-is: 16.13.1 - html-entities@2.6.0: {} - html-escaper@2.0.2: {} html-parse-stringify@3.0.1: @@ -7552,8 +6927,6 @@ snapshots: ignore@7.0.5: {} - immediate@3.0.6: {} - immer@10.1.1: {} import-fresh@3.3.1: @@ -7561,12 +6934,6 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 - import-lazy@4.0.0: {} - - imsc@1.1.5: - dependencies: - sax: 1.2.1 - imurmurhash@0.1.4: {} indent-string@4.0.0: {} @@ -7585,13 +6952,6 @@ snapshots: hasown: 2.0.2 side-channel: 1.1.0 - is-alphabetical@2.0.1: {} - - is-alphanumerical@2.0.1: - dependencies: - is-alphabetical: 2.0.1 - is-decimal: 2.0.1 - is-array-buffer@3.0.5: dependencies: call-bind: 1.0.8 @@ -7634,8 +6994,6 @@ snapshots: call-bound: 1.0.4 has-tostringtag: 1.0.2 - is-decimal@2.0.1: {} - is-docker@2.2.1: {} is-extglob@2.1.1: {} @@ -7757,8 +7115,6 @@ snapshots: jiti@2.4.2: {} - jju@1.4.0: {} - js-cookie@2.2.1: {} js-levenshtein@1.1.6: {} @@ -7828,8 +7184,6 @@ snapshots: zod: 3.25.76 zod-validation-error: 3.5.3(zod@3.25.76) - kolorist@1.8.0: {} - konva@9.3.22: {} levn@0.4.1: @@ -7837,10 +7191,6 @@ snapshots: prelude-ls: 1.2.1 type-check: 0.4.0 - lie@3.1.1: - dependencies: - immediate: 
3.0.6 - lines-and-columns@1.2.4: {} linkify-react@4.3.1(linkifyjs@4.3.1)(react@18.3.1): @@ -7857,16 +7207,6 @@ snapshots: load-tsconfig@0.2.5: {} - local-pkg@1.1.1: - dependencies: - mlly: 1.7.4 - pkg-types: 2.2.0 - quansync: 0.2.10 - - localforage@1.10.0: - dependencies: - lie: 3.1.1 - locate-path@6.0.0: dependencies: p-locate: 5.0.0 @@ -7900,10 +7240,6 @@ snapshots: dependencies: yallist: 3.1.1 - lru-cache@6.0.0: - dependencies: - yallist: 4.0.0 - lz-string@1.5.0: {} magic-string@0.30.17: @@ -7926,21 +7262,6 @@ snapshots: mdn-data@2.0.14: {} - media-chrome@4.11.1(react@18.3.1): - dependencies: - '@vercel/edge': 1.2.2 - ce-la-react: 0.3.1(react@18.3.1) - transitivePeerDependencies: - - react - - media-chrome@4.13.0(react@18.3.1): - dependencies: - ce-la-react: 0.3.1(react@18.3.1) - transitivePeerDependencies: - - react - - media-tracks@0.3.3: {} - memoize-one@6.0.0: {} merge2@1.4.1: {} @@ -7954,10 +7275,6 @@ snapshots: min-indent@1.0.1: {} - minimatch@3.0.8: - dependencies: - brace-expansion: 1.1.12 - minimatch@3.1.2: dependencies: brace-expansion: 1.1.12 @@ -7974,13 +7291,6 @@ snapshots: minipass@7.1.2: {} - mlly@1.7.4: - dependencies: - acorn: 8.15.0 - pathe: 2.0.3 - pkg-types: 1.3.1 - ufo: 1.6.1 - moo@0.5.2: {} motion-dom@11.18.1: @@ -7995,12 +7305,6 @@ snapshots: mtwist@1.0.2: {} - muggle-string@0.4.1: {} - - mux-embed@5.11.0: {} - - mux-embed@5.9.0: {} - nano-css@5.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@jridgewell/sourcemap-codec': 1.5.4 @@ -8020,8 +7324,6 @@ snapshots: nanostores@1.0.1: {} - native-promise-only@0.8.1: {} - natural-compare@1.4.0: {} nearley@2.20.1: @@ -8197,8 +7499,6 @@ snapshots: index-to-position: 1.1.0 type-fest: 4.41.0 - path-browserify@1.0.1: {} - path-exists@4.0.0: {} path-exists@5.0.0: {} @@ -8228,24 +7528,6 @@ snapshots: picomatch@4.0.3: {} - pkg-types@1.3.1: - dependencies: - confbox: 0.1.8 - mlly: 1.7.4 - pathe: 2.0.3 - - pkg-types@2.2.0: - dependencies: - confbox: 0.2.2 - exsolve: 1.0.7 - pathe: 2.0.3 - - player.style@0.1.9(react@18.3.1): - dependencies: - media-chrome: 4.11.1(react@18.3.1) - transitivePeerDependencies: - - react - pluralize@8.0.0: {} possible-typed-array-names@1.1.0: {} @@ -8274,8 +7556,6 @@ snapshots: punycode@2.3.1: {} - quansync@0.2.10: {} - query-string@9.2.2: dependencies: decode-uri-component: 0.4.1 @@ -8393,24 +7673,6 @@ snapshots: react-is@17.0.2: {} - react-player@3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): - dependencies: - '@mux/mux-player-react': 3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@types/react': 18.3.23 - cloudflare-video-element: 1.3.3 - dash-video-element: 0.1.6 - hls-video-element: 1.5.6 - react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) - spotify-audio-element: 1.0.2 - tiktok-video-element: 0.1.0 - twitch-video-element: 0.1.2 - vimeo-video-element: 1.5.3 - wistia-video-element: 1.3.3 - youtube-video-element: 1.6.1 - transitivePeerDependencies: - - '@types/react-dom' - react-redux@9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1): dependencies: '@types/use-sync-external-store': 0.0.6 @@ -8556,8 +7818,6 @@ snapshots: indent-string: 4.0.0 strip-indent: 3.0.0 - redux-dynamic-middlewares@2.2.0: {} - redux-remember@5.2.0(redux@5.0.1): dependencies: redux: 5.0.1 @@ -8705,8 +7965,6 @@ snapshots: safe-stable-stringify@2.5.0: {} - sax@1.2.1: {} - scheduler@0.23.2: dependencies: loose-envify: 1.4.0 @@ -8717,10 +7975,6 @@ snapshots: 
semver@6.3.1: {} - semver@7.5.4: - dependencies: - lru-cache: 6.0.0 - semver@7.7.2: {} serialize-error@12.0.0: @@ -8831,10 +8085,6 @@ snapshots: split-on-first@3.0.0: {} - spotify-audio-element@1.0.2: {} - - sprintf-js@1.0.3: {} - stable-hash@0.0.6: {} stack-generator@2.0.10: @@ -8884,8 +8134,6 @@ snapshots: - supports-color - utf-8-validate - string-argv@0.3.2: {} - string-width@4.2.3: dependencies: emoji-regex: 8.0.0 @@ -8976,8 +8224,6 @@ snapshots: stylis@4.3.6: {} - super-media-element@1.4.2: {} - supports-color@10.0.0: {} supports-color@7.2.0: @@ -8998,8 +8244,6 @@ snapshots: throttle-debounce@3.0.1: {} - tiktok-video-element@0.1.0: {} - tiny-invariant@1.3.3: {} tinybench@2.9.0: {} @@ -9062,8 +8306,6 @@ snapshots: tslib@2.8.1: {} - twitch-video-element@0.1.2: {} - type-check@0.4.0: dependencies: prelude-ls: 1.2.1 @@ -9103,14 +8345,8 @@ snapshots: possible-typed-array-names: 1.1.0 reflect.getprototypeof: 1.0.10 - typescript@5.8.2: {} - typescript@5.8.3: {} - ua-parser-js@1.0.40: {} - - ufo@1.6.1: {} - unbox-primitive@1.1.0: dependencies: call-bound: 1.0.4 @@ -9191,10 +8427,6 @@ snapshots: uuid@11.1.0: {} - vimeo-video-element@1.5.3: - dependencies: - '@vimeo/player': 2.29.0 - vite-node@3.2.4(@types/node@22.16.0)(jiti@2.4.2): dependencies: cac: 6.7.14 @@ -9216,29 +8448,6 @@ snapshots: - tsx - yaml - vite-plugin-css-injected-by-js@3.5.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)): - dependencies: - vite: 7.0.5(@types/node@22.16.0)(jiti@2.4.2) - - vite-plugin-dts@4.5.4(@types/node@22.16.0)(rollup@4.45.1)(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)): - dependencies: - '@microsoft/api-extractor': 7.52.8(@types/node@22.16.0) - '@rollup/pluginutils': 5.2.0(rollup@4.45.1) - '@volar/typescript': 2.4.17 - '@vue/language-core': 2.2.0(typescript@5.8.3) - compare-versions: 6.1.1 - debug: 4.4.1(supports-color@10.0.0) - kolorist: 1.8.0 - local-pkg: 1.1.1 - magic-string: 0.30.17 - typescript: 5.8.3 - optionalDependencies: - vite: 7.0.5(@types/node@22.16.0)(jiti@2.4.2) - transitivePeerDependencies: - - '@types/node' - - rollup - - supports-color - vite-plugin-eslint@1.8.1(eslint@9.31.0(jiti@2.4.2))(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)): dependencies: '@rollup/pluginutils': 4.2.1 @@ -9315,16 +8524,12 @@ snapshots: void-elements@3.1.0: {} - vscode-uri@3.1.0: {} - walk-up-path@4.0.0: {} wcwidth@1.0.1: dependencies: defaults: 1.0.4 - weakmap-polyfill@2.0.4: {} - webidl-conversions@3.0.1: {} webpack-virtual-modules@0.6.2: {} @@ -9384,10 +8589,6 @@ snapshots: siginfo: 2.0.0 stackback: 0.0.2 - wistia-video-element@1.3.3: - dependencies: - super-media-element: 1.4.2 - word-wrap@1.2.5: {} wrap-ansi@7.0.0: @@ -9412,8 +8613,6 @@ snapshots: yallist@3.1.1: {} - yallist@4.0.0: {} - yaml-ast-parser@0.0.43: {} yaml@1.10.2: {} @@ -9434,8 +8633,6 @@ snapshots: yocto-queue@1.2.1: {} - youtube-video-element@1.6.1: {} - zod-validation-error@3.5.3(zod@3.25.76): dependencies: zod: 3.25.76 diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index 2428638dd12..4e024f1516a 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -36,20 +36,6 @@ export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fat export type LogLevel = z.infer; export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success; -/** - * Override logging settings. - * @property logIsEnabled Override the enabled log state. Omit to use the user's settings. 
- * @property logNamespaces Override the enabled log namespaces. Use `"*"` for all namespaces. Omit to use the user's settings.
- * @property logLevel Override the log level. Omit to use the user's settings.
- */
-export type LoggingOverrides = {
-  logIsEnabled?: boolean;
-  logNamespaces?: LogNamespace[] | '*';
-  logLevel?: LogLevel;
-};
-
-export const $loggingOverrides = atom<LoggingOverrides | undefined>();
-
 // Translate human-readable log levels to numbers, used for log filtering
 const LOG_LEVEL_MAP: Record<LogLevel, number> = {
   trace: 10,
diff --git a/invokeai/frontend/web/src/app/logging/useLogger.ts b/invokeai/frontend/web/src/app/logging/useLogger.ts
deleted file mode 100644
index ac2a05cadbc..00000000000
--- a/invokeai/frontend/web/src/app/logging/useLogger.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-import { useMemo } from 'react';
-
-import type { LogNamespace } from './logger';
-import { logger } from './logger';
-
-export const useLogger = (namespace: LogNamespace) => {
-  const log = useMemo(() => logger(namespace), [namespace]);
-
-  return log;
-};
diff --git a/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts b/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
index fb4b2a7b8ee..ca8f26bb3fa 100644
--- a/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
+++ b/invokeai/frontend/web/src/app/logging/useSyncLoggingConfig.ts
@@ -1,5 +1,4 @@
-import { useStore } from '@nanostores/react';
-import { $loggingOverrides, configureLogging } from 'app/logging/logger';
+import { configureLogging } from 'app/logging/logger';
 import { useAppSelector } from 'app/store/storeHooks';
 import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
 import {
@@ -20,24 +19,11 @@
 export const useSyncLoggingConfig = () => {
   useAssertSingleton('useSyncLoggingConfig');
 
-  const loggingOverrides = useStore($loggingOverrides);
-
   const logLevel = useAppSelector(selectSystemLogLevel);
   const logNamespaces = useAppSelector(selectSystemLogNamespaces);
   const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
 
   useLayoutEffect(() => {
-    configureLogging(
-      loggingOverrides?.logIsEnabled ?? logIsEnabled,
-      loggingOverrides?.logLevel ?? logLevel,
-      loggingOverrides?.logNamespaces ?? logNamespaces
-    );
-  }, [
-    logIsEnabled,
-    logLevel,
-    logNamespaces,
-    loggingOverrides?.logIsEnabled,
-    loggingOverrides?.logLevel,
-    loggingOverrides?.logNamespaces,
-  ]);
+    configureLogging(logIsEnabled, logLevel, logNamespaces);
+  }, [logIsEnabled, logLevel, logNamespaces]);
 };
diff --git a/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts b/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts
deleted file mode 100644
index 4f7118e2ebc..00000000000
--- a/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import { atom } from 'nanostores';
-
-const DEFAULT_BULK_DOWNLOAD_ID = 'default';
-
-/**
- * The download id for a bulk download. Used for socket subscriptions.
- */ - -export const $bulkDownloadId = atom(DEFAULT_BULK_DOWNLOAD_ID); diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts index 90c6b47b195..15dbfc785ca 100644 --- a/invokeai/frontend/web/src/app/store/store.ts +++ b/invokeai/frontend/web/src/app/store/store.ts @@ -53,7 +53,7 @@ import { stateSanitizer } from './middleware/devtools/stateSanitizer'; import { addArchivedOrDeletedBoardListener } from './middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener'; import { addImageUploadedFulfilledListener } from './middleware/listenerMiddleware/listeners/imageUploaded'; -export const listenerMiddleware = createListenerMiddleware(); +const listenerMiddleware = createListenerMiddleware(); const log = logger('system'); diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts deleted file mode 100644 index 664ac002e95..00000000000 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ /dev/null @@ -1,312 +0,0 @@ -import { zFilterType } from 'features/controlLayers/store/filters'; -import { zParameterPrecision, zParameterScheduler } from 'features/parameters/types/parameterSchemas'; -import { zTabName } from 'features/ui/store/uiTypes'; -import type { PartialDeep } from 'type-fest'; -import z from 'zod'; - -const zAppFeature = z.enum([ - 'faceRestore', - 'upscaling', - 'lightbox', - 'modelManager', - 'githubLink', - 'discordLink', - 'bugLink', - 'aboutModal', - 'localization', - 'consoleLogging', - 'dynamicPrompting', - 'batches', - 'syncModels', - 'multiselect', - 'pauseQueue', - 'resumeQueue', - 'invocationCache', - 'modelCache', - 'bulkDownload', - 'starterModels', - 'hfToken', - 'retryQueueItem', - 'cancelAndClearAll', - 'chatGPT4oHigh', - 'modelRelationships', -]); -export type AppFeature = z.infer; - -const zSDFeature = z.enum([ - 'controlNet', - 'noise', - 'perlinNoise', - 'noiseThreshold', - 'variation', - 'symmetry', - 'seamless', - 'hires', - 'lora', - 'embedding', - 'vae', - 'hrf', -]); -export type SDFeature = z.infer; - -const zNumericalParameterConfig = z.object({ - initial: z.number().default(512), - sliderMin: z.number().default(64), - sliderMax: z.number().default(1536), - numberInputMin: z.number().default(64), - numberInputMax: z.number().default(4096), - fineStep: z.number().default(8), - coarseStep: z.number().default(64), -}); -export type NumericalParameterConfig = z.infer; - -const CONSTRAINTS = { - initial: 512, - sliderMin: 64, - sliderMax: 1536, - numberInputMin: 64, - numberInputMax: 4096, - fineStep: 8, - coarseStep: 64, -}; - -/** - * Configuration options for the InvokeAI UI. - * Distinct from system settings which may be changed inside the app. - */ -export const zAppConfig = z.object({ - /** - * Whether or not we should update image urls when image loading errors - */ - shouldUpdateImagesOnConnect: z.boolean(), - shouldFetchMetadataFromApi: z.boolean(), - /** - * Sets a size limit for outputs on the upscaling tab. This is a maximum dimension, so the actual max number of pixels - * will be the square of this value. 
- */ - maxUpscaleDimension: z.number().optional(), - allowPrivateBoards: z.boolean(), - allowPrivateStylePresets: z.boolean(), - allowClientSideUpload: z.boolean(), - allowPublishWorkflows: z.boolean(), - allowPromptExpansion: z.boolean(), - allowVideo: z.boolean(), - disabledTabs: z.array(zTabName), - disabledFeatures: z.array(zAppFeature), - disabledSDFeatures: z.array(zSDFeature), - nodesAllowlist: z.array(z.string()).optional(), - nodesDenylist: z.array(z.string()).optional(), - metadataFetchDebounce: z.number().int().optional(), - workflowFetchDebounce: z.number().int().optional(), - isLocal: z.boolean().optional(), - shouldShowCredits: z.boolean().optional(), - sd: z.object({ - defaultModel: z.string().optional(), - disabledControlNetModels: z.array(z.string()), - disabledControlNetProcessors: z.array(zFilterType), - // Core parameters - iterations: zNumericalParameterConfig, - width: zNumericalParameterConfig, - height: zNumericalParameterConfig, - steps: zNumericalParameterConfig, - guidance: zNumericalParameterConfig, - cfgRescaleMultiplier: zNumericalParameterConfig, - img2imgStrength: zNumericalParameterConfig, - scheduler: zParameterScheduler.optional(), - vaePrecision: zParameterPrecision.optional(), - // Canvas - boundingBoxHeight: zNumericalParameterConfig, - boundingBoxWidth: zNumericalParameterConfig, - scaledBoundingBoxHeight: zNumericalParameterConfig, - scaledBoundingBoxWidth: zNumericalParameterConfig, - canvasCoherenceStrength: zNumericalParameterConfig, - canvasCoherenceEdgeSize: zNumericalParameterConfig, - infillTileSize: zNumericalParameterConfig, - infillPatchmatchDownscaleSize: zNumericalParameterConfig, - // Misc advanced - clipSkip: zNumericalParameterConfig, // slider and input max are ignored for this, because the values depend on the model - maskBlur: zNumericalParameterConfig, - hrfStrength: zNumericalParameterConfig, - dynamicPrompts: z.object({ - maxPrompts: zNumericalParameterConfig, - }), - ca: z.object({ - weight: zNumericalParameterConfig, - }), - }), - flux: z.object({ - guidance: zNumericalParameterConfig, - }), -}); - -export type AppConfig = z.infer; -export type PartialAppConfig = PartialDeep; - -export const getDefaultAppConfig = (): AppConfig => ({ - isLocal: true, - shouldUpdateImagesOnConnect: false, - shouldFetchMetadataFromApi: false, - allowPrivateBoards: false, - allowPrivateStylePresets: false, - allowClientSideUpload: false, - allowPublishWorkflows: false, - allowPromptExpansion: false, - allowVideo: false, // used to determine if video is enabled vs upsell - shouldShowCredits: false, - disabledTabs: [], // used to determine if video functionality is visible - disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[], - disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[], - sd: { - disabledControlNetModels: [], - disabledControlNetProcessors: [], - iterations: { - initial: 1, - sliderMin: 1, - sliderMax: 1000, - numberInputMin: 1, - numberInputMax: 10000, - fineStep: 1, - coarseStep: 1, - }, - width: zNumericalParameterConfig.parse({}), // initial value comes from model - height: zNumericalParameterConfig.parse({}), // initial value comes from model - boundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model - boundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model - scaledBoundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model - scaledBoundingBoxHeight: 
zNumericalParameterConfig.parse({}), // initial value comes from model - scheduler: 'dpmpp_3m_k' as const, - vaePrecision: 'fp32' as const, - steps: { - initial: 30, - sliderMin: 1, - sliderMax: 100, - numberInputMin: 1, - numberInputMax: 500, - fineStep: 1, - coarseStep: 1, - }, - guidance: { - initial: 7, - sliderMin: 1, - sliderMax: 20, - numberInputMin: 1, - numberInputMax: 200, - fineStep: 0.1, - coarseStep: 0.5, - }, - img2imgStrength: { - initial: 0.7, - sliderMin: 0, - sliderMax: 1, - numberInputMin: 0, - numberInputMax: 1, - fineStep: 0.01, - coarseStep: 0.05, - }, - canvasCoherenceStrength: { - initial: 0.3, - sliderMin: 0, - sliderMax: 1, - numberInputMin: 0, - numberInputMax: 1, - fineStep: 0.01, - coarseStep: 0.05, - }, - hrfStrength: { - initial: 0.45, - sliderMin: 0, - sliderMax: 1, - numberInputMin: 0, - numberInputMax: 1, - fineStep: 0.01, - coarseStep: 0.05, - }, - canvasCoherenceEdgeSize: { - initial: 16, - sliderMin: 0, - sliderMax: 128, - numberInputMin: 0, - numberInputMax: 1024, - fineStep: 8, - coarseStep: 16, - }, - cfgRescaleMultiplier: { - initial: 0, - sliderMin: 0, - sliderMax: 0.99, - numberInputMin: 0, - numberInputMax: 0.99, - fineStep: 0.05, - coarseStep: 0.1, - }, - clipSkip: { - initial: 0, - sliderMin: 0, - sliderMax: 12, // determined by model selection, unused in practice - numberInputMin: 0, - numberInputMax: 12, // determined by model selection, unused in practice - fineStep: 1, - coarseStep: 1, - }, - infillPatchmatchDownscaleSize: { - initial: 1, - sliderMin: 1, - sliderMax: 10, - numberInputMin: 1, - numberInputMax: 10, - fineStep: 1, - coarseStep: 1, - }, - infillTileSize: { - initial: 32, - sliderMin: 16, - sliderMax: 64, - numberInputMin: 16, - numberInputMax: 256, - fineStep: 1, - coarseStep: 1, - }, - maskBlur: { - initial: 16, - sliderMin: 0, - sliderMax: 128, - numberInputMin: 0, - numberInputMax: 512, - fineStep: 1, - coarseStep: 1, - }, - ca: { - weight: { - initial: 1, - sliderMin: 0, - sliderMax: 2, - numberInputMin: -1, - numberInputMax: 2, - fineStep: 0.01, - coarseStep: 0.05, - }, - }, - dynamicPrompts: { - maxPrompts: { - initial: 100, - sliderMin: 1, - sliderMax: 1000, - numberInputMin: 1, - numberInputMax: 10000, - fineStep: 1, - coarseStep: 10, - }, - }, - }, - flux: { - guidance: { - initial: 4, - sliderMin: 2, - sliderMax: 6, - numberInputMin: 1, - numberInputMax: 20, - fineStep: 0.1, - coarseStep: 0.5, - }, - }, -}); diff --git a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx index e8885097d17..00217eb7963 100644 --- a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx +++ b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx @@ -19,11 +19,6 @@ const selectImagesToChange = createSelector( (changeBoardModal) => changeBoardModal.image_names ); -const selectVideosToChange = createSelector( - selectChangeBoardModalSlice, - (changeBoardModal) => changeBoardModal.video_ids -); - const selectIsModalOpen = createSelector( selectChangeBoardModalSlice, (changeBoardModal) => changeBoardModal.isModalOpen @@ -37,7 +32,6 @@ const ChangeBoardModal = () => { const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true }); const isModalOpen = useAppSelector(selectIsModalOpen); const imagesToChange = useAppSelector(selectImagesToChange); - const videosToChange = useAppSelector(selectVideosToChange); const [addImagesToBoard] = 
useAddImagesToBoardMutation(); const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation(); const { t } = useTranslation(); @@ -63,7 +57,7 @@ const ChangeBoardModal = () => { }, [dispatch]); const handleChangeBoard = useCallback(() => { - if (!selectedBoardId || (imagesToChange.length === 0 && videosToChange.length === 0)) { + if (!selectedBoardId || imagesToChange.length === 0) { return; } @@ -78,7 +72,7 @@ const ChangeBoardModal = () => { } } dispatch(changeBoardReset()); - }, [addImagesToBoard, dispatch, imagesToChange, videosToChange, removeImagesFromBoard, selectedBoardId]); + }, [addImagesToBoard, dispatch, imagesToChange, removeImagesFromBoard, selectedBoardId]); const onChange = useCallback((v) => { if (!v) { @@ -99,15 +93,9 @@ const ChangeBoardModal = () => { > - {imagesToChange.length > 0 && - t('boards.movingImagesToBoard', { - count: imagesToChange.length, - })} - {videosToChange.length > 0 && - t('boards.movingVideosToBoard', { - count: videosToChange.length, - })} - : + {t('boards.movingImagesToBoard', { + count: imagesToChange.length, + })} []), - video_ids: z.array(z.string()).default(() => []), }); type ChangeBoardModalState = z.infer; @@ -23,9 +22,6 @@ const slice = createSlice({ imagesToChangeSelected: (state, action: PayloadAction) => { state.image_names = action.payload; }, - videosToChangeSelected: (state, action: PayloadAction) => { - state.video_ids = action.payload; - }, changeBoardReset: (state) => { state.image_names = []; state.isModalOpen = false; @@ -33,7 +29,7 @@ const slice = createSlice({ }, }); -export const { isModalOpenChanged, imagesToChangeSelected, videosToChangeSelected, changeBoardReset } = slice.actions; +export const { isModalOpenChanged, imagesToChangeSelected, changeBoardReset } = slice.actions; export const selectChangeBoardModalSlice = (state: RootState) => state.changeBoardModal; diff --git a/invokeai/frontend/web/src/features/controlLayers/store/filters.ts b/invokeai/frontend/web/src/features/controlLayers/store/filters.ts index 676a353f00f..9373031e11f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/filters.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/filters.ts @@ -166,7 +166,7 @@ const _zFilterConfig = z.discriminatedUnion('type', [ ]); export type FilterConfig = z.infer; -export const zFilterType = z.enum([ +const zFilterType = z.enum([ 'adjust_image', 'canny_edge_detection', 'color_map', diff --git a/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts index dfde382fcab..42d6986d5c5 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/lorasSlice.ts @@ -1,7 +1,6 @@ import { createSelector, createSlice, type PayloadAction } from '@reduxjs/toolkit'; import type { RootState } from 'app/store/store'; import type { SliceConfig } from 'app/store/types'; -import type { NumericalParameterConfig } from 'app/types/invokeai'; import { paramsReset } from 'features/controlLayers/store/paramsSlice'; import { type LoRA, zLoRA } from 'features/controlLayers/store/types'; import { zModelIdentifierField } from 'features/nodes/types/common'; @@ -9,7 +8,7 @@ import type { LoRAModelConfig } from 'services/api/types'; import { v4 as uuidv4 } from 'uuid'; import z from 'zod'; -export const DEFAULT_LORA_WEIGHT_CONFIG: NumericalParameterConfig = { +export const DEFAULT_LORA_WEIGHT_CONFIG = { initial: 0.75, sliderMin: -1, sliderMax: 2, diff 
--git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 2b327ae8ca9..7bdbc4f2d6f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -53,7 +53,7 @@ const zCropBox = z.object({ // This parsing happens currently in two places: // - Recalling metadata. // - Loading/rehydrating persisted client state from storage. -export const zCroppableImageWithDims = z.preprocess( +const zCroppableImageWithDims = z.preprocess( (val) => { try { const imageWithDims = zImageWithDims.parse(val); @@ -365,18 +365,10 @@ export const isIPAdapterConfig = (config: RefImageState['config']): config is IP export const isFLUXReduxConfig = (config: RefImageState['config']): config is FLUXReduxConfig => config.type === 'flux_redux'; -export const isChatGPT4oReferenceImageConfig = ( - config: RefImageState['config'] -): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image'; - export const isFluxKontextReferenceImageConfig = ( config: RefImageState['config'] ): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image'; -export const isGemini2_5ReferenceImageConfig = ( - config: RefImageState['config'] -): config is Gemini2_5ReferenceImageConfig => config.type === 'gemini_2_5_reference_image'; - const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']); export type FillStyle = z.infer; export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success; @@ -555,20 +547,6 @@ export const ASPECT_RATIO_MAP: Record, { ratio: n '9:21': { ratio: 9 / 21, inverseID: '21:9' }, }; -export const zFluxKontextAspectRatioID = z.enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']); -type FluxKontextAspectRatio = z.infer; -export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer => - zFluxKontextAspectRatioID.safeParse(v).success; -export const FLUX_KONTEXT_ASPECT_RATIOS: Record = { - '3:4': { width: 880, height: 1184 }, - '4:3': { width: 1184, height: 880 }, - '9:16': { width: 752, height: 1392 }, - '16:9': { width: 1392, height: 752 }, - '21:9': { width: 1568, height: 672 }, - '9:21': { width: 672, height: 1568 }, - '1:1': { width: 1024, height: 1024 }, -}; - const zAspectRatioConfig = z.object({ id: zAspectRatioID, value: z.number().gt(0), diff --git a/invokeai/frontend/web/src/features/controlLayers/store/util.ts b/invokeai/frontend/web/src/features/controlLayers/store/util.ts index 54b484e78ae..737144a63df 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/util.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/util.ts @@ -7,13 +7,11 @@ import type { CanvasInpaintMaskState, CanvasRasterLayerState, CanvasRegionalGuidanceState, - ChatGPT4oReferenceImageConfig, ControlLoRAConfig, ControlNetConfig, CroppableImageWithDims, FluxKontextReferenceImageConfig, FLUXReduxConfig, - Gemini2_5ReferenceImageConfig, ImageWithDims, IPAdapterConfig, RasterLayerAdjustments, @@ -22,7 +20,6 @@ import type { RgbColor, T2IAdapterConfig, } from 'features/controlLayers/store/types'; -import type { ImageField } from 'features/nodes/types/common'; import type { ImageDTO } from 'services/api/types'; import { assert } from 'tsafe'; import type { PartialDeep } from 'type-fest'; @@ -62,8 +59,6 @@ export const imageDTOToCroppableImage = ( return val; }; -export const imageDTOToImageField = ({ image_name }: 
ImageDTO): ImageField => ({ image_name }); - const DEFAULT_RG_MASK_FILL_COLORS: RgbColor[] = [ { r: 121, g: 157, b: 219 }, // rgb(121, 157, 219) { r: 131, g: 214, b: 131 }, // rgb(131, 214, 131) @@ -111,16 +106,6 @@ export const initialFLUXRedux: FLUXReduxConfig = { model: null, imageInfluence: 'highest', }; -export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = { - type: 'chatgpt_4o_reference_image', - image: null, - model: null, -}; -export const initialGemini2_5ReferenceImage: Gemini2_5ReferenceImageConfig = { - type: 'gemini_2_5_reference_image', - image: null, - model: null, -}; export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = { type: 'flux_kontext_reference_image', image: null, diff --git a/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx b/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx index 192711c5441..0698f1c24b0 100644 --- a/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx +++ b/invokeai/frontend/web/src/features/dnd/DndImageIcon.tsx @@ -3,7 +3,7 @@ import { IconButton } from '@invoke-ai/ui-library'; import type { MouseEvent } from 'react'; import { memo } from 'react'; -export const imageButtonSx: SystemStyleObject = { +const imageButtonSx: SystemStyleObject = { minW: 0, svg: { transitionProperty: 'common', diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx index 0f1aa2e0a25..71764870153 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx @@ -1,9 +1,6 @@ import { MenuItem } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; -import { - imagesToChangeSelected, - isModalOpenChanged, -} from 'features/changeBoardModal/store/slice'; +import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice'; import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -15,7 +12,7 @@ export const ContextMenuItemChangeBoard = memo(() => { const imageDTO = useImageDTOContext(); const onClick = useCallback(() => { - dispatch(imagesToChangeSelected([imageDTO.image_name])); + dispatch(imagesToChangeSelected([imageDTO.image_name])); dispatch(isModalOpenChanged(true)); }, [dispatch, imageDTO]); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx index 1df70a4a429..ba94e3891b6 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemCopy.tsx @@ -11,7 +11,7 @@ export const ContextMenuItemCopy = memo(() => { const copyImageToClipboard = useCopyImageToClipboard(); const onClick = useCallback(() => { - copyImageToClipboard(imageDTO.image_url); + copyImageToClipboard(imageDTO.image_url); }, [copyImageToClipboard, imageDTO]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx 
b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx index 7c56c0205fe..e20221f3423 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx @@ -12,7 +12,7 @@ export const ContextMenuItemDeleteImage = memo(() => { const onClick = useCallback(async () => { try { - await deleteImageModal.delete([imageDTO.image_name]); + await deleteImageModal.delete([imageDTO.image_name]); } catch { // noop; } diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLoadWorkflow.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLoadWorkflow.tsx index b99913e43b7..aa17fa5467a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLoadWorkflow.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLoadWorkflow.tsx @@ -14,11 +14,11 @@ export const ContextMenuItemLoadWorkflow = memo(() => { const hasTemplates = useStore($hasTemplates); const onClick = useCallback(() => { - loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name }); + loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name }); }, [loadWorkflowWithDialog, imageDTO]); const isDisabled = useMemo(() => { - return !imageDTO.has_workflow || !hasTemplates; + return !imageDTO.has_workflow || !hasTemplates; }, [imageDTO, hasTemplates]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx index b5342d50f3b..85a42299fea 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx @@ -25,17 +25,17 @@ export const ContextMenuItemLocateInGalery = memo(() => { const onClick = useCallback(() => { navigationApi.expandRightPanel(); galleryPanel.expand(); - flushSync(() => { - dispatch( - boardIdSelected({ - boardId: imageDTO.board_id ?? 'none', - select: { - selection: [{ type: 'image', id: imageDTO.image_name }], - galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 'images' : 'assets', - }, - }) - ); - }); + flushSync(() => { + dispatch( + boardIdSelected({ + boardId: imageDTO.board_id ?? 'none', + select: { + selection: [{ type: 'image', id: imageDTO.image_name }], + galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 
'images' : 'assets', + }, + }) + ); + }); }, [dispatch, galleryPanel, imageDTO]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx index 1965b2d698f..23d21abc68a 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemMetadataRecallActionsCanvasGenerateTabs.tsx @@ -24,12 +24,12 @@ export const ContextMenuItemMetadataRecallActionsCanvasGenerateTabs = memo(() => const imageDTO = useImageDTOContext(); - const recallAll = useRecallAll(imageDTO ); - const recallRemix = useRecallRemix(imageDTO ); - const recallPrompts = useRecallPrompts(imageDTO ); - const recallSeed = useRecallSeed(imageDTO ); - const recallDimensions = useRecallDimensions(imageDTO ); - const recallCLIPSkip = useRecallCLIPSkip(imageDTO ); + const recallAll = useRecallAll(imageDTO); + const recallRemix = useRecallRemix(imageDTO); + const recallPrompts = useRecallPrompts(imageDTO); + const recallSeed = useRecallSeed(imageDTO); + const recallDimensions = useRecallDimensions(imageDTO); + const recallCLIPSkip = useRecallCLIPSkip(imageDTO); return ( }> diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx deleted file mode 100644 index d9df276f1b1..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/PublishedWorkflowPanelContent.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import { Button, Flex, Heading, Text } from '@invoke-ai/ui-library'; -import { useAppSelector } from 'app/store/storeHooks'; -import { selectWorkflowId } from 'features/nodes/store/selectors'; -import { toast } from 'features/toast/toast'; -import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiCopyBold, PiLockOpenBold } from 'react-icons/pi'; -import { useUnpublishWorkflowMutation } from 'services/api/endpoints/workflows'; - -export const PublishedWorkflowPanelContent = memo(() => { - const { t } = useTranslation(); - const saveAs = useSaveOrSaveAsWorkflow(); - const [unpublishWorkflow] = useUnpublishWorkflowMutation(); - const workflowId = useAppSelector(selectWorkflowId); - - const handleUnpublish = useCallback(async () => { - if (workflowId) { - try { - await unpublishWorkflow(workflowId).unwrap(); - toast({ - title: t('toast.workflowUnpublished'), - status: 'success', - }); - } catch { - toast({ - title: t('toast.problemUnpublishingWorkflow'), - description: t('toast.problemUnpublishingWorkflowDescription'), - status: 'error', - }); - } - } - }, [unpublishWorkflow, workflowId, t]); - - return ( - - - {t('workflows.builder.workflowLocked')} - - {t('workflows.builder.publishedWorkflowsLocked')} - - - - ); -}); -PublishedWorkflowPanelContent.displayName = 'PublishedWorkflowPanelContent'; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx index 
aacfba41c42..9fa81f5a1ec 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/NodeFieldElementEditMode.tsx @@ -108,7 +108,7 @@ const nodeFieldOverlaySx: SystemStyleObject = { }, }; -export const NodeFieldElementOverlay = memo(({ nodeId }: { nodeId: string }) => { +const NodeFieldElementOverlay = memo(({ nodeId }: { nodeId: string }) => { const mouseOverNode = useMouseOverNode(nodeId); const mouseOverFormField = useMouseOverFormField(nodeId); diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx deleted file mode 100644 index 4b08b72bf77..00000000000 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryListItemActions/LockedWorkflowIcon.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { IconButton, Tooltip } from '@invoke-ai/ui-library'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiLockBold } from 'react-icons/pi'; - -export const LockedWorkflowIcon = memo(() => { - const { t } = useTranslation(); - - return ( - - } - /> - - ); -}); - -LockedWorkflowIcon.displayName = 'LockedWorkflowIcon'; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx index 18dc640e90f..a86f3291e0b 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal.tsx @@ -8,16 +8,11 @@ import { ModalHeader, ModalOverlay, } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import { useWorkflowLibraryModal } from 'features/nodes/store/workflowLibraryModal'; -import { - $workflowLibraryCategoriesOptions, - selectWorkflowLibraryView, - workflowLibraryViewChanged, -} from 'features/nodes/store/workflowLibrarySlice'; -import { memo, useEffect, useMemo, useState } from 'react'; +import { selectWorkflowLibraryView, workflowLibraryViewChanged } from 'features/nodes/store/workflowLibrarySlice'; +import { memo, useEffect, useState } from 'react'; import { useTranslation } from 'react-i18next'; import { useGetCountsByCategoryQuery } from 'services/api/endpoints/workflows'; @@ -59,6 +54,26 @@ export const WorkflowLibraryModal = memo(() => { }); WorkflowLibraryModal.displayName = 'WorkflowLibraryModal'; +const recentWorkflowsCountQueryArg = { + categories: ['user', 'default'], + has_been_opened: true, +} satisfies Parameters[0]; + +const yourWorkflowsCountQueryArg = { + categories: ['user'], +} satisfies Parameters[0]; + +const queryOptions = { + selectFromResult: ({ data, isLoading }) => { + if (!data) { + return { count: 0, isLoading: true }; + } + return { + count: Object.values(data).reduce((acc, count) => acc + count, 0), + isLoading, + }; + }, +} satisfies Parameters[1]; /** * On first app load, if the user's selected view has no 
workflows, switches to the next available view. @@ -66,38 +81,7 @@ WorkflowLibraryModal.displayName = 'WorkflowLibraryModal'; const useSyncInitialWorkflowLibraryCategories = () => { const dispatch = useAppDispatch(); const view = useAppSelector(selectWorkflowLibraryView); - const categoryOptions = useStore($workflowLibraryCategoriesOptions); const [didSync, setDidSync] = useState(false); - const recentWorkflowsCountQueryArg = useMemo( - () => - ({ - categories: ['user', 'project', 'default'], - has_been_opened: true, - }) satisfies Parameters[0], - [] - ); - const yourWorkflowsCountQueryArg = useMemo( - () => - ({ - categories: ['user', 'project'], - }) satisfies Parameters[0], - [] - ); - const queryOptions = useMemo( - () => - ({ - selectFromResult: ({ data, isLoading }) => { - if (!data) { - return { count: 0, isLoading: true }; - } - return { - count: Object.values(data).reduce((acc, count) => acc + count, 0), - isLoading, - }; - }, - }) satisfies Parameters[1], - [] - ); const { count: recentWorkflowsCount, isLoading: isLoadingRecentWorkflowsCount } = useGetCountsByCategoryQuery( recentWorkflowsCountQueryArg, @@ -119,7 +103,7 @@ const useSyncInitialWorkflowLibraryCategories = () => { } else { dispatch(workflowLibraryViewChanged('defaults')); } - } else if (yourWorkflowsCount === 0 && (view === 'yours' || view === 'shared' || view === 'private')) { + } else if (yourWorkflowsCount === 0 && view === 'yours') { if (recentWorkflowsCount > 0) { dispatch(workflowLibraryViewChanged('recent')); } else { @@ -128,7 +112,6 @@ const useSyncInitialWorkflowLibraryCategories = () => { } setDidSync(true); }, [ - categoryOptions, didSync, dispatch, isLoadingRecentWorkflowsCount, diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx index 5000d7f564b..dedfdcc6599 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx @@ -12,16 +12,14 @@ import { Text, Tooltip, } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { getOverlayScrollbarsParams, overlayScrollbarsStyles } from 'common/components/OverlayScrollbars/constants'; import type { WorkflowLibraryView, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice'; import { - $workflowLibraryCategoriesOptions, - $workflowLibraryTagCategoriesOptions, - $workflowLibraryTagOptions, selectWorkflowLibrarySelectedTags, selectWorkflowLibraryView, + WORKFLOW_LIBRARY_TAG_CATEGORIES, + WORKFLOW_LIBRARY_TAGS, workflowLibraryTagsReset, workflowLibraryTagToggled, workflowLibraryViewChanged, @@ -31,33 +29,18 @@ import { UploadWorkflowButton } from 'features/workflowLibrary/components/Upload import { OverlayScrollbarsComponent } from 'overlayscrollbars-react'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiArrowCounterClockwiseBold, PiStarFill, PiUsersBold } from 'react-icons/pi'; +import { PiArrowCounterClockwiseBold, PiStarFill } from 'react-icons/pi'; import { useDispatch } from 'react-redux'; import { useGetCountsByTagQuery } from 'services/api/endpoints/workflows'; export const WorkflowLibrarySideNav = () => { 
const { t } = useTranslation(); - const categoryOptions = useStore($workflowLibraryCategoriesOptions); - const view = useAppSelector(selectWorkflowLibraryView); return ( {t('workflows.recentlyOpened')} {t('workflows.yourWorkflows')} - {categoryOptions.includes('project') && ( - - - - {t('workflows.private')} - - } view="shared"> - {t('workflows.shared')} - - - - - )} @@ -107,7 +90,6 @@ BrowseWorkflowsButton.displayName = 'BrowseWorkflowsButton'; const overlayscrollbarsOptions = getOverlayScrollbarsParams({ visibility: 'visible' }).options; const DefaultsViewCheckboxesCollapsible = memo(() => { - const tagCategoryOptions = useStore($workflowLibraryTagCategoriesOptions); const view = useAppSelector(selectWorkflowLibraryView); return ( @@ -115,7 +97,7 @@ const DefaultsViewCheckboxesCollapsible = memo(() => { - {tagCategoryOptions.map((tagCategory) => ( + {WORKFLOW_LIBRARY_TAG_CATEGORIES.map((tagCategory) => ( ))} @@ -126,16 +108,12 @@ const DefaultsViewCheckboxesCollapsible = memo(() => { }); DefaultsViewCheckboxesCollapsible.displayName = 'DefaultsViewCheckboxes'; +const tagCountQueryArg = { + tags: WORKFLOW_LIBRARY_TAGS.map((tag) => tag.label), + categories: ['default'], +} satisfies Parameters[0]; + const useCountForIndividualTag = (tag: string) => { - const allTags = useStore($workflowLibraryTagOptions); - const queryArg = useMemo( - () => - ({ - tags: allTags.map((tag) => tag.label), - categories: ['default'], - }) satisfies Parameters[0], - [allTags] - ); const queryOptions = useMemo( () => ({ @@ -146,21 +124,12 @@ const useCountForIndividualTag = (tag: string) => { [tag] ); - const { count } = useGetCountsByTagQuery(queryArg, queryOptions); + const { count } = useGetCountsByTagQuery(tagCountQueryArg, queryOptions); return count; }; const useCountForTagCategory = (tagCategory: WorkflowTagCategory) => { - const allTags = useStore($workflowLibraryTagOptions); - const queryArg = useMemo( - () => - ({ - tags: allTags.map((tag) => tag.label), - categories: ['default'], // We only allow filtering by tag for default workflows - }) satisfies Parameters[0], - [allTags] - ); const queryOptions = useMemo( () => ({ @@ -176,7 +145,7 @@ const useCountForTagCategory = (tagCategory: WorkflowTagCategory) => { [tagCategory] ); - const { count } = useGetCountsByTagQuery(queryArg, queryOptions); + const { count } = useGetCountsByTagQuery(tagCountQueryArg, queryOptions); return count; }; diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx index 34b40e98473..203a1f7a319 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx @@ -29,15 +29,9 @@ const getCategories = (view: WorkflowLibraryView): WorkflowCategory[] => { case 'defaults': return ['default']; case 'recent': - return ['user', 'project', 'default']; + return ['user', 'default']; case 'yours': - return ['user', 'project']; - case 'private': return ['user']; - case 'shared': - return ['project']; - case 'published': - return ['user', 'project', 'default']; default: assert>(false); } diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx index 
34913434bc8..8ea1bbf511f 100644 --- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx @@ -7,7 +7,7 @@ import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/L import InvokeLogo from 'public/assets/images/invoke-symbol-wht-lrg.svg'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiImage, PiUsersBold } from 'react-icons/pi'; +import { PiImage } from 'react-icons/pi'; import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types'; import { DeleteWorkflow } from './WorkflowLibraryListItemActions/DeleteWorkflow'; @@ -92,7 +92,6 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi {t('workflows.opened')} )} - {workflow.category === 'project' && } {workflow.category === 'default' && ( { const orderBy = useAppSelector(selectWorkflowLibraryOrderBy); const direction = useAppSelector(selectWorkflowLibraryDirection); - const sortOptions = useStore($workflowLibrarySortOptions); const ORDER_BY_LABELS = useMemo( () => ({ @@ -68,19 +66,12 @@ export const WorkflowSortControl = () => { [dispatch] ); - useEffect(() => { - if (!sortOptions.includes('opened_at')) { - dispatch(workflowLibraryOrderByChanged('name')); - dispatch(workflowLibraryDirectionChanged('ASC')); - } - }, [sortOptions, dispatch]); - return ( {t('common.orderBy')} - + {WORKFLOW_LIBRARY_SORT_OPTIONS.map((option) => ( diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useInputFieldUserTitleOrThrow.ts b/invokeai/frontend/web/src/features/nodes/hooks/useInputFieldUserTitleOrThrow.ts deleted file mode 100644 index c63abf86dd8..00000000000 --- a/invokeai/frontend/web/src/features/nodes/hooks/useInputFieldUserTitleOrThrow.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { createSelector } from '@reduxjs/toolkit'; -import { useAppSelector } from 'app/store/storeHooks'; -import { useInvocationNodeContext } from 'features/nodes/components/flow/nodes/Invocation/context'; -import { useMemo } from 'react'; - -/** - * Gets the user-defined title of an input field for a given node. - * - * If the node doesn't exist or is not an invocation node, an error is thrown. 
- * - * @param fieldName The name of the field - */ -export const useInputFieldUserTitleOrThrow = (fieldName: string): string => { - const ctx = useInvocationNodeContext(); - const selector = useMemo( - () => createSelector(ctx.buildSelectInputFieldOrThrow(fieldName), (field) => field.label), - [ctx, fieldName] - ); - return useAppSelector(selector); -}; diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateTitleOrThrow.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateTitleOrThrow.ts deleted file mode 100644 index a7869df3da1..00000000000 --- a/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateTitleOrThrow.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { createSelector } from '@reduxjs/toolkit'; -import { useAppSelector } from 'app/store/storeHooks'; -import { useInvocationNodeContext } from 'features/nodes/components/flow/nodes/Invocation/context'; -import { useMemo } from 'react'; - -export const useNodeTemplateTitleOrThrow = (): string => { - const ctx = useInvocationNodeContext(); - const selector = useMemo(() => createSelector(ctx.selectNodeTemplateOrThrow, (template) => template.title), [ctx]); - return useAppSelector(selector); -}; diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeUserTitleOrThrow.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeUserTitleOrThrow.ts deleted file mode 100644 index 41f7e11746a..00000000000 --- a/invokeai/frontend/web/src/features/nodes/hooks/useNodeUserTitleOrThrow.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { createSelector } from '@reduxjs/toolkit'; -import { useAppSelector } from 'app/store/storeHooks'; -import { useInvocationNodeContext } from 'features/nodes/components/flow/nodes/Invocation/context'; -import { useMemo } from 'react'; - -export const useNodeUserTitleOrThrow = () => { - const ctx = useInvocationNodeContext(); - const selector = useMemo(() => createSelector(ctx.selectNodeDataOrThrow, (data) => data.label), [ctx]); - return useAppSelector(selector); -}; diff --git a/invokeai/frontend/web/src/features/nodes/store/selectors.ts b/invokeai/frontend/web/src/features/nodes/store/selectors.ts index f3c35dc220f..5e8b734fbe5 100644 --- a/invokeai/frontend/web/src/features/nodes/store/selectors.ts +++ b/invokeai/frontend/web/src/features/nodes/store/selectors.ts @@ -1,7 +1,6 @@ import type { Selector } from '@reduxjs/toolkit'; import { createSelector } from '@reduxjs/toolkit'; import type { RootState } from 'app/store/store'; -import { uniqBy } from 'es-toolkit/compat'; import { getElement } from 'features/nodes/components/sidePanel/builder/form-manipulation'; import type { NodesState } from 'features/nodes/store/types'; import type { FieldInputInstance } from 'features/nodes/types/field'; @@ -22,7 +21,7 @@ const selectInvocationNode = (nodesSlice: NodesState, nodeId: string): Invocatio return node; }; -export const selectNodeData = (nodesSlice: NodesState, nodeId: string): InvocationNodeData => { +const selectNodeData = (nodesSlice: NodesState, nodeId: string): InvocationNodeData => { const node = selectInvocationNode(nodesSlice, nodeId); return node.data; }; @@ -94,13 +93,6 @@ export const selectFormInitialValues = createNodesSelector((workflow) => workflo export const selectNodeFieldElements = createNodesSelector((workflow) => Object.values(workflow.form.elements).filter(isNodeFieldElement) ); -export const selectWorkflowFormNodeFieldFieldIdentifiersDeduped = createSelector( - selectNodeFieldElements, - (nodeFieldElements) => - uniqBy(nodeFieldElements, (el) => 
`${el.data.fieldIdentifier.nodeId}-${el.data.fieldIdentifier.fieldName}`).map( - (el) => el.data.fieldIdentifier - ) -); export const buildSelectElement = (id: string) => createNodesSelector((workflow) => workflow.form?.elements[id]); export const buildSelectWorkflowFormNodeElement = (nodeId: string, fieldName: string) => diff --git a/invokeai/frontend/web/src/features/nodes/store/workflowLibraryModal.ts b/invokeai/frontend/web/src/features/nodes/store/workflowLibraryModal.ts index bf490f67dc7..53de0220135 100644 --- a/invokeai/frontend/web/src/features/nodes/store/workflowLibraryModal.ts +++ b/invokeai/frontend/web/src/features/nodes/store/workflowLibraryModal.ts @@ -3,4 +3,4 @@ import { buildUseDisclosure } from 'common/hooks/useBoolean'; /** * Tracks the state for the workflow library modal. */ -export const [useWorkflowLibraryModal, $isWorkflowLibraryModalOpen] = buildUseDisclosure(false); +export const [useWorkflowLibraryModal] = buildUseDisclosure(false); diff --git a/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts b/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts index a1d8b894647..ee85a03c18f 100644 --- a/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts +++ b/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts @@ -3,8 +3,6 @@ import { createSelector, createSlice } from '@reduxjs/toolkit'; import type { RootState } from 'app/store/store'; import type { SliceConfig } from 'app/store/types'; import { type WorkflowMode, zWorkflowMode } from 'features/nodes/store/types'; -import type { WorkflowCategory } from 'features/nodes/types/workflow'; -import { atom, computed } from 'nanostores'; import { type SQLiteDirection, type WorkflowRecordOrderBy, @@ -13,7 +11,7 @@ import { } from 'services/api/types'; import z from 'zod'; -const zWorkflowLibraryView = z.enum(['recent', 'yours', 'private', 'shared', 'defaults', 'published']); +const zWorkflowLibraryView = z.enum(['recent', 'yours', 'defaults']); export type WorkflowLibraryView = z.infer; const zWorkflowLibraryState = z.object({ @@ -105,11 +103,8 @@ export const selectWorkflowLibraryDirection = createWorkflowLibrarySelector(({ d export const selectWorkflowLibrarySelectedTags = createWorkflowLibrarySelector(({ selectedTags }) => selectedTags); export const selectWorkflowLibraryView = createWorkflowLibrarySelector(({ view }) => view); -export const DEFAULT_WORKFLOW_LIBRARY_CATEGORIES = ['user', 'default'] satisfies WorkflowCategory[]; -export const $workflowLibraryCategoriesOptions = atom(DEFAULT_WORKFLOW_LIBRARY_CATEGORIES); - export type WorkflowTagCategory = { categoryTKey: string; tags: Array<{ label: string; recommended?: boolean }> }; -export const DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES: WorkflowTagCategory[] = [ +export const WORKFLOW_LIBRARY_TAG_CATEGORIES: WorkflowTagCategory[] = [ { categoryTKey: 'Industry', tags: [{ label: 'Architecture' }, { label: 'Fashion' }, { label: 'Game Dev' }, { label: 'Food' }], @@ -124,18 +119,7 @@ export const DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES: WorkflowTagCategory[] = [ }, { categoryTKey: 'Tech Showcase', tags: [{ label: 'Control' }, { label: 'Reference Image' }] }, ]; -export const $workflowLibraryTagCategoriesOptions = atom( - DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES -); -export const $workflowLibraryTagOptions = computed($workflowLibraryTagCategoriesOptions, (tagCategories) => - tagCategories.flatMap(({ tags }) => tags) -); +export const WORKFLOW_LIBRARY_TAGS = WORKFLOW_LIBRARY_TAG_CATEGORIES.flatMap(({ tags 
}) => tags); -export type WorkflowSortOption = 'opened_at' | 'created_at' | 'updated_at' | 'name'; -export const DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS: WorkflowSortOption[] = [ - 'opened_at', - 'created_at', - 'updated_at', - 'name', -]; -export const $workflowLibrarySortOptions = atom(DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS); +type WorkflowSortOption = 'opened_at' | 'created_at' | 'updated_at' | 'name'; +export const WORKFLOW_LIBRARY_SORT_OPTIONS: WorkflowSortOption[] = ['opened_at', 'created_at', 'updated_at', 'name']; diff --git a/invokeai/frontend/web/src/features/nodes/types/field.ts b/invokeai/frontend/web/src/features/nodes/types/field.ts index 5b8634daa2b..356b7656609 100644 --- a/invokeai/frontend/web/src/features/nodes/types/field.ts +++ b/invokeai/frontend/web/src/features/nodes/types/field.ts @@ -168,8 +168,6 @@ const zBoardFieldType = zFieldTypeBase.extend({ name: z.literal('BoardField'), originalType: zStatelessFieldType.optional(), }); -export const isBoardFieldType = (fieldType: FieldType): fieldType is z.infer => - fieldType.name === zBoardFieldType.shape.name.value; const zColorFieldType = zFieldTypeBase.extend({ name: z.literal('ColorField'), diff --git a/invokeai/frontend/web/src/features/nodes/types/invocation.ts b/invokeai/frontend/web/src/features/nodes/types/invocation.ts index 96a52a2377d..3f5727dc6b3 100644 --- a/invokeai/frontend/web/src/features/nodes/types/invocation.ts +++ b/invokeai/frontend/web/src/features/nodes/types/invocation.ts @@ -139,8 +139,6 @@ export const isGeneratorNodeType = (type: string) => export const isBatchNode = (node: InvocationNode) => isBatchNodeType(node.data.type); -export const isGeneratorNode = (node: InvocationNode) => isGeneratorNodeType(node.data.type); - export const isExecutableNode = (node: InvocationNode) => { - return !isBatchNode(node) && !isGeneratorNode(node); + return !isBatchNode(node); }; diff --git a/invokeai/frontend/web/src/features/nodes/types/workflow.ts b/invokeai/frontend/web/src/features/nodes/types/workflow.ts index d0ce39970a5..66e69ec5859 100644 --- a/invokeai/frontend/web/src/features/nodes/types/workflow.ts +++ b/invokeai/frontend/web/src/features/nodes/types/workflow.ts @@ -14,7 +14,7 @@ const zXYPosition = z .default({ x: 0, y: 0 }); export type XYPosition = z.infer; -const zWorkflowCategory = z.enum(['user', 'default', 'project']); +const zWorkflowCategory = z.enum(['user', 'default']); export type WorkflowCategory = z.infer; // #endregion diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx index 7fa250caad4..a237896c676 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/BboxAspectRatioSelect.tsx @@ -3,11 +3,10 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { bboxAspectRatioIdChanged } from 'features/controlLayers/store/canvasSlice'; import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice'; -import { selectIsFluxKontext } from 'features/controlLayers/store/paramsSlice'; import { selectAspectRatioID } from 'features/controlLayers/store/selectors'; -import { isAspectRatioID, zAspectRatioID, zFluxKontextAspectRatioID } from 'features/controlLayers/store/types'; +import { isAspectRatioID, 
zAspectRatioID } from 'features/controlLayers/store/types'; import type { ChangeEventHandler } from 'react'; -import { memo, useCallback, useMemo } from 'react'; +import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiCaretDownBold } from 'react-icons/pi'; @@ -16,14 +15,6 @@ export const BboxAspectRatioSelect = memo(() => { const dispatch = useAppDispatch(); const id = useAppSelector(selectAspectRatioID); const isStaging = useCanvasIsStaging(); - const isFluxKontext = useAppSelector(selectIsFluxKontext); - const options = useMemo(() => { - if (isFluxKontext) { - return zFluxKontextAspectRatioID.options; - } - // All other models - return zAspectRatioID.options; - }, [isFluxKontext]); const onChange = useCallback>( (e) => { @@ -41,7 +32,7 @@ export const BboxAspectRatioSelect = memo(() => { {t('parameters.aspect')} }> - {options.map((ratio) => ( + {zAspectRatioID.options.map((ratio) => ( diff --git a/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx b/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx deleted file mode 100644 index 243aee60f16..00000000000 --- a/invokeai/frontend/web/src/features/queue/components/CancelAllExceptCurrentButton.tsx +++ /dev/null @@ -1,27 +0,0 @@ -import type { ButtonProps } from '@invoke-ai/ui-library'; -import { Button } from '@invoke-ai/ui-library'; -import { useCancelAllExceptCurrentQueueItemDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiXCircle } from 'react-icons/pi'; - -export const CancelAllExceptCurrentButton = memo((props: ButtonProps) => { - const { t } = useTranslation(); - const api = useCancelAllExceptCurrentQueueItemDialog(); - - return ( - - ); -}); - -CancelAllExceptCurrentButton.displayName = 'CancelAllExceptCurrentButton'; diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts index e6bc69da1f7..c4791289b39 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts @@ -1,5 +1,4 @@ import type { AlertStatus } from '@invoke-ai/ui-library'; -import { createAction } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; import type { AppStore } from 'app/store/store'; import { useAppStore } from 'app/store/storeHooks'; @@ -24,13 +23,10 @@ import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endp import { assert, AssertionError } from 'tsafe'; const log = logger('generation'); -export const enqueueRequestedCanvas = createAction('app/enqueueRequestedCanvas'); const enqueueCanvas = async (store: AppStore, canvasManager: CanvasManager, prepend: boolean) => { const { dispatch, getState } = store; - dispatch(enqueueRequestedCanvas()); - const state = getState(); const destination = selectCanvasDestination(state); diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts index 16e5ad31b5a..f8ae1251e01 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts @@ -1,5 +1,4 @@ import type { AlertStatus } from '@invoke-ai/ui-library'; -import { createAction } from '@reduxjs/toolkit'; import 
{ logger } from 'app/logging/logger'; import type { AppStore } from 'app/store/store'; import { useAppStore } from 'app/store/storeHooks'; @@ -22,13 +21,9 @@ import { assert, AssertionError } from 'tsafe'; const log = logger('generation'); -export const enqueueRequestedGenerate = createAction('app/enqueueRequestedGenerate'); - const enqueueGenerate = async (store: AppStore, prepend: boolean) => { const { dispatch, getState } = store; - dispatch(enqueueRequestedGenerate()); - const state = getState(); const model = state.params.model; diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueUpscaling.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueUpscaling.ts index 01f278d98db..8d27afe20f4 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueUpscaling.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueUpscaling.ts @@ -1,4 +1,3 @@ -import { createAction } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; import type { AppStore } from 'app/store/store'; import { useAppStore } from 'app/store/storeHooks'; @@ -8,15 +7,11 @@ import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/build import { useCallback } from 'react'; import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue'; -export const enqueueRequestedUpscaling = createAction('app/enqueueRequestedUpscaling'); - const log = logger('generation'); const enqueueUpscaling = async (store: AppStore, prepend: boolean) => { const { dispatch, getState } = store; - dispatch(enqueueRequestedUpscaling()); - const state = getState(); const model = state.params.model; diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueWorkflows.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueWorkflows.ts index d9f9cf9b952..7d4698773b5 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueWorkflows.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueWorkflows.ts @@ -1,4 +1,3 @@ -import { createAction } from '@reduxjs/toolkit'; import type { AppDispatch, AppStore, RootState } from 'app/store/store'; import { useAppStore } from 'app/store/storeHooks'; import { groupBy } from 'es-toolkit/compat'; @@ -13,8 +12,6 @@ import { useCallback } from 'react'; import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue'; import type { Batch, EnqueueBatchArg } from 'services/api/types'; -export const enqueueRequestedWorkflows = createAction('app/enqueueRequestedWorkflows'); - const getBatchDataForWorkflowGeneration = async (state: RootState, dispatch: AppDispatch): Promise => { const nodesState = selectNodesSlice(state); const data: Batch['data'] = []; @@ -76,7 +73,6 @@ const getBatchDataForWorkflowGeneration = async (state: RootState, dispatch: App const enqueueWorkflows = async (store: AppStore, templates: Templates, prepend: boolean) => { const { dispatch, getState } = store; - dispatch(enqueueRequestedWorkflows()); const state = getState(); const nodesState = selectNodesSlice(state); const graph = buildNodesGraph(state, templates); diff --git a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetForm/StylePresetTypeField.tsx b/invokeai/frontend/web/src/features/stylePresets/components/StylePresetForm/StylePresetTypeField.tsx deleted file mode 100644 index d1a7e1ae48d..00000000000 --- a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetForm/StylePresetTypeField.tsx +++ /dev/null @@ -1,46 +0,0 @@ -import type { 
ComboboxOnChange } from '@invoke-ai/ui-library'; -import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useStore } from '@nanostores/react'; -import { $stylePresetModalState } from 'features/stylePresets/store/stylePresetModal'; -import { t } from 'i18next'; -import { useCallback, useMemo } from 'react'; -import type { UseControllerProps } from 'react-hook-form'; -import { useController } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; - -import type { StylePresetFormData } from './StylePresetForm'; - -const OPTIONS = [ - { label: t('stylePresets.private'), value: 'user' }, - { label: t('stylePresets.shared'), value: 'project' }, -]; - -export const StylePresetTypeField = (props: UseControllerProps) => { - const { field } = useController(props); - const stylePresetModalState = useStore($stylePresetModalState); - const { t } = useTranslation(); - - const onChange = useCallback( - (v) => { - if (v) { - field.onChange(v.value); - } - }, - [field] - ); - - const value = useMemo(() => { - return OPTIONS.find((opt) => opt.value === field.value); - }, [field.value]); - - return ( - - {t('stylePresets.type')} - - - ); -}; diff --git a/invokeai/frontend/web/src/features/toast/toast.ts b/invokeai/frontend/web/src/features/toast/toast.ts index 210725b69e7..6f05476613d 100644 --- a/invokeai/frontend/web/src/features/toast/toast.ts +++ b/invokeai/frontend/web/src/features/toast/toast.ts @@ -9,7 +9,7 @@ export const toastApi = createStandaloneToast({ }).toast; // Slightly modified version of UseToastOptions -export type ToastConfig = Omit & { +type ToastConfig = Omit & { // Only string - Chakra allows numbers id?: string; }; diff --git a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts index 04c435053b7..b86ba3fa182 100644 --- a/invokeai/frontend/web/src/features/ui/store/uiTypes.ts +++ b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts @@ -1,7 +1,7 @@ import { isPlainObject } from 'es-toolkit'; import { z } from 'zod'; -export const zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue']); +const zTabName = z.enum(['generate', 'canvas', 'upscaling', 'workflows', 'models', 'queue']); export type TabName = z.infer; const zPartialDimensions = z.object({ diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx index 1bdee73d7de..72ca9c309b3 100644 --- a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx +++ b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx @@ -5,7 +5,6 @@ import { AlertDialogFooter, AlertDialogHeader, Button, - Checkbox, Flex, FormControl, FormLabel, @@ -14,7 +13,6 @@ import { import { useStore } from '@nanostores/react'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import { deepClone } from 'common/util/deepClone'; -import { $workflowLibraryCategoriesOptions } from 'features/nodes/store/workflowLibrarySlice'; import type { WorkflowV3 } from 'features/nodes/types/workflow'; import { isDraftWorkflow, useCreateLibraryWorkflow } from 'features/workflowLibrary/hooks/useCreateNewWorkflow'; import { t } from 'i18next'; @@ -83,14 +81,12 @@ export const SaveWorkflowAsDialog = () => { }; const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef: RefObject }) => { - const workflowCategories = 
useStore($workflowLibraryCategoriesOptions); const [name, setName] = useState(() => { if (workflow) { return getInitialName(workflow); } return ''; }); - const [shouldSaveToProject, setShouldSaveToProject] = useState(() => workflowCategories.includes('project')); const { createNewWorkflow } = useCreateLibraryWorkflow(); @@ -100,13 +96,6 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef setName(e.target.value); }, []); - const onChangeCheckbox = useCallback( - (e: ChangeEvent) => { - setShouldSaveToProject(e.target.checked); - }, - [setShouldSaveToProject] - ); - const onClose = useCallback(() => { $workflowToSave.set(null); }, []); @@ -114,7 +103,7 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef const onSave = useCallback(async () => { workflow.id = undefined; workflow.name = name; - workflow.meta.category = shouldSaveToProject ? 'project' : 'user'; + workflow.meta.category = 'user'; // We've just made the workflow a draft, but TS doesn't know that. We need to assert it. assert(isDraftWorkflow(workflow)); @@ -124,7 +113,7 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef onSuccess: onClose, onError: onClose, }); - }, [workflow, name, shouldSaveToProject, createNewWorkflow, onClose]); + }, [workflow, name, createNewWorkflow, onClose]); return ( @@ -137,11 +126,6 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef {t('workflows.workflowName')} - {workflowCategories.includes('project') && ( - - {t('workflows.saveWorkflowToProject')} - - )} diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 1c24f32fc69..7b150ac3572 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -475,7 +475,6 @@ export const { useGetImageWorkflowQuery, useLazyGetImageWorkflowQuery, useUploadImageMutation, - useCreateImageUploadEntryMutation, useClearIntermediatesMutation, useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation, @@ -529,25 +528,6 @@ export const getImageDTO = ( return req.unwrap(); }; -/** - * Imperative RTKQ helper to fetch an image's metadata. - * @param image_name The name of the image - * @param options The options for the query. By default, the query will not subscribe to the store. - * @raises Error if the image metadata is not found or there is an error fetching the image metadata. Images without - * metadata will return undefined. 
- */ -export const getImageMetadata = ( - image_name: string, - options?: Parameters[1] -): Promise => { - const _options = { - subscribe: false, - ...options, - }; - const req = getStore().dispatch(imagesApi.endpoints.getImageMetadata.initiate(image_name, _options)); - return req.unwrap(); -}; - export const uploadImage = (arg: UploadImageArg): Promise => { const { dispatch } = getStore(); const req = dispatch(imagesApi.endpoints.uploadImage.initiate(arg, { track: false })); diff --git a/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts b/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts index 25d80dc47e2..f04f0a4ded6 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/stylePresets.ts @@ -1,4 +1,3 @@ -import { getStore } from 'app/store/nanostores/store'; import type { paths } from 'services/api/schema'; import type { S } from 'services/api/types'; @@ -129,22 +128,4 @@ export const { useImportStylePresetsMutation, } = stylePresetsApi; -/** - * Imperative RTKQ helper to fetch a style preset. - * @param style_preset_id The id of the style preset to fetch - * @param options The options for the query. By default, the query will not subscribe to the store. - * @raises Error if the style preset is not found or there is an error fetching the style preset - */ -export const getStylePreset = ( - style_preset_id: string, - options?: Parameters[1] -): Promise => { - const _options = { - subscribe: false, - ...options, - }; - const req = getStore().dispatch(stylePresetsApi.endpoints.getStylePreset.initiate(style_preset_id, _options)); - return req.unwrap(); -}; - export const selectListStylePresetsRequestState = stylePresetsApi.endpoints.listStylePresets.select(); diff --git a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts index 70cbc76044f..b9a02204fc4 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts @@ -148,13 +148,6 @@ export const workflowsApi = api.injectEndpoints({ }), invalidatesTags: (result, error, workflow_id) => [{ type: 'Workflow', id: workflow_id }], }), - unpublishWorkflow: build.mutation({ - query: (workflow_id) => ({ - url: buildWorkflowsUrl(`i/${workflow_id}/unpublish`), - method: 'POST', - }), - invalidatesTags: (result, error, workflow_id) => [{ type: 'Workflow', id: workflow_id }], - }), }), }); @@ -170,5 +163,4 @@ export const { useListWorkflowsInfiniteInfiniteQuery, useSetWorkflowThumbnailMutation, useDeleteWorkflowThumbnailMutation, - useUnpublishWorkflowMutation, } = workflowsApi; diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index c06c58c577c..4da374c26f1 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -24292,7 +24292,7 @@ export type components = { * WorkflowCategory * @enum {string} */ - WorkflowCategory: "user" | "default" | "project"; + WorkflowCategory: "user" | "default"; /** WorkflowMeta */ WorkflowMeta: { /** diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 96d15409b85..d5f7eedaf4c 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -102,7 +102,6 @@ type CLIPVisionDiffusersConfig = Extract; export type 
FLUXReduxModelConfig = Extract; type ApiModelConfig = Extract; -export type VideoApiModelConfig = Extract; type UnknownModelConfig = Extract; export type FLUXKontextModelConfig = MainModelConfig; export type ChatGPT4oModelConfig = ApiModelConfig; diff --git a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts index b7a0c73df1a..02d18bf6d3e 100644 --- a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts +++ b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts @@ -57,20 +57,3 @@ export const getTagsToInvalidateForImageMutation = (image_names: string[]): ApiT return tags; }; - -export const getTagsToInvalidateForVideoMutation = (video_ids: string[]): ApiTagDescription[] => { - const tags: ApiTagDescription[] = []; - - for (const video_id of video_ids) { - tags.push({ - type: 'Video', - id: video_id, - }); - // tags.push({ - // type: 'VideoMetadata', - // id: video_id, - // }); - } - - return tags; -}; From b3594c5c929257d03ca9239577e3d6c724449004 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:24:47 +1100 Subject: [PATCH 03/20] tidy: removing unused code paths 3 --- invokeai/frontend/web/public/locales/en.json | 54 ++----------------- .../src/app/components/GlobalImageHotkeys.tsx | 2 +- .../frontend/web/src/app/logging/logger.ts | 1 - .../listeners/appStarted.ts | 4 +- .../listeners/boardIdSelected.ts | 10 ++-- .../frontend/web/src/common/hooks/focus.ts | 1 - .../web/src/common/hooks/useGlobalHotkeys.ts | 6 +-- .../features/deleteImageModal/store/state.ts | 10 ++-- .../ContextMenuItemLocateInGalery.tsx | 2 +- .../MenuItems/ContextMenuItemOpenInViewer.tsx | 4 +- .../MultipleSelectionMenuItems.tsx | 10 ++-- .../components/ImageGrid/GalleryImage.tsx | 20 +++---- .../GalleryItemOpenInViewerIconButton.tsx | 4 +- .../ImageGrid/GallerySelectionCountTag.tsx | 3 +- .../ImageViewer/CurrentImageButtons.tsx | 2 +- .../ImageViewer/ImageComparison.tsx | 2 +- .../components/ImageViewer/ImageViewer.tsx | 2 +- .../ImageViewer/ImageViewerToolbar.tsx | 2 +- .../gallery/components/ImageViewer/common.ts | 2 +- .../gallery/components/NewGallery.tsx | 10 ++-- .../components/NextPrevItemButtons.tsx | 14 ++--- .../gallery/store/gallerySelectors.ts | 3 -- .../features/gallery/store/gallerySlice.ts | 20 +++---- .../web/src/features/gallery/store/types.ts | 4 +- .../nodes/CurrentImage/CurrentImageNode.tsx | 2 +- .../components/VideosModal/VideoCard.tsx | 10 +--- .../components/VideosModal/VideosModal.tsx | 31 +---------- .../VideosModal/VideosModalButton.tsx | 6 +-- .../web/src/features/system/store/actions.ts | 4 -- .../services/events/onInvocationComplete.tsx | 6 +-- 30 files changed, 71 insertions(+), 180 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/system/store/actions.ts diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index aacd5c728f1..d88ba697c58 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -43,8 +43,6 @@ "move": "Move", "movingImagesToBoard_one": "Moving {{count}} image to board:", "movingImagesToBoard_other": "Moving {{count}} images to board:", - "movingVideosToBoard_one": "Moving {{count}} video to board:", - "movingVideosToBoard_other": "Moving {{count}} videos to board:", "myBoard": "My Board", "noBoards": "No {{boardType}} Boards", "noMatching": "No matching Boards", @@ -61,8 +59,6 @@ 
"imagesWithCount_other": "{{count}} images", "assetsWithCount_one": "{{count}} asset", "assetsWithCount_other": "{{count}} assets", - "videosWithCount_one": "{{count}} video", - "videosWithCount_other": "{{count}} videos", "updateBoardError": "Error updating board" }, "accordions": { @@ -375,9 +371,6 @@ "deleteImage_one": "Delete Image", "deleteImage_other": "Delete {{count}} Images", "deleteImagePermanent": "Deleted images cannot be restored.", - "deleteVideo_one": "Delete Video", - "deleteVideo_other": "Delete {{count}} Videos", - "deleteVideoPermanent": "Deleted videos cannot be restored.", "displayBoardSearch": "Board Search", "displaySearch": "Image Search", "download": "Download", @@ -397,7 +390,6 @@ "sortDirection": "Sort Direction", "showStarredImagesFirst": "Show Starred Images First", "noImageSelected": "No Image Selected", - "noVideoSelected": "No Video Selected", "noImagesInGallery": "No Images to Display", "starImage": "Star", "unstarImage": "Unstar", @@ -429,9 +421,7 @@ "openViewer": "Open Viewer", "closeViewer": "Close Viewer", "move": "Move", - "useForPromptGeneration": "Use for Prompt Generation", - "videos": "Videos", - "videosTab": "Videos you've created and saved within Invoke." + "useForPromptGeneration": "Use for Prompt Generation" }, "hotkeys": { "hotkeys": "Hotkeys", @@ -476,10 +466,6 @@ "title": "Select the Queue Tab", "desc": "Selects the Queue tab." }, - "selectVideoTab": { - "title": "Select the Video Tab", - "desc": "Selects the Video tab." - }, "focusPrompt": { "title": "Focus Prompt", "desc": "Move cursor focus to the positive prompt." @@ -514,9 +500,6 @@ "key": "1" } }, - "video": { - "title": "Video" - }, "canvas": { "title": "Canvas", "selectBrushTool": { @@ -823,13 +806,11 @@ "guidance": "Guidance", "height": "Height", "imageDetails": "Image Details", - "videoDetails": "Video Details", "imageDimensions": "Image Dimensions", "metadata": "Metadata", "model": "Model", "negativePrompt": "Negative Prompt", "noImageDetails": "No image details found", - "noVideoDetails": "No video details found", "noMetaData": "No metadata found", "noRecallParameters": "No parameters to recall found", "parameterSet": "Parameter {{parameter}} set", @@ -847,11 +828,7 @@ "vae": "VAE", "width": "Width", "workflow": "Workflow", - "canvasV2Metadata": "Canvas Layers", - "videoModel": "Model", - "videoDuration": "Duration", - "videoAspectRatio": "Aspect Ratio", - "videoResolution": "Resolution" + "canvasV2Metadata": "Canvas Layers" }, "modelManager": { "active": "active", @@ -1269,13 +1246,9 @@ "images": "Images", "images_withCount_one": "Image", "images_withCount_other": "Images", - "videos_withCount_one": "Video", - "videos_withCount_other": "Videos", "infillMethod": "Infill Method", "infillColorValue": "Fill Color", "info": "Info", - "startingFrameImage": "Start Frame", - "startingFrameImageAspectRatioWarning": "Image aspect ratio does not match the video aspect ratio ({{videoAspectRatio}}). This could lead to unexpected cropping during video generation.", "invoke": { "addingImagesTo": "Adding images to", "modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.", @@ -1322,8 +1295,7 @@ "noNodesInGraph": "No nodes in graph", "systemDisconnected": "System disconnected", "promptExpansionPending": "Prompt expansion in progress", - "promptExpansionResultPending": "Please accept or discard your prompt expansion result", - "videoIsDisabled": "Video generation is not enabled for {{accountType}} accounts." 
+ "promptExpansionResultPending": "Please accept or discard your prompt expansion result" }, "maskBlur": "Mask Blur", "negativePromptPlaceholder": "Negative Prompt", @@ -1341,11 +1313,9 @@ "seamlessXAxis": "Seamless X Axis", "seamlessYAxis": "Seamless Y Axis", "seed": "Seed", - "videoActions": "Video Actions", "imageActions": "Image Actions", "sendToCanvas": "Send To Canvas", "sendToUpscale": "Send To Upscale", - "sendToVideo": "Send To Video", "showOptionsPanel": "Show Side Panel (O or T)", "shuffle": "Shuffle Seed", "steps": "Steps", @@ -1357,7 +1327,6 @@ "postProcessing": "Post-Processing (Shift + U)", "processImage": "Process Image", "upscaling": "Upscaling", - "video": "Video", "useAll": "Use All", "useSize": "Use Size", "useCpuNoise": "Use CPU Noise", @@ -2660,30 +2629,19 @@ "queue": "Queue", "upscaling": "Upscaling", "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)", - "video": "Video", "gallery": "Gallery" }, "panels": { "launchpad": "Launchpad", "workflowEditor": "Workflow Editor", "imageViewer": "Viewer", - "canvas": "Canvas", - "video": "Video" + "canvas": "Canvas" }, "launchpad": { "workflowsTitle": "Go deep with Workflows.", "upscalingTitle": "Upscale and add detail.", "canvasTitle": "Edit and refine on Canvas.", "generateTitle": "Generate images from text prompts.", - "videoTitle": "Generate videos from text prompts.", - "video": { - "startingFrameCalloutTitle": "Add a Starting Frame", - "startingFrameCalloutDesc": "Add an image to control the first frame of your video." - }, - "addStartingFrame": { - "title": "Add a Starting Frame", - "description": "Add an image to control the first frame of your video." - }, "modelGuideText": "Want to learn what prompts work best for each model?", "modelGuideLink": "Check out our Model Guide.", "createNewWorkflowFromScratch": "Create a new Workflow from scratch", @@ -2758,10 +2716,6 @@ } } }, - "video": { - "noVideoSelected": "No video selected", - "selectFromGallery": "Select a video from the gallery to play" - }, "system": { "enableLogging": "Enable Logging", "logLevel": { diff --git a/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx b/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx index c86faa50bbf..dd1595bdd74 100644 --- a/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx +++ b/invokeai/frontend/web/src/app/components/GlobalImageHotkeys.tsx @@ -16,7 +16,7 @@ import type { ImageDTO } from 'services/api/types'; export const GlobalImageHotkeys = memo(() => { useAssertSingleton('GlobalImageHotkeys'); const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const imageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null); + const imageDTO = useImageDTO(lastSelectedItem ?? 
null); if (!imageDTO) { return null; diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index 4e024f1516a..7c6b4ecdde0 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -26,7 +26,6 @@ export const zLogNamespace = z.enum([ 'system', 'queue', 'workflows', - 'video', ]); export type LogNamespace = z.infer; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts index 5ed60447aae..794d1a1af60 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts @@ -1,7 +1,7 @@ import { createAction } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/store'; import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { itemSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; export const appStarted = createAction('app/appStarted'); @@ -23,7 +23,7 @@ export const addAppStartedListener = (startAppListening: AppStartListening) => { return; } if (payload.image_names[0]) { - dispatch(itemSelected({ type: 'image', id: payload.image_names[0] })); + dispatch(imageSelected(payload.image_names[0])); } } }, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts index a408a94c041..9fd777fb29b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts @@ -1,7 +1,7 @@ import { isAnyOf } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/store'; import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors'; -import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice'; +import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => { @@ -29,7 +29,7 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening) ); if (!isSuccess) { - dispatch(itemSelected(null)); + dispatch(imageSelected(null)); return; } @@ -38,11 +38,7 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening) const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null; - if (imageToSelect) { - dispatch(itemSelected({ type: 'image', id: imageToSelect })); - } else { - dispatch(itemSelected(null)); - } + dispatch(imageSelected(imageToSelect ?? 
null)); }, }); }; diff --git a/invokeai/frontend/web/src/common/hooks/focus.ts b/invokeai/frontend/web/src/common/hooks/focus.ts index 8a04608a13d..4e093c5c631 100644 --- a/invokeai/frontend/web/src/common/hooks/focus.ts +++ b/invokeai/frontend/web/src/common/hooks/focus.ts @@ -37,7 +37,6 @@ const REGION_NAMES = [ 'workflows', 'progress', 'settings', - 'video', ] as const; /** * The names of the focus regions. diff --git a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts index 69b0b2b0fc2..dd43c0b0947 100644 --- a/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts +++ b/invokeai/frontend/web/src/common/hooks/useGlobalHotkeys.ts @@ -131,11 +131,7 @@ export const useGlobalHotkeys = () => { if (!selection.length) { return; } - if (selection.every(({ type }) => type === 'image')) { - deleteImageModalApi.delete(selection.map((s) => s.id)); - } else { - // no-op, we expect selections to always be only images or only video - } + deleteImageModalApi.delete(selection); }, dependencies: [getState, deleteImageModalApi], }); diff --git a/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts b/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts index 38aa8b039f3..c50aa9465f5 100644 --- a/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts +++ b/invokeai/frontend/web/src/features/deleteImageModal/store/state.ts @@ -12,7 +12,7 @@ import { selectCanvasSlice } from 'features/controlLayers/store/selectors'; import type { CanvasState, RefImagesState } from 'features/controlLayers/store/types'; import type { ImageUsage } from 'features/deleteImageModal/store/types'; import { selectGetImageNamesQueryArgs } from 'features/gallery/store/gallerySelectors'; -import { itemSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected } from 'features/gallery/store/gallerySlice'; import { fieldImageCollectionValueChanged, fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { selectNodesSlice } from 'features/nodes/store/selectors'; import type { NodesState } from 'features/nodes/store/types'; @@ -89,14 +89,12 @@ const handleDeletions = async (image_names: string[], store: AppStore) => { const newImageNames = data?.image_names.filter((name) => !deleted_images.includes(name)) || []; const newSelectedImage = newImageNames[index ?? 0] || null; - const galleryImageNames = state.gallery.selection.map((s) => s.id); - - if (intersection(galleryImageNames, image_names).length > 0) { + if (intersection(state.gallery.selection, image_names).length > 0) { if (newSelectedImage) { // Some selected images were deleted, clear selection - dispatch(itemSelected({ type: 'image', id: newSelectedImage })); + dispatch(imageSelected(newSelectedImage)); } else { - dispatch(itemSelected(null)); + dispatch(imageSelected(null)); } } diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx index 85a42299fea..0a557710975 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemLocateInGalery.tsx @@ -30,7 +30,7 @@ export const ContextMenuItemLocateInGalery = memo(() => { boardIdSelected({ boardId: imageDTO.board_id ?? 
'none', select: { - selection: [{ type: 'image', id: imageDTO.image_name }], + selection: [imageDTO.image_name], galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 'images' : 'assets', }, }) diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer.tsx index e09b256e85a..7a58f6e007b 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemOpenInViewer.tsx @@ -1,7 +1,7 @@ import { useAppDispatch } from 'app/store/storeHooks'; import { IconMenuItem } from 'common/components/IconMenuItem'; import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext'; -import { imageToCompareChanged, itemSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { VIEWER_PANEL_ID } from 'features/ui/layouts/shared'; import { memo, useCallback } from 'react'; @@ -14,7 +14,7 @@ export const ContextMenuItemOpenInViewer = memo(() => { const imageDTO = useImageDTOContext(); const onClick = useCallback(() => { dispatch(imageToCompareChanged(null)); - dispatch(itemSelected({ type: 'image', id: imageDTO.image_name })); + dispatch(imageSelected(imageDTO.image_name)); navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID); }, [dispatch, imageDTO]); diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx index 0e086ad5e4e..d148332943c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx @@ -22,24 +22,24 @@ const MultipleSelectionMenuItems = () => { const [bulkDownload] = useBulkDownloadImagesMutation(); const handleChangeBoard = useCallback(() => { - dispatch(imagesToChangeSelected(selection.map((s) => s.id))); + dispatch(imagesToChangeSelected(selection)); dispatch(isModalOpenChanged(true)); }, [dispatch, selection]); const handleDeleteSelection = useCallback(() => { - deleteImageModal.delete(selection.map((s) => s.id)); + deleteImageModal.delete(selection); }, [deleteImageModal, selection]); const handleStarSelection = useCallback(() => { - starImages({ image_names: selection.map((s) => s.id) }); + starImages({ image_names: selection }); }, [starImages, selection]); const handleUnstarSelection = useCallback(() => { - unstarImages({ image_names: selection.map((s) => s.id) }); + unstarImages({ image_names: selection }); }, [unstarImages, selection]); const handleBulkDownload = useCallback(() => { - bulkDownload({ image_names: selection.map((s) => s.id) }); + bulkDownload({ image_names: selection }); }, [selection, bulkDownload]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx index 764201222b5..ccd58992ef6 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx +++ 
b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx @@ -46,7 +46,7 @@ const buildOnClick = if (imageNames.length === 0) { // For basic click without modifiers, we can still set selection if (!shiftKey && !ctrlKey && !metaKey && !altKey) { - dispatch(selectionChanged([{ type: 'image', id: imageName }])); + dispatch(selectionChanged([imageName])); } return; } @@ -61,7 +61,7 @@ const buildOnClick = } } else if (shiftKey) { const rangeEndImageName = imageName; - const lastSelectedImage = selection.at(-1)?.id; + const lastSelectedImage = selection.at(-1); const lastClickedIndex = imageNames.findIndex((name) => name === lastSelectedImage); const currentClickedIndex = imageNames.findIndex((name) => name === rangeEndImageName); if (lastClickedIndex > -1 && currentClickedIndex > -1) { @@ -72,16 +72,16 @@ const buildOnClick = if (currentClickedIndex < lastClickedIndex) { imagesToSelect.reverse(); } - dispatch(selectionChanged(uniq(selection.concat(imagesToSelect.map((name) => ({ type: 'image', id: name })))))); + dispatch(selectionChanged(uniq(selection.concat(imagesToSelect)))); } } else if (ctrlKey || metaKey) { - if (selection.some((n) => n.id === imageName) && selection.length > 1) { - dispatch(selectionChanged(uniq(selection.filter((n) => n.id !== imageName)))); + if (selection.some((n) => n === imageName) && selection.length > 1) { + dispatch(selectionChanged(uniq(selection.filter((n) => n !== imageName)))); } else { - dispatch(selectionChanged(uniq(selection.concat({ type: 'image', id: imageName })))); + dispatch(selectionChanged(uniq(selection.concat(imageName)))); } } else { - dispatch(selectionChanged([{ type: 'image', id: imageName }])); + dispatch(selectionChanged([imageName])); } }; @@ -98,7 +98,7 @@ export const GalleryImage = memo(({ imageDTO }: Props) => { ); const isSelectedForCompare = useAppSelector(selectIsSelectedForCompare); const selectIsSelected = useMemo( - () => createSelector(selectGallerySlice, (gallery) => gallery.selection.some((s) => s.id === imageDTO.image_name)), + () => createSelector(selectGallerySlice, (gallery) => gallery.selection.some((n) => n === imageDTO.image_name)), [imageDTO.image_name] ); const isSelected = useAppSelector(selectIsSelected); @@ -118,9 +118,9 @@ export const GalleryImage = memo(({ imageDTO }: Props) => { // When we have multiple images selected, and the dragged image is part of the selection, initiate a // multi-image drag. 
- if (selection.length > 1 && selection.some((s) => s.id === imageDTO.image_name)) { + if (selection.length > 1 && selection.some((n) => n === imageDTO.image_name)) { return multipleImageDndSource.getData({ - image_names: selection.map((s) => s.id), + image_names: selection, board_id: boardId, }); } diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx index ae7821a3f1d..21641d69fcd 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemOpenInViewerIconButton.tsx @@ -1,6 +1,6 @@ import { useAppDispatch } from 'app/store/storeHooks'; import { DndImageIcon } from 'features/dnd/DndImageIcon'; -import { imageToCompareChanged, itemSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected, imageToCompareChanged } from 'features/gallery/store/gallerySlice'; import { navigationApi } from 'features/ui/layouts/navigation-api'; import { VIEWER_PANEL_ID } from 'features/ui/layouts/shared'; import { memo, useCallback } from 'react'; @@ -18,7 +18,7 @@ export const GalleryItemOpenInViewerIconButton = memo(({ imageDTO }: Props) => { const onClick = useCallback(() => { dispatch(imageToCompareChanged(null)); - dispatch(itemSelected({ type: 'image', id: imageDTO.image_name })); + dispatch(imageSelected(imageDTO.image_name)); navigationApi.focusPanelInActiveTab(VIEWER_PANEL_ID); }, [dispatch, imageDTO]); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx index 0939e168e51..28c1c689396 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GallerySelectionCountTag.tsx @@ -15,8 +15,7 @@ export const GallerySelectionCountTag = memo(() => { const isGalleryFocused = useIsRegionFocused('gallery'); const onSelectPage = useCallback(() => { - const selection = imageNames.map((name) => ({ type: 'image' as const, id: name })); - dispatch(selectionChanged(selection)); + dispatch(selectionChanged(imageNames)); }, [dispatch, imageNames]); useRegisteredHotkeys({ diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx index 603768e693a..bd9dc31a570 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/CurrentImageButtons.tsx @@ -51,7 +51,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) = boardIdSelected({ boardId: imageDTO.board_id ?? 'none', select: { - selection: [{ type: 'image', id: imageDTO.image_name }], + selection: [imageDTO.image_name], galleryView: IMAGE_CATEGORIES.includes(imageDTO.image_category) ? 
'images' : 'assets', }, }) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx index e3bf51bbdbf..80dbad347c7 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageComparison.tsx @@ -40,7 +40,7 @@ ImageComparisonContent.displayName = 'ImageComparisonContent'; export const ImageComparison = memo(() => { const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const lastSelectedImageDTO = useImageDTO(lastSelectedItem?.id); + const lastSelectedImageDTO = useImageDTO(lastSelectedItem); const comparisonImageDTO = useImageDTO(useAppSelector(selectImageToCompare)); const [rect, setRect] = useState(null); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx index 72d4a79e890..ce9795ee8b0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx @@ -16,7 +16,7 @@ export const ImageViewer = memo(() => { const { t } = useTranslation(); const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const lastSelectedImageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null); + const lastSelectedImageDTO = useImageDTO(lastSelectedItem ?? null); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx index 7e5f9cd9cea..b963f5a80d6 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewerToolbar.tsx @@ -10,7 +10,7 @@ import { ToggleProgressButton } from './ToggleProgressButton'; export const ImageViewerToolbar = memo(() => { const lastSelectedItem = useAppSelector(selectLastSelectedItem); - const imageDTO = useImageDTO(lastSelectedItem?.id); + const imageDTO = useImageDTO(lastSelectedItem); return ( diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts index 0953a96156b..31bb1648b8f 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts +++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/common.ts @@ -65,7 +65,7 @@ export const getSecondImageDims = ( return { width, height }; }; export const selectComparisonImages = createMemoizedSelector(selectGallerySlice, (gallerySlice) => { - const firstImage = gallerySlice.selection.slice(-1)[0]?.id ?? null; + const firstImage = gallerySlice.selection.at(-1) ?? null; const secondImage = gallerySlice.imageToCompare; return { firstImage, secondImage }; }); diff --git a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx index 026319392c8..d586dc979d9 100644 --- a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx @@ -126,8 +126,8 @@ const useKeyboardNavigation = ( const imageName = event.altKey ? 
// When the user holds alt, we are changing the image to compare - if no image to compare is currently selected, // we start from the last selected image - (selectImageToCompare(state) ?? selectLastSelectedItem(state)?.id) - : selectLastSelectedItem(state)?.id; + (selectImageToCompare(state) ?? selectLastSelectedItem(state)) + : selectLastSelectedItem(state); const currentIndex = getItemIndex(imageName ?? null, imageNames); @@ -174,7 +174,7 @@ const useKeyboardNavigation = ( if (event.altKey) { dispatch(imageToCompareChanged(newImageName)); } else { - dispatch(selectionChanged([{ type: 'image', id: newImageName }])); + dispatch(selectionChanged([newImageName])); } } } @@ -261,7 +261,7 @@ const useKeepSelectedImageInView = ( const selection = useAppSelector(selectSelection); useEffect(() => { - const targetImageName = selection.at(-1)?.id; + const targetImageName = selection.at(-1); const virtuosoGridHandle = virtuosoRef.current; const rootEl = rootRef.current; const range = rangeRef.current; @@ -280,7 +280,7 @@ const useStarImageHotkey = () => { const lastSelectedItem = useAppSelector(selectLastSelectedItem); const selectionCount = useAppSelector(selectSelectionCount); const isGalleryFocused = useIsRegionFocused('gallery'); - const imageDTO = useImageDTO(lastSelectedItem?.id); + const imageDTO = useImageDTO(lastSelectedItem); const [starImages] = useStarImagesMutation(); const [unstarImages] = useUnstarImagesMutation(); diff --git a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx index b59965c28e3..96b3699cafb 100644 --- a/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/NextPrevItemButtons.tsx @@ -3,7 +3,7 @@ import { Box, IconButton } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { clamp } from 'es-toolkit/compat'; import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; -import { itemSelected } from 'features/gallery/store/gallerySlice'; +import { imageSelected } from 'features/gallery/store/gallerySlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiCaretLeftBold, PiCaretRightBold } from 'react-icons/pi'; @@ -17,32 +17,32 @@ const NextPrevItemButtons = ({ inset = 8 }: { inset?: ChakraProps['insetInlineSt const { imageNames, isFetching } = useGalleryImageNames(); const isOnFirstItem = useMemo( - () => (lastSelectedItem ? imageNames.at(0) === lastSelectedItem.id : false), + () => (lastSelectedItem ? imageNames.at(0) === lastSelectedItem : false), [imageNames, lastSelectedItem] ); const isOnLastItem = useMemo( - () => (lastSelectedItem ? imageNames.at(-1) === lastSelectedItem.id : false), + () => (lastSelectedItem ? imageNames.at(-1) === lastSelectedItem : false), [imageNames, lastSelectedItem] ); const onClickLeftArrow = useCallback(() => { - const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem.id) - 1 : 0; + const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem) - 1 : 0; const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1); const n = imageNames.at(clampedIndex); if (!n) { return; } - dispatch(itemSelected({ type: lastSelectedItem?.type ?? 
'image', id: n })); + dispatch(imageSelected(n)); }, [dispatch, imageNames, lastSelectedItem]); const onClickRightArrow = useCallback(() => { - const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem.id) + 1 : 0; + const targetIndex = lastSelectedItem ? imageNames.findIndex((n) => n === lastSelectedItem) + 1 : 0; const clampedIndex = clamp(targetIndex, 0, imageNames.length - 1); const n = imageNames.at(clampedIndex); if (!n) { return; } - dispatch(itemSelected({ type: lastSelectedItem?.type ?? 'image', id: n })); + dispatch(imageSelected(n)); }, [dispatch, imageNames, lastSelectedItem]); return ( diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts index ab6b584a15e..aad849fdb59 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts @@ -24,9 +24,6 @@ const selectGalleryQueryCategories = createSelector(selectGalleryView, (galleryV if (galleryView === 'images') { return IMAGE_CATEGORIES; } - if (galleryView === 'videos') { - return []; - } return ASSETS_CATEGORIES; }); const selectGallerySearchTerm = createSelector(selectGallerySlice, (gallery) => gallery.searchTerm); diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index 1f99fc7a6d3..d66feefa2c9 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -2,7 +2,7 @@ import type { PayloadAction } from '@reduxjs/toolkit'; import { createSlice } from '@reduxjs/toolkit'; import type { RootState } from 'app/store/store'; import type { SliceConfig } from 'app/store/types'; -import { isPlainObject } from 'es-toolkit'; +import { isPlainObject, uniq } from 'es-toolkit'; import type { BoardRecordOrderBy } from 'services/api/types'; import { assert } from 'tsafe'; @@ -40,7 +40,7 @@ const slice = createSlice({ name: 'gallery', initialState: getInitialState(), reducers: { - itemSelected: (state, action: PayloadAction<{ type: 'image' | 'video'; id: string } | null>) => { + imageSelected: (state, action: PayloadAction) => { const selectedItem = action.payload; if (!selectedItem) { @@ -49,14 +49,8 @@ const slice = createSlice({ state.selection = [selectedItem]; } }, - selectionChanged: (state, action: PayloadAction<{ type: 'image' | 'video'; id: string }[]>) => { - const uniqueById = new Map(); - for (const item of action.payload) { - if (!uniqueById.has(item.id)) { - uniqueById.set(item.id, item); - } - } - state.selection = Array.from(uniqueById.values()); + selectionChanged: (state, action: PayloadAction) => { + state.selection = uniq(action.payload); }, imageToCompareChanged: (state, action: PayloadAction) => { state.imageToCompare = action.payload; @@ -122,8 +116,8 @@ const slice = createSlice({ comparedImagesSwapped: (state) => { if (state.imageToCompare) { const oldSelection = state.selection; - state.selection = [{ type: 'image', id: state.imageToCompare }]; - state.imageToCompare = oldSelection[0]?.id ?? null; + state.selection = [state.imageToCompare]; + state.imageToCompare = oldSelection[0] ?? 
null; } }, comparisonFitChanged: (state, action: PayloadAction<'contain' | 'fill'>) => { @@ -151,7 +145,7 @@ const slice = createSlice({ }); export const { - itemSelected, + imageSelected, shouldAutoSwitchChanged, autoAssignBoardOnClickChanged, setGalleryImageMinimumWidth, diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index 0a03c7d2662..addeefe870f 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -1,7 +1,7 @@ import type { ImageCategory } from 'services/api/types'; import z from 'zod'; -const zGalleryView = z.enum(['images', 'assets', 'videos']); +const zGalleryView = z.enum(['images', 'assets']); export type GalleryView = z.infer; const zBoardId = z.string(); // TS hack to get autocomplete for "none" but accept any string @@ -19,7 +19,7 @@ export const IMAGE_CATEGORIES: ImageCategory[] = ['general']; export const ASSETS_CATEGORIES: ImageCategory[] = ['control', 'mask', 'user', 'other']; export const zGalleryState = z.object({ - selection: z.array(z.object({ type: z.enum(['image', 'video']), id: z.string() })), + selection: z.array(z.string()), shouldAutoSwitch: z.boolean(), autoAssignBoardOnClick: z.boolean(), autoAddBoardId: zBoardId, diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx index 2e77415757c..c8423a8fe4e 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx @@ -19,7 +19,7 @@ import { $lastProgressEvent } from 'services/events/stores'; const CurrentImageNode = (props: NodeProps) => { const lastSelectedItem = useAppSelector(selectLastSelectedItem); const lastProgressEvent = useStore($lastProgressEvent); - const imageDTO = useImageDTO(lastSelectedItem?.id); + const imageDTO = useImageDTO(lastSelectedItem); if (lastProgressEvent?.image) { return ( diff --git a/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx index d97a0bed341..4f037224a62 100644 --- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx +++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideoCard.tsx @@ -1,17 +1,11 @@ import { ExternalLink, Flex, Spacer, Text } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; import type { VideoData } from 'features/system/components/VideosModal/data'; -import { videoModalLinkClicked } from 'features/system/store/actions'; -import { memo, useCallback } from 'react'; +import { memo } from 'react'; import { useTranslation } from 'react-i18next'; export const VideoCard = memo(({ video }: { video: VideoData }) => { const { t } = useTranslation(); - const dispatch = useAppDispatch(); const { tKey, link } = video; - const handleLinkClick = useCallback(() => { - dispatch(videoModalLinkClicked(t(`supportVideos.videos.${tKey}.title`))); - }, [dispatch, t, tKey]); return ( @@ -20,7 +14,7 @@ export const VideoCard = memo(({ video }: { video: VideoData }) => { {t(`supportVideos.videos.${tKey}.title`)} - + {t(`supportVideos.videos.${tKey}.description`)} diff --git 
a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx index 2db109479f5..818f531820e 100644 --- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx +++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModal.tsx @@ -10,7 +10,6 @@ import { ModalOverlay, Text, } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; import { buildUseDisclosure } from 'common/hooks/useBoolean'; import { @@ -19,62 +18,36 @@ import { supportVideos, } from 'features/system/components/VideosModal/data'; import { VideoCardList } from 'features/system/components/VideosModal/VideoCardList'; -import { videoModalLinkClicked } from 'features/system/store/actions'; import { discordLink } from 'features/system/store/constants'; -import { memo, useCallback } from 'react'; +import { memo } from 'react'; import { Trans, useTranslation } from 'react-i18next'; export const [useVideosModal] = buildUseDisclosure(false); const GettingStartedPlaylistLink = () => { - const dispatch = useAppDispatch(); - const handleLinkClick = useCallback(() => { - dispatch(videoModalLinkClicked('Getting Started playlist')); - }, [dispatch]); - return ( ); }; const StudioSessionsPlaylistLink = () => { - const dispatch = useAppDispatch(); - const handleLinkClick = useCallback(() => { - dispatch(videoModalLinkClicked('Studio Sessions playlist')); - }, [dispatch]); - return ( ); }; const DiscordLink = () => { - const dispatch = useAppDispatch(); - const handleLinkClick = useCallback(() => { - dispatch(videoModalLinkClicked('Discord')); - }, [dispatch]); - - return ( - - ); + return ; }; const components = { diff --git a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx index 3b5169a14ae..bf99afdbc8e 100644 --- a/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx +++ b/invokeai/frontend/web/src/features/system/components/VideosModal/VideosModalButton.tsx @@ -1,21 +1,17 @@ import { IconButton } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; import { useVideosModal } from 'features/system/components/VideosModal/VideosModal'; -import { videoModalOpened } from 'features/system/store/actions'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiYoutubeLogoFill } from 'react-icons/pi'; export const VideosModalButton = memo(() => { const { t } = useTranslation(); - const dispatch = useAppDispatch(); const videosModal = useVideosModal(); const onClickOpen = useCallback(() => { - dispatch(videoModalOpened()); videosModal.open(); - }, [videosModal, dispatch]); + }, [videosModal]); return ( ('system/videoModalLinkClicked'); -export const videoModalOpened = createAction('system/videoModalOpened'); diff --git a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx index f9fafbbcdcd..d076b9a7303 100644 --- a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx +++ b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx @@ -8,7 +8,7 @@ import { selectListBoardsQueryArgs, selectSelectedBoardId, } from 
'features/gallery/store/gallerySelectors'; -import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice'; +import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice'; import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useNodeExecutionState'; import { isImageField, isImageFieldCollection } from 'features/nodes/types/common'; import { zNodeStatus } from 'features/nodes/types/invocation'; @@ -170,7 +170,7 @@ export const buildOnInvocationComplete = ( boardIdSelected({ boardId: board_id, select: { - selection: [{ type: 'image', id: image_name }], + selection: [image_name], galleryView: 'images', }, }) @@ -182,7 +182,7 @@ export const buildOnInvocationComplete = ( dispatch(galleryViewChanged('images')); } // Select the image immediately since we've optimistically updated the cache - dispatch(itemSelected({ type: 'image', id: lastImageDTO.image_name })); + dispatch(imageSelected(lastImageDTO.image_name)); } }; From 5968d71858cd953ac135d8d964c8c4a120b5cd28 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:31:56 +1100 Subject: [PATCH 04/20] tidy: removing unused code paths 4 --- .../controlLayers/hooks/addLayerHooks.ts | 10 +----- .../src/features/controlLayers/store/types.ts | 34 ++----------------- .../InvokeButtonTooltip.tsx | 21 ------------ .../frontend/web/src/services/api/index.ts | 7 ---- .../src/services/api/util/tagInvalidation.ts | 2 +- .../vocab.json | 4 +-- 6 files changed, 6 insertions(+), 72 deletions(-) diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts index 8f0a1a11a6f..fe23ec9d90b 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts @@ -24,11 +24,9 @@ import { import type { CanvasEntityIdentifier, CanvasRegionalGuidanceState, - ChatGPT4oReferenceImageConfig, ControlLoRAConfig, ControlNetConfig, FluxKontextReferenceImageConfig, - Gemini2_5ReferenceImageConfig, IPAdapterConfig, RegionalGuidanceIPAdapterConfig, T2IAdapterConfig, @@ -76,13 +74,7 @@ export const selectDefaultControlAdapter = createSelector( } ); -export const getDefaultRefImageConfig = ( - getState: AppGetState -): - | IPAdapterConfig - | ChatGPT4oReferenceImageConfig - | FluxKontextReferenceImageConfig - | Gemini2_5ReferenceImageConfig => { +export const getDefaultRefImageConfig = (getState: AppGetState): IPAdapterConfig | FluxKontextReferenceImageConfig => { const state = getState(); const mainModelConfig = selectMainModelConfig(state); diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 7bdbc4f2d6f..0ec0cd7baa8 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -313,25 +313,6 @@ const zRegionalGuidanceFLUXReduxConfig = z.object({ }); type RegionalGuidanceFLUXReduxConfig = z.infer; -const zChatGPT4oReferenceImageConfig = z.object({ - type: z.literal('chatgpt_4o_reference_image'), - image: zCroppableImageWithDims.nullable(), - /** - * TODO(psyche): Technically there is no model for ChatGPT 4o reference images - it's just a field in the API call. 
- * But we use a model drop down to switch between different ref image types, so there needs to be a model here else - * there will be no way to switch between ref image types. - */ - model: zModelIdentifierField.nullable(), -}); -export type ChatGPT4oReferenceImageConfig = z.infer; - -const zGemini2_5ReferenceImageConfig = z.object({ - type: z.literal('gemini_2_5_reference_image'), - image: zCroppableImageWithDims.nullable(), - model: zModelIdentifierField.nullable(), -}); -export type Gemini2_5ReferenceImageConfig = z.infer; - const zFluxKontextReferenceImageConfig = z.object({ type: z.literal('flux_kontext_reference_image'), image: zCroppableImageWithDims.nullable(), @@ -349,13 +330,7 @@ const zCanvasEntityBase = z.object({ export const zRefImageState = z.object({ id: zId, isEnabled: z.boolean().default(true), - config: z.discriminatedUnion('type', [ - zIPAdapterConfig, - zFLUXReduxConfig, - zChatGPT4oReferenceImageConfig, - zFluxKontextReferenceImageConfig, - zGemini2_5ReferenceImageConfig, - ]), + config: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zFluxKontextReferenceImageConfig]), }); export type RefImageState = z.infer; @@ -747,12 +722,7 @@ export const getInitialRefImagesState = (): RefImagesState => ({ export const zCanvasReferenceImageState_OLD = zCanvasEntityBase.extend({ type: z.literal('reference_image'), - ipAdapter: z.discriminatedUnion('type', [ - zIPAdapterConfig, - zFLUXReduxConfig, - zChatGPT4oReferenceImageConfig, - zGemini2_5ReferenceImageConfig, - ]), + ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig]), }); export const zCanvasMetadata = z.object({ diff --git a/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx b/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx index df6bdc936c3..9f1d004ba87 100644 --- a/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx +++ b/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx @@ -50,27 +50,6 @@ const TooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => { }); TooltipContent.displayName = 'TooltipContent'; -const VideoTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => { - const isReady = useStore($isReadyToEnqueue); - const reasons = useStore($reasonsWhyCannotEnqueue); - - return ( - - - - {reasons.length > 0 && ( - <> - - - - )} - - - - ); -}); -VideoTabTooltipContent.displayName = 'VideoTabTooltipContent'; - const CanvasTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => { const isReady = useStore($isReadyToEnqueue); const reasons = useStore($reasonsWhyCannotEnqueue); diff --git a/invokeai/frontend/web/src/services/api/index.ts b/invokeai/frontend/web/src/services/api/index.ts index 3254330d81d..d5b1e4672a8 100644 --- a/invokeai/frontend/web/src/services/api/index.ts +++ b/invokeai/frontend/web/src/services/api/index.ts @@ -16,7 +16,6 @@ const tagTypes = [ 'Board', 'BoardImagesTotal', 'BoardAssetsTotal', - 'BoardVideosTotal', 'HFTokenStatus', 'Image', 'ImageNameList', @@ -53,12 +52,6 @@ const tagTypes = [ 'StylePreset', 'Schema', 'QueueCountsByDestination', - 'Video', - 'VideoMetadata', - 'VideoList', - 'VideoIdList', - 'VideoCollectionCounts', - 'VideoCollection', // This is invalidated on reconnect. It should be used for queries that have changing data, // especially related to the queue and generation. 
'FetchOnReconnect', diff --git a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts index 02d18bf6d3e..477a5a03f87 100644 --- a/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts +++ b/invokeai/frontend/web/src/services/api/util/tagInvalidation.ts @@ -4,7 +4,7 @@ import { getListImagesUrl } from 'services/api/util'; import type { ApiTagDescription } from '..'; export const getTagsToInvalidateForBoardAffectingMutation = (affected_boards: string[]): ApiTagDescription[] => { - const tags: ApiTagDescription[] = ['ImageNameList', 'VideoIdList']; + const tags: ApiTagDescription[] = ['ImageNameList']; for (const board_id of affected_boards) { tags.push({ diff --git a/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json b/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json index 6c49fc63bcb..b05524b241f 100644 --- a/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json +++ b/tests/model_identification/stripped_models/dc79db49-7f38-4f54-b4f1-e7c521ded481/vocab.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910 -size 2776833 +oid sha256:01154a4426e6077c8a3f04fca42edb5293bac73a7faed666901f25591ef89182 +size 3383407 From ea71f1d8511bb3597b189c76f04424ca180dcb48 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:42:40 +1100 Subject: [PATCH 05/20] tidy: removing unused code paths 5 --- invokeai/app/api/routers/session_queue.py | 13 +---------- invokeai/app/services/events/events_common.py | 2 -- .../session_queue/session_queue_common.py | 9 -------- .../__mocks__/mockStagingAreaApp.ts | 2 -- .../controlLayers/store/refImagesSlice.ts | 2 +- .../QueueList/QueueItemComponent.tsx | 7 +----- .../queue/components/QueueList/constants.ts | 1 - .../frontend/web/src/services/api/schema.ts | 22 ------------------- .../src/services/events/setEventListeners.tsx | 2 -- 9 files changed, 3 insertions(+), 57 deletions(-) diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py index 5320bf18eb3..7b4242e013c 100644 --- a/invokeai/app/api/routers/session_queue.py +++ b/invokeai/app/api/routers/session_queue.py @@ -2,7 +2,7 @@ from fastapi import Body, HTTPException, Path, Query from fastapi.routing import APIRouter -from pydantic import BaseModel, Field +from pydantic import BaseModel from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus @@ -16,7 +16,6 @@ DeleteAllExceptCurrentResult, DeleteByDestinationResult, EnqueueBatchResult, - FieldIdentifier, ItemIdsResult, PruneResult, RetryItemsResult, @@ -37,12 +36,6 @@ class SessionQueueAndProcessorStatus(BaseModel): processor: SessionProcessorStatus -class ValidationRunData(BaseModel): - workflow_id: str = Field(description="The id of the workflow being published.") - input_fields: list[FieldIdentifier] = Body(description="The input fields for the published workflow") - output_fields: list[FieldIdentifier] = Body(description="The output fields for the published workflow") - - @session_queue_router.post( "/{queue_id}/enqueue_batch", operation_id="enqueue_batch", @@ -54,10 +47,6 @@ async def enqueue_batch( queue_id: str = Path(description="The queue id to perform this 
operation on"), batch: Batch = Body(description="Batch to process"), prepend: bool = Body(default=False, description="Whether or not to prepend this batch in the queue"), - validation_run_data: Optional[ValidationRunData] = Body( - default=None, - description="The validation run data to use for this batch. This is only used if this is a validation run.", - ), ) -> EnqueueBatchResult: """Processes a batch and enqueues the output graphs for execution.""" try: diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index 2f995293984..d32816f353e 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -241,7 +241,6 @@ class QueueItemStatusChangedEvent(QueueItemEventBase): batch_status: BatchStatus = Field(description="The status of the batch") queue_status: SessionQueueStatus = Field(description="The status of the queue") session_id: str = Field(description="The ID of the session (aka graph execution state)") - credits: Optional[float] = Field(default=None, description="The total credits used for this queue item") @classmethod def build( @@ -264,7 +263,6 @@ def build( completed_at=str(queue_item.completed_at) if queue_item.completed_at else None, batch_status=batch_status, queue_status=queue_status, - credits=queue_item.credits, ) diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index e912753f423..57b512a8558 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -249,15 +249,6 @@ class SessionQueueItem(BaseModel): retried_from_item_id: Optional[int] = Field( default=None, description="The item_id of the queue item that this item was retried from" ) - is_api_validation_run: bool = Field( - default=False, - description="Whether this queue item is an API validation run.", - ) - published_workflow_id: Optional[str] = Field( - default=None, - description="The ID of the published workflow associated with this queue item", - ) - credits: Optional[float] = Field(default=None, description="The total credits used for this queue item") session: GraphExecutionState = Field(description="The fully-populated session to be executed") workflow: Optional[WorkflowWithoutID] = Field( default=None, description="The workflow associated with this queue item" diff --git a/invokeai/frontend/web/src/features/controlLayers/components/StagingArea/__mocks__/mockStagingAreaApp.ts b/invokeai/frontend/web/src/features/controlLayers/components/StagingArea/__mocks__/mockStagingAreaApp.ts index 15a50ceb115..e0b56d5a439 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/StagingArea/__mocks__/mockStagingAreaApp.ts +++ b/invokeai/frontend/web/src/features/controlLayers/components/StagingArea/__mocks__/mockStagingAreaApp.ts @@ -86,8 +86,6 @@ export const createMockQueueItem = (overrides: PartialDeep { const isCanceled = useMemo(() => ['canceled', 'completed', 'failed'].includes(item.status), [item.status]); const isFailed = useMemo(() => ['canceled', 'failed'].includes(item.status), [item.status]); - const isValidationRun = useMemo(() => item.is_api_validation_run === true, [item.is_api_validation_run]); const originText = useOriginText(item.origin); const destinationText = useDestinationText(item.destination); @@ -113,10 +112,6 @@ const QueueItemComponent = ({ index, item }: InnerItemProps) => { )} - - {isValidationRun && 
{t('workflows.builder.publishingValidationRun')}} - - {!isFailed && ( diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/constants.ts b/invokeai/frontend/web/src/features/queue/components/QueueList/constants.ts index c75e205614c..e996ff7af13 100644 --- a/invokeai/frontend/web/src/features/queue/components/QueueList/constants.ts +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/constants.ts @@ -9,6 +9,5 @@ export const COLUMN_WIDTHS = { fieldValues: 'auto', createdAt: '9.5rem', completedAt: '9.5rem', - validationRun: 'auto', actions: 'auto', } as const; diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 4da374c26f1..fd73c806f04 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -19503,12 +19503,6 @@ export type components = { * @description The ID of the session (aka graph execution state) */ session_id: string; - /** - * Credits - * @description The total credits used for this queue item - * @default null - */ - credits: number | null; }; /** * QueueItemsRetriedEvent @@ -21285,22 +21279,6 @@ export type components = { * @description The item_id of the queue item that this item was retried from */ retried_from_item_id?: number | null; - /** - * Is Api Validation Run - * @description Whether this queue item is an API validation run. - * @default false - */ - is_api_validation_run?: boolean; - /** - * Published Workflow Id - * @description The ID of the published workflow associated with this queue item - */ - published_workflow_id?: string | null; - /** - * Credits - * @description The total credits used for this queue item - */ - credits?: number | null; /** @description The fully-populated session to be executed */ session: components["schemas"]["GraphExecutionState"]; /** @description The workflow associated with this queue item */ diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx index 49876cee93b..d9a1d8386c1 100644 --- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx +++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx @@ -365,7 +365,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis updated_at, completed_at, error_traceback, - credits, } = data; log.debug({ data }, `Queue item ${item_id} status updated: ${status}`); @@ -380,7 +379,6 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis draft.error_type = error_type; draft.error_message = error_message; draft.error_traceback = error_traceback; - draft.credits = credits; }) ); From 2810c47c785ac1e5468a730b6cbe97535158f141 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:55:04 +1100 Subject: [PATCH 06/20] tidy: removing unused code paths 6 --- invokeai/app/api/routers/boards.py | 1 - .../board_records/board_records_common.py | 4 -- invokeai/app/services/events/events_common.py | 4 -- .../style_preset_records_common.py | 1 - .../components/Boards/DeleteBoardModal.tsx | 8 +-- .../web/src/features/nodes/store/types.ts | 2 +- .../src/features/nodes/types/v2/workflow.ts | 2 +- .../components/StylePresetMenu.tsx | 2 - .../web/src/services/api/endpoints/boards.ts | 4 +- .../frontend/web/src/services/api/schema.ts | 65 +------------------ 10 files changed, 6 insertions(+), 87 deletions(-) diff --git 
a/invokeai/app/api/routers/boards.py b/invokeai/app/api/routers/boards.py index ec3b86bcfa2..cf668d5a1a4 100644 --- a/invokeai/app/api/routers/boards.py +++ b/invokeai/app/api/routers/boards.py @@ -33,7 +33,6 @@ class DeleteBoardResult(BaseModel): ) async def create_board( board_name: str = Query(description="The name of the board to create", max_length=300), - is_private: bool = Query(default=False, description="Whether the board is private"), ) -> BoardDTO: """Creates a board""" try: diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py index 81d05d7f597..5067d42999b 100644 --- a/invokeai/app/services/board_records/board_records_common.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -26,8 +26,6 @@ class BoardRecord(BaseModelExcludeNull): """The name of the cover image of the board.""" archived: bool = Field(description="Whether or not the board is archived.") """Whether or not the board is archived.""" - is_private: Optional[bool] = Field(default=None, description="Whether the board is private.") - """Whether the board is private.""" def deserialize_board_record(board_dict: dict) -> BoardRecord: @@ -42,7 +40,6 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: updated_at = board_dict.get("updated_at", get_iso_timestamp()) deleted_at = board_dict.get("deleted_at", get_iso_timestamp()) archived = board_dict.get("archived", False) - is_private = board_dict.get("is_private", False) return BoardRecord( board_id=board_id, @@ -52,7 +49,6 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: updated_at=updated_at, deleted_at=deleted_at, archived=archived, - is_private=is_private, ) diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index d32816f353e..a924f2eed9f 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -195,8 +195,6 @@ class InvocationErrorEvent(InvocationEventBase): error_type: str = Field(description="The error type") error_message: str = Field(description="The error message") error_traceback: str = Field(description="The error traceback") - user_id: Optional[str] = Field(default=None, description="The ID of the user who created the invocation") - project_id: Optional[str] = Field(default=None, description="The ID of the user who created the invocation") @classmethod def build( @@ -219,8 +217,6 @@ def build( error_type=error_type, error_message=error_message, error_traceback=error_traceback, - user_id=getattr(queue_item, "user_id", None), - project_id=getattr(queue_item, "project_id", None), ) diff --git a/invokeai/app/services/style_preset_records/style_preset_records_common.py b/invokeai/app/services/style_preset_records/style_preset_records_common.py index 36153d002d0..9ea0b0219cf 100644 --- a/invokeai/app/services/style_preset_records/style_preset_records_common.py +++ b/invokeai/app/services/style_preset_records/style_preset_records_common.py @@ -26,7 +26,6 @@ class PresetData(BaseModel, extra="forbid"): class PresetType(str, Enum, metaclass=MetaEnum): User = "user" Default = "default" - Project = "project" class StylePresetChanges(BaseModel, extra="forbid"): diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx index 47d59540f1d..b7d99301e7a 100644 --- 
a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx @@ -151,13 +151,7 @@ const DeleteBoardModal = () => { bottomMessage={t('boards.bottomMessage')} /> )} - {boardToDelete !== 'none' && ( - - {boardToDelete.is_private - ? t('boards.deletedPrivateBoardsCannotbeRestored') - : t('boards.deletedBoardsCannotbeRestored')} - - )} + {boardToDelete !== 'none' && {t('boards.deletedBoardsCannotbeRestored')}} {t('gallery.deleteImagePermanent')} diff --git a/invokeai/frontend/web/src/features/nodes/store/types.ts b/invokeai/frontend/web/src/features/nodes/store/types.ts index a3391d7daec..587fbfdd7b9 100644 --- a/invokeai/frontend/web/src/features/nodes/store/types.ts +++ b/invokeai/frontend/web/src/features/nodes/store/types.ts @@ -21,6 +21,6 @@ export const zNodesState = z.object({ nodes: z.array(zAnyNode), edges: z.array(zAnyEdge), formFieldInitialValues: z.record(z.string(), zStatefulFieldValue), - ...zWorkflowV3.omit({ nodes: true, edges: true, is_published: true }).shape, + ...zWorkflowV3.omit({ nodes: true, edges: true }).shape, }); export type NodesState = z.infer; diff --git a/invokeai/frontend/web/src/features/nodes/types/v2/workflow.ts b/invokeai/frontend/web/src/features/nodes/types/v2/workflow.ts index 511a28cdc3b..94300d034a2 100644 --- a/invokeai/frontend/web/src/features/nodes/types/v2/workflow.ts +++ b/invokeai/frontend/web/src/features/nodes/types/v2/workflow.ts @@ -13,7 +13,7 @@ const zXYPosition = z const zDimension = z.number().gt(0).nullish(); -const zWorkflowCategory = z.enum(['user', 'default', 'project']); +const zWorkflowCategory = z.enum(['user', 'default']); // #endregion // #region Workflow Nodes diff --git a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetMenu.tsx b/invokeai/frontend/web/src/features/stylePresets/components/StylePresetMenu.tsx index f6ba71b0b90..c42249d6114 100644 --- a/invokeai/frontend/web/src/features/stylePresets/components/StylePresetMenu.tsx +++ b/invokeai/frontend/web/src/features/stylePresets/components/StylePresetMenu.tsx @@ -31,8 +31,6 @@ export const StylePresetMenu = () => { ) => { if (preset.type === 'default') { acc.defaultPresets.push(preset); - } else if (preset.type === 'project') { - acc.sharedPresets.push(preset); } else { acc.presets.push(preset); } diff --git a/invokeai/frontend/web/src/services/api/endpoints/boards.ts b/invokeai/frontend/web/src/services/api/endpoints/boards.ts index 59211439a45..9b7a4f2ad8a 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/boards.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/boards.ts @@ -100,10 +100,10 @@ export const boardsApi = api.injectEndpoints({ */ createBoard: build.mutation({ - query: ({ board_name, is_private }) => ({ + query: ({ board_name }) => ({ url: buildBoardsUrl(), method: 'POST', - params: { board_name, is_private }, + params: { board_name }, }), invalidatesTags: [{ type: 'Board', id: LIST_TAG }], }), diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index fd73c806f04..db4ba519cad 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2494,11 +2494,6 @@ export type components = { * @description Whether or not the board is archived. */ archived: boolean; - /** - * Is Private - * @description Whether the board is private. 
- */ - is_private?: boolean | null; /** * Image Count * @description The number of images in the board. @@ -2661,8 +2656,6 @@ export type components = { * @default false */ prepend?: boolean; - /** @description The validation run data to use for this batch. This is only used if this is a validation run. */ - validation_run_data?: components["schemas"]["ValidationRunData"] | null; }; /** Body_get_images_by_names */ Body_get_images_by_names: { @@ -7460,30 +7453,6 @@ export type components = { */ y: number; }; - /** FieldIdentifier */ - FieldIdentifier: { - /** - * Kind - * @description The kind of field - * @enum {string} - */ - kind: "input" | "output"; - /** - * Node Id - * @description The ID of the node - */ - node_id: string; - /** - * Field Name - * @description The name of the field - */ - field_name: string; - /** - * User Label - * @description The user label of the field, if any - */ - user_label: string | null; - }; /** * FieldKind * @description The kind of field. @@ -12492,18 +12461,6 @@ export type components = { * @description The error traceback */ error_traceback: string; - /** - * User Id - * @description The ID of the user who created the invocation - * @default null - */ - user_id: string | null; - /** - * Project Id - * @description The ID of the user who created the invocation - * @default null - */ - project_id: string | null; }; InvocationOutputMap: { add: components["schemas"]["IntegerOutput"]; @@ -19303,7 +19260,7 @@ export type components = { * PresetType * @enum {string} */ - PresetType: "user" | "default" | "project"; + PresetType: "user" | "default"; /** * ProgressImage * @description The progress image sent intermittently during processing @@ -24164,24 +24121,6 @@ export type components = { /** Error Type */ type: string; }; - /** ValidationRunData */ - ValidationRunData: { - /** - * Workflow Id - * @description The id of the workflow being published. 
- */ - workflow_id: string; - /** - * Input Fields - * @description The input fields for the published workflow - */ - input_fields: components["schemas"]["FieldIdentifier"][]; - /** - * Output Fields - * @description The output fields for the published workflow - */ - output_fields: components["schemas"]["FieldIdentifier"][]; - }; /** Workflow */ Workflow: { /** @@ -26401,8 +26340,6 @@ export interface operations { query: { /** @description The name of the board to create */ board_name: string; - /** @description Whether the board is private */ - is_private?: boolean; }; header?: never; path?: never; From 373718fc4b66b0f93ab9bed7807e3c5c91048cf4 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 13 Oct 2025 16:12:15 +1100 Subject: [PATCH 07/20] tidy: removing unused code paths 7 --- README.md | 28 ++++++------------- invokeai/frontend/web/public/locales/en.json | 7 +---- .../InformationalPopover/constants.ts | 5 ---- .../components/AboutModal/AboutModal.tsx | 3 +- .../components/SettingsModal/SettingsMenu.tsx | 16 +---------- .../SettingsModal/SettingsUpsellMenuItem.tsx | 19 ------------- .../src/features/system/store/constants.ts | 1 - 7 files changed, 11 insertions(+), 68 deletions(-) delete mode 100644 invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx diff --git a/README.md b/README.md index 2a23dfc8541..0465e5ba792 100644 --- a/README.md +++ b/README.md @@ -4,38 +4,27 @@ # Invoke - Professional Creative AI Tools for Visual Media -#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com] - [![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link] Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry leading web-based UI, and serves as the foundation for multiple commercial products. 
-Invoke is available in two editions: - -| **Community Edition** | **Professional Edition** | -|----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| -| **For users looking for a locally installed, self-hosted and self-managed service** | **For users or teams looking for a cloud-hosted, fully managed service** | -| - Free to use under a commercially-friendly license | - Monthly subscription fee with three different plan levels | -| - Download and install on compatible hardware | - Offers additional benefits, including multi-user support, improved model training, and more | -| - Includes all core studio features: generate, refine, iterate on images, and build workflows | - Hosted in the cloud for easy, secure model access and scalability | -| Quick Start -> [Installation and Updates][installation docs] | More Information -> [www.invoke.com/pricing](https://www.invoke.com/pricing) | - +- Free to use under a commercially-friendly license +- Download and install on compatible hardware +- Generate, refine, iterate on images, and build workflows ![Highlighted Features - Canvas and Workflows](https://github.com/invoke-ai/InvokeAI/assets/31807370/708f7a82-084f-4860-bfbe-e2588c53548d) # Documentation -| **Quick Links** | -|----------------------------------------------------------------------------------------------------------------------------| -| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] | -# Installation - -To get started with Invoke, [Download the Installer](https://www.invoke.com/downloads). +| **Quick Links** | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] | -For detailed step by step instructions, or for instructions on manual/docker installations, visit our documentation on [Installation and Updates][installation docs] +# Installation +To get started with Invoke, [Download the Launcher](https://github.com/invoke-ai/launcher/releases/latest). ## Troubleshooting, FAQ and Support @@ -90,7 +79,6 @@ Original portions of the software are Copyright © 2024 by respective contributo [features docs]: https://invoke-ai.github.io/InvokeAI/features/database/ [faq]: https://invoke-ai.github.io/InvokeAI/faq/ [contributors]: https://invoke-ai.github.io/InvokeAI/contributing/contributors/ -[invoke.com]: https://www.invoke.com/about [github issues]: https://github.com/invoke-ai/InvokeAI/issues [docs home]: https://invoke-ai.github.io/InvokeAI [installation docs]: https://invoke-ai.github.io/InvokeAI/installation/ diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index d88ba697c58..0343e1af19b 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -2612,12 +2612,7 @@ "viewModeTooltip": "This is how your prompt will look with your currently selected template. 
To edit your prompt, click anywhere in the text box.", "togglePromptPreviews": "Toggle Prompt Previews" }, - "upsell": { - "inviteTeammates": "Invite Teammates", - "professional": "Professional", - "professionalUpsell": "Available in Invoke's Professional Edition. Click here or visit invoke.com/pricing for more details.", - "shareAccess": "Share Access" - }, + "ui": { "tabs": { "generate": "Generate", diff --git a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts index 22a813b6de1..6db4dcbd682 100644 --- a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts +++ b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts @@ -1,5 +1,4 @@ import type { PopoverProps } from '@invoke-ai/ui-library'; -import commercialLicenseBg from 'public/assets/images/commercial-license-bg.png'; import denoisingStrength from 'public/assets/images/denoising-strength.png'; export type Feature = @@ -217,10 +216,6 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = { seamlessTilingYAxis: { href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', }, - fluxDevLicense: { - href: 'https://www.invoke.com/get-a-commercial-license-for-flux', - image: commercialLicenseBg, - }, } as const; export const OPEN_DELAY = 1000; // in milliseconds diff --git a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx index bb2eb14a687..dd729f067c2 100644 --- a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx +++ b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx @@ -17,7 +17,7 @@ import { } from '@invoke-ai/ui-library'; import { deepClone } from 'common/util/deepClone'; import DataViewer from 'features/gallery/components/ImageMetadataViewer/DataViewer'; -import { discordLink, githubLink, websiteLink } from 'features/system/store/constants'; +import { discordLink, githubLink } from 'features/system/store/constants'; import InvokeLogoYellow from 'public/assets/images/invoke-tag-lrg.svg'; import type { ReactElement } from 'react'; import { cloneElement, memo, useMemo } from 'react'; @@ -82,7 +82,6 @@ const AboutModal = ({ children }: AboutModalProps) => { {t('common.aboutHeading')} {t('common.aboutDesc')} - diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx index ddddd79cc51..bbd7103d6c2 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsMenu.tsx @@ -14,19 +14,10 @@ import HotkeysModal from 'features/system/components/HotkeysModal/HotkeysModal'; import { discordLink, githubLink } from 'features/system/store/constants'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; -import { - PiBugBeetleBold, - PiGearSixFill, - PiInfoBold, - PiKeyboardBold, - PiShareNetworkFill, - PiToggleRightFill, - PiUsersBold, -} from 'react-icons/pi'; +import { PiBugBeetleBold, PiGearSixFill, PiInfoBold, PiKeyboardBold, PiToggleRightFill } from 'react-icons/pi'; import { RiDiscordFill, RiGithubFill } from 'react-icons/ri'; import SettingsModal from './SettingsModal'; -import { SettingsUpsellMenuItem } from 
'./SettingsUpsellMenuItem'; const SettingsMenu = () => { const { t } = useTranslation(); const { isOpen, onOpen, onClose } = useDisclosure(); @@ -43,11 +34,6 @@ const SettingsMenu = () => { /> - - } /> - } /> - - }> {t('common.githubLabel')} diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx deleted file mode 100644 index c5f9a13c2b3..00000000000 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsUpsellMenuItem.tsx +++ /dev/null @@ -1,19 +0,0 @@ -import { Flex, Icon, MenuItem, Text, Tooltip } from '@invoke-ai/ui-library'; -import type { ReactElement } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiArrowUpBold } from 'react-icons/pi'; - -export const SettingsUpsellMenuItem = ({ menuText, menuIcon }: { menuText: string; menuIcon: ReactElement }) => { - const { t } = useTranslation(); - - return ( - - - - {menuText} - - - - - ); -}; diff --git a/invokeai/frontend/web/src/features/system/store/constants.ts b/invokeai/frontend/web/src/features/system/store/constants.ts index 0ca2d24129e..882609e0ac7 100644 --- a/invokeai/frontend/web/src/features/system/store/constants.ts +++ b/invokeai/frontend/web/src/features/system/store/constants.ts @@ -1,4 +1,3 @@ export const githubLink = 'http://github.com/invoke-ai/InvokeAI'; export const githubIssuesLink = 'https://github.com/invoke-ai/InvokeAI/issues'; export const discordLink = 'https://discord.gg/ZmtBAhwWhy'; -export const websiteLink = 'https://www.invoke.com/'; From 35f542529198e8aebe52670e706c8e26d0dafac2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:09:56 +1100 Subject: [PATCH 08/20] tidy(ui): lift error boundary reset cb outside component --- invokeai/frontend/web/src/app/components/App.tsx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index bae4f4cf633..1c1af39a000 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -3,22 +3,22 @@ import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator'; import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator'; import { clearStorage } from 'app/store/enhancers/reduxRemember/driver'; import { AppContent } from 'features/ui/components/AppContent'; -import { memo, useCallback } from 'react'; +import { memo } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import ThemeLocaleProvider from './ThemeLocaleProvider'; -const App = () => { - const handleReset = useCallback(() => { - clearStorage(); - location.reload(); - return false; - }, []); +const errorBoundaryOnReset = () => { + clearStorage(); + location.reload(); + return false; +}; +const App = () => { return ( - + From 2ba8a9818e5f4e0dcecb152b7145b538097ca3a0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:13:54 +1100 Subject: [PATCH 09/20] docs(ui): add comments for nes --- .../nodes/hooks/useNodeExecutionState.ts | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeExecutionState.ts 
b/invokeai/frontend/web/src/features/nodes/hooks/useNodeExecutionState.ts index 1b854015f53..8c6fd3ff016 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useNodeExecutionState.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useNodeExecutionState.ts @@ -9,6 +9,14 @@ import { zNodeStatus } from 'features/nodes/types/invocation'; import { map } from 'nanostores'; import { useEffect, useMemo } from 'react'; +/** + * A nanostore that holds the ephemeral execution state of nodes in the graph. The execution state includes + * the status, error, progress, progress image, and outputs of each node. + * + * Note that, because a node can be duplicated by an iterate node, it can have multiple outputs recorded, one for each + * iteration. For example, consider a collection of 3 images that are passed to an iterate node, which then passes each + * image to a resize node. The resize node will have 3 outputs - one for each image. + */ export const $nodeExecutionStates = map({}); const initialNodeExecutionState: Omit = { @@ -40,6 +48,14 @@ export const upsertExecutionState = (nodeId: string, updates?: Partial nodes.map((node) => node.id)); +/** + * Keeps the ephemeral store of node execution states in sync with the nodes in the graph. + * + * For example, if a node is deleted from the graph, its execution state is removed from the store, and + * if a new node is added to the graph, an initial execution state is added to the store. + * + * Node execution states are stored in $nodeExecutionStates nanostore. + */ export const useSyncExecutionState = () => { const nodeIds = useAppSelector(selectNodeIds); useEffect(() => { From 2a4babf52b5b556bd8607735a1e8af9609522604 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:19:03 +1100 Subject: [PATCH 10/20] docs(ui): add comments for image context menu --- .../components/ContextMenu/ImageContextMenu.tsx | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx index 79e75af7e32..636c090a754 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx @@ -11,6 +11,18 @@ import type { RefObject } from 'react'; import { memo, useCallback, useEffect, useRef } from 'react'; import type { ImageDTO } from 'services/api/types'; +/** + * The context menu is loosely based on https://github.com/lukasbach/chakra-ui-contextmenu. + * + * That library creates a component for _every_ instance of a thing that needed a context menu, which caused perf + * issues. This implementation uses a singleton pattern instead, with a single component that listens for context menu + * events and opens the menu as needed. + * + * Images register themselves with the context menu by mapping their DOM element to their image DTO. When a context + * menu event is fired, we look up the target element in the map (or its parents) to find the image DTO to show the + * context menu for. + */ + /** * The delay in milliseconds before the context menu opens on long press. 
*/ From 709c9e7f897c96ca0fdf45642f6a88e99e206875 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:20:11 +1100 Subject: [PATCH 11/20] tidy(ui): rename GalleryPanel file --- .../gallery/components/{Gallery.tsx => GalleryPanel.tsx} | 0 .../web/src/features/ui/layouts/canvas-tab-auto-layout.tsx | 2 +- .../web/src/features/ui/layouts/generate-tab-auto-layout.tsx | 2 +- .../web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx | 2 +- .../web/src/features/ui/layouts/workflows-tab-auto-layout.tsx | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename invokeai/frontend/web/src/features/gallery/components/{Gallery.tsx => GalleryPanel.tsx} (100%) diff --git a/invokeai/frontend/web/src/features/gallery/components/Gallery.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx similarity index 100% rename from invokeai/frontend/web/src/features/gallery/components/Gallery.tsx rename to invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx diff --git a/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx index b622b77e4de..e2cbfe2c5d2 100644 --- a/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx +++ b/invokeai/frontend/web/src/features/ui/layouts/canvas-tab-auto-layout.tsx @@ -2,7 +2,7 @@ import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview'; import { CanvasLayersPanel } from 'features/controlLayers/components/CanvasLayersPanelContent'; import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent'; -import { GalleryPanel } from 'features/gallery/components/Gallery'; +import { GalleryPanel } from 'features/gallery/components/GalleryPanel'; import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel'; import { FloatingCanvasLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons'; import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons'; diff --git a/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx index 81cf2885474..e60c15b5da3 100644 --- a/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx +++ b/invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx @@ -1,7 +1,7 @@ import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview'; import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview'; import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent'; -import { GalleryPanel } from 'features/gallery/components/Gallery'; +import { GalleryPanel } from 'features/gallery/components/GalleryPanel'; import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel'; import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons'; import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons'; diff --git a/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx index 7820187119c..e4f443148ff 100644 --- 
a/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx +++ b/invokeai/frontend/web/src/features/ui/layouts/upscaling-tab-auto-layout.tsx @@ -1,7 +1,7 @@ import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview'; import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview'; import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent'; -import { GalleryPanel } from 'features/gallery/components/Gallery'; +import { GalleryPanel } from 'features/gallery/components/GalleryPanel'; import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel'; import { FloatingLeftPanelButtons } from 'features/ui/components/FloatingLeftPanelButtons'; import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightPanelButtons'; diff --git a/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx b/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx index 06ae423e447..026b7897283 100644 --- a/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx +++ b/invokeai/frontend/web/src/features/ui/layouts/workflows-tab-auto-layout.tsx @@ -1,7 +1,7 @@ import type { DockviewApi, GridviewApi, IDockviewReactProps, IGridviewReactProps } from 'dockview'; import { DockviewReact, GridviewReact, LayoutPriority, Orientation } from 'dockview'; import { BoardsPanel } from 'features/gallery/components/BoardsListPanelContent'; -import { GalleryPanel } from 'features/gallery/components/Gallery'; +import { GalleryPanel } from 'features/gallery/components/GalleryPanel'; import { ImageViewerPanel } from 'features/gallery/components/ImageViewer/ImageViewerPanel'; import NodeEditor from 'features/nodes/components/NodeEditor'; import WorkflowsTabLeftPanel from 'features/nodes/components/sidePanel/WorkflowsTabLeftPanel'; From 8f08051f1fdb32d54e695da1c7da7110ddbd0a58 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:32:33 +1100 Subject: [PATCH 12/20] tidy(ui): rename GalleryImageGrid and add comments --- .../{NewGallery.tsx => GalleryImageGrid.tsx} | 25 +++++++++++++++++-- .../gallery/components/GalleryPanel.tsx | 4 +-- 2 files changed, 25 insertions(+), 4 deletions(-) rename invokeai/frontend/web/src/features/gallery/components/{NewGallery.tsx => GalleryImageGrid.tsx} (90%) diff --git a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx similarity index 90% rename from invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx rename to invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx index d586dc979d9..655e7551ded 100644 --- a/invokeai/frontend/web/src/features/gallery/components/NewGallery.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx @@ -42,6 +42,27 @@ type GridContext = { imageNames: string[]; }; +/** + * The gallery uses a windowed list to only render the images that are currently visible in the viewport. It starts by + * loading a list of all image names for the selected board or view settings. react-virtuoso reports on the currently- + * visible range of images (plus some "overscan"). We then fetch the full image DTOs only for those images, which are + * cached by RTK Query. As the user scrolls, the visible range changes and we fetch more image DTOs as needed. 
+ * + * This affords a nice UX, where the user can scroll to any part of their gallery. The scrollbar size never changes. + * + * We used other approaches in the past: + * - Infinite scroll: Load an initial chunk of images, then load more as the user scrolls to the bottom. The scrollbar + * continually shrinks as more images are loaded. This is a poor UX, as the user cannot easily scroll to a specific + * part of their gallery. It's also pretty complicated to implement within RTK Query, though since we switched, RTK + * Query now supports infinite queries. It might be easier to do this today. + * - Traditional pagination: Show a fixed number of images per page, with pagination controls. This is a poor UX, + * as the user cannot easily scroll to a specific part of their gallery. Gallerys are often very large, and the page + * size changes depending on the viewport size. + */ + +/** + * Wraps an image - either the placeholder as it is being loaded or the loaded image + */ const ImageAtPosition = memo(({ imageName }: { index: number; imageName: string }) => { /* * We rely on the useRangeBasedImageFetching to fetch all image DTOs, caching them with RTK Query. @@ -307,7 +328,7 @@ const useStarImageHotkey = () => { }); }; -export const ImageGallery = memo(() => { +export const GalleryImageGrid = memo(() => { const virtuosoRef = useRef(null); const rangeRef = useRef({ startIndex: 0, endIndex: 0 }); const rootRef = useRef(null); @@ -378,7 +399,7 @@ export const ImageGallery = memo(() => { ); }); -ImageGallery.displayName = 'NewGallery'; +GalleryImageGrid.displayName = 'GalleryImageGrid'; const scrollSeekConfiguration: ScrollSeekConfiguration = { enter: (velocity) => { diff --git a/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx index 3864094604d..874561e2048 100644 --- a/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/GalleryPanel.tsx @@ -14,10 +14,10 @@ import { useTranslation } from 'react-i18next'; import { PiCaretDownBold, PiCaretUpBold, PiMagnifyingGlassBold } from 'react-icons/pi'; import { useBoardName } from 'services/api/hooks/useBoardName'; +import { GalleryImageGrid } from './GalleryImageGrid'; import { GallerySettingsPopover } from './GallerySettingsPopover/GallerySettingsPopover'; import { GalleryUploadButton } from './GalleryUploadButton'; import { GallerySearch } from './ImageGrid/GallerySearch'; -import { ImageGallery } from './NewGallery'; const COLLAPSE_STYLES: CSSProperties = { flexShrink: 0, minHeight: 0, width: '100%' }; @@ -110,7 +110,7 @@ export const GalleryPanel = memo(() => { - + ); From 3eec5daa5fd7d054655c834003035c733c06de61 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 15:33:10 +1100 Subject: [PATCH 13/20] docs(ui): add high-level readmes for various features --- .../web/src/features/cropper/README.md | 11 +++ .../src/features/deleteImageModal/README.md | 7 ++ .../frontend/web/src/features/dnd/README.md | 41 ++++++++++ .../web/src/features/dynamicPrompts/README.md | 11 +++ .../web/src/features/gallery/README.md | 77 +++++++++++++++++++ .../ContextMenu/ImageContextMenu.tsx | 12 --- .../gallery/components/ContextMenu/README.md | 18 +++++ .../gallery/components/GalleryImageGrid.tsx | 18 ----- .../web/src/features/imageActions/README.md | 5 ++ 9 files changed, 170 insertions(+), 30 deletions(-) create mode 100644 
invokeai/frontend/web/src/features/cropper/README.md create mode 100644 invokeai/frontend/web/src/features/deleteImageModal/README.md create mode 100644 invokeai/frontend/web/src/features/dnd/README.md create mode 100644 invokeai/frontend/web/src/features/dynamicPrompts/README.md create mode 100644 invokeai/frontend/web/src/features/gallery/README.md create mode 100644 invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md create mode 100644 invokeai/frontend/web/src/features/imageActions/README.md diff --git a/invokeai/frontend/web/src/features/cropper/README.md b/invokeai/frontend/web/src/features/cropper/README.md new file mode 100644 index 00000000000..0903dab7cfc --- /dev/null +++ b/invokeai/frontend/web/src/features/cropper/README.md @@ -0,0 +1,11 @@ +# Image cropper + +This is a simple image cropping canvas app built with KonvaJS ("native" Konva, _not_ the react bindings). + +The editor implementation is here: invokeai/frontend/web/src/features/cropper/lib/editor.ts + +It is rendered in a modal. + +Currently, the crop functionality is only exposed for reference images. These are the kind of images that most often need cropping (i.e. for FLUX Kontext, which is sensitive to the size/aspect ratio of its ref images). All ref image state is enriched to include a ref to the original image, the cropped image, and the crop attributes. + +The functionality could be extended to all images in the future, but there are some questions around whether we consider gallery images immutable. If so, we can't crop them in place. Do we instead add a new cropped image to the gallery? Or do we add a field to the image metadata that points to a cropped version of the image? diff --git a/invokeai/frontend/web/src/features/deleteImageModal/README.md b/invokeai/frontend/web/src/features/deleteImageModal/README.md new file mode 100644 index 00000000000..0591676530e --- /dev/null +++ b/invokeai/frontend/web/src/features/deleteImageModal/README.md @@ -0,0 +1,7 @@ +# Delete image modal + +When users delete images, we show a confirmation dialog to prevent accidental deletions. Users can opt out of this, but we still check if deleting an image would screw up their workspace and prompt if so. + +For example, if an image is currently set as a field in the workflow editor, we warn the user that deleting it will remove it from the node. We warn them even if they have opted out of the confirmation dialog. + +These "image usage" checks are done using redux selectors/util functions. See invokeai/frontend/web/src/features/deleteImageModal/store/state.ts diff --git a/invokeai/frontend/web/src/features/dnd/README.md b/invokeai/frontend/web/src/features/dnd/README.md new file mode 100644 index 00000000000..a3ab881752d --- /dev/null +++ b/invokeai/frontend/web/src/features/dnd/README.md @@ -0,0 +1,41 @@ +# Drag and drop + +Dnd functionality is implemented with https://github.com/atlassian/pragmatic-drag-and-drop, the successor to https://github.com/atlassian/react-beautiful-dnd + +It uses the native HTML5 drag and drop API and is very performant, though a bit more involved to set up. The library doesn't expose a react API, but rather a set of utility functions to hook into the drag and drop events. + +## Implementation + +The core of our implementation is in invokeai/frontend/web/src/features/dnd/dnd.ts + +We support dragging and dropping of single or multiple images within the app. We have "dnd source" and "dnd target" abstractions. + +A dnd source is anything that provides the draggable payload/data.
Currently, that's either an image DTO or list of image names along with their origin board. + +A dnd target is anything that can accept a drop of that payload. Targets have their own data. For example, a target might be a board with a board ID, or a canvas layer with a layer ID. + +The library has a concept of draggable elements (dnd sources), droppable elements (dnd targets), and dnd monitors. The monitors are invisible elements that track drag events and provide information about the current drag operation. + +The library is a bit to wrap your head around but once you understand the concepts, it's very nice to work with and super flexible. + +## Type safety + +Native drag events do not have any built-in type safety. We inject a unique symbol into the sources and targets and check that via typeguard functions. This gives us confidence that the payload is what we expect it to be and not some other data that might have been dropped from outside the app or some other source. + +## Defining sources and targets + +These are strictly typed in the dnd.ts file. Follow the examples there to define new sources and targets. + +Targets are more complicated - they get an isValid callback (which is called with the currently-dragged source to determine if it can accept the drop) and a handler callback (which is called when the drop is made). + +Both isValid and handler get the source data, target data, and the redux getState/dispatch functions. They can do whatever they need to do to determine if the drop is valid and to handle the drop. + +Typically the isValid function just uses the source type guard function, and the handler function dispatches one or more redux actions to update the state. + +## Other uses of Dnd + +We use the same library for other dnd things: + +- When dragging over some tabbed interface, hovering the tab for a moment will switch to it. See invokeai/frontend/web/src/common/hooks/useCallbackOnDragEnter.ts for a hook that implements this functionality. +- Reordering of canvas layer lists. See invokeai/frontend/web/src/features/controlLayers/components/CanvasEntityList/CanvasEntityGroupList.tsx and invokeai/frontend/web/src/features/controlLayers/components/CanvasEntityList/useCanvasEntityListDnd.ts +- Adding node fields to a workflow form builder and restructuring the form. This gets kinda complicated, as the form builder supports arbitrary nesting of containers with stacking of elements. See invokeai/frontend/web/src/features/nodes/components/sidePanel/builder/dnd-hooks.ts diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/README.md b/invokeai/frontend/web/src/features/dynamicPrompts/README.md new file mode 100644 index 00000000000..242a497140a --- /dev/null +++ b/invokeai/frontend/web/src/features/dynamicPrompts/README.md @@ -0,0 +1,11 @@ +# Dynamic prompts + +The backend API has a route to process a prompt into a list of prompts using the https://github.com/adieyal/dynamicprompts syntax + +In the UI, we watch the current positive prompt field for changes (debounced) and hit that route. + +When generating, we queue up a graph for each of the output prompts. + +There is a modal to show the list of generated prompts with a couple settings for prompt generation. + +The output prompts are stored in the redux slice for ease of consumption during graph building, but only the settings are persisted across page loads. Prompts are ephemeral. 
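The symbol-branding and type-guard approach described in the dnd README added above can be illustrated with a small sketch. This is illustrative only — the names (`buildDndSource`, `singleImageSource`, the payload shape) are assumptions for the example, not the actual exports of dnd.ts:

```ts
// A unique symbol brands payloads created by this app, so drags from outside the app fail the guard.
const dndKey = Symbol('invoke-dnd');

type DndSourceData<T extends string, P> = {
  [dndKey]: true; // brand
  type: T; // discriminator, e.g. 'single_image'
  payload: P; // the actual draggable data
};

// Factory returning a payload builder and a matching type guard for one kind of source.
const buildDndSource = <T extends string, P>(type: T) => {
  const getData = (payload: P): DndSourceData<T, P> => ({ [dndKey]: true, type, payload });
  const typeGuard = (data: unknown): data is DndSourceData<T, P> =>
    typeof data === 'object' &&
    data !== null &&
    (data as Record<PropertyKey, unknown>)[dndKey] === true &&
    (data as Record<PropertyKey, unknown>).type === type;
  return { getData, typeGuard };
};

// Example source: a single image plus its origin board.
const singleImageSource = buildDndSource<'single_image', { imageName: string; boardId: string | null }>('single_image');

// A drop target's isValid callback can then narrow the dragged payload safely.
const canDropSingleImageOnBoard = (dragged: unknown): boolean => singleImageSource.typeGuard(dragged);
```

The same guard can be shared by targets and monitors, so any drop whose payload was not produced by a registered source is simply rejected.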
diff --git a/invokeai/frontend/web/src/features/gallery/README.md b/invokeai/frontend/web/src/features/gallery/README.md new file mode 100644 index 00000000000..8d9bad8fa69 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/README.md @@ -0,0 +1,77 @@ +# Gallery Overview + +The gallery renders a scrollable grid of images. The image sizes adapt to the viewport size, and the user can scroll to any part of their gallery. It supports keyboard navigation, multi-select and context menus. Images can be dragged from the gallery to use them in other parts of the app (they are not removed from the gallery). + +There is some basic ordering and searching support. + +## Boards + +Boards act as folders for images. + +- Users can create any number of boards. +- Each image can be assigned to at most one board. +- There is a default "no board" board, labeled "Uncategorized". +- User-created boards can be deleted. The no-board board cannot be deleted. +- When deleting a board, users can choose to either delete all images in the board, or move them to the no-board board. +- User-created boards can be renamed. The no-board board cannot be renamed. +- Boards cannot be nested. +- Boards can be archived, which hides them from the board list. +- There is no way to show all images at once. The gallery view always shows images for a specific board. +- Boards can be selected to show their images in the panel below the boards list; the gallery grid. +- Boards can be set as the "auto-add" board. New images will be added to this board as they are generated. + +## Image viewer + +Clicking an image in the gallery opens it in the image viewer, which presents a larger view of the image, along with a variety of image actions. + +The image viewer is rendered in one of the main/center panel tabs. + +### Image actions + +A handful of common actions are available as buttons in the image viewer header, matching the context menu actions. + +See invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md + +### Progress viewer + +During generation, we might get "progress images" showing a low-res version of the image at each step in the denoising process. If these are available, the user can open a progress viewer overlay to see the image at each step. + +Socket subscriptions and related logic for handling progress images are in the image viewer context. See invokeai/frontend/web/src/features/gallery/components/ImageViewer/context.tsx + +### Metadata viewer + +The user can enable a metadata overlay to view the image metadata. This is rendered as a semi-transparent overlay on top of the image. + +"Metadata" refers to key-value pairs of various settings. For example, the prompt, number of steps and model used to generate the image. This metadata is embedded into the image file itself, but also stored in the database for searching and filtering. + +Images also have the execution graph embedded in them. This isn't stored in the database, as it can be large and complex. Instead, we extract it from the image when needed. + +Metadata can be recalled, and the graph can be loaded into the workflow editor. + +### Image comparison + +Users can hold Alt when clicking an image in the gallery to select it as the "comparison" image. The comparison image is shown alongside the current image in the image viewer with a couple modes (slider, side-by-side, hover-to-swap). + +## Data fetching + +The gallery uses a windowed list to only render the images that are currently visible in the viewport.
+ +It starts by loading a list of all image names for the selected board or view settings. react-virtuoso reports on the currently-visible range of images (plus some "overscan"). We then fetch the full image DTOs only for those images, which are cached by RTK Query. As the user scrolls, the visible range changes and we fetch more image DTOs as needed. + +This affords a nice UX, where the user can scroll to any part of their gallery. The scrollbar size never changes. + +We've tried some other approaches in the past, but they all had significant UX or implementation issues: + +### Infinite scroll + +Load an initial chunk of images, then load more as the user scrolls to the bottom. + +The scrollbar continually shrinks as more images are loaded. + +This yields a poor UX, as the user cannot easily scroll to a specific part of their gallery. It's also pretty complicated to implement within RTK Query, though since we switched, RTK Query now supports infinite queries. It might be easier to do this today. + +### Traditional pagination + +Show a fixed number of images per page, with pagination controls. + +This is a poor UX, as the user cannot easily scroll to a specific part of their gallery. Galleries are often very large, and the page size changes depending on the viewport size. The gallery is also constantly inserting new images at the top of the list, which means we are constantly invalidating the current page's query cache and the page numbers are not stable. diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx index 636c090a754..79e75af7e32 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/ImageContextMenu.tsx @@ -11,18 +11,6 @@ import type { RefObject } from 'react'; import { memo, useCallback, useEffect, useRef } from 'react'; import type { ImageDTO } from 'services/api/types'; -/** - * The context menu is loosely based on https://github.com/lukasbach/chakra-ui-contextmenu. - * - * That library creates a component for _every_ instance of a thing that needed a context menu, which caused perf - * issues. This implementation uses a singleton pattern instead, with a single component that listens for context menu - * events and opens the menu as needed. - * - * Images register themselves with the context menu by mapping their DOM element to their image DTO. When a context - * menu event is fired, we look up the target element in the map (or its parents) to find the image DTO to show the - * context menu for. - */ - /** * The delay in milliseconds before the context menu opens on long press. */ diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md new file mode 100644 index 00000000000..0c96bbfa99c --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/README.md @@ -0,0 +1,18 @@ +# Image context menu + +The context menu is loosely based on https://github.com/lukasbach/chakra-ui-contextmenu. + +That library creates a component for _every_ instance of a thing that needed a context menu, which caused perf issues. This implementation uses a singleton pattern instead, with a single component that listens for context menu events and opens the menu as needed.
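The registration and lookup flow this README describes might look roughly like the following sketch. The names here (`registerImageElement`, `openImageContextMenu`) are hypothetical, not the real module API:

```ts
import type { ImageDTO } from 'services/api/types';

// Module-level registry mapping a registered element to the image it represents.
const registry = new Map<HTMLElement, ImageDTO>();

// Each gallery image registers itself on mount and unregisters via the returned cleanup.
export const registerImageElement = (element: HTMLElement, imageDTO: ImageDTO) => {
  registry.set(element, imageDTO);
  return () => registry.delete(element);
};

// Placeholder for whatever actually renders the single shared menu.
const openImageContextMenu = (imageDTO: ImageDTO, x: number, y: number): void => {
  // show the singleton menu at (x, y) for imageDTO
};

// One listener for the whole app: walk up from the event target until a registered element is found.
document.addEventListener('contextmenu', (e) => {
  let el = e.target instanceof HTMLElement ? e.target : null;
  while (el) {
    const imageDTO = registry.get(el);
    if (imageDTO) {
      e.preventDefault();
      openImageContextMenu(imageDTO, e.clientX, e.clientY);
      return;
    }
    el = el.parentElement;
  }
});
```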
+ +Images register themselves with the context menu by mapping their DOM element to their image DTO. When a context menu event is fired, we look up the target element in the map (or its parents) to find the image DTO to show the context menu for. + +## Image actions + +- Recalling common individual metadata fields or all metadata +- Opening the image in the image viewer or new tab +- Copying the image to clipboard +- Downloading the image +- Selecting the image for comparison +- Deleting the image +- Moving the image to a different board +- "Sending" the image to other parts of the app such as canvas diff --git a/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx index 655e7551ded..f2c50f786ec 100644 --- a/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/GalleryImageGrid.tsx @@ -42,24 +42,6 @@ type GridContext = { imageNames: string[]; }; -/** - * The gallery uses a windowed list to only render the images that are currently visible in the viewport. It starts by - * loading a list of all image names for the selected board or view settings. react-virtuoso reports on the currently- - * visible range of images (plus some "overscan"). We then fetch the full image DTOs only for those images, which are - * cached by RTK Query. As the user scrolls, the visible range changes and we fetch more image DTOs as needed. - * - * This affords a nice UX, where the user can scroll to any part of their gallery. The scrollbar size never changes. - * - * We used other approaches in the past: - * - Infinite scroll: Load an initial chunk of images, then load more as the user scrolls to the bottom. The scrollbar - * continually shrinks as more images are loaded. This is a poor UX, as the user cannot easily scroll to a specific - * part of their gallery. It's also pretty complicated to implement within RTK Query, though since we switched, RTK - * Query now supports infinite queries. It might be easier to do this today. - * - Traditional pagination: Show a fixed number of images per page, with pagination controls. This is a poor UX, - * as the user cannot easily scroll to a specific part of their gallery. Gallerys are often very large, and the page - * size changes depending on the viewport size. - */ - /** * Wraps an image - either the placeholder as it is being loaded or the loaded image */ diff --git a/invokeai/frontend/web/src/features/imageActions/README.md b/invokeai/frontend/web/src/features/imageActions/README.md new file mode 100644 index 00000000000..22668b29ac5 --- /dev/null +++ b/invokeai/frontend/web/src/features/imageActions/README.md @@ -0,0 +1,5 @@ +# Image actions + +This dir is (unintentionally) a dumping ground for things that we do with images. For example, adding an image as a canvas layer. + +Probably these functions should be moved to more appropriate places.
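Returning to the windowed-list strategy in the gallery README above, a stripped-down sketch of the range-based fetching could look like the following. Everything here other than react-virtuoso's own `Virtuoso` props is an assumption: `useImageNames`, `useLoadImageRange` and `ImagePlaceholder` are hypothetical stand-ins, not the real hooks or components.

```tsx
import { memo, useCallback, useState } from 'react';
import type { ReactElement } from 'react';
import type { ListRange } from 'react-virtuoso';
import { Virtuoso } from 'react-virtuoso';

// Assumed stand-ins for the real hooks/components:
declare function useImageNames(boardId: string | null): string[]; // all image names for the selected board
declare function useLoadImageRange(names: string[], range: ListRange): void; // fetches + caches DTOs for the visible range
declare function ImagePlaceholder(props: { imageName: string }): ReactElement; // renders a cached DTO or a skeleton

const GallerySketch = memo(({ boardId }: { boardId: string | null }) => {
  const imageNames = useImageNames(boardId);
  const [range, setRange] = useState<ListRange>({ startIndex: 0, endIndex: 0 });

  // Only the names inside the reported range get their DTOs fetched; everything else stays unfetched.
  useLoadImageRange(imageNames, range);

  const itemContent = useCallback(
    (index: number) => <ImagePlaceholder imageName={imageNames[index] ?? ''} />,
    [imageNames]
  );

  // totalCount is the full gallery size, so the scrollbar length never changes as DTOs load in.
  return <Virtuoso totalCount={imageNames.length} rangeChanged={setRange} itemContent={itemContent} />;
});
GallerySketch.displayName = 'GallerySketch';
```

Because the fetched DTOs are cached, scrolling back over a previously visited range does not refetch anything.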
From 267c24c5af28dba2d5d0a4ff33b931f46738f366 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 15:34:14 +1100 Subject: [PATCH 14/20] tidy(ui): clean up useSyncQueueStatus --- .../src/app/components/GlobalHookIsolator.tsx | 4 +-- .../app/hooks/useSyncFaviconQueueStatus.ts | 33 +++++++++++++++++++ .../web/src/app/hooks/useSyncQueueStatus.ts | 25 -------------- 3 files changed, 35 insertions(+), 27 deletions(-) create mode 100644 invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts delete mode 100644 invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts diff --git a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx index a4345f373a6..77e8412daa7 100644 --- a/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx +++ b/invokeai/frontend/web/src/app/components/GlobalHookIsolator.tsx @@ -1,7 +1,7 @@ import { useGlobalModifiersInit } from '@invoke-ai/ui-library'; import { setupListeners } from '@reduxjs/toolkit/query'; +import { useSyncFaviconQueueStatus } from 'app/hooks/useSyncFaviconQueueStatus'; import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection'; -import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus'; import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig'; import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; @@ -64,7 +64,7 @@ export const GlobalHookIsolator = memo(() => { }, [dispatch]); useStarterModelsToast(); - useSyncQueueStatus(); + useSyncFaviconQueueStatus(); useFocusRegionWatcher(); useWorkflowBuilderWatcher(); useDynamicPromptsWatcher(); diff --git a/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts b/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts new file mode 100644 index 00000000000..7bd55f25f4f --- /dev/null +++ b/invokeai/frontend/web/src/app/hooks/useSyncFaviconQueueStatus.ts @@ -0,0 +1,33 @@ +import { useAssertSingleton } from 'common/hooks/useAssertSingleton'; +import { useEffect } from 'react'; +import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; + +const baseTitle = document.title; +const invokeLogoSVG = 'assets/images/invoke-favicon.svg'; +const invokeAlertLogoSVG = 'assets/images/invoke-alert-favicon.svg'; + +const queryOptions = { + selectFromResult: (res) => ({ + queueSize: res.data ? res.data.queue.pending + res.data.queue.in_progress : 0, + }), +} satisfies Parameters[1]; + +const updateFavicon = (queueSize: number) => { + document.title = queueSize > 0 ? `(${queueSize}) ${baseTitle}` : baseTitle; + const faviconEl = document.getElementById('invoke-favicon'); + if (faviconEl instanceof HTMLLinkElement) { + faviconEl.href = queueSize > 0 ? invokeAlertLogoSVG : invokeLogoSVG; + } +}; + +/** + * This hook synchronizes the queue status with the page's title and favicon. + * It should be considered a singleton and only used once in the component tree. 
+ */ +export const useSyncFaviconQueueStatus = () => { + useAssertSingleton('useSyncFaviconQueueStatus'); + const { queueSize } = useGetQueueStatusQuery(undefined, queryOptions); + useEffect(() => { + updateFavicon(queueSize); + }, [queueSize]); +}; diff --git a/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts b/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts deleted file mode 100644 index d6874c3bb5e..00000000000 --- a/invokeai/frontend/web/src/app/hooks/useSyncQueueStatus.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { useEffect } from 'react'; -import { useGetQueueStatusQuery } from 'services/api/endpoints/queue'; - -const baseTitle = document.title; -const invokeLogoSVG = 'assets/images/invoke-favicon.svg'; -const invokeAlertLogoSVG = 'assets/images/invoke-alert-favicon.svg'; - -/** - * This hook synchronizes the queue status with the page's title and favicon. - * It should be considered a singleton and only used once in the component tree. - */ -export const useSyncQueueStatus = () => { - const { queueSize } = useGetQueueStatusQuery(undefined, { - selectFromResult: (res) => ({ - queueSize: res.data ? res.data.queue.pending + res.data.queue.in_progress : 0, - }), - }); - useEffect(() => { - document.title = queueSize > 0 ? `(${queueSize}) ${baseTitle}` : baseTitle; - const faviconEl = document.getElementById('invoke-favicon'); - if (faviconEl instanceof HTMLLinkElement) { - faviconEl.href = queueSize > 0 ? invokeAlertLogoSVG : invokeLogoSVG; - } - }, [queueSize]); -}; From 2c632f189203a4ad7f278dd92c50be3c43bf8e7d Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 16:07:40 +1100 Subject: [PATCH 15/20] tidy: app "config", settings modal, infill methods We had an "infill methods" route that long ago told the frontend infill method, upscale method (model), NSFW checker, and watermark feature availability. None of these were used except for the patchmatch check. Removed them, made the check exclusively for patchmatch, updated related code in redux app startup listeners and settings modal. 
--- invokeai/app/api/routers/app_info.py | 41 +----- .../listeners/appConfigReceived.ts | 29 ----- .../listeners/appStarted.ts | 25 +++- invokeai/frontend/web/src/app/store/store.ts | 3 +- .../controlLayers/store/paramsSlice.ts | 4 +- .../src/features/controlLayers/store/types.ts | 5 +- .../InfillAndScaling/ParamInfillMethod.tsx | 33 ++--- .../SettingsModal/SettingsModal.tsx | 118 ++++++------------ .../SettingsModal/useClearIntermediates.ts | 3 +- .../web/src/services/api/endpoints/appInfo.ts | 11 +- .../frontend/web/src/services/api/schema.ts | 49 +------- .../frontend/web/src/services/api/types.ts | 1 - 12 files changed, 103 insertions(+), 219 deletions(-) delete mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts diff --git a/invokeai/app/api/routers/app_info.py b/invokeai/app/api/routers/app_info.py index c71739fbd2a..d8f3bb2f807 100644 --- a/invokeai/app/api/routers/app_info.py +++ b/invokeai/app/api/routers/app_info.py @@ -1,7 +1,5 @@ -import typing from enum import Enum from importlib.metadata import distributions -from pathlib import Path import torch from fastapi import Body @@ -9,7 +7,6 @@ from pydantic import BaseModel, Field from invokeai.app.api.dependencies import ApiDependencies -from invokeai.app.invocations.upscale import ESRGAN_MODELS from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch @@ -26,11 +23,6 @@ class LogLevel(int, Enum): Critical = logging.CRITICAL -class Upscaler(BaseModel): - upscaling_method: str = Field(description="Name of upscaling method") - upscaling_models: list[str] = Field(description="List of upscaling models for this method") - - app_router = APIRouter(prefix="/v1/app", tags=["app"]) @@ -40,15 +32,6 @@ class AppVersion(BaseModel): version: str = Field(description="App version") -class AppConfig(BaseModel): - """App Config Response""" - - infill_methods: list[str] = Field(description="List of available infill methods") - upscaling_methods: list[Upscaler] = Field(description="List of upscaling methods") - nsfw_methods: list[str] = Field(description="List of NSFW checking methods") - watermarking_methods: list[str] = Field(description="List of invisible watermark methods") - - @app_router.get("/version", operation_id="app_version", status_code=200, response_model=AppVersion) async def get_version() -> AppVersion: return AppVersion(version=__version__) @@ -69,27 +52,9 @@ async def get_app_deps() -> dict[str, str]: return sorted_deps -@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig) -async def get_config_() -> AppConfig: - infill_methods = ["lama", "tile", "cv2", "color"] # TODO: add mosaic back - if PatchMatch.patchmatch_available(): - infill_methods.append("patchmatch") - - upscaling_models = [] - for model in typing.get_args(ESRGAN_MODELS): - upscaling_models.append(str(Path(model).stem)) - upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models) - - nsfw_methods = ["nsfw_checker"] - - watermarking_methods = ["invisible_watermark"] - - return AppConfig( - infill_methods=infill_methods, - upscaling_methods=[upscaler], - nsfw_methods=nsfw_methods, - watermarking_methods=watermarking_methods, - ) +@app_router.get("/patchmatch_status", operation_id="get_patchmatch_status", status_code=200, response_model=bool) +async def 
get_patchmatch_status() -> bool: + return PatchMatch.patchmatch_available() class InvokeAIAppConfigWithSetFields(BaseModel): diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts deleted file mode 100644 index 4c8f139779a..00000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts +++ /dev/null @@ -1,29 +0,0 @@ -import type { AppStartListening } from 'app/store/store'; -import { setInfillMethod } from 'features/controlLayers/store/paramsSlice'; -import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice'; -import { appInfoApi } from 'services/api/endpoints/appInfo'; - -export const addAppConfigReceivedListener = (startAppListening: AppStartListening) => { - startAppListening({ - matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled, - effect: (action, { getState, dispatch }) => { - const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload; - const infillMethod = getState().params.infillMethod; - - if (!infill_methods.includes(infillMethod)) { - // If the selected infill method does not exist, prefer 'lama' if it's in the list, otherwise 'tile'. - // TODO(psyche): lama _should_ always be in the list, but the API doesn't guarantee it... - const infillMethod = infill_methods.includes('lama') ? 'lama' : 'tile'; - dispatch(setInfillMethod(infillMethod)); - } - - if (!nsfw_methods.includes('nsfw_checker')) { - dispatch(shouldUseNSFWCheckerChanged(false)); - } - - if (!watermarking_methods.includes('invisible_watermark')) { - dispatch(shouldUseWatermarkerChanged(false)); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts index 794d1a1af60..6bff69c64a3 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts @@ -1,7 +1,10 @@ import { createAction } from '@reduxjs/toolkit'; import type { AppStartListening } from 'app/store/store'; +import { noop } from 'es-toolkit'; +import { setInfillMethod } from 'features/controlLayers/store/paramsSlice'; import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors'; import { imageSelected } from 'features/gallery/store/gallerySlice'; +import { appInfoApi } from 'services/api/endpoints/appInfo'; import { imagesApi } from 'services/api/endpoints/images'; export const appStarted = createAction('app/appStarted'); @@ -9,14 +12,17 @@ export const appStarted = createAction('app/appStarted'); export const addAppStartedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: appStarted, - effect: async (action, { unsubscribe, cancelActiveListeners, take, getState, dispatch }) => { + effect: (action, { unsubscribe, cancelActiveListeners, take, getState, dispatch }) => { // this should only run once cancelActiveListeners(); unsubscribe(); // ensure an image is selected when we load the first board - const firstImageLoad = await take(imagesApi.endpoints.getImageNames.matchFulfilled); - if (firstImageLoad !== null) { + take(imagesApi.endpoints.getImageNames.matchFulfilled).then((firstImageLoad) => { + if (firstImageLoad === null) 
{ + // timeout or cancelled + return; + } const [{ payload }] = firstImageLoad; const selectedImage = selectLastSelectedItem(getState()); if (selectedImage) { @@ -25,7 +31,18 @@ export const addAppStartedListener = (startAppListening: AppStartListening) => { if (payload.image_names[0]) { dispatch(imageSelected(payload.image_names[0])); } - } + }); + + dispatch(appInfoApi.endpoints.getPatchmatchStatus.initiate()) + .unwrap() + .then((isPatchmatchAvailable) => { + const infillMethod = getState().params.infillMethod; + + if (!isPatchmatchAvailable && infillMethod === 'patchmatch') { + dispatch(setInfillMethod('lama')); + } + }) + .catch(noop); }, }); }; diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts index 15dbfc785ca..cad6f489df7 100644 --- a/invokeai/frontend/web/src/app/store/store.ts +++ b/invokeai/frontend/web/src/app/store/store.ts @@ -4,7 +4,6 @@ import { logger } from 'app/logging/logger'; import { errorHandler } from 'app/store/enhancers/reduxRemember/errors'; import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener'; import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued'; -import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived'; import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted'; @@ -242,6 +241,7 @@ export type AppStartListening = TypedStartListening; export const addAppListener = addListener.withTypes(); +// To avoid circular dependencies, all listener middleware listeners are added here in the main store setup file. 
const startAppListening = listenerMiddleware.startListening as AppStartListening; addImageUploadedFulfilledListener(startAppListening); @@ -273,7 +273,6 @@ addModelSelectedListener(startAppListening); // app startup addAppStartedListener(startAppListening); addModelsLoadedListener(startAppListening); -addAppConfigReceivedListener(startAppListening); // Ad-hoc upscale workflwo addAdHocPostProcessingRequestedListener(startAppListening); diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts index 90eb53124a1..9dd85b1bc20 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts @@ -6,7 +6,7 @@ import { deepClone } from 'common/util/deepClone'; import { roundDownToMultiple, roundToMultiple } from 'common/util/roundDownToMultiple'; import { isPlainObject } from 'es-toolkit'; import { clamp } from 'es-toolkit/compat'; -import type { AspectRatioID, ParamsState, RgbaColor } from 'features/controlLayers/store/types'; +import type { AspectRatioID, InfillMethod, ParamsState, RgbaColor } from 'features/controlLayers/store/types'; import { ASPECT_RATIO_MAP, DEFAULT_ASPECT_RATIO_CONFIG, @@ -219,7 +219,7 @@ const slice = createSlice({ setRefinerStart: (state, action: PayloadAction) => { state.refinerStart = action.payload; }, - setInfillMethod: (state, action: PayloadAction) => { + setInfillMethod: (state, action: PayloadAction) => { state.infillMethod = action.payload; }, setInfillTileSize: (state, action: PayloadAction) => { diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 0ec0cd7baa8..87c173d7cca 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -562,6 +562,9 @@ const zPositivePromptHistory = z .array(zParameterPositivePrompt) .transform((arr) => arr.slice(0, MAX_POSITIVE_PROMPT_HISTORY)); +export const zInfillMethod = z.enum(['patchmatch', 'lama', 'cv2', 'color', 'tile']); +export type InfillMethod = z.infer; + export const zParamsState = z.object({ _version: z.literal(2), maskBlur: z.number(), @@ -569,7 +572,7 @@ export const zParamsState = z.object({ canvasCoherenceMode: zParameterCanvasCoherenceMode, canvasCoherenceMinDenoise: zParameterStrength, canvasCoherenceEdgeSize: z.number(), - infillMethod: z.string(), + infillMethod: zInfillMethod, infillTileSize: z.number(), infillPatchmatchDownscaleSize: z.number(), infillColorValue: zRgbaColor, diff --git a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx index 2ae24fdb805..5b795aeaddd 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Canvas/InfillAndScaling/ParamInfillMethod.tsx @@ -1,34 +1,39 @@ -import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; +import type { ComboboxOnChange } from '@invoke-ai/ui-library'; import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { EMPTY_ARRAY } from 'app/store/constants'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 
'common/components/InformationalPopover/InformationalPopover'; import { selectInfillMethod, setInfillMethod } from 'features/controlLayers/store/paramsSlice'; +import { zInfillMethod } from 'features/controlLayers/store/types'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { useGetAppConfigQuery } from 'services/api/endpoints/appInfo'; +import { useGetPatchmatchStatusQuery } from 'services/api/endpoints/appInfo'; const ParamInfillMethod = () => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const infillMethod = useAppSelector(selectInfillMethod); - const { data: appConfigData } = useGetAppConfigQuery(); - const options = useMemo( - () => - appConfigData - ? appConfigData.infill_methods.map((method) => ({ - label: method, - value: method, - })) - : [], - [appConfigData] - ); + const { options } = useGetPatchmatchStatusQuery(undefined, { + selectFromResult: ({ data: isPatchmatchAvailable }) => { + if (isPatchmatchAvailable === undefined) { + // loading... + return { options: EMPTY_ARRAY }; + } + if (isPatchmatchAvailable) { + return { options: zInfillMethod.options.map((o) => ({ label: o, value: o })) }; + } + return { + options: zInfillMethod.options.filter((o) => o !== 'patchmatch').map((o) => ({ label: o, value: o })), + }; + }, + }); const onChange = useCallback( (v) => { if (!v || !options.find((o) => o.value === v.value)) { return; } - dispatch(setInfillMethod(v.value)); + dispatch(setInfillMethod(zInfillMethod.parse(v.value))); }, [dispatch, options] ); diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx index 1be280fa40e..dc83e3efa8d 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx @@ -51,52 +51,23 @@ import { setShouldShowProgressInViewer } from 'features/ui/store/uiSlice'; import type { ChangeEvent, ReactElement } from 'react'; import { cloneElement, memo, useCallback, useEffect } from 'react'; import { useTranslation } from 'react-i18next'; -import { useGetAppConfigQuery } from 'services/api/endpoints/appInfo'; import { SettingsLanguageSelect } from './SettingsLanguageSelect'; -type ConfigOptions = { - shouldShowDeveloperSettings?: boolean; - shouldShowResetWebUiText?: boolean; - shouldShowClearIntermediates?: boolean; - shouldShowLocalizationToggle?: boolean; - shouldShowInvocationProgressDetailSetting?: boolean; -}; - -const defaultConfig: ConfigOptions = { - shouldShowDeveloperSettings: true, - shouldShowResetWebUiText: true, - shouldShowClearIntermediates: true, - shouldShowLocalizationToggle: true, - shouldShowInvocationProgressDetailSetting: true, -}; - -type SettingsModalProps = { - /* The button to open the Settings Modal */ - children: ReactElement; - config?: ConfigOptions; -}; - const [useSettingsModal] = buildUseBoolean(false); -const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps) => { +const SettingsModal = (props: { children: ReactElement }) => { const dispatch = useAppDispatch(); const { t } = useTranslation(); - const { isNSFWCheckerAvailable, isWatermarkerAvailable } = useGetAppConfigQuery(undefined, { - selectFromResult: ({ data }) => ({ - isNSFWCheckerAvailable: data?.nsfw_methods.includes('nsfw_checker') ?? 
false, - isWatermarkerAvailable: data?.watermarking_methods.includes('invisible_watermark') ?? false, - }), - }); - const { clearIntermediates, hasPendingItems, intermediatesCount, isLoading: isLoadingClearIntermediates, refetchIntermediatesCount, - } = useClearIntermediates(Boolean(config?.shouldShowClearIntermediates)); + } = useClearIntermediates(); + const settingsModal = useSettingsModal(); const refreshModal = useRefreshAfterResetModal(); @@ -116,10 +87,11 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps) }, [dispatch]); useEffect(() => { - if (settingsModal.isTrue && Boolean(config?.shouldShowClearIntermediates)) { + // Refetch intermediates count when modal is opened + if (settingsModal.isTrue) { refetchIntermediatesCount(); } - }, [config?.shouldShowClearIntermediates, refetchIntermediatesCount, settingsModal.isTrue]); + }, [refetchIntermediatesCount, settingsModal.isTrue]); const handleClickResetWebUI = useCallback(() => { clearStorage(); @@ -192,7 +164,7 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps) return ( <> - {cloneElement(children, { + {cloneElement(props.children, { onClick: settingsModal.setTrue, })} @@ -216,11 +188,11 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps) - + {t('settings.enableNSFWChecker')} - + {t('settings.enableInvisibleWatermark')} @@ -241,22 +213,20 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps) onChange={handleChangeShouldAntialiasProgressImage} /> - {Boolean(config?.shouldShowInvocationProgressDetailSetting) && ( - - {t('settings.showDetailedInvocationProgress')} - - - )} + + {t('settings.showDetailedInvocationProgress')} + + {t('parameters.useCpuNoise')} - {Boolean(config?.shouldShowLocalizationToggle) && } + {t('settings.enableInformationalPopovers')} - {Boolean(config?.shouldShowDeveloperSettings) && ( - - - - - - )} + + + + + - {Boolean(config?.shouldShowClearIntermediates) && ( - - - {t('settings.clearIntermediatesDesc1')} - {t('settings.clearIntermediatesDesc2')} - {t('settings.clearIntermediatesDesc3')} - - )} + + + {t('settings.clearIntermediatesDesc1')} + {t('settings.clearIntermediatesDesc2')} + {t('settings.clearIntermediatesDesc3')} + - {Boolean(config?.shouldShowResetWebUiText) && ( - <> - {t('settings.resetWebUIDesc1')} - {t('settings.resetWebUIDesc2')} - - )} + {t('settings.resetWebUIDesc1')} + {t('settings.resetWebUIDesc2')} diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts b/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts index 26ad8a78ad7..c9f1e524bfa 100644 --- a/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts +++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/useClearIntermediates.ts @@ -12,12 +12,11 @@ type UseClearIntermediatesReturn = { refetchIntermediatesCount: () => void; }; -export const useClearIntermediates = (shouldShowClearIntermediates: boolean): UseClearIntermediatesReturn => { +export const useClearIntermediates = (): UseClearIntermediatesReturn => { const { t } = useTranslation(); const { data: intermediatesCount, refetch: refetchIntermediatesCount } = useGetIntermediatesCountQuery(undefined, { refetchOnMountOrArgChange: true, - skip: !shouldShowClearIntermediates, }); const [_clearIntermediates, { isLoading }] = useClearIntermediatesMutation(); diff --git 
a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts index 2388dd0c089..f72d6ad81e8 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts @@ -1,7 +1,7 @@ import type { OpenAPIV3_1 } from 'openapi-types'; import type { stringify } from 'querystring'; import type { paths } from 'services/api/schema'; -import type { AppConfig, AppVersion } from 'services/api/types'; +import type { AppVersion } from 'services/api/types'; import { api, buildV1Url } from '..'; @@ -33,9 +33,12 @@ export const appInfoApi = api.injectEndpoints({ }), providesTags: ['FetchOnReconnect'], }), - getAppConfig: build.query({ + getPatchmatchStatus: build.query< + paths['/api/v1/app/patchmatch_status']['get']['responses']['200']['content']['application/json'], + void + >({ query: () => ({ - url: buildAppInfoUrl('config'), + url: buildAppInfoUrl('patchmatch_status'), method: 'GET', }), providesTags: ['FetchOnReconnect'], @@ -90,7 +93,7 @@ export const appInfoApi = api.injectEndpoints({ export const { useGetAppVersionQuery, useGetAppDepsQuery, - useGetAppConfigQuery, + useGetPatchmatchStatusQuery, useGetRuntimeConfigQuery, useClearInvocationCacheMutation, useDisableInvocationCacheMutation, diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index db4ba519cad..456d5f3d55e 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1026,15 +1026,15 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v1/app/config": { + "/api/v1/app/patchmatch_status": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - /** Get Config */ - get: operations["get_config"]; + /** Get Patchmatch Status */ + get: operations["get_patchmatch_status"]; put?: never; post?: never; delete?: never; @@ -1993,32 +1993,6 @@ export type components = { type: "alpha_mask_to_tensor"; }; AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | 
components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; - /** - * AppConfig - * @description App Config Response - */ - AppConfig: { - /** - * Infill Methods - * @description List of available infill methods - */ - infill_methods: string[]; - /** - * Upscaling Methods - * @description List of upscaling methods - */ - upscaling_methods: components["schemas"]["Upscaler"][]; - /** - * Nsfw Methods - * @description List of NSFW checking methods - */ - nsfw_methods: string[]; - /** - * Watermarking Methods - * @description List of invisible watermark methods - */ - watermarking_methods: string[]; - }; /** * AppVersion * @description App Version Response @@ -23605,19 +23579,6 @@ export type components = { */ unstarred_images: string[]; }; - /** Upscaler */ - Upscaler: { - /** - * Upscaling Method - * @description Name of upscaling method - */ - upscaling_method: string; - /** - * Upscaling Models - * @description List of upscaling models for this method - */ - upscaling_models: string[]; - }; /** VAEField */ VAEField: { /** @description Info to load vae submodel */ @@ -26870,7 +26831,7 @@ export interface operations { }; }; }; - get_config: { + get_patchmatch_status: { parameters: { query?: never; header?: never; @@ -26885,7 +26846,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["AppConfig"]; + "application/json": boolean; }; }; }; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index d5f7eedaf4c..fa4c04a62eb 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ 
b/invokeai/frontend/web/src/services/api/types.ts @@ -43,7 +43,6 @@ export type InvocationJSONSchemaExtra = S['UIConfigBase']; // App Info export type AppVersion = S['AppVersion']; -export type AppConfig = S['AppConfig']; const zResourceOrigin = z.enum(['internal', 'external']); type ResourceOrigin = z.infer; From 3539880a6ffd792047066aa0bbc72be8d931cf32 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 17:11:03 +1100 Subject: [PATCH 16/20] docs(ui): more documentation --- .../frontend/web/src/features/ui/README.md | 26 +++++++++++++++++++ .../frontend/web/src/services/api/README.md | 20 ++++++++++++++ .../services/events/onInvocationComplete.tsx | 10 +++++++ .../services/events/onModelInstallError.tsx | 3 +++ .../src/services/events/setEventListeners.tsx | 4 +++ 5 files changed, 63 insertions(+) create mode 100644 invokeai/frontend/web/src/features/ui/README.md create mode 100644 invokeai/frontend/web/src/services/api/README.md diff --git a/invokeai/frontend/web/src/features/ui/README.md b/invokeai/frontend/web/src/features/ui/README.md new file mode 100644 index 00000000000..f4c262d425d --- /dev/null +++ b/invokeai/frontend/web/src/features/ui/README.md @@ -0,0 +1,26 @@ +# UI/Layout + +We use https://github.com/mathuo/dockview for layout. This library supports resizable and dockable panels. Users can drag and drop panels to rearrange them. + +The intention when adopting this library was to allow users to create their own custom layouts and save them. However, this feature is not yet implemented and each tab only has a predefined layout. + +This works well, but it _is_ fairly complex. You can see that we've needed to write a fairly involved API to manage the layouts: invokeai/frontend/web/src/features/ui/layouts/navigation-api.ts + +And the layouts themselves are awkward to define, especially when compared to plain JSX: invokeai/frontend/web/src/features/ui/layouts/generate-tab-auto-layout.tsx + +This complexity may or may not be worth it. + +## Previous approach + +Previously we used https://github.com/bvaughn/react-resizable-panels and simple JSX components. + +This library is great except it doesn't support absolute size constraints, only relative/percentage constraints. We had a brittle abstraction layer on top of it to try to enforce minimum pixel sizes for panels but it was janky and had FP precision issues causing drifting sizes. + +It also doesn't support dockable panels. + +## Future possibilities + +1. Continue with dockview and implement custom layout saving/loading. We experimented with this and it was _really_ nice. We defined a component for each panel type and use react context to manage state. But we thought that it would be confusing for most users, so we flagged it for a future iteration and instead shipped with predefined layouts. +2. Switch to a simpler layout library or roll our own. + +In hindsight, we should have skipped dockview and found something else that was simpler until we were ready to invest in custom layouts. diff --git a/invokeai/frontend/web/src/services/api/README.md b/invokeai/frontend/web/src/services/api/README.md new file mode 100644 index 00000000000..2cd21dbd99b --- /dev/null +++ b/invokeai/frontend/web/src/services/api/README.md @@ -0,0 +1,20 @@ +# API + +The API client is a fairly standard Redux Toolkit Query (RTK-Query) setup. 
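As a hedged sketch of what that looks like in practice, injecting an endpoint whose types come straight from the generated OpenAPI schema looks roughly like this. The route string and URL helper usage are assumptions modeled on the appInfo.ts changes in the previous commit, not a definitive implementation.

```ts
import type { paths } from 'services/api/schema';

import { api, buildV1Url } from '..';

// Response type derived from the generated OpenAPI types.
type AppVersionResponse = paths['/api/v1/app/version']['get']['responses']['200']['content']['application/json'];

// Example endpoint injection - a sketch mirroring the pattern used by appInfo.ts.
export const exampleApi = api.injectEndpoints({
  endpoints: (build) => ({
    getAppVersionExample: build.query<AppVersionResponse, void>({
      query: () => ({ url: buildV1Url('app/version'), method: 'GET' }),
    }),
  }),
});

export const { useGetAppVersionExampleQuery } = exampleApi;
```

The real endpoints follow this same pattern on top of the shared base query described next.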
+ +It defines a simple base query with special handling for OpenAPI schema queries and endpoints: invokeai/frontend/web/src/services/api/index.ts + +## Types + +The API provides an OpenAPI schema and we generate TS types from it. They are stored in: invokeai/frontend/web/src/services/api/schema.ts + +We use https://github.com/openapi-ts/openapi-typescript/ to generate the types. + +- Python script to outut the OpenAPI schema: scripts/generate_openapi_schema.py +- Node script to call openapi-typescript and generate the TS types: invokeai/frontend/web/scripts/typegen.js + +Pipe the output of the python script to the node script to update the types. There is a `make` target that does this in one fell swoop (after activating venv): `make frontend-typegen` + +Alternatively, start the ptyhon server and run `pnpm typegen`. + +The schema.ts file is pushed to the repo, and a CI check ensures it is up to date. diff --git a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx index d076b9a7303..e2ee74dcad1 100644 --- a/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx +++ b/invokeai/frontend/web/src/services/events/onInvocationComplete.tsx @@ -26,8 +26,18 @@ import type { JsonObject } from 'type-fest'; const log = logger('events'); +// These nodes are passthrough nodes. They do not add images to the gallery, so we must skip that handling for them. const nodeTypeDenylist = ['load_image', 'image']; +/** + * Builds the socket event handler for invocation complete events. Adds output images to the gallery and/or updates + * node execution states for the workflow editor. + * + * @param getState The Redux getState function. + * @param dispatch The Redux dispatch function. + * @param finishedQueueItemIds A cache of finished queue item IDs to prevent duplicate handling and avoid race + * conditions that can happen when a graph finishes very quickly. + */ export const buildOnInvocationComplete = ( getState: AppGetState, dispatch: AppDispatch, diff --git a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx index ba7a3ed19f3..24c31c4a296 100644 --- a/invokeai/frontend/web/src/services/events/onModelInstallError.tsx +++ b/invokeai/frontend/web/src/services/events/onModelInstallError.tsx @@ -39,6 +39,9 @@ const getHFTokenStatus = async (dispatch: AppDispatch): Promise { return async (data: S['ModelInstallErrorEvent']) => { log.error({ data }, 'Model install error'); diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx index d9a1d8386c1..f998627d26c 100644 --- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx +++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx @@ -33,6 +33,10 @@ type SetEventListenersArg = { const selectModelInstalls = modelsApi.endpoints.listModelInstalls.select(); +/** + * Sets up event listeners for the socketio client. Some components will set up their own listeners. These are the ones + * that have app-wide implications. 
+ */ +export const setEventListeners = ({ socket, store, setIsConnected }: SetEventListenersArg) => { const { dispatch, getState } = store; From 420d8a445eeb5884fa89e4d524288b6482aecac5 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 14 Oct 2025 19:31:27 +1100 Subject: [PATCH 17/20] docs(ui): add canvas overview and design doc --- .../web/src/features/controlLayers/README.md | 228 ++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 invokeai/frontend/web/src/features/controlLayers/README.md diff --git a/invokeai/frontend/web/src/features/controlLayers/README.md b/invokeai/frontend/web/src/features/controlLayers/README.md new file mode 100644 index 00000000000..de2aafee13a --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/README.md @@ -0,0 +1,228 @@ +# Canvas + +The canvas is a fairly complex feature. It uses "native" KonvaJS (i.e. not the Konva react bindings) to render a drawing canvas. + +It supports layers, drawing, erasing, undo/redo, exporting, backend filters (i.e. filters that require sending image data to the backend to process) and frontend filters. + +## Broad Strokes of Design + +The canvas is internally a hierarchy of classes (modules). All canvas modules inherit from invokeai/frontend/web/src/features/controlLayers/konva/CanvasModuleBase.ts + +### Modules + +The top-level module is the CanvasManager: invokeai/frontend/web/src/features/controlLayers/konva/CanvasManager.ts + +All canvas modules have: + +- A unique id (per instance) +- A ref to its parent module and the canvas manager (the top-level Manager refs itself) +- A repr() method that returns a plain JS object representing the module instance +- A destroy() method to clean up resources +- A log() method that auto-injects context for the module instance + +Modules can do anything, they are simply plain-JS classes to encapsulate some functionality. Some are singletons. Some examples: + +- A singleton module that handles tool-specific interactions: invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasToolModule.ts +- Singleton modules for each tool, e.g. the CanvasBrushToolModule: invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBrushToolModule.ts +- A singleton module to render the background of the canvas: invokeai/frontend/web/src/features/controlLayers/konva/CanvasBackgroundModule.ts +- A strictly logical module that manages various caches of image data: invokeai/frontend/web/src/features/controlLayers/konva/CanvasCacheModule.ts +- A non-singleton module that handles rendering a brush stroke: invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectBrushLine.ts + +### Layers (Entities) and Adapter modules + +Canvas has a number of layer types: + +- Raster layers: Traditional raster/pixel layers, much like layers in Photoshop +- Control layers: Internally a raster layer, but designated to hold control data (e.g. depth maps, segmentation masks, etc.) and have special rendering rules +- Regional guidance layers: A mask-like layer (i.e. it has arbitrary shapes but they have no color or texture, it's just a mask region) plus conditioning data like prompts or ref images. The conditioning is applied only to the masked regions +- Inpaint mask layers: Another mask-like layer that indicates regions to inpaint/regenerate + +Instances of layers are called "entities" in the codebase. Each entity has a type (one of the above), a number of properties (e.g.
visibility, opacity, etc.), objects (e.g. brush strokes, shapes, images) and possibly other data. + +Each layer type has a corresponding "adapter" module that handles rendering the layer and its objects, applying filters, etc. The adapter modules are non-singleton modules that are instantiated once per layer entity. + +Using the raster layer type as an example, it has a number of sub-modules: + +- A top-level module that coordinates everything: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer.ts +- An object (e.g. brush strokes, shapes, images) renderer that draws the layer via Konva: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer.ts +- A "buffer" object renderer, which renders in-progress objects (e.g. a brush stroke that is being drawn but not yet committed, important for performance): invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer.ts +- A module that handles previewing and applying backend filters: invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer.ts +- A module that handles selecting objects from the pixel data of a layer (aka segmentation tasks): invokeai/frontend/web/src/features/controlLayers/konva/CanvasSegmentAnythingModule.ts +- A module that handles transforming the layer (scale, translate, rotate): invokeai/frontend/web/src/features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer.ts + +## State mgmt + +This gets a bit hairy. We have a mix of redux, Konva and nanostores. + +At a high level, we use observable/listener patterns to react to state changes and propagate them to where they need to go. + +### Redux + +Redux is the source of truth for _persistent_ canvas state - layers, their order, etc. + +The redux API includes: + +- getState(): Get the entire redux state +- subscribe(listener): Subscribe to state changes, listener is called on _every_ state change, no granularity is provided +- dispatch(action): Dispatch an action to change state + +Redux is not suitable for _transient_ state that changes frequently, e.g. the current brush stroke as the user is drawing it. Syncing every change to redux would be too slow and incur a significant performance penalty that would drop FPS too much. + +Canvas modules that have persistent state (e.g. layers, their properties, etc.) use redux to store that state and will subscribe to redux to listen for changes and update themselves as needed. + +### Konva + +Konva's API is imperative (i.e. you call methods on the Konva nodes to change them) but it renders automatically. + +There is no simple way to "subscribe" to changes in Konva nodes. You can listen to certain events (e.g. dragmove, transform, etc.) but there is no generic "node changed" event. + +So we almost exclusively push data to Konva, we never "read" from it. + +### Nanostores + +We use https://github.com/nanostores/nanostores as a lightweight observable state management solution. Nanostores has a plain-JS listener API for subscribing to changes, similar to redux's subscribe(). And it has react bindings so we can use it in react components. + +Modules often use nanostores to store their internal state, especially when that state needs to be observed by other modules or react components. + +For example, the CanvasToolModule uses a nanostore to hold the current tool (brush, eraser, etc.) and its options (brush size, color, etc.). 
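A rough sketch of that two-way binding, with illustrative names and types rather than the real CanvasToolModule internals:

```tsx
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';

type Tool = 'brush' | 'eraser' | 'move'; // illustrative subset

// Module side: a nanostores atom holds transient tool state.
export const $tool = atom<Tool>('brush');
export const setTool = (tool: Tool) => $tool.set(tool);

// Other canvas modules subscribe with plain JS; the returned fn would be called in destroy().
export const unsubscribe = $tool.listen((tool) => {
  // e.g. swap the Konva cursor / tool preview here
  console.debug('tool changed to', tool);
});

// React side: the same atom drives component re-renders via the react bindings.
export const ToolIndicator = () => {
  const tool = useStore($tool);
  return <span>{tool}</span>;
};
```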
React components can subscribe to that store to update their UI when the tool or its options change. + +So this provides a simple two-way binding between canvas modules and react components. + +### State -> Canvas + +Data may flow from redux state to Canvas. For example, on canvas init we render all layers and their objects from redux state in Konva: + +- Create the layer's entity adapter and all sub-modules +- Iterate over the layer's objects and create a module instance for each object (e.g. brush stroke, shape, image) +- Each object module creates the necessary Konva nodes to represent itself and adds them to the layer + +The entity adapter subscribes to redux to listen for state changes and pass on the updated state to its sub-modules so they can do whatever they need to do w/ the updated state. + +Besides the initial render, we might have to update the Konva representation of a layer when: + +- The layer's properties are changed (e.g. visibility, opacity, etc.) +- The layer's order is changed (e.g. move up/down) +- User does an undo/redo operation that affects the layer +- The layer is deleted + +### Canvas -> State + +When the user interacts w/ the canvas (e.g. draws a brush stroke, erases, moves an object, etc.), we create/update/delete objects in Konva. When the user finishes the interaction (e.g. finishes drawing a brush stroke), we serialize the object to a plain JS object and dispatch a redux action to add the object in redux state. + +Using drawing a line on a raster layer as an example, the flow is: + +- User initiates a brush stroke and draws +- We create a brush line object module instance in the layer's buffer renderer +- The brush line object is given a unique ID +- The brush line mod creates a Konva.Line node to represent the stroke +- The brush line mod tracks the stroke as the user draws, updating the Konva.Line node as needed, all in the buffer renderer +- When the user finishes the stroke, the brush line module transfers control of itself from the layer's buffer renderer to its main renderer +- As the line is marked complete, the line data is serialized to a plain JS object (i.e. array of points and color) and we dispatch a redux action to add the line object to the layer entity in redux state + +Besides drawing tasks, we have similar flows for: + +- Transforming a layer (scale, translate, rotate) +- Filtering a layer +- Selecting objects from a layer (segmentation tasks) + +## Erasing is hard + +HTML Canvas has a limited set of compositing modes. These apply globally to the whole canvas element. There is no "local" compositing mode that applies only to a specific shape or object. There is no concept of layers. + +So to implement erasing (and opacity!), we have to get creative. Konva handles much of this for us. Each layer is represented internally by a Konva.Layer, which in turn is drawn to its own HTML Canvas element. + +Erasing is accomplished by using a globalCompositeOperation of "destination-out" on the brush stroke that is doing the erasing. The brush stroke "cuts a hole" in the layer it is drawn on. + +There is a complication. The UX for erasing a layer should be: + +- User has a layer, let's say it has an image on it +- The layer's size is exactly the size of the image +- User erases the right-hand half of the image +- The layer's size shrinks to fit the remaining content, i.e. 
the left half of the image +- If the user transforms the layer (scale, translate, rotate), the transformations apply only to the remaining content + +But the "destination-out" compositing mode only makes the erased pixels transparent. It does not actually remove them from the layer. The layer's bounding box includes the eraser strokes - even though they are transparent. The eraser strokes can actually _enlarge_ the layer's bounding box if the user erases outside the original bounds of the layer. + +So, we need a way to calculate the _visual_ bounds of the layer, i.e. the bounding box of all non-transparent pixels. We do this by rendering the layer to an offscreen canvas and reading back the pixel data to calculate the bounds. This process is costly, and we offload some of the work to a web worker to avoid blocking the main thread. Nevertheless, just getting that pixel data is expensive, scaling to the size of the layer. + +The usage of the buffer renderer module helps a lot here, as we only need to recalc the bounds when the user finishes a drawing action, not while they are drawing it. + +You'll see the relevant code for this in the transformer module. It encapsulates the bounds calculation logic and exposes an observable that holds the last-known visual bounds of the layer. + +The worker entrypoint is here invokeai/frontend/web/src/features/controlLayers/konva/CanvasWorkerModule.ts + +## Rasterizing layers + +Layers consist of a mix of vector and pixel data. For example, a brush stroke is a vector (i.e. array of points) and an image is pixel data. + +Ideally we could go straight from user input to pixel data, but this is not feasible for performance reasons. We'd need to write the images to an offscreen canvas, read back the pixel data, send it to the backend, get back the processed pixel data, write it to an offscreen canvas, then read back the pixel data again to update the layer. This would be too slow and block the main thread too much. + +So we use a hybrid approach. We keep the vector data in memory and render it to pixel data only when needed, e.g. when the user applies a backend filter or does a transformation on the canvas. + +This is unfortunately complicated but we couldn't figure out a more performant way to handle this. + +## Compositing layers to prepare for generation + +The canvas is a means to an end: provide strong user control and agency for image generation. + +When generating an image, the raster layers must be composited together into a single image that is sent to the backend. All inpaint masks are similarly composited together into a single mask image. Regional guidance and control layers are not composited together; they are sent as individual images. + +This is handled in invokeai/frontend/web/src/features/controlLayers/konva/CanvasCompositorModule.ts + +For each compositing task, the compositor creates a unique hash of the layer's state (e.g. objects, properties, etc.) and uses that to cache the resulting composited image's name (which is a unique reference to the image file stored on disk). This avoids re-compositing layers that haven't changed since the last generation. + +## The generation bounding box + +Image generation models can only generate images up to certain sizes without causing VRAM OOMs. So we need to give the user a way to specify the size of the generation area. This is done via the "generation bounding box" tool, which is a rectangle that the user can resize and move around the canvas.
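A tiny sketch of the size constraint described just below (the bbox must land on the model's grid). The function name and `gridSize` values are illustrative; the real logic lives in CanvasBboxToolModule.ts.

```ts
// Snap a proposed bbox size onto the model's grid (typically 8, 16 or 32 px depending on the model).
const roundToMultiple = (value: number, multiple: number) => Math.round(value / multiple) * multiple;

type Rect = { x: number; y: number; width: number; height: number };

export const constrainBboxSize = (rect: Rect, gridSize: number): Rect => ({
  ...rect,
  width: Math.max(gridSize, roundToMultiple(rect.width, gridSize)),
  height: Math.max(gridSize, roundToMultiple(rect.height, gridSize)),
});
```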
+ +Here's the module for it invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts + +Models all have width/height constraints - they must be multiples of a certain number (typically 8, 16 or 32). This is related to the internal "latents" representatino of images in diffusion models. So the generation bbox must be constrained to these multiples. + +## Staging generations + +The typical use pattern for generating images on canvas is to generate a number of variations and pick one or more to keep. This is supported via the "staging area", which is a horizontal strip of image thumbnails below the canvas. These staged images are rendered via React, not Konva. + +Once canvas generation starts, much of the canvas is locked down until the user finalizes the staging area, either by accepting a single image, adding one or more images as new layers, or discarding all staged images. + +The currently-selected staged image is previewed on the canvas and rendered via invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingAreaModule.ts + +When the user accepts a staged image, it is added as a new raster layer (there are other options for adding as control, saving directly to gallery, etc). + +This subsystem tracks generated images by watching the queue of generation tasks. The relevant code for queue tracking is in invokeai/frontend/web/src/features/controlLayers/components/StagingArea/state.ts + +## Future enhancements + +### Perf: Reduce the number of canvas elements + +Each layer has a Konva.Layer which has its own canvas element. Once you get too many of these, the browser starts to struggle. + +One idea to improve this would be to have a 3-layer system: + +- The active layer is its own Konva.Layer +- All layers behind it are flattened into a single Konva.Layer +- All layers in front of it are flattened into a single Konva.Layer + +When the user switches the active layer, we re-flatten the layers as needed. This would reduce the number of canvas elements to 3 regardless of how many layers there are. This would greatly improve performance, especially on lower-end devices. + +### Perf: Konva in a web worker + +All of the heavy konva rendering could be offloaded to a web worker. This would free up the main thread for user interactions and UI updates. The main thread would send user input and state changes to the worker, and the worker would send back rendered images to display. + +There used to be a hacky example of this on the Konva docs but I can't find it as of this writing. It requires proxying mouse and keyboard events to the worker, but wasn't too complicated. This could be a _huge_ perf win. + +### Abstract state bindings + +Currently the state bindings (redux, nanostores) are all over the place. There is a singleton module that handles much of the redux binding, but it's still a bit messy: invokeai/frontend/web/src/features/controlLayers/konva/CanvasStateApiModule.ts + +Many modules still directly subscribe to redux with their own selectors. + +Ideally we could have a more abstracted state binding system that could handle multiple backends (e.g. redux, nanostores, etc.) in a more uniform way. This would make it easier to manage state and reduce boilerplate code. + +### Do not lock down canvas as much during staging + +Currently, once the user starts generating images, much of the canvas is locked down until the user finalizes the staging area. 
This can be frustrating if the user wants to make small adjustments to layers or settings while previewing staged images, but it prevents footguns. + +For example, if the user changes the generation bbox size while staging, then queues up more generations, the output images may not match the bbox size, leading to confusion. + +It's more locked-down than it needs to be. Theoretically, most of the canvas could be interactive while staging. Just needs some careful thought to not be too confusing. From dfc3f4e9ad3593a19a9b065bb0fbf90547194bb5 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 15 Oct 2025 09:59:52 +1100 Subject: [PATCH 18/20] fix(ui): move logging setup to react code --- .../frontend/web/src/app/components/InvokeAIUI.tsx | 10 ++++++++++ invokeai/frontend/web/src/app/logging/logger.ts | 9 --------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx index 1fa0a5f3cd9..775a4c7a963 100644 --- a/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx +++ b/invokeai/frontend/web/src/app/components/InvokeAIUI.tsx @@ -1,5 +1,6 @@ import 'i18n'; +import { configureLogging } from 'app/logging/logger'; import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver'; import { $store } from 'app/store/nanostores/store'; import { createStore } from 'app/store/store'; @@ -7,6 +8,15 @@ import Loading from 'common/components/Loading/Loading'; import React, { lazy, memo, useEffect, useState } from 'react'; import { Provider } from 'react-redux'; +/* + * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first + * possible opportunity. + * + * Once redux initializes, we will check the user's settings and update the logging config accordingly. See + * `useSyncLoggingConfig`. + */ +configureLogging(true, 'debug', '*'); + const App = lazy(() => import('./App')); const InvokeAIUI = () => { diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index 7c6b4ecdde0..6c843068df3 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -83,12 +83,3 @@ export const configureLogging = ( ROARR.write = createLogWriter({ styleOutput }); }; - -/* - * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first - * possible opportunity. - * - * Once redux initializes, we will check the user's settings and update the logging config accordingly. See - * `useSyncLoggingConfig`.
- */ -configureLogging(true, 'debug', '*'); From da0508880e03d5d5ae87f451f3c90965be0f0b0b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 15 Oct 2025 10:27:43 +1100 Subject: [PATCH 19/20] chore: ruff --- invokeai/app/invocations/fields.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index 5a2d0810356..1bca7ec3f53 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -235,8 +235,6 @@ class ImageField(BaseModel): image_name: str = Field(description="The name of the image") - - class BoardField(BaseModel): """A board primitive field""" From 3cf9ff66e064854605d87bf21a301743d902cf7a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 15 Oct 2025 10:40:22 +1100 Subject: [PATCH 20/20] chore(ui): fix schema --- invokeai/frontend/web/src/services/api/schema.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 456d5f3d55e..b52f6eb74c6 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -6827,7 +6827,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -19475,7 +19475,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -19522,7 +19522,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -19563,7 +19563,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** @@ -20641,7 +20641,7 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /**