You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
@@ -392,15 +388,11 @@ class InvokeAIAppConfig(InvokeAISettings):
392
388
# Feature flag: when True, models may be downloaded on demand; when False, only
# locally installed models are used (useful for air-gapped deployments).
internet_available : bool=Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features')
393
389
# Debug aid: log the tokens produced when parsing a prompt (off by default).
log_tokenization : bool=Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
# DEPRECATED: legacy face-restoration toggle, retained only so old configs
# still parse; categorized 'DEPRECATED' so it is grouped apart from live settings.
restore : bool=Field(default=True, description="Enable/disable face restoration code (DEPRECATED)", category='DEPRECATED')
396
391
397
392
# Force CPU rendering even when a GPU is present (e.g. for debugging or
# when GPU memory is needed elsewhere).
always_use_cpu : bool=Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
398
393
# Trade speed for VRAM: purge the model from GPU memory after each generation.
free_gpu_mem : bool=Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
399
-
# DEPRECATED: superseded by max_cache_size (which is expressed in memory units
# rather than a model count); gt=0 rejects non-positive values at load time.
max_loaded_models : int=Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='DEPRECATED')
400
394
# Upper bound on memory used by the model cache for rapid model switching;
# gt=0 means a strictly positive value is required. Units are not stated on
# this line — presumably GB; confirm against the model-cache implementation.
max_cache_size : float=Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
401
395
# VRAM reserved for model storage; ge=0 allows 0 (disable VRAM caching),
# unlike max_cache_size which must be strictly positive.
max_vram_cache_size : float=Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
402
-
# DEPRECATED: old name for max_vram_cache_size; kept (with the same default
# and ge=0 constraint) so existing config files remain valid.
gpu_mem_reserved : float=Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
403
-
# DEPRECATED: NSFW checking moved to the Web UI settings; field retained only
# for backward compatibility with older config files.
nsfw_checker : bool=Field(default=True, description="DEPRECATED: use Web settings to enable/disable", category='DEPRECATED')
404
396
# Floating-point precision mode; Literal restricts the value to one of the four
# listed strings, with 'auto' letting the runtime choose. (The tuple(...) wrapper
# around the list is redundant — Literal accepts the choices directly — but is
# left untouched here.)
precision : Literal[tuple(['auto','float16','float32','autocast'])] =Field(default='auto',description='Floating point precision', category='Memory/Performance')
405
397
# Compute classifier-free guidance serially instead of in parallel — slower,
# but lowers peak memory usage.
sequential_guidance : bool=Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
0 commit comments