diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index 953209a86..4e46c250b 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -131,6 +131,7 @@ def __init__(self, settings: Settings) -> None:
                     temperature=settings.llm.temperature,
                     context_window=settings.llm.context_window,
                     additional_kwargs=settings_kwargs,
+                    request_timeout=ollama_settings.request_timeout,
                 )
             case "azopenai":
                 try:
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index 417710554..0fe1747c5 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -241,6 +241,10 @@ class OllamaSettings(BaseModel):
         1.1,
         description="Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)",
     )
+    request_timeout: float = Field(
+        120.0,
+        description="Time elapsed until ollama times out the request. Default is 120s. Format is float. ",
+    )
 
 
 class AzureOpenAISettings(BaseModel):
diff --git a/settings-ollama.yaml b/settings-ollama.yaml
index 9a0aaed0a..d7e1a12ca 100644
--- a/settings-ollama.yaml
+++ b/settings-ollama.yaml
@@ -14,11 +14,12 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
-  tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
-  top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
-  top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
-  repeat_last_n: 64 # Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)
-  repeat_penalty: 1.2 # Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)
+  tfs_z: 1.0              # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
+  top_k: 40               # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
+  top_p: 0.9              # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
+  repeat_last_n: 64       # Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)
+  repeat_penalty: 1.2     # Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)
+  request_timeout: 120.0  # Time elapsed until ollama times out the request. Default is 120s. Format is float.
 
 vectorstore:
   database: qdrant
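For context on the `settings.py` change: the new field follows the same pattern as the other `OllamaSettings` knobs, a Pydantic `Field` with a default, so existing deployments that omit `request_timeout` from their settings files keep working unchanged. A minimal standalone sketch of that behavior (a trimmed stand-in model, not the project's full `OllamaSettings`):

```python
from pydantic import BaseModel, Field


# Trimmed-down stand-in for OllamaSettings, showing only the new field.
class OllamaSettingsSketch(BaseModel):
    request_timeout: float = Field(
        120.0,
        description="Time elapsed until ollama times out the request.",
    )


# Omitting the key falls back to the 120-second default ...
print(OllamaSettingsSketch().request_timeout)  # 120.0
# ... while a value loaded from a settings YAML file overrides it.
print(OllamaSettingsSketch(request_timeout=300.0).request_timeout)  # 300.0
```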
diff --git a/settings.yaml b/settings.yaml
index 0b4cb3414..dd0f5a057 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -89,6 +89,7 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  request_timeout: 120.0
 
 azopenai:
   api_key: ${AZ_OPENAI_API_KEY:}
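Taken together, the patch wires the YAML value through `OllamaSettings` into the llama-index `Ollama` client in `llm_component.py`, which uses it as the timeout for HTTP requests against `api_base`. A minimal sketch of the resulting call, assuming the `llama-index` Ollama integration the project imports; the literal values are illustrative stand-ins for the settings above, not a copy of the component code:

```python
from llama_index.llms.ollama import Ollama

llm = Ollama(
    model="mistral",                    # ollama_settings.llm_model
    base_url="http://localhost:11434",  # ollama_settings.api_base
    request_timeout=120.0,              # ollama_settings.request_timeout (new)
)
```

Raising `request_timeout` in `settings-ollama.yaml` is the escape hatch when slow local generations (e.g., large models on CPU) take longer than the 120-second default and would otherwise be cut off mid-response.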