|
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +from openai import AsyncOpenAI |
| 4 | + |
| 5 | +from ..exceptions import UserError |
| 6 | +from .interface import Model, ModelProvider |
| 7 | +from .openai_provider import OpenAIProvider |
| 8 | + |
| 9 | + |
class MultiProviderMap:
    """Registry that associates model-name prefixes with ModelProviders."""

    def __init__(self):
        # Internal storage: prefix string -> provider instance.
        self._mapping: dict[str, ModelProvider] = {}

    def has_prefix(self, prefix: str) -> bool:
        """Returns True if the given prefix is in the mapping."""
        return self._mapping.__contains__(prefix)

    def get_mapping(self) -> dict[str, ModelProvider]:
        """Returns a copy of the current prefix -> ModelProvider mapping."""
        # Copy so callers cannot mutate the registry through the result.
        return dict(self._mapping)

    def set_mapping(self, mapping: dict[str, ModelProvider]):
        """Overwrites the current mapping with a new one."""
        # NOTE: stores the caller's dict by reference (no defensive copy).
        self._mapping = mapping

    def get_provider(self, prefix: str) -> ModelProvider | None:
        """Returns the ModelProvider for the given prefix, or None if absent.

        Args:
            prefix: The prefix of the model name e.g. "openai" or "my_prefix".
        """
        return self._mapping.get(prefix)

    def add_provider(self, prefix: str, provider: ModelProvider):
        """Adds a new prefix -> ModelProvider mapping.

        Args:
            prefix: The prefix of the model name e.g. "openai" or "my_prefix".
            provider: The ModelProvider to use for the given prefix.
        """
        self._mapping[prefix] = provider

    def remove_provider(self, prefix: str):
        """Removes the mapping for the given prefix.

        Raises KeyError if the prefix is not registered.

        Args:
            prefix: The prefix of the model name e.g. "openai" or "my_prefix".
        """
        self._mapping.pop(prefix)
| 52 | + |
| 53 | + |
class MultiProvider(ModelProvider):
    """This ModelProvider maps to a Model based on the prefix of the model name. By default, the
    mapping is:
    - "openai/" prefix or no prefix -> OpenAIProvider. e.g. "openai/gpt-4.1", "gpt-4.1"
    - "litellm/" prefix -> LitellmProvider. e.g. "litellm/openai/gpt-4.1"

    You can override or customize this mapping.
    """

    def __init__(
        self,
        *,
        provider_map: MultiProviderMap | None = None,
        openai_api_key: str | None = None,
        openai_base_url: str | None = None,
        openai_client: AsyncOpenAI | None = None,
        openai_organization: str | None = None,
        openai_project: str | None = None,
        openai_use_responses: bool | None = None,
    ) -> None:
        """Create a new MultiProvider.

        Args:
            provider_map: A MultiProviderMap that maps prefixes to ModelProviders. If not provided,
                we will use a default mapping. See the documentation for this class to see the
                default mapping.
            openai_api_key: The API key to use for the OpenAI provider. If not provided, we will use
                the default API key.
            openai_base_url: The base URL to use for the OpenAI provider. If not provided, we will
                use the default base URL.
            openai_client: An optional OpenAI client to use. If not provided, we will create a new
                OpenAI client using the api_key and base_url.
            openai_organization: The organization to use for the OpenAI provider.
            openai_project: The project to use for the OpenAI provider.
            openai_use_responses: Whether to use the OpenAI responses API.
        """
        self.provider_map = provider_map
        # The OpenAI provider is always constructed eagerly — it is the
        # default target for un-prefixed and "openai/"-prefixed names.
        self.openai_provider = OpenAIProvider(
            api_key=openai_api_key,
            base_url=openai_base_url,
            openai_client=openai_client,
            organization=openai_organization,
            project=openai_project,
            use_responses=openai_use_responses,
        )

        # Lazily-built cache of non-OpenAI fallback providers, keyed by prefix.
        self._fallback_providers: dict[str, ModelProvider] = {}

    def _get_prefix_and_model_name(self, model_name: str | None) -> tuple[str | None, str | None]:
        # Split "prefix/rest" at the FIRST slash only; a name without a slash
        # has no prefix, and None passes through unchanged.
        if model_name is None:
            return None, None
        head, sep, tail = model_name.partition("/")
        if sep:
            return head, tail
        return None, model_name

    def _create_fallback_provider(self, prefix: str) -> ModelProvider:
        # Only "litellm" has a built-in fallback; the import is deferred so the
        # optional dependency is touched only when actually requested.
        if prefix != "litellm":
            raise UserError(f"Unknown prefix: {prefix}")
        from ..extensions.models.litellm_provider import LitellmProvider

        return LitellmProvider()

    def _get_fallback_provider(self, prefix: str | None) -> ModelProvider:
        # No prefix (or the explicit "openai" prefix) routes to the eager
        # OpenAI provider; anything else is created once and memoized.
        if prefix is None or prefix == "openai":
            return self.openai_provider
        cached = self._fallback_providers.get(prefix)
        if cached is None:
            cached = self._create_fallback_provider(prefix)
            self._fallback_providers[prefix] = cached
        return cached

    def get_model(self, model_name: str | None) -> Model:
        """Returns a Model based on the model name. The model name can have a prefix, ending with
        a "/", which will be used to look up the ModelProvider. If there is no prefix, we will use
        the OpenAI provider.

        Args:
            model_name: The name of the model to get.

        Returns:
            A Model.
        """
        prefix, bare_name = self._get_prefix_and_model_name(model_name)

        # A user-supplied provider_map takes precedence over the built-in
        # fallbacks, but only when it actually maps this (truthy) prefix.
        if prefix and self.provider_map:
            mapped = self.provider_map.get_provider(prefix)
            if mapped:
                return mapped.get_model(bare_name)
        return self._get_fallback_provider(prefix).get_model(bare_name)
0 commit comments