diff --git a/src/config/modelProviders/anthropic.ts b/src/config/modelProviders/anthropic.ts index 67ec9d96be8c..f8962b4a31b1 100644 --- a/src/config/modelProviders/anthropic.ts +++ b/src/config/modelProviders/anthropic.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://docs.anthropic.com/claude/docs/models-overview const Anthropic: ModelProviderCard = { chatModels: [ { diff --git a/src/config/modelProviders/bedrock.ts b/src/config/modelProviders/bedrock.ts index b74c0d8df909..bacfae5d51c1 100644 --- a/src/config/modelProviders/bedrock.ts +++ b/src/config/modelProviders/bedrock.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html const Bedrock: ModelProviderCard = { chatModels: [ { diff --git a/src/config/modelProviders/google.ts b/src/config/modelProviders/google.ts index cb54fdda5a57..f1cddda61a66 100644 --- a/src/config/modelProviders/google.ts +++ b/src/config/modelProviders/google.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://ai.google.dev/models/gemini const Google: ModelProviderCard = { chatModels: [ { diff --git a/src/config/modelProviders/groq.ts b/src/config/modelProviders/groq.ts index 9e9f34f234c0..21608a690634 100644 --- a/src/config/modelProviders/groq.ts +++ b/src/config/modelProviders/groq.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://console.groq.com/docs/models const Groq: ModelProviderCard = { chatModels: [ { diff --git a/src/config/modelProviders/mistral.ts b/src/config/modelProviders/mistral.ts index 0575b8d4a7f4..5ba0f178871c 100644 --- a/src/config/modelProviders/mistral.ts +++ b/src/config/modelProviders/mistral.ts @@ -33,11 +33,6 @@ const Mistral: ModelProviderCard = { id: 'mistral-large-latest', tokens: 32_768, }, - { - displayName: 'Mixtral 8x22B', - id: 'mixtral-8x22b', - tokens: 32_768, - }, ], id: 'mistral', }; diff --git 
a/src/config/modelProviders/moonshot.ts b/src/config/modelProviders/moonshot.ts index 9a9093b72c1d..e57d317bee08 100644 --- a/src/config/modelProviders/moonshot.ts +++ b/src/config/modelProviders/moonshot.ts @@ -20,13 +20,6 @@ const Moonshot: ModelProviderCard = { id: 'moonshot-v1-128k', tokens: 128_000, }, - { - displayName: 'Moonshot Kimi Reverse', - files: true, - id: 'moonshot-v1', - tokens: 200_000, - vision: true, - }, ], id: 'moonshot', }; diff --git a/src/config/modelProviders/ollama.ts b/src/config/modelProviders/ollama.ts index d8f0111ac3c5..7682b913d77e 100644 --- a/src/config/modelProviders/ollama.ts +++ b/src/config/modelProviders/ollama.ts @@ -148,49 +148,6 @@ const Ollama: ModelProviderCard = { tokens: 4000, vision: true, }, - // TODO: 在单独支持千问之后这些 Qwen 模型需要移动到千问的配置中 - { - displayName: 'Qwen Plus', - functionCall: true, - id: 'qwen-plus', - tokens: 30_000, - vision: false, - }, - { - displayName: 'Qwen Turbo', - functionCall: true, - id: 'qwen-turbo', - tokens: 6000, - vision: false, - }, - { - displayName: 'Qwen Max', - functionCall: true, - id: 'qwen-max', - tokens: 6000, - vision: false, - }, - { - displayName: 'Qwen Max Long', - functionCall: true, - id: 'qwen-max-longcontext', - tokens: 28_000, - vision: false, - }, - { - displayName: 'Qwen VL Max', - functionCall: false, - id: 'qwen-vl-max', - tokens: 6000, - vision: true, - }, - { - displayName: 'Qwen VL Plus', - functionCall: false, - id: 'qwen-vl-plus', - tokens: 30_000, - vision: true, - }, ], id: 'ollama', }; diff --git a/src/config/modelProviders/openai.ts b/src/config/modelProviders/openai.ts index 7d507dff56cc..5a57204a27cf 100644 --- a/src/config/modelProviders/openai.ts +++ b/src/config/modelProviders/openai.ts @@ -117,14 +117,6 @@ const OpenAI: ModelProviderCard = { tokens: 128_000, vision: true, }, - { - displayName: 'GPT-4 ALL', - files: true, - functionCall: true, - id: 'gpt-4-all', - tokens: 32_768, - vision: true, - }, ], enabled: true, id: 'openai', diff --git 
a/src/config/modelProviders/openrouter.ts b/src/config/modelProviders/openrouter.ts index fbbd822b550d..28d51d520da5 100644 --- a/src/config/modelProviders/openrouter.ts +++ b/src/config/modelProviders/openrouter.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://openrouter.ai/docs#models const OpenRouter: ModelProviderCard = { chatModels: [ { @@ -99,13 +100,21 @@ const OpenRouter: ModelProviderCard = { vision: false, }, { - displayName: 'Mistral: Mixtral 8x22B (base) (free)', + displayName: 'Mistral: Mixtral 8x22B (base)', enabled: true, functionCall: false, id: 'mistralai/mixtral-8x22b', tokens: 64_000, vision: false, }, + { + displayName: 'Microsoft: WizardLM-2 8x22B', + enabled: true, + functionCall: false, + id: 'microsoft/wizardlm-2-8x22b', + tokens: 65_536, + vision: false, + }, ], id: 'openrouter', }; diff --git a/src/config/modelProviders/perplexity.ts b/src/config/modelProviders/perplexity.ts index c767638a3832..3db6e8bc36c1 100644 --- a/src/config/modelProviders/perplexity.ts +++ b/src/config/modelProviders/perplexity.ts @@ -1,43 +1,44 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://docs.perplexity.ai/docs/model-cards const Perplexity: ModelProviderCard = { chatModels: [ { displayName: 'Perplexity 7B Chat', - id: 'pplx-7b-chat', - tokens: 8192, + id: 'sonar-small-chat', + tokens: 16_384, }, { - displayName: 'Perplexity 70B Chat', + displayName: 'Perplexity 8x7B Chat', enabled: true, - id: 'pplx-70b-chat', - tokens: 8192, + id: 'sonar-medium-chat', + tokens: 16_384, }, { displayName: 'Perplexity 7B Online', - id: 'pplx-7b-online', - tokens: 8192, + id: 'sonar-small-online', + tokens: 12_000, }, { - displayName: 'Perplexity 70B Online', + displayName: 'Perplexity 8x7B Online', enabled: true, - id: 'pplx-70b-online', - tokens: 8192, + id: 'sonar-medium-online', + tokens: 12_000, }, { - displayName: 'Codellama 34B Instruct', - id: 'codellama-34b-instruct', + displayName: 'Codellama 70B Instruct', + id: 
'codellama-70b-instruct', tokens: 16_384, }, { - displayName: 'Codellama 70B Instruct', - id: 'codellama-70b-instruct', + displayName: 'Mistral 7B Instruct', + id: 'mistral-7b-instruct', tokens: 16_384, }, { displayName: 'Mixtral 8x7B Instruct', id: 'mixtral-8x7b-instruct', - tokens: 8192, + tokens: 16_384, }, ], id: 'perplexity', diff --git a/src/config/modelProviders/togetherai.ts b/src/config/modelProviders/togetherai.ts index 4fd5de512cf8..ba32eb1cfafd 100644 --- a/src/config/modelProviders/togetherai.ts +++ b/src/config/modelProviders/togetherai.ts @@ -1,5 +1,6 @@ import { ModelProviderCard } from '@/types/llm'; +// ref https://api.together.xyz/models const TogetherAI: ModelProviderCard = { chatModels: [ { diff --git a/src/store/global/slices/settings/selectors/modelProvider.test.ts b/src/store/global/slices/settings/selectors/modelProvider.test.ts index ce1074b59bcc..acbb24d1701d 100644 --- a/src/store/global/slices/settings/selectors/modelProvider.test.ts +++ b/src/store/global/slices/settings/selectors/modelProvider.test.ts @@ -85,14 +85,14 @@ describe('modelProviderSelectors', () => { }); describe('modelEnabledFiles', () => { - it('should return false if the model does not have file ability', () => { + it.skip('should return false if the model does not have file ability', () => { const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-vision-preview')( useGlobalStore.getState(), ); expect(enabledFiles).toBeFalsy(); }); - it('should return true if the model has file ability', () => { + it.skip('should return true if the model has file ability', () => { const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-all')( useGlobalStore.getState(), );