💄 style: remove custom models from providers, and update Perplexity model names (lobehub#2069)

* 💄 models: Mixtral 8x22B is no longer free; add WizardLM 2 8x22B as an enabled default.

* 💄 style: Remove reverse-engineered or custom models from the official model lists.

* 💄 style: Perplexity model name updates

* 💄 style: add model doc reference links to provider configs for future update checks.

* 💄 chore: remove gpt-4-all feature flag test

* 💄 chore: remove gpt-4-all feature flag modelEnabledFiles test
MapleEve authored Apr 17, 2024
1 parent 479f562 commit e04754d
Showing 12 changed files with 33 additions and 81 deletions.
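For orientation, each provider file below exports a ModelProviderCard describing its chat models. The sketch below is inferred from the fields that appear in these diffs (displayName, id, tokens, enabled, files, functionCall, vision); the interface names and exact shape are assumptions — the real type lives in '@/types/llm' and may differ.

// Rough shape of the provider cards edited in this commit.
// Inferred from the diffs below; not the repository's actual definition.
interface ChatModelCard {
  displayName: string;
  id: string;             // model id sent to the provider's API
  tokens: number;         // context window size
  enabled?: boolean;      // listed by default in the model picker
  files?: boolean;        // file upload support
  functionCall?: boolean;
  vision?: boolean;
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
  enabled?: boolean;
  id: string;
}

// Hypothetical example entry in the same style as the configs below.
const exampleProvider: ModelProviderCard = {
  chatModels: [
    {
      displayName: 'Example Model',
      enabled: true,
      id: 'example-model',
      tokens: 32_768,
    },
  ],
  id: 'example-provider',
};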
1 change: 1 addition & 0 deletions src/config/modelProviders/anthropic.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://docs.anthropic.com/claude/docs/models-overview
 const Anthropic: ModelProviderCard = {
   chatModels: [
     {
1 change: 1 addition & 0 deletions src/config/modelProviders/bedrock.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
 const Bedrock: ModelProviderCard = {
   chatModels: [
     {
1 change: 1 addition & 0 deletions src/config/modelProviders/google.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://ai.google.dev/models/gemini
 const Google: ModelProviderCard = {
   chatModels: [
     {
1 change: 1 addition & 0 deletions src/config/modelProviders/groq.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://console.groq.com/docs/models
 const Groq: ModelProviderCard = {
   chatModels: [
     {
5 changes: 0 additions & 5 deletions src/config/modelProviders/mistral.ts
@@ -33,11 +33,6 @@ const Mistral: ModelProviderCard = {
       id: 'mistral-large-latest',
       tokens: 32_768,
     },
-    {
-      displayName: 'Mixtral 8x22B',
-      id: 'mixtral-8x22b',
-      tokens: 32_768,
-    },
   ],
   id: 'mistral',
 };
7 changes: 0 additions & 7 deletions src/config/modelProviders/moonshot.ts
@@ -20,13 +20,6 @@ const Moonshot: ModelProviderCard = {
       id: 'moonshot-v1-128k',
       tokens: 128_000,
     },
-    {
-      displayName: 'Moonshot Kimi Reverse',
-      files: true,
-      id: 'moonshot-v1',
-      tokens: 200_000,
-      vision: true,
-    },
   ],
   id: 'moonshot',
 };
43 changes: 0 additions & 43 deletions src/config/modelProviders/ollama.ts
@@ -148,49 +148,6 @@ const Ollama: ModelProviderCard = {
       tokens: 4000,
       vision: true,
     },
-    // TODO: once Qwen is supported as a standalone provider, these Qwen models should move into the Qwen config
-    {
-      displayName: 'Qwen Plus',
-      functionCall: true,
-      id: 'qwen-plus',
-      tokens: 30_000,
-      vision: false,
-    },
-    {
-      displayName: 'Qwen Turbo',
-      functionCall: true,
-      id: 'qwen-turbo',
-      tokens: 6000,
-      vision: false,
-    },
-    {
-      displayName: 'Qwen Max',
-      functionCall: true,
-      id: 'qwen-max',
-      tokens: 6000,
-      vision: false,
-    },
-    {
-      displayName: 'Qwen Max Long',
-      functionCall: true,
-      id: 'qwen-max-longcontext',
-      tokens: 28_000,
-      vision: false,
-    },
-    {
-      displayName: 'Qwen VL Max',
-      functionCall: false,
-      id: 'qwen-vl-max',
-      tokens: 6000,
-      vision: true,
-    },
-    {
-      displayName: 'Qwen VL Plus',
-      functionCall: false,
-      id: 'qwen-vl-plus',
-      tokens: 30_000,
-      vision: true,
-    },
   ],
   id: 'ollama',
 };
8 changes: 0 additions & 8 deletions src/config/modelProviders/openai.ts
@@ -117,14 +117,6 @@ const OpenAI: ModelProviderCard = {
       tokens: 128_000,
       vision: true,
     },
-    {
-      displayName: 'GPT-4 ALL',
-      files: true,
-      functionCall: true,
-      id: 'gpt-4-all',
-      tokens: 32_768,
-      vision: true,
-    },
   ],
   enabled: true,
   id: 'openai',
11 changes: 10 additions & 1 deletion src/config/modelProviders/openrouter.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://openrouter.ai/docs#models
 const OpenRouter: ModelProviderCard = {
   chatModels: [
     {
@@ -99,13 +100,21 @@ const OpenRouter: ModelProviderCard = {
       vision: false,
     },
     {
-      displayName: 'Mistral: Mixtral 8x22B (base) (free)',
+      displayName: 'Mistral: Mixtral 8x22B (base)',
       enabled: true,
       functionCall: false,
       id: 'mistralai/mixtral-8x22b',
       tokens: 64_000,
       vision: false,
     },
+    {
+      displayName: 'Microsoft: WizardLM-2 8x22B',
+      enabled: true,
+      functionCall: false,
+      id: 'microsoft/wizardlm-2-8x22b',
+      tokens: 65_536,
+      vision: false,
+    },
   ],
   id: 'openrouter',
 };
31 changes: 16 additions & 15 deletions src/config/modelProviders/perplexity.ts
@@ -1,43 +1,44 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://docs.perplexity.ai/docs/model-cards
 const Perplexity: ModelProviderCard = {
   chatModels: [
     {
       displayName: 'Perplexity 7B Chat',
-      id: 'pplx-7b-chat',
-      tokens: 8192,
+      id: 'sonar-small-chat',
+      tokens: 16_384,
     },
     {
-      displayName: 'Perplexity 70B Chat',
+      displayName: 'Perplexity 8x7B Chat',
       enabled: true,
-      id: 'pplx-70b-chat',
-      tokens: 8192,
+      id: 'sonar-medium-chat',
+      tokens: 16_384,
     },
     {
       displayName: 'Perplexity 7B Online',
-      id: 'pplx-7b-online',
-      tokens: 8192,
+      id: 'sonar-small-online',
+      tokens: 12_000,
     },
     {
-      displayName: 'Perplexity 70B Online',
+      displayName: 'Perplexity 8x7B Online',
       enabled: true,
-      id: 'pplx-70b-online',
-      tokens: 8192,
+      id: 'sonar-medium-online',
+      tokens: 12_000,
     },
     {
-      displayName: 'Codellama 34B Instruct',
-      id: 'codellama-34b-instruct',
+      displayName: 'Codellama 70B Instruct',
+      id: 'codellama-70b-instruct',
       tokens: 16_384,
     },
     {
-      displayName: 'Codellama 70B Instruct',
-      id: 'codellama-70b-instruct',
+      displayName: 'Mistral 7B Instruct',
+      id: 'mistral-7b-instruct',
       tokens: 16_384,
     },
     {
       displayName: 'Mixtral 8x7B Instruct',
       id: 'mixtral-8x7b-instruct',
-      tokens: 8192,
+      tokens: 16_384,
     },
   ],
   id: 'perplexity',
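For quick reference, the Perplexity rename in the hunk above maps the old pplx-* ids to the sonar-* ids from Perplexity's model cards page. A small lookup table as a reading aid, derived from this diff and not part of the commit:

// Old Perplexity model ids and the sonar-* ids that replace them in this commit.
const renamedPerplexityModels: Record<string, string> = {
  'pplx-70b-chat': 'sonar-medium-chat',
  'pplx-70b-online': 'sonar-medium-online',
  'pplx-7b-chat': 'sonar-small-chat',
  'pplx-7b-online': 'sonar-small-online',
};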
1 change: 1 addition & 0 deletions src/config/modelProviders/togetherai.ts
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
+// ref https://api.together.xyz/models
 const TogetherAI: ModelProviderCard = {
   chatModels: [
     {
@@ -85,14 +85,14 @@ describe('modelProviderSelectors', () => {
   });
 
   describe('modelEnabledFiles', () => {
-    it('should return false if the model does not have file ability', () => {
+    it.skip('should return false if the model does not have file ability', () => {
       const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-vision-preview')(
         useGlobalStore.getState(),
       );
       expect(enabledFiles).toBeFalsy();
     });
 
-    it('should return true if the model has file ability', () => {
+    it.skip('should return true if the model has file ability', () => {
       const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-all')(
         useGlobalStore.getState(),
       );
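The two tests above are disabled with it.skip rather than deleted, so they still appear as skipped in the Vitest report. A minimal, self-contained sketch of the pattern, not code from this repository:

import { describe, expect, it } from 'vitest';

describe('modelEnabledFiles example', () => {
  // Runs normally.
  it('adds numbers', () => {
    expect(1 + 1).toBe(2);
  });

  // Reported as skipped; the body never executes.
  it.skip('depends on the removed gpt-4-all entry', () => {
    expect(true).toBe(false);
  });
});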
