💄 style: fix model info (#3696)
* Update stepfun.ts

* Update qwen.ts

* Update qwen.ts

* Update stepfun.ts
sxjeru authored Sep 1, 2024
1 parent 685bd74 commit 4d98037
Showing 2 changed files with 24 additions and 18 deletions.
23 changes: 12 additions & 11 deletions src/config/modelProviders/qwen.ts
@@ -16,15 +16,15 @@ const Qwen: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'qwen-turbo',
-      tokens: 8192,
+      tokens: 8000, // https://www.alibabacloud.com/help/zh/model-studio/developer-reference/use-qwen-by-calling-api
     },
     {
       description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入',
       displayName: 'Qwen Plus',
       enabled: true,
       functionCall: true,
       id: 'qwen-plus',
-      tokens: 130_000,
+      tokens: 131_072, // https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction
     },
     {
       description:
@@ -33,15 +33,15 @@ const Qwen: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'qwen-max',
-      tokens: 8192,
+      tokens: 8000,
     },
     {
       description:
         '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,扩展了上下文窗口',
       displayName: 'Qwen Max LongContext',
       functionCall: true,
       id: 'qwen-max-longcontext',
-      tokens: 30_720,
+      tokens: 30_000,
     },
     {
       description:
@@ -70,49 +70,50 @@ const Qwen: ModelProviderCard = {
       tokens: 32_768,
       vision: true,
     },
+    // ref https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-7b-14b-72b-api-detailes
     {
       description: '通义千问2对外开源的7B规模的模型',
       displayName: 'Qwen2 7B',
       id: 'qwen2-7b-instruct',
-      tokens: 128_000,
+      tokens: 131_072, // https://huggingface.co/Qwen/Qwen2-7B-Instruct
     },
     {
       description: '通义千问2对外开源的57B规模14B激活参数的MOE模型',
       displayName: 'Qwen2 57B-A14B MoE',
       id: 'qwen2-57b-a14b-instruct',
-      tokens: 32_768,
+      tokens: 65_536, // https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct
     },
     {
       description: '通义千问2对外开源的72B规模的模型',
       displayName: 'Qwen2 72B',
       id: 'qwen2-72b-instruct',
-      tokens: 128_000,
+      tokens: 131_072, // https://huggingface.co/Qwen/Qwen2-72B-Instruct
     },
     {
       description: 'Qwen2-Math 模型具有强大的数学解题能力',
       displayName: 'Qwen2 Math 72B',
       id: 'qwen2-math-72b-instruct',
-      tokens: 128_000,
+      tokens: 4096, // https://help.aliyun.com/zh/dashscope/developer-reference/use-qwen2-math-by-calling-api
     },
     {
       description:
         '以 Qwen-7B 语言模型初始化,添加图像模型,图像输入分辨率为448的预训练模型。',
       displayName: 'Qwen VL',
       id: 'qwen-vl-v1',
-      tokens: 8192,
+      tokens: 8192, // https://huggingface.co/Qwen/Qwen-VL/blob/main/config.json
       vision: true,
     },
     {
       description:
         '通义千问VL支持灵活的交互方式,包括多图、多轮问答、创作等能力的模型。',
       displayName: 'Qwen VL Chat',
       id: 'qwen-vl-chat-v1',
-      tokens: 8192,
+      tokens: 8192, // https://huggingface.co/Qwen/Qwen-VL-Chat/blob/main/config.json
       vision: true,
     },
   ],
   checkModel: 'qwen-turbo',
-  disableBrowserRequest: true,
+  disableBrowserRequest: true, // CORS issue
   id: 'qwen',
   modelList: { showModelFetcher: true },
   name: 'Qwen',
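For readers unfamiliar with this config: each entry edited above is one chat model inside a provider card, and the field the commit corrects (tokens) sits next to the existing functionCall, vision, and enabled flags and to provider-level switches such as checkModel and disableBrowserRequest. Below is a minimal, hypothetical TypeScript sketch of that shape using simplified local interfaces; the real ModelProviderCard type is imported from '@/types/llm' and may carry more fields than shown here.

// Simplified stand-ins for the real types in src/types/llm. Field names are
// taken from the diff above; everything else about the real types is assumed.
interface ChatModelCardSketch {
  description?: string; // blurb shown in the model picker
  displayName?: string; // UI label; the id is shown when absent
  enabled?: boolean; // included in the default model list
  functionCall?: boolean; // supports tool / function calling
  id: string; // model id sent to the provider API
  tokens?: number; // context window size, in tokens
  vision?: boolean; // accepts image input
}

interface ModelProviderCardSketch {
  chatModels: ChatModelCardSketch[];
  checkModel?: string; // model used for the provider connectivity check
  disableBrowserRequest?: boolean; // route via the server when the API blocks CORS
  id: string;
  name: string;
}

// One entry as it reads after this commit: the context window comes from the
// vendor docs (8,000 tokens for qwen-turbo) instead of a rounded power of two.
const qwenTurbo: ChatModelCardSketch = {
  enabled: true,
  functionCall: true,
  id: 'qwen-turbo',
  tokens: 8000,
};

console.log(`${qwenTurbo.id}: ${qwenTurbo.tokens} tokens`);

That is essentially all the qwen.ts change does: align tokens with the context windows stated in the Alibaba Cloud and Hugging Face references added as trailing comments, and annotate disableBrowserRequest with the reason (a CORS issue).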
19 changes: 12 additions & 7 deletions src/config/modelProviders/stepfun.ts
@@ -1,11 +1,12 @@
 import { ModelProviderCard } from '@/types/llm';
 
 // ref https://platform.stepfun.com/docs/llm/text
+// 根据文档,阶级星辰大模型的上下文长度,其 k 的含义为 1000
 const Stepfun: ModelProviderCard = {
   chatModels: [
     {
       id: 'step-2-16k-nightly',
-      tokens: 16_384,
+      tokens: 16_000,
     },
     {
       id: 'step-1-256k',
@@ -19,27 +20,31 @@ const Stepfun: ModelProviderCard = {
     {
       enabled: true,
       id: 'step-1-32k',
-      tokens: 32_768,
+      tokens: 32_000,
     },
     {
       enabled: true,
       id: 'step-1-8k',
-      tokens: 8192,
+      tokens: 8000,
     },
+    {
+      enabled: true,
+      id: 'step-1-flash',
+      tokens: 8000,
+    },
     {
       enabled: true,
       id: 'step-1v-32k',
-      tokens: 32_768,
+      tokens: 32_000,
       vision: true,
     },
     {
       enabled: true,
       id: 'step-1v-8k',
-      tokens: 8192,
+      tokens: 8000,
       vision: true,
     },
   ],
-  checkModel: 'step-1-8k',
+  checkModel: 'step-1-flash',
   // after test, currently https://api.stepfun.com/v1/chat/completions has the CORS issue
   // So we should close the browser request mode
   disableBrowserRequest: true,
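The stepfun.ts values follow the comment this commit adds at the top of the file: according to the StepFun docs (https://platform.stepfun.com/docs/llm/text), the 'k' in a model name counts thousands of tokens, so step-1-32k has a 32,000-token window rather than 32,768. A small sketch of that naming convention, using a hypothetical helper that is not part of the repo:

// Hypothetical helper (illustration only, not in the codebase): derive the
// context window from a Stepfun model id, where "k" means 1,000 tokens.
const contextFromStepfunId = (id: string): number | undefined => {
  const match = id.match(/(\d+)k/); // e.g. 'step-1-32k' -> '32'
  return match ? Number(match[1]) * 1000 : undefined;
};

console.log(contextFromStepfunId('step-1-32k')); // 32000
console.log(contextFromStepfunId('step-1-256k')); // 256000
console.log(contextFromStepfunId('step-1-flash')); // undefined: no 'k' suffix

step-1-flash carries no 'k' suffix, so its 8,000-token window is listed explicitly in the entry above, and checkModel is switched to it, presumably as the cheaper choice for the provider's connectivity check.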
