feat: make maxResponseTokenLength configurable (#170, #171)
josStorer committed Apr 10, 2023
1 parent a19e029 commit 56349a7
Showing 9 changed files with 35 additions and 17 deletions.
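
The change follows one pattern across the three API modules below: the module-level constant maxResponseTokenLength exported from src/config/index.mjs is removed, its default value (1000) moves into defaultConfig, and each request handler reads the setting from the user config object it already fetches. A minimal sketch of the resulting access pattern, built only from names that appear in this diff:

    const config = await getUserConfig()
    await fetchSSE(apiUrl, {
      // other request options elided; see the per-file hunks below
      body: JSON.stringify({
        stream: true,
        max_tokens: config.maxResponseTokenLength, // previously the shared constant
      }),
    })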
3 changes: 2 additions & 1 deletion src/_locales/en/main.json
@@ -98,5 +98,6 @@
   "Open Conversation Page": "Open Conversation Page",
   "Open Conversation Window": "Open Conversation Window",
   "Store to Independent Conversation Page": "Store to Independent Conversation Page",
-  "Keep Conversation Window in Background": "Keep conversation window in background, so that you can use shortcut keys to call it up in any program"
+  "Keep Conversation Window in Background": "Keep conversation window in background, so that you can use shortcut keys to call it up in any program",
+  "Max Response Token Length": "Max Response Token Length"
 }
3 changes: 2 additions & 1 deletion src/_locales/zh-hans/main.json
@@ -98,5 +98,6 @@
   "Open Conversation Page": "打开独立对话页",
   "Open Conversation Window": "打开独立对话窗口",
   "Store to Independent Conversation Page": "收纳到独立对话页",
-  "Keep Conversation Window in Background": "保持对话窗口在后台, 以便在任何程序中使用快捷键呼出"
+  "Keep Conversation Window in Background": "保持对话窗口在后台, 以便在任何程序中使用快捷键呼出",
+  "Max Response Token Length": "响应的最大token长度"
 }
3 changes: 2 additions & 1 deletion src/_locales/zh-hant/main.json
@@ -98,5 +98,6 @@
   "Open Conversation Page": "開啟獨立對話頁",
   "Open Conversation Window": "開啟獨立對話視窗",
   "Store to Independent Conversation Page": "收納到獨立對話頁",
-  "Keep Conversation Window in Background": "保持對話窗口在後臺, 以便在任何程序中使用快捷鍵呼出"
+  "Keep Conversation Window in Background": "保持對話窗口在後臺, 以便在任何程序中使用快捷鍵呼出",
+  "Max Response Token Length": "響應的最大token長度"
 }
4 changes: 2 additions & 2 deletions src/background/apis/azure-openai-api.mjs
@@ -1,4 +1,4 @@
-import { getUserConfig, maxResponseTokenLength } from '../../config/index.mjs'
+import { getUserConfig } from '../../config/index.mjs'
 import { getChatSystemPromptBase, pushRecord, setAbortController } from './shared.mjs'
 import { getConversationPairs } from '../../utils/get-conversation-pairs'
 import { fetchSSE } from '../../utils/fetch-sse'
@@ -32,7 +32,7 @@ export async function generateAnswersWithAzureOpenaiApi(port, question, session)
     body: JSON.stringify({
       messages: prompt,
       stream: true,
-      max_tokens: maxResponseTokenLength,
+      max_tokens: config.maxResponseTokenLength,
     }),
     onMessage(message) {
       console.debug('sse message', message)
7 changes: 4 additions & 3 deletions src/background/apis/custom-api.mjs
@@ -5,7 +5,7 @@
 // and it has not yet had a negative impact on maintenance.
 // If necessary, I will refactor.
 
-import { getUserConfig, maxResponseTokenLength } from '../../config/index.mjs'
+import { getUserConfig } from '../../config/index.mjs'
 import { fetchSSE } from '../../utils/fetch-sse'
 import { getConversationPairs } from '../../utils/get-conversation-pairs'
 import { isEmpty } from 'lodash-es'
@@ -24,7 +24,8 @@ export async function generateAnswersWithCustomApi(port, question, session, apiK
   const prompt = getConversationPairs(session.conversationRecords, false)
   prompt.unshift({ role: 'system', content: await getCustomApiPromptBase() })
   prompt.push({ role: 'user', content: question })
-  const apiUrl = (await getUserConfig()).customModelApiUrl
+  const config = await getUserConfig()
+  const apiUrl = config.customModelApiUrl
 
   let answer = ''
   await fetchSSE(apiUrl, {
@@ -38,7 +39,7 @@ export async function generateAnswersWithCustomApi(port, question, session, apiK
       messages: prompt,
       model: modelName,
       stream: true,
-      max_tokens: maxResponseTokenLength,
+      max_tokens: config.maxResponseTokenLength,
     }),
     onMessage(message) {
       console.debug('sse message', message)
12 changes: 7 additions & 5 deletions src/background/apis/openai-api.mjs
@@ -1,6 +1,6 @@
 // api version
 
-import { maxResponseTokenLength, Models, getUserConfig } from '../../config/index.mjs'
+import { Models, getUserConfig } from '../../config/index.mjs'
 import { fetchSSE } from '../../utils/fetch-sse'
 import { getConversationPairs } from '../../utils/get-conversation-pairs'
 import { isEmpty } from 'lodash-es'
@@ -31,7 +31,8 @@ export async function generateAnswersWithGptCompletionApi(
     (await getCompletionPromptBase()) +
     getConversationPairs(session.conversationRecords, true) +
     `Human: ${question}\nAI: `
-  const apiUrl = (await getUserConfig()).customOpenAiApiUrl
+  const config = await getUserConfig()
+  const apiUrl = config.customOpenAiApiUrl
 
   let answer = ''
   await fetchSSE(`${apiUrl}/v1/completions`, {
@@ -45,7 +46,7 @@ export async function generateAnswersWithGptCompletionApi(
       prompt: prompt,
      model: Models[modelName].value,
       stream: true,
-      max_tokens: maxResponseTokenLength,
+      max_tokens: config.maxResponseTokenLength,
     }),
     onMessage(message) {
       console.debug('sse message', message)
@@ -94,7 +95,8 @@ export async function generateAnswersWithChatgptApi(port, question, session, api
   const prompt = getConversationPairs(session.conversationRecords, false)
   prompt.unshift({ role: 'system', content: await getChatSystemPromptBase() })
   prompt.push({ role: 'user', content: question })
-  const apiUrl = (await getUserConfig()).customOpenAiApiUrl
+  const config = await getUserConfig()
+  const apiUrl = config.customOpenAiApiUrl
 
   let answer = ''
   await fetchSSE(`${apiUrl}/v1/chat/completions`, {
@@ -108,7 +110,7 @@ export async function generateAnswersWithChatgptApi(port, question, session, api
       messages: prompt,
       model: Models[modelName].value,
       stream: true,
-      max_tokens: maxResponseTokenLength,
+      max_tokens: config.maxResponseTokenLength,
     }),
     onMessage(message) {
       console.debug('sse message', message)
3 changes: 1 addition & 2 deletions src/config/index.mjs
@@ -48,8 +48,6 @@ export const ModelMode = {
   fast: 'Fast',
 }
 
-export const maxResponseTokenLength = 1000
-
 /**
  * @typedef {typeof defaultConfig} UserConfig
  */
@@ -82,6 +80,7 @@ export const defaultConfig = {
 
   // advanced
 
+  maxResponseTokenLength: 1000,
   customChatGptWebApiUrl: 'https://chat.openai.com',
   customChatGptWebApiPath: '/backend-api/conversation',
   customOpenAiApiUrl: 'https://api.openai.com',
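
Moving the value out of a frozen export and into defaultConfig is what makes it user-configurable: consumers now read whatever getUserConfig() resolves to instead of importing a compile-time constant. As a hypothetical illustration of why that works, a typical browser-extension getUserConfig() merges stored values over the defaults roughly like this (the actual implementation in this repository may differ, and the chrome.storage call is an assumption):

    export async function getUserConfig() {
      // assumption: user overrides live in extension storage
      const stored = await chrome.storage.local.get(Object.keys(defaultConfig))
      return { ...defaultConfig, ...stored }
    }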
14 changes: 14 additions & 0 deletions src/popup/Popup.jsx
@@ -367,6 +367,20 @@ function AdvancedPart({ config, updateConfig }) {
 
   return (
     <>
+      <label>
+        {t('Max Response Token Length')}
+        <input
+          type="number"
+          min="100"
+          max="40000"
+          step="100"
+          value={config.maxResponseTokenLength}
+          onChange={(e) => {
+            const value = parseInt(e.target.value)
+            updateConfig({ maxResponseTokenLength: value })
+          }}
+        />
+      </label>
       <label>
         {t('Custom ChatGPT Web API Url')}
         <input
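
One edge case in the new handler: parseInt('') is NaN, so clearing the number field would write NaN into the config until a new value is typed. A guard like the following (not part of this commit; the fallback value is an assumption based on the shipped default) would keep the stored value numeric:

    onChange={(e) => {
      const value = parseInt(e.target.value, 10)
      // fall back to the default of 1000 when the field is empty or invalid
      updateConfig({ maxResponseTokenLength: Number.isNaN(value) ? 1000 : value })
    }}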
3 changes: 1 addition & 2 deletions src/utils/crop-text.mjs
@@ -20,13 +20,12 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 // SOFTWARE.
 
-import { maxResponseTokenLength } from '../config/index.mjs'
 import { encode } from '@nem035/gpt-3-encoder'
 
 // TODO add model support
 export function cropText(
   text,
-  maxLength = 3900 - maxResponseTokenLength,
+  maxLength = 3900 - 1000,
   startLength = 400,
   endLength = 300,
   tiktoken = true,
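
Note that cropText's default budget now hardcodes the old constant as 3900 - 1000 rather than tracking the user's setting, since a default parameter cannot await the async getUserConfig(). A caller that wants the crop budget to follow the configured value would pass it explicitly; a sketch under that assumption, reusing only names from this diff:

    const config = await getUserConfig()
    const cropped = cropText(text, 3900 - config.maxResponseTokenLength)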
