Skip to content

Commit

Permalink
feat: if no models are specified in generate, still try
Browse files Browse the repository at this point in the history
  • Loading branch information
benfdking committed Sep 11, 2024
1 parent 24a6227 commit c59d95e
Show file tree
Hide file tree
Showing 19 changed files with 2,293 additions and 1,306 deletions.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

19 changes: 3 additions & 16 deletions js/packages/quary-extension/src/web/chatEdit.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import * as vscode from 'vscode'
import { isErr, Ok, Result } from '@shared/result'
import { isErr, Result } from '@shared/result'
import { getServices, preInitSetup } from './services'
import { chatReturnFinalChat } from './chatHelpers'

export const chatEdit = async (
extensionContext: vscode.ExtensionContext,
Expand Down Expand Up @@ -43,19 +44,5 @@ export const chatEdit = async (
modelName,
prompt: request.prompt,
})
if (isErr(prompt)) {
return prompt
}

const craftedPrompt = [
vscode.LanguageModelChatMessage.Assistant(prompt.value.agentPrompt),
vscode.LanguageModelChatMessage.User(prompt.value.userPrompt),
]
const modelRequest = await model.sendRequest(craftedPrompt, {}, token)

for await (const fragment of modelRequest.text) {
stream.push(new vscode.ChatResponseMarkdownPart(fragment))
}

return Ok(undefined)
return chatReturnFinalChat(prompt, model, token, stream)
}
16 changes: 3 additions & 13 deletions js/packages/quary-extension/src/web/chatExplain.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import * as vscode from 'vscode'
import { isErr, Ok, Result } from '@shared/result'
import { isErr, Result } from '@shared/result'
import { getServices, preInitSetup } from './services'
import { chatReturnFinalChat } from './chatHelpers'

export const chatExplain = async (
extensionContext: vscode.ExtensionContext,
Expand Down Expand Up @@ -41,16 +42,5 @@ export const chatExplain = async (
modelName,
userPrompt: request.prompt,
})
if (isErr(prompt)) {
return prompt
}
const craftedPrompt = [
vscode.LanguageModelChatMessage.Assistant(prompt.value.agentPrompt),
vscode.LanguageModelChatMessage.User(prompt.value.userPrompt),
]
const modelRequest = await model.sendRequest(craftedPrompt, {}, token)
for await (const fragment of modelRequest.text) {
stream.push(new vscode.ChatResponseMarkdownPart(fragment))
}
return Ok(undefined)
return chatReturnFinalChat(prompt, model, token, stream)
}
Loading

0 comments on commit c59d95e

Please sign in to comment.