Skip to content

Commit d80ae53

Browse files
authored
github copilot chat as default provider (#1491)
* add settings * ✨ Clarify LLM provider description in settings Updated the description to mention all VS Code-supported models. * cleanup * use option to specify provider * change default * fix config name * updated docs * fix condition
1 parent 7036f2f commit d80ae53

File tree

11 files changed

+40
-32
lines changed

11 files changed

+40
-32
lines changed

.github/workflows/genai-linters.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ env:
2020
GENAISCRIPT_MODEL_LINTER: ${{ vars.GENAISCRIPT_MODEL_LINTER }}
2121
jobs:
2222
lint:
23-
if: ${{ github.event.issue.pull_request }}
2423
runs-on: ubuntu-latest
2524
permissions:
2625
pull-requests: write

.vscode/launch.json

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
"${workspaceFolder}/demo"
3030
],
3131
"outFiles": ["${workspaceFolder}/packages/vscode/built/**"],
32-
"preLaunchTask": "npm: compile",
3332
"resolveSourceMapLocations": [
3433
"${workspaceFolder}/**",
3534
"!**/node_modules/**"

.vscode/settings.json

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
"cctx",
2828
"changeext",
2929
"chatencoder",
30+
"chatrender",
3031
"chattypes",
3132
"Chunker",
3233
"chunkers",
@@ -243,6 +244,7 @@
243244
"wasms",
244245
"WEBVTT",
245246
"whisperasr",
247+
"wsclient",
246248
"xpai",
247249
"Yohan"
248250
],

demo/.vscode/settings.json

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
{
22
"cSpell.enabled": false,
3-
"genaiscript.cli.path": "../packages/cli/built/genaiscript.cjs"
3+
"genaiscript.cli.path": "../packages/cli/built/genaiscript.cjs",
4+
"genaiscript.languageChatModels.preferred": true,
5+
"genaiscript.languageChatModelsProvider": true
46
}

docs/src/content/docs/getting-started/configuration.mdx

Lines changed: 22 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,23 @@
11
---
22
title: Configuration
33
description: Set up your LLM connection and authorization with environment
4-
variables for seamless integration.
4+
variables for seamless integration.
55
keywords: LLM setup, API configuration, environment variables, secure
6-
authorization, LLM integration
6+
authorization, LLM integration
77
sidebar:
8-
order: 2
8+
order: 2
99
hero:
10-
image:
11-
alt: A small, flat, 8-bit style illustration features simple colored blocks
12-
linked by straight lines to depict API and cloud connections. Each block
13-
displays a basic symbol, including representations of artificial
14-
intelligence, a cloud, code, an environment file, and a lock for security.
15-
Iconic, abstract shapes indicate major tech providers like OpenAI, Azure,
16-
Google, Hugging Face, GitHub, and Anthropic. The arrangement is clear and
17-
geometric, with each block in a unique color from a five-color palette,
18-
all on a transparent background with no text, people, or visual effects.
19-
file: ./configuration.png
20-
10+
image:
11+
alt:
12+
A small, flat, 8-bit style illustration features simple colored blocks
13+
linked by straight lines to depict API and cloud connections. Each block
14+
displays a basic symbol, including representations of artificial
15+
intelligence, a cloud, code, an environment file, and a lock for security.
16+
Iconic, abstract shapes indicate major tech providers like OpenAI, Azure,
17+
Google, Hugging Face, GitHub, and Anthropic. The arrangement is clear and
18+
geometric, with each block in a unique color from a five-color palette,
19+
all on a transparent background with no text, people, or visual effects.
20+
file: ./configuration.png
2121
---
2222

2323
import { FileTree } from "@astrojs/starlight/components"
@@ -334,7 +334,7 @@ Ensure that the `models` permission is enabled in your workflow configuration.
334334
335335
```yaml title="genai.yml" "models: read"
336336
permissions:
337-
models: read
337+
models: read
338338
```
339339
340340
</li>
@@ -344,9 +344,9 @@ permissions:
344344
Pass the `GITHUB_TOKEN` when running `genaiscript`
345345
346346
```yaml title="genai.yml" "GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}"
347-
run: npx -y genaiscript run ...
348-
env:
349-
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
347+
run: npx -y genaiscript run ...
348+
env:
349+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
350350
```
351351
352352
</li>
@@ -1178,7 +1178,10 @@ This mode is useful to run your scripts without having a separate LLM provider o
11781178
and have additional limitations and rate limiting defined by the GitHub Copilot platform.
11791179

11801180
There is no configuration needed as long as you have GitHub Copilot installed and configured in Visual Studio Code.
1181-
You can force using this model by using `github_copilot_chat:*` as a model name.
1181+
1182+
You can force using this model by using `github_copilot_chat:*` as a model name
1183+
or set the **GenAIScript > Language Chat Models Provider** setting to true.
1184+
This will default GenAIScript to use this provider for model aliases.
11821185

11831186
<YouTube id="LRrVMiZgWJg" posterQuality="high" />
11841187

docs/src/content/docs/reference/scripts/system.mdx

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4824,17 +4824,16 @@ export default function (ctx: ChatGenerationContext) {
48244824
48254825
### `system.z3`
48264826
4827-
Zero-shot Chain Of Thought
4827+
Z3
48284828
4829-
Zero-shot Chain Of Thought technique. More at https://learnprompting.org/docs/intermediate/zero_shot_cot.
4829+
Solve constraints system using the Z3 constraint solver.
48304830
48314831
- tool `z3`: Solves a SMTLIB2 problem using the Z3 constraint solver. Send problems one at a time. Use this tool if you need to run Z3.
48324832
48334833
`````js wrap title="system.z3"
48344834
system({
4835-
title: "Zero-shot Chain Of Thought",
4836-
description:
4837-
"Zero-shot Chain Of Thought technique. More at https://learnprompting.org/docs/intermediate/zero_shot_cot.",
4835+
title: "Z3",
4836+
description: "Solve constraints system using the Z3 constraint solver.",
48384837
})
48394838
const dbg = host.logger("system:z3")
48404839

packages/cli/src/nodehost.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ import {
2323
MODEL_PROVIDER_AZURE_SERVERLESS_OPENAI,
2424
AZURE_MANAGEMENT_TOKEN_SCOPES,
2525
MODEL_PROVIDER_AZURE_AI_INFERENCE,
26-
MODEL_PROVIDERS,
2726
NEGATIVE_GLOB_REGEX,
2827
} from "../../core/src/constants"
2928
import {

packages/core/src/chat.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ import {
3232
MAX_TOOL_CALLS,
3333
MAX_TOOL_CONTENT_TOKENS,
3434
MAX_TOOL_DESCRIPTION_LENGTH,
35-
MODEL_PROVIDERS,
3635
SYSTEM_FENCE,
3736
} from "./constants"
3837
import { parseAnnotations } from "./annotations"

packages/core/src/modelalias.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
import debug from "debug"
22
const dbg = debug("genaiscript:modelalias")
3-
import { MODEL_PROVIDERS } from "../../core/src/constants"
43
import { parseKeyValuePair } from "../../core/src/fence"
54
import { runtimeHost } from "../../core/src/host"
6-
import { logVerbose } from "../../core/src/util"
75
import { PromptScriptRunOptions } from "./server/messages"
86
import { providerFeatures } from "./features"
97

packages/vscode/package.json

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -336,6 +336,10 @@
336336
"type": "object",
337337
"description": "Mapping from GenAIScript model (openai:gpt-4) to Visual Studio Code Language Chat Model (github...)"
338338
},
339+
"genaiscript.languageChatModelsProvider": {
340+
"type": "boolean",
341+
"description": "Use GitHub Copilot Chat Models (or other models provided in Visual Studio Code) as the preferred LLM provider when a model is not specified."
342+
},
339343
"genaiscript.diagnostics": {
340344
"type": "boolean",
341345
"default": false,

0 commit comments

Comments
 (0)