diff --git a/.github/workflows/smoke-gemini.lock.yml b/.github/workflows/smoke-gemini.lock.yml
new file mode 100644
index 0000000000..fde66ab74c
--- /dev/null
+++ b/.github/workflows/smoke-gemini.lock.yml
@@ -0,0 +1,1362 @@
+#
+# ___ _ _
+# / _ \ | | (_)
+# | |_| | __ _ ___ _ __ | |_ _ ___
+# | _ |/ _` |/ _ \ '_ \| __| |/ __|
+# | | | | (_| | __/ | | | |_| | (__
+# \_| |_/\__, |\___|_| |_|\__|_|\___|
+# __/ |
+# _ _ |___/
+# | | | | / _| |
+# | | | | ___ _ __ _ __| |_| | _____ ____
+# | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___|
+# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
+# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
+#
+# This file was automatically generated by gh-aw. DO NOT EDIT.
+#
+# To update this file, edit the corresponding .md file and run:
+# gh aw compile
+# Not all edits will cause changes to this file.
+#
+# For more information: https://github.github.com/gh-aw/introduction/overview/
+#
+# Smoke test workflow that validates Gemini engine functionality twice daily
+#
+# Resolved workflow manifest:
+# Imports:
+# - shared/gh.md
+# - shared/reporting.md
+#
+# gh-aw-metadata: {"schema_version":"v1","frontmatter_hash":"1c7e9095a4960a70d4be26e98a3da1fb9a7a3526fc8e60d0614b182f9fbf0904"}
+
+name: "Smoke Gemini"
+"on":
+ pull_request:
+ # names: # Label filtering applied via job conditions
+ # - smoke # Label filtering applied via job conditions
+ types:
+ - labeled
+ schedule:
+ - cron: "22 */12 * * *"
+ # Friendly format: every 12h (scattered)
+ workflow_dispatch:
+
+permissions: {}
+
+concurrency:
+ group: "gh-aw-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
+ cancel-in-progress: true
+
+run-name: "Smoke Gemini"
+
+jobs:
+ activation:
+ needs: pre_activation
+ if: >
+ (needs.pre_activation.outputs.activated == 'true') && (((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.id == github.repository_id)) &&
+ ((github.event_name != 'pull_request') || ((github.event.action != 'labeled') || (github.event.label.name == 'smoke'))))
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ outputs:
+ body: ${{ steps.sanitized.outputs.body }}
+ comment_id: ""
+ comment_repo: ""
+ text: ${{ steps.sanitized.outputs.text }}
+ title: ${{ steps.sanitized.outputs.title }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Validate context variables
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/validate_context_variables.cjs');
+ await main();
+ - name: Checkout .github and .agents folders
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ .github
+ .agents
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Check workflow file timestamps
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_WORKFLOW_FILE: "smoke-gemini.lock.yml"
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/check_workflow_timestamp_api.cjs');
+ await main();
+ - name: Compute current body text
+ id: sanitized
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/compute_text.cjs');
+ await main();
+ - name: Create prompt with built-in context
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_GITHUB_ACTOR: ${{ github.actor }}
+ GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
+ GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
+ GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
+ GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
+ run: |
+ bash /opt/gh-aw/actions/create_prompt_first.sh
+ cat << 'GH_AW_PROMPT_EOF' > "$GH_AW_PROMPT"
+
+ GH_AW_PROMPT_EOF
+ cat "/opt/gh-aw/prompts/xpia.md" >> "$GH_AW_PROMPT"
+ cat "/opt/gh-aw/prompts/temp_folder_prompt.md" >> "$GH_AW_PROMPT"
+ cat "/opt/gh-aw/prompts/markdown.md" >> "$GH_AW_PROMPT"
+ cat "/opt/gh-aw/prompts/cache_memory_prompt.md" >> "$GH_AW_PROMPT"
+ cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
+
+ GitHub API Access Instructions
+
+ The gh CLI is NOT authenticated. Do NOT use gh commands for GitHub operations.
+
+
+ To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls.
+
+ Temporary IDs: Some safe output tools support a temporary ID field (usually named temporary_id) so you can reference newly-created items elsewhere in the SAME agent output (for example, using #aw_abc1 in a later body).
+
+ **IMPORTANT - temporary_id format rules:**
+ - If you DON'T need to reference the item later, OMIT the temporary_id field entirely (it will be auto-generated if needed)
+ - If you DO need cross-references/chaining, you MUST match this EXACT validation regex: /^aw_[A-Za-z0-9]{3,8}$/i
+ - Format: aw_ prefix followed by 3 to 8 alphanumeric characters (A-Z, a-z, 0-9, case-insensitive)
+ - Valid alphanumeric characters: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789
+ - INVALID examples: aw_ab (too short), aw_123456789 (too long), aw_test-id (contains hyphen), aw_id_123 (contains underscore)
+ - VALID examples: aw_abc, aw_abc1, aw_Test123, aw_A1B2C3D4, aw_12345678
+ - To generate valid IDs: use 3-8 random alphanumeric characters or omit the field to let the system auto-generate
+
+ Do NOT invent other aw_* formats — downstream steps will reject them with validation errors matching against /^aw_[A-Za-z0-9]{3,8}$/i.
+
+ Discover available tools from the safeoutputs MCP server.
+
+ **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped.
+
+ **Note**: If you made no other safe output tool calls during this workflow execution, call the "noop" tool to provide a status message indicating completion or that no actions were needed.
+
+
+
+ The following GitHub context information is available for this workflow:
+ {{#if __GH_AW_GITHUB_ACTOR__ }}
+ - **actor**: __GH_AW_GITHUB_ACTOR__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_REPOSITORY__ }}
+ - **repository**: __GH_AW_GITHUB_REPOSITORY__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_WORKSPACE__ }}
+ - **workspace**: __GH_AW_GITHUB_WORKSPACE__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }}
+ - **issue-number**: #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }}
+ - **discussion-number**: #__GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }}
+ - **pull-request-number**: #__GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }}
+ - **comment-id**: __GH_AW_GITHUB_EVENT_COMMENT_ID__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_RUN_ID__ }}
+ - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__
+ {{/if}}
+
+
+ GH_AW_PROMPT_EOF
+ cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
+
+ GH_AW_PROMPT_EOF
+ cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
+ {{#runtime-import .github/workflows/shared/gh.md}}
+ GH_AW_PROMPT_EOF
+ cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
+ {{#runtime-import .github/workflows/shared/reporting.md}}
+ GH_AW_PROMPT_EOF
+ cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
+ {{#runtime-import .github/workflows/smoke-gemini.md}}
+ GH_AW_PROMPT_EOF
+ - name: Interpolate variables and render templates
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
+ GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/interpolate_prompt.cjs');
+ await main();
+ - name: Substitute placeholders
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_ALLOWED_EXTENSIONS: ''
+ GH_AW_CACHE_DESCRIPTION: ''
+ GH_AW_CACHE_DIR: '/tmp/gh-aw/cache-memory/'
+ GH_AW_GITHUB_ACTOR: ${{ github.actor }}
+ GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
+ GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
+ GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
+ GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
+ GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: ${{ needs.pre_activation.outputs.activated }}
+ GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_MATCHED_COMMAND: ${{ needs.pre_activation.outputs.matched_command }}
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+
+ const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs');
+
+ // Call the substitution function
+ return await substitutePlaceholders({
+ file: process.env.GH_AW_PROMPT,
+ substitutions: {
+ GH_AW_ALLOWED_EXTENSIONS: process.env.GH_AW_ALLOWED_EXTENSIONS,
+ GH_AW_CACHE_DESCRIPTION: process.env.GH_AW_CACHE_DESCRIPTION,
+ GH_AW_CACHE_DIR: process.env.GH_AW_CACHE_DIR,
+ GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR,
+ GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID,
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER,
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER,
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER,
+ GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY,
+ GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID,
+ GH_AW_GITHUB_WORKSPACE: process.env.GH_AW_GITHUB_WORKSPACE,
+ GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED,
+ GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_MATCHED_COMMAND: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_MATCHED_COMMAND
+ }
+ });
+ - name: Validate prompt placeholders
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ run: bash /opt/gh-aw/actions/validate_prompt_placeholders.sh
+ - name: Print prompt
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ run: bash /opt/gh-aw/actions/print_prompt_summary.sh
+ - name: Upload prompt artifact
+ if: success()
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: prompt
+ path: /tmp/gh-aw/aw-prompts/prompt.txt
+ retention-days: 1
+
+ agent:
+ needs: activation
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ issues: read
+ pull-requests: read
+ env:
+ DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
+ GH_AW_ASSETS_ALLOWED_EXTS: ""
+ GH_AW_ASSETS_BRANCH: ""
+ GH_AW_ASSETS_MAX_SIZE_KB: 0
+ GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
+ GH_AW_SAFE_OUTPUTS: /opt/gh-aw/safeoutputs/outputs.jsonl
+ GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config.json
+ GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools.json
+ GH_AW_WORKFLOW_ID_SANITIZED: smokegemini
+ outputs:
+ checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
+ has_patch: ${{ steps.collect_output.outputs.has_patch }}
+ model: ${{ steps.generate_aw_info.outputs.model }}
+ output: ${{ steps.collect_output.outputs.output }}
+ output_types: ${{ steps.collect_output.outputs.output_types }}
+ secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Checkout repository
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ persist-credentials: false
+ - name: Create gh-aw temp directory
+ run: bash /opt/gh-aw/actions/create_gh_aw_tmp_dir.sh
+ # Cache memory file share configuration from frontmatter processed below
+ - name: Create cache-memory directory
+ run: bash /opt/gh-aw/actions/create_cache_memory_dir.sh
+ - name: Restore cache-memory file share data
+ uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+ with:
+ key: memory-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}-${{ github.run_id }}
+ path: /tmp/gh-aw/cache-memory
+ restore-keys: |
+ memory-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}-
+ - name: Configure Git credentials
+ env:
+ REPO_NAME: ${{ github.repository }}
+ SERVER_URL: ${{ github.server_url }}
+ run: |
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git config --global user.name "github-actions[bot]"
+ # Re-authenticate git with GitHub token
+ SERVER_URL_STRIPPED="${SERVER_URL#https://}"
+ git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
+ echo "Git configured with standard GitHub Actions identity"
+ - name: Checkout PR branch
+ id: checkout-pr
+ if: |
+ github.event.pull_request
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/checkout_pr_branch.cjs');
+ await main();
+ - name: Generate agentic run info
+ id: generate_aw_info
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const fs = require('fs');
+
+ const awInfo = {
+ engine_id: "gemini",
+ engine_name: "Google Gemini CLI",
+ model: process.env.GH_AW_MODEL_AGENT_CUSTOM || "",
+ version: "",
+ agent_version: "",
+ workflow_name: "Smoke Gemini",
+ experimental: true,
+ supports_tools_allowlist: true,
+ run_id: context.runId,
+ run_number: context.runNumber,
+ run_attempt: process.env.GITHUB_RUN_ATTEMPT,
+ repository: context.repo.owner + '/' + context.repo.repo,
+ ref: context.ref,
+ sha: context.sha,
+ actor: context.actor,
+ event_name: context.eventName,
+ staged: false,
+ allowed_domains: ["defaults","github"],
+ firewall_enabled: false,
+ awf_version: "",
+ awmg_version: "v0.1.4",
+ steps: {
+ firewall: "squid"
+ },
+ created_at: new Date().toISOString()
+ };
+
+ // Write to /tmp/gh-aw directory to avoid inclusion in PR
+ const tmpPath = '/tmp/gh-aw/aw_info.json';
+ fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2));
+ console.log('Generated aw_info.json at:', tmpPath);
+ console.log(JSON.stringify(awInfo, null, 2));
+
+ // Set model as output for reuse in other steps/jobs
+ core.setOutput('model', awInfo.model);
+ - name: Validate GEMINI_API_KEY secret
+ id: validate-secret
+ run: /opt/gh-aw/actions/validate_multi_secret.sh GEMINI_API_KEY 'Gemini CLI' https://geminicli.com/docs/get-started/authentication/
+ env:
+ GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+ - name: Setup Node.js
+ uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
+ with:
+ node-version: '24'
+ package-manager-cache: false
+ - name: Install awf binary
+ run: bash /opt/gh-aw/actions/install_awf_binary.sh v0.20.1
+ - name: Install Gemini CLI
+ run: npm install -g --silent @google/gemini-cli@0.29.0
+ - name: Determine automatic lockdown mode for GitHub MCP Server
+ id: determine-automatic-lockdown
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
+ GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
+ with:
+ script: |
+ const determineAutomaticLockdown = require('/opt/gh-aw/actions/determine_automatic_lockdown.cjs');
+ await determineAutomaticLockdown(github, context, core);
+ - name: Download container images
+ run: bash /opt/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.20.1 ghcr.io/github/gh-aw-firewall/api-proxy:0.20.1 ghcr.io/github/gh-aw-firewall/squid:0.20.1 ghcr.io/github/gh-aw-mcpg:v0.1.4 ghcr.io/github/github-mcp-server:v0.30.3 node:lts-alpine
+ - name: Write Safe Outputs Config
+ run: |
+ mkdir -p /opt/gh-aw/safeoutputs
+ mkdir -p /tmp/gh-aw/safeoutputs
+ mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
+ cat > /opt/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_EOF'
+ {"add_comment":{"max":2},"add_labels":{"allowed":["smoke-gemini"],"max":3},"create_issue":{"expires":2,"max":1},"missing_data":{},"missing_tool":{},"noop":{"max":1}}
+ GH_AW_SAFE_OUTPUTS_CONFIG_EOF
+ cat > /opt/gh-aw/safeoutputs/tools.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_EOF'
+ [
+ {
+ "description": "Create a new GitHub issue for tracking bugs, feature requests, or tasks. Use this for actionable work items that need assignment, labeling, and status tracking. For reports, announcements, or status updates that don't require task tracking, use create_discussion instead. CONSTRAINTS: Maximum 1 issue(s) can be created.",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "body": {
+ "description": "Detailed issue description in Markdown. Do NOT repeat the title as a heading since it already appears as the issue's h1. Include context, reproduction steps, or acceptance criteria as appropriate.",
+ "type": "string"
+ },
+ "labels": {
+ "description": "Labels to categorize the issue (e.g., 'bug', 'enhancement'). Labels must exist in the repository.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "parent": {
+ "description": "Parent issue number for creating sub-issues. This is the numeric ID from the GitHub URL (e.g., 42 in github.com/owner/repo/issues/42). Can also be a temporary_id (e.g., 'aw_abc123', 'aw_Test123') from a previously created issue in the same workflow run.",
+ "type": [
+ "number",
+ "string"
+ ]
+ },
+ "temporary_id": {
+ "description": "Unique temporary identifier for referencing this issue before it's created. Format: 'aw_' followed by 3 to 8 alphanumeric characters (e.g., 'aw_abc1', 'aw_Test123'). Use '#aw_ID' in body text to reference other issues by their temporary_id; these are replaced with actual issue numbers after creation.",
+ "pattern": "^aw_[A-Za-z0-9]{3,8}$",
+ "type": "string"
+ },
+ "title": {
+ "description": "Concise issue title summarizing the bug, feature, or task. The title appears as the main heading, so keep it brief and descriptive.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "title",
+ "body"
+ ],
+ "type": "object"
+ },
+ "name": "create_issue"
+ },
+ {
+ "description": "Add a comment to an existing GitHub issue, pull request, or discussion. Use this to provide feedback, answer questions, or add information to an existing conversation. For creating new items, use create_issue, create_discussion, or create_pull_request instead. IMPORTANT: Comments are subject to validation constraints enforced by the MCP server - maximum 65536 characters for the complete comment (including footer which is added automatically), 10 mentions (@username), and 50 links. Exceeding these limits will result in an immediate error with specific guidance. NOTE: By default, this tool requires discussions:write permission. If your GitHub App lacks Discussions permission, set 'discussions: false' in the workflow's safe-outputs.add-comment configuration to exclude this permission. CONSTRAINTS: Maximum 2 comment(s) can be added.",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "body": {
+ "description": "The comment text in Markdown format. This is the 'body' field - do not use 'comment_body' or other variations. Provide helpful, relevant information that adds value to the conversation. CONSTRAINTS: The complete comment (your body text + automatically added footer) must not exceed 65536 characters total. Maximum 10 mentions (@username), maximum 50 links (http/https URLs). A footer (~200-500 characters) is automatically appended with workflow attribution, so leave adequate space. If these limits are exceeded, the tool call will fail with a detailed error message indicating which constraint was violated.",
+ "type": "string"
+ },
+ "item_number": {
+ "description": "The issue, pull request, or discussion number to comment on. This is the numeric ID from the GitHub URL (e.g., 123 in github.com/owner/repo/issues/123). If omitted, the tool will attempt to resolve the target from the current workflow context (triggering issue, PR, or discussion).",
+ "type": "number"
+ }
+ },
+ "required": [
+ "body"
+ ],
+ "type": "object"
+ },
+ "name": "add_comment"
+ },
+ {
+ "description": "Add labels to an existing GitHub issue or pull request for categorization and filtering. Labels must already exist in the repository. For creating new issues with labels, use create_issue with the labels property instead. CONSTRAINTS: Only these labels are allowed: [smoke-gemini].",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "item_number": {
+ "description": "Issue or PR number to add labels to. This is the numeric ID from the GitHub URL (e.g., 456 in github.com/owner/repo/issues/456). If omitted, adds labels to the item that triggered this workflow.",
+ "type": "number"
+ },
+ "labels": {
+ "description": "Label names to add (e.g., ['bug', 'priority-high']). Labels must exist in the repository.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "name": "add_labels"
+ },
+ {
+ "description": "Report that a tool or capability needed to complete the task is not available, or share any information you deem important about missing functionality or limitations. Use this when you cannot accomplish what was requested because the required functionality is missing or access is restricted.",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "alternatives": {
+ "description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).",
+ "type": "string"
+ },
+ "reason": {
+ "description": "Explanation of why this tool is needed or what information you want to share about the limitation (max 256 characters).",
+ "type": "string"
+ },
+ "tool": {
+ "description": "Optional: Name or description of the missing tool or capability (max 128 characters). Be specific about what functionality is needed.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "reason"
+ ],
+ "type": "object"
+ },
+ "name": "missing_tool"
+ },
+ {
+ "description": "Log a transparency message when no significant actions are needed. Use this to confirm workflow completion and provide visibility when analysis is complete but no changes or outputs are required (e.g., 'No issues found', 'All checks passed'). This ensures the workflow produces human-visible output even when no other actions are taken.",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "message": {
+ "description": "Status or completion message to log. Should explain what was analyzed and the outcome (e.g., 'Code review complete - no issues found', 'Analysis complete - all tests passing').",
+ "type": "string"
+ }
+ },
+ "required": [
+ "message"
+ ],
+ "type": "object"
+ },
+ "name": "noop"
+ },
+ {
+ "description": "Report that data or information needed to complete the task is not available. Use this when you cannot accomplish what was requested because required data, context, or information is missing.",
+ "inputSchema": {
+ "additionalProperties": false,
+ "properties": {
+ "alternatives": {
+ "description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).",
+ "type": "string"
+ },
+ "context": {
+ "description": "Additional context about the missing data or where it should come from (max 256 characters).",
+ "type": "string"
+ },
+ "data_type": {
+ "description": "Type or description of the missing data or information (max 128 characters). Be specific about what data is needed.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "Explanation of why this data is needed to complete the task (max 256 characters).",
+ "type": "string"
+ }
+ },
+ "required": [],
+ "type": "object"
+ },
+ "name": "missing_data"
+ }
+ ]
+ GH_AW_SAFE_OUTPUTS_TOOLS_EOF
+ cat > /opt/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_EOF'
+ {
+ "add_comment": {
+ "defaultMax": 1,
+ "fields": {
+ "body": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ },
+ "item_number": {
+ "issueOrPRNumber": true
+ }
+ }
+ },
+ "add_labels": {
+ "defaultMax": 5,
+ "fields": {
+ "item_number": {
+ "issueOrPRNumber": true
+ },
+ "labels": {
+ "required": true,
+ "type": "array",
+ "itemType": "string",
+ "itemSanitize": true,
+ "itemMaxLength": 128
+ }
+ }
+ },
+ "create_issue": {
+ "defaultMax": 1,
+ "fields": {
+ "body": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ },
+ "labels": {
+ "type": "array",
+ "itemType": "string",
+ "itemSanitize": true,
+ "itemMaxLength": 128
+ },
+ "parent": {
+ "issueOrPRNumber": true
+ },
+ "repo": {
+ "type": "string",
+ "maxLength": 256
+ },
+ "temporary_id": {
+ "type": "string"
+ },
+ "title": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 128
+ }
+ }
+ },
+ "missing_tool": {
+ "defaultMax": 20,
+ "fields": {
+ "alternatives": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 512
+ },
+ "reason": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 256
+ },
+ "tool": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 128
+ }
+ }
+ },
+ "noop": {
+ "defaultMax": 1,
+ "fields": {
+ "message": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ }
+ }
+ }
+ }
+ GH_AW_SAFE_OUTPUTS_VALIDATION_EOF
+ - name: Generate Safe Outputs MCP Server Config
+ id: safe-outputs-config
+ run: |
+ # Generate a secure random API key (360 bits of entropy, 40+ chars)
+ # Mask immediately to prevent timing vulnerabilities
+ API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
+ echo "::add-mask::${API_KEY}"
+
+ PORT=3001
+
+ # Set outputs for next steps
+ {
+ echo "safe_outputs_api_key=${API_KEY}"
+ echo "safe_outputs_port=${PORT}"
+ } >> "$GITHUB_OUTPUT"
+
+ echo "Safe Outputs MCP server will run on port ${PORT}"
+
+ - name: Start Safe Outputs MCP HTTP Server
+ id: safe-outputs-start
+ env:
+ DEBUG: '*'
+ GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }}
+ GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-config.outputs.safe_outputs_api_key }}
+ GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools.json
+ GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config.json
+ GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
+ run: |
+ # Environment variables are set above to prevent template injection
+ export DEBUG
+ export GH_AW_SAFE_OUTPUTS_PORT
+ export GH_AW_SAFE_OUTPUTS_API_KEY
+ export GH_AW_SAFE_OUTPUTS_TOOLS_PATH
+ export GH_AW_SAFE_OUTPUTS_CONFIG_PATH
+ export GH_AW_MCP_LOG_DIR
+
+ bash /opt/gh-aw/actions/start_safe_outputs_server.sh
+
+ - name: Setup Safe Inputs Config
+ run: |
+ mkdir -p /opt/gh-aw/safe-inputs/logs
+ cat > /opt/gh-aw/safe-inputs/tools.json << 'GH_AW_SAFE_INPUTS_TOOLS_EOF'
+ {
+ "serverName": "safeinputs",
+ "version": "1.0.0",
+ "logDir": "/opt/gh-aw/safe-inputs/logs",
+ "tools": [
+ {
+ "name": "gh",
+ "description": "Execute any gh CLI command. This tool is accessible as 'safeinputs-gh'. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.",
+ "inputSchema": {
+ "properties": {
+ "args": {
+ "description": "Arguments to pass to gh CLI (without the 'gh' prefix). Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'",
+ "type": "string"
+ }
+ },
+ "required": [
+ "args"
+ ],
+ "type": "object"
+ },
+ "handler": "gh.sh",
+ "env": {
+ "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN",
+ "GH_DEBUG": "GH_DEBUG"
+ },
+ "timeout": 60
+ }
+ ]
+ }
+ GH_AW_SAFE_INPUTS_TOOLS_EOF
+ cat > /opt/gh-aw/safe-inputs/mcp-server.cjs << 'GH_AW_SAFE_INPUTS_SERVER_EOF'
+ const path = require("path");
+ const { startHttpServer } = require("./safe_inputs_mcp_server_http.cjs");
+ const configPath = path.join(__dirname, "tools.json");
+ const port = parseInt(process.env.GH_AW_SAFE_INPUTS_PORT || "3000", 10);
+ const apiKey = process.env.GH_AW_SAFE_INPUTS_API_KEY || "";
+ startHttpServer(configPath, {
+ port: port,
+ stateless: true,
+ logDir: "/opt/gh-aw/safe-inputs/logs"
+ }).catch(error => {
+ console.error("Failed to start safe-inputs HTTP server:", error);
+ process.exit(1);
+ });
+ GH_AW_SAFE_INPUTS_SERVER_EOF
+ chmod +x /opt/gh-aw/safe-inputs/mcp-server.cjs
+
+ - name: Setup Safe Inputs Tool Files
+ run: |
+ cat > /opt/gh-aw/safe-inputs/gh.sh << 'GH_AW_SAFE_INPUTS_SH_GH_EOF'
+ #!/bin/bash
+ # Auto-generated safe-input tool: gh
+ # Execute any gh CLI command. This tool is accessible as 'safeinputs-gh'. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh <args>. Use single quotes ' for complex args to avoid shell interpretation issues.
+
+ set -euo pipefail
+
+ echo "gh $INPUT_ARGS"
+ echo " token: ${GH_AW_GH_TOKEN:0:6}..."
+ GH_TOKEN="$GH_AW_GH_TOKEN" gh $INPUT_ARGS
+
+ GH_AW_SAFE_INPUTS_SH_GH_EOF
+ chmod +x /opt/gh-aw/safe-inputs/gh.sh
+
+ - name: Generate Safe Inputs MCP Server Config
+ id: safe-inputs-config
+ run: |
+ # Generate a secure random API key (360 bits of entropy, 40+ chars)
+ # Mask immediately to prevent timing vulnerabilities
+ API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
+ echo "::add-mask::${API_KEY}"
+
+ PORT=3000
+
+ # Set outputs for next steps
+ {
+ echo "safe_inputs_api_key=${API_KEY}"
+ echo "safe_inputs_port=${PORT}"
+ } >> "$GITHUB_OUTPUT"
+
+ echo "Safe Inputs MCP server will run on port ${PORT}"
+
+ - name: Start Safe Inputs MCP HTTP Server
+ id: safe-inputs-start
+ env:
+ DEBUG: '*'
+ GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-config.outputs.safe_inputs_port }}
+ GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-config.outputs.safe_inputs_api_key }}
+ GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GH_DEBUG: 1
+ run: |
+ # Environment variables are set above to prevent template injection
+ export DEBUG
+ export GH_AW_SAFE_INPUTS_PORT
+ export GH_AW_SAFE_INPUTS_API_KEY
+
+ bash /opt/gh-aw/actions/start_safe_inputs_server.sh
+
+ - name: Start MCP Gateway
+ id: start-mcp-gateway
+ env:
+ GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-start.outputs.api_key }}
+ GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-start.outputs.port }}
+ GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }}
+ GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }}
+ GH_DEBUG: 1
+ GITHUB_MCP_LOCKDOWN: ${{ steps.determine-automatic-lockdown.outputs.lockdown == 'true' && '1' || '0' }}
+ GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ run: |
+ set -eo pipefail
+ mkdir -p /tmp/gh-aw/mcp-config
+
+ # Export gateway environment variables for MCP config and gateway script
+ export MCP_GATEWAY_PORT="80"
+ export MCP_GATEWAY_DOMAIN="host.docker.internal"
+ MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
+ echo "::add-mask::${MCP_GATEWAY_API_KEY}"
+ export MCP_GATEWAY_API_KEY
+ export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads"
+ mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}"
+ export DEBUG="*"
+
+ export GH_AW_ENGINE="gemini"
+ export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_LOCKDOWN -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_INPUTS_PORT -e GH_AW_SAFE_INPUTS_API_KEY -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -e GH_AW_GH_TOKEN -e GH_DEBUG -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.1.4'
+
+ cat << GH_AW_MCP_CONFIG_EOF | bash /opt/gh-aw/actions/start_mcp_gateway.sh
+ {
+ "mcpServers": {
+ "github": {
+ "container": "ghcr.io/github/github-mcp-server:v0.30.3",
+ "env": {
+ "GITHUB_LOCKDOWN_MODE": "$GITHUB_MCP_LOCKDOWN",
+ "GITHUB_PERSONAL_ACCESS_TOKEN": "$GITHUB_MCP_SERVER_TOKEN",
+ "GITHUB_READ_ONLY": "1",
+ "GITHUB_TOOLSETS": "repos,pull_requests"
+ }
+ },
+ "safeinputs": {
+ "type": "http",
+ "url": "http://host.docker.internal:$GH_AW_SAFE_INPUTS_PORT",
+ "headers": {
+ "Authorization": "$GH_AW_SAFE_INPUTS_API_KEY"
+ }
+ },
+ "safeoutputs": {
+ "type": "http",
+ "url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT",
+ "headers": {
+ "Authorization": "$GH_AW_SAFE_OUTPUTS_API_KEY"
+ }
+ }
+ },
+ "gateway": {
+ "port": $MCP_GATEWAY_PORT,
+ "domain": "${MCP_GATEWAY_DOMAIN}",
+ "apiKey": "${MCP_GATEWAY_API_KEY}",
+ "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
+ }
+ }
+ GH_AW_MCP_CONFIG_EOF
+ - name: Generate workflow overview
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { generateWorkflowOverview } = require('/opt/gh-aw/actions/generate_workflow_overview.cjs');
+ await generateWorkflowOverview(core);
+ - name: Download prompt artifact
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ with:
+ name: prompt
+ path: /tmp/gh-aw/aw-prompts
+ - name: Clean git credentials
+ run: bash /opt/gh-aw/actions/clean_git_credentials.sh
+ - name: Run Gemini
+ id: agentic_execution
+ run: |
+ set -o pipefail
+ sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.githubusercontent.com,*.googleapis.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,generativelanguage.googleapis.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --image-tag 0.20.1 --skip-pull \
+ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && gemini --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --output-format json --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
+ env:
+ GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+ GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
+ GH_AW_MODEL_AGENT_GEMINI: ${{ vars.GH_AW_MODEL_AGENT_GEMINI || '' }}
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ GITHUB_WORKSPACE: ${{ github.workspace }}
+ - name: Configure Git credentials
+ env:
+ REPO_NAME: ${{ github.repository }}
+ SERVER_URL: ${{ github.server_url }}
+ run: |
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git config --global user.name "github-actions[bot]"
+ # Re-authenticate git with GitHub token
+ SERVER_URL_STRIPPED="${SERVER_URL#https://}"
+ git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
+ echo "Git configured with standard GitHub Actions identity"
+ - name: Stop MCP Gateway
+ if: always()
+ continue-on-error: true
+ env:
+ MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
+ MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
+ GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }}
+ run: |
+ bash /opt/gh-aw/actions/stop_mcp_gateway.sh "$GATEWAY_PID"
+ - name: Redact secrets in logs
+ if: always()
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs');
+ await main();
+ env:
+ GH_AW_SECRET_NAMES: 'GEMINI_API_KEY,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
+ SECRET_GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+ SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
+ SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
+ SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Upload Safe Outputs
+ if: always()
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: safe-output
+ path: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ if-no-files-found: warn
+ - name: Ingest agent output
+ id: collect_output
+ if: always()
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.githubassets.com,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com"
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ GITHUB_API_URL: ${{ github.api_url }}
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/collect_ndjson_output.cjs');
+ await main();
+ - name: Upload sanitized agent output
+ if: always() && env.GH_AW_AGENT_OUTPUT
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: agent-output
+ path: ${{ env.GH_AW_AGENT_OUTPUT }}
+ if-no-files-found: warn
+ - name: Parse agent logs for step summary
+ if: always()
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: /tmp/gh-aw/agent-stdio.log
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/parse_gemini_log.cjs');
+ await main();
+ - name: Parse Safe Inputs logs for step summary
+ if: always()
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/parse_safe_inputs_logs.cjs');
+ await main();
+ - name: Parse MCP Gateway logs for step summary
+ if: always()
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/parse_mcp_gateway_log.cjs');
+ await main();
+ - name: Upload cache-memory data as artifact
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ if: always()
+ with:
+ name: cache-memory
+ path: /tmp/gh-aw/cache-memory
+ - name: Upload agent artifacts
+ if: always()
+ continue-on-error: true
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: agent-artifacts
+ path: |
+ /tmp/gh-aw/aw-prompts/prompt.txt
+ /tmp/gh-aw/aw_info.json
+ /tmp/gh-aw/mcp-logs/
+ /tmp/gh-aw/safe-inputs/logs/
+ /tmp/gh-aw/agent-stdio.log
+ /tmp/gh-aw/agent/
+ if-no-files-found: ignore
+
+ conclusion:
+ needs:
+ - activation
+ - agent
+ - detection
+ - safe_outputs
+ - update_cache_memory
+ if: (always()) && (needs.agent.result != 'skipped')
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ discussions: write
+ issues: write
+ pull-requests: write
+ outputs:
+ noop_message: ${{ steps.noop.outputs.noop_message }}
+ tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
+ total_count: ${{ steps.missing_tool.outputs.total_count }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Download agent output artifact
+ continue-on-error: true
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ with:
+ name: agent-output
+ path: /tmp/gh-aw/safeoutputs/
+ - name: Setup agent output environment variable
+ run: |
+ mkdir -p /tmp/gh-aw/safeoutputs/
+ find "/tmp/gh-aw/safeoutputs/" -type f -print
+ echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV"
+ - name: Process No-Op Messages
+ id: noop
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
+ GH_AW_NOOP_MAX: 1
+ GH_AW_WORKFLOW_NAME: "Smoke Gemini"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/noop.cjs');
+ await main();
+ - name: Record Missing Tool
+ id: missing_tool
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
+ GH_AW_WORKFLOW_NAME: "Smoke Gemini"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/missing_tool.cjs');
+ await main();
+ - name: Handle Agent Failure
+ id: handle_agent_failure
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
+ GH_AW_WORKFLOW_NAME: "Smoke Gemini"
+ GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
+ GH_AW_WORKFLOW_ID: "smoke-gemini"
+ GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.agent.outputs.secret_verification_result }}
+ GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
+ GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e ✨ *[{workflow_name}]({run_url}) — Powered by Gemini*\",\"runStarted\":\"✨ Gemini awakens... [{workflow_name}]({run_url}) begins its journey on this {event_type}...\",\"runSuccess\":\"🚀 [{workflow_name}]({run_url}) **MISSION COMPLETE!** Gemini has spoken. ✨\",\"runFailure\":\"⚠️ [{workflow_name}]({run_url}) {status}. Gemini encountered unexpected challenges...\"}"
+ GH_AW_GROUP_REPORTS: "false"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/handle_agent_failure.cjs');
+ await main();
+ - name: Handle No-Op Message
+ id: handle_noop_message
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
+ GH_AW_WORKFLOW_NAME: "Smoke Gemini"
+ GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
+ GH_AW_NOOP_MESSAGE: ${{ steps.noop.outputs.noop_message }}
+ GH_AW_NOOP_REPORT_AS_ISSUE: "true"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/handle_noop_message.cjs');
+ await main();
+
+ detection:
+ needs: agent
+ if: needs.agent.outputs.output_types != '' || needs.agent.outputs.has_patch == 'true'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ timeout-minutes: 10
+ outputs:
+ success: ${{ steps.parse_results.outputs.success }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Download agent artifacts
+ continue-on-error: true
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ with:
+ name: agent-artifacts
+ path: /tmp/gh-aw/threat-detection/
+ - name: Download agent output artifact
+ continue-on-error: true
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ with:
+ name: agent-output
+ path: /tmp/gh-aw/threat-detection/
+ - name: Print agent output types
+ env:
+ AGENT_OUTPUT_TYPES: ${{ needs.agent.outputs.output_types }}
+ run: |
+ echo "Agent output-types: $AGENT_OUTPUT_TYPES"
+ - name: Setup threat detection
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ WORKFLOW_NAME: "Smoke Gemini"
+ WORKFLOW_DESCRIPTION: "Smoke test workflow that validates Gemini engine functionality twice daily"
+ HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/setup_threat_detection.cjs');
+ await main();
+ - name: Ensure threat-detection directory and log
+ run: |
+ mkdir -p /tmp/gh-aw/threat-detection
+ touch /tmp/gh-aw/threat-detection/detection.log
+ - name: Validate GEMINI_API_KEY secret
+ id: validate-secret
+ run: /opt/gh-aw/actions/validate_multi_secret.sh GEMINI_API_KEY 'Gemini CLI' https://geminicli.com/docs/get-started/authentication/
+ env:
+ GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+ - name: Setup Node.js
+ uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
+ with:
+ node-version: '24'
+ package-manager-cache: false
+ - name: Install Gemini CLI
+ run: npm install -g --silent @google/gemini-cli@0.29.0
+ - name: Run Gemini
+ id: agentic_execution
+ run: |
+ set -o pipefail
+ gemini --output-format json --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)" 2>&1 | tee /tmp/gh-aw/threat-detection/detection.log
+ env:
+ GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+ GH_AW_MODEL_DETECTION_GEMINI: ${{ vars.GH_AW_MODEL_DETECTION_GEMINI || '' }}
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GITHUB_WORKSPACE: ${{ github.workspace }}
+ - name: Parse threat detection results
+ id: parse_results
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ with:
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/parse_threat_detection_results.cjs');
+ await main();
+ - name: Upload threat detection log
+ if: always()
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: threat-detection.log
+ path: /tmp/gh-aw/threat-detection/detection.log
+ if-no-files-found: ignore
+
+ pre_activation:
+ if: >
+ ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.id == github.repository_id)) &&
+ ((github.event_name != 'pull_request') || ((github.event.action != 'labeled') || (github.event.label.name == 'smoke')))
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ outputs:
+ activated: ${{ steps.check_membership.outputs.is_team_member == 'true' }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Check team membership for workflow
+ id: check_membership
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_REQUIRED_ROLES: admin,maintainer,write
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/check_membership.cjs');
+ await main();
+
+ safe_outputs:
+ needs:
+ - agent
+ - detection
+ if: ((!cancelled()) && (needs.agent.result != 'skipped')) && (needs.detection.outputs.success == 'true')
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ discussions: write
+ issues: write
+ pull-requests: write
+ timeout-minutes: 15
+ env:
+ GH_AW_ENGINE_ID: "gemini"
+ GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e ✨ *[{workflow_name}]({run_url}) — Powered by Gemini*\",\"runStarted\":\"✨ Gemini awakens... [{workflow_name}]({run_url}) begins its journey on this {event_type}...\",\"runSuccess\":\"🚀 [{workflow_name}]({run_url}) **MISSION COMPLETE!** Gemini has spoken. ✨\",\"runFailure\":\"⚠️ [{workflow_name}]({run_url}) {status}. Gemini encountered unexpected challenges...\"}"
+ GH_AW_WORKFLOW_ID: "smoke-gemini"
+ GH_AW_WORKFLOW_NAME: "Smoke Gemini"
+ outputs:
+ create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
+ create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
+ process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
+ process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Download agent output artifact
+ continue-on-error: true
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ with:
+ name: agent-output
+ path: /tmp/gh-aw/safeoutputs/
+ - name: Setup agent output environment variable
+ run: |
+ mkdir -p /tmp/gh-aw/safeoutputs/
+ find "/tmp/gh-aw/safeoutputs/" -type f -print
+ echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV"
+ - name: Process Safe Outputs
+ id: process_safe_outputs
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
+ GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"hide_older_comments\":true,\"max\":2},\"add_labels\":{\"allowed\":[\"smoke-gemini\"]},\"create_issue\":{\"close_older_issues\":true,\"expires\":2,\"max\":1},\"missing_data\":{},\"missing_tool\":{}}"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs');
+ await main();
+ - name: Upload safe output items manifest
+ if: always()
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
+ with:
+ name: safe-output-items
+ path: /tmp/safe-output-items.jsonl
+ if-no-files-found: warn
+
+ update_cache_memory:
+ needs:
+ - agent
+ - detection
+ if: always() && needs.detection.outputs.success == 'true'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: /opt/gh-aw/actions
+ - name: Download cache-memory artifact (default)
+ id: download_cache_default
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6
+ continue-on-error: true
+ with:
+ name: cache-memory
+ path: /tmp/gh-aw/cache-memory
+ - name: Check if cache-memory folder has content (default)
+ id: check_cache_default
+ shell: bash
+ run: |
+ if [ -d "/tmp/gh-aw/cache-memory" ] && [ "$(ls -A /tmp/gh-aw/cache-memory 2>/dev/null)" ]; then
+ echo "has_content=true" >> $GITHUB_OUTPUT
+ else
+ echo "has_content=false" >> $GITHUB_OUTPUT
+ fi
+ - name: Save cache-memory to cache (default)
+ if: steps.check_cache_default.outputs.has_content == 'true'
+ uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+ with:
+ key: memory-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}-${{ github.run_id }}
+ path: /tmp/gh-aw/cache-memory
+
diff --git a/.github/workflows/smoke-gemini.md b/.github/workflows/smoke-gemini.md
new file mode 100644
index 0000000000..0f03d4dd91
--- /dev/null
+++ b/.github/workflows/smoke-gemini.md
@@ -0,0 +1,67 @@
+---
+description: Smoke test workflow that validates Gemini engine functionality twice daily
+on:
+ schedule: every 12h
+ workflow_dispatch:
+ pull_request:
+ types: [labeled]
+ names: ["smoke"]
+permissions:
+ contents: read
+ issues: read
+ pull-requests: read
+name: Smoke Gemini
+engine: gemini
+strict: true
+imports:
+ - shared/gh.md
+ - shared/reporting.md
+network:
+ allowed:
+ - defaults
+ - github
+tools:
+ cache-memory: true
+ github:
+ toolsets: [repos, pull_requests]
+ edit:
+ bash:
+ - "*"
+safe-outputs:
+ add-comment:
+ hide-older-comments: true
+ max: 2
+ create-issue:
+ expires: 2h
+ close-older-issues: true
+ add-labels:
+ allowed: [smoke-gemini]
+ messages:
+ footer: "> ✨ *[{workflow_name}]({run_url}) — Powered by Gemini*"
+ run-started: "✨ Gemini awakens... [{workflow_name}]({run_url}) begins its journey on this {event_type}..."
+ run-success: "🚀 [{workflow_name}]({run_url}) **MISSION COMPLETE!** Gemini has spoken. ✨"
+ run-failure: "⚠️ [{workflow_name}]({run_url}) {status}. Gemini encountered unexpected challenges..."
+timeout-minutes: 10
+---
+
+# Smoke Test: Gemini Engine Validation
+
+**CRITICAL EFFICIENCY REQUIREMENTS:**
+- Keep ALL outputs extremely short and concise. Use single-line responses.
+- NO verbose explanations or unnecessary context.
+- Minimize file reading - only read what is absolutely necessary for the task.
+
+## Test Requirements
+
+1. **GitHub MCP Testing**: Use GitHub MCP tools to fetch details of exactly 2 merged pull requests from ${{ github.repository }} (title and number only)
+2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-gemini-${{ github.run_id }}.txt` with content "Smoke test passed for Gemini at $(date)" (create the directory if it doesn't exist)
+3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back)
+4. **Build gh-aw**: Run `GOCACHE=/tmp/go-cache GOMODCACHE=/tmp/go-mod make build` to verify the agent can successfully build the gh-aw project. If the command fails, mark this test as ❌ and report the failure.
+
+## Output
+
+Add a **very brief** comment (max 5-10 lines) to the current pull request with:
+- ✅ or ❌ for each test result
+- Overall status: PASS or FAIL
+
+If all tests pass, use the `add_labels` safe-output tool to add the label `smoke-gemini` to the pull request.
diff --git a/cmd/gh-aw/main_entry_test.go b/cmd/gh-aw/main_entry_test.go
index 7c535973b9..dba6022dc5 100644
--- a/cmd/gh-aw/main_entry_test.go
+++ b/cmd/gh-aw/main_entry_test.go
@@ -40,6 +40,11 @@ func TestValidateEngine(t *testing.T) {
engine: "copilot",
expectErr: false,
},
+ {
+ name: "valid gemini engine",
+ engine: "gemini",
+ expectErr: false,
+ },
{
name: "invalid engine",
engine: "gpt4",
@@ -82,11 +87,11 @@ func TestValidateEngine(t *testing.T) {
return
}
- // Check that error message contains the expected format
- // Error may include "Did you mean" suggestions, so we check if it starts with the base message
- expectedMsg := fmt.Sprintf("invalid engine value '%s'. Must be 'claude', 'codex', or 'copilot'", tt.engine)
- if tt.errMessage != "" && !strings.HasPrefix(err.Error(), expectedMsg) {
- t.Errorf("validateEngine(%q) error message = %v, want to start with %v", tt.engine, err.Error(), expectedMsg)
+ // Check that error message contains the expected format.
+ // The engine list is dynamic, so only check the prefix.
+ expectedPrefix := fmt.Sprintf("invalid engine value '%s'. Must be", tt.engine)
+ if tt.errMessage != "" && !strings.HasPrefix(err.Error(), expectedPrefix) {
+ t.Errorf("validateEngine(%q) error message = %v, want to start with %v", tt.engine, err.Error(), expectedPrefix)
}
} else {
if err != nil {
diff --git a/pkg/cli/completions_test.go b/pkg/cli/completions_test.go
index e51260d5d1..e51c2a8407 100644
--- a/pkg/cli/completions_test.go
+++ b/pkg/cli/completions_test.go
@@ -228,7 +228,7 @@ func TestValidEngineNames(t *testing.T) {
assert.NotEmpty(t, engines, "Engine names list should not be empty")
// Verify expected engines are present
- expectedEngines := []string{"copilot", "claude", "codex"}
+ expectedEngines := []string{"copilot", "claude", "codex", "gemini"}
for _, expected := range expectedEngines {
assert.Contains(t, engines, expected, "Expected engine '%s' to be in the list", expected)
}
@@ -245,7 +245,7 @@ func TestCompleteEngineNames(t *testing.T) {
{
name: "empty prefix returns all engines",
toComplete: "",
- wantLen: 3, // copilot, claude, codex
+ wantLen: 4, // copilot, claude, codex, gemini
},
{
name: "c prefix returns claude, codex, copilot",
@@ -753,7 +753,7 @@ func TestValidEngineNamesConsistency(t *testing.T) {
assert.Len(t, thirdCall, len(secondCall), "Engine names list length should be consistent")
// Verify all expected engines are present in all calls
- expectedEngines := []string{"copilot", "claude", "codex"}
+ expectedEngines := []string{"copilot", "claude", "codex", "gemini"}
for _, engine := range expectedEngines {
assert.Contains(t, firstCall, engine, "Expected engine '%s' in first call", engine)
assert.Contains(t, secondCall, engine, "Expected engine '%s' in second call", engine)
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index 8aaf9ca67b..a6db50420a 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -303,6 +303,9 @@ const (
// CopilotLLMGatewayPort is the port for the Copilot LLM gateway
CopilotLLMGatewayPort = 10002
+
+ // GeminiLLMGatewayPort is the port for the Gemini LLM gateway
+ GeminiLLMGatewayPort = 10003
)
// DefaultMCPRegistryURL is the default MCP registry URL.
@@ -339,12 +342,16 @@ const (
EnvVarModelAgentCodex = "GH_AW_MODEL_AGENT_CODEX"
// EnvVarModelAgentCustom configures the default Custom model for agent execution
EnvVarModelAgentCustom = "GH_AW_MODEL_AGENT_CUSTOM"
+ // EnvVarModelAgentGemini configures the default Gemini model for agent execution
+ EnvVarModelAgentGemini = "GH_AW_MODEL_AGENT_GEMINI"
// EnvVarModelDetectionCopilot configures the default Copilot model for detection
EnvVarModelDetectionCopilot = "GH_AW_MODEL_DETECTION_COPILOT"
// EnvVarModelDetectionClaude configures the default Claude model for detection
EnvVarModelDetectionClaude = "GH_AW_MODEL_DETECTION_CLAUDE"
// EnvVarModelDetectionCodex configures the default Codex model for detection
EnvVarModelDetectionCodex = "GH_AW_MODEL_DETECTION_CODEX"
+ // EnvVarModelDetectionGemini configures the default Gemini model for detection
+ EnvVarModelDetectionGemini = "GH_AW_MODEL_DETECTION_GEMINI"
// Common environment variable names used across all engines
@@ -373,6 +380,9 @@ const (
// DefaultCodexVersion is the default version of the OpenAI Codex CLI
const DefaultCodexVersion Version = "0.104.0"
+// DefaultGeminiVersion is the default version of the Google Gemini CLI
+const DefaultGeminiVersion Version = "0.29.0"
+
// DefaultGitHubMCPServerVersion is the default version of the GitHub MCP server Docker image
const DefaultGitHubMCPServerVersion Version = "v0.30.3"
diff --git a/pkg/parser/schemas/main_workflow_schema.json b/pkg/parser/schemas/main_workflow_schema.json
index 006675d0e3..86ead0ca20 100644
--- a/pkg/parser/schemas/main_workflow_schema.json
+++ b/pkg/parser/schemas/main_workflow_schema.json
@@ -6848,8 +6848,8 @@
"oneOf": [
{
"type": "string",
- "enum": ["claude", "codex", "copilot"],
- "description": "Simple engine name: 'claude' (default, Claude Code), 'copilot' (GitHub Copilot CLI), or 'codex' (OpenAI Codex CLI)"
+ "enum": ["claude", "codex", "copilot", "gemini"],
+ "description": "Simple engine name: 'claude' (default, Claude Code), 'copilot' (GitHub Copilot CLI), 'codex' (OpenAI Codex CLI), or 'gemini' (Google Gemini CLI - experimental)"
},
{
"type": "object",
@@ -6857,8 +6857,8 @@
"properties": {
"id": {
"type": "string",
- "enum": ["claude", "codex", "copilot"],
- "description": "AI engine identifier: 'claude' (Claude Code), 'codex' (OpenAI Codex CLI), or 'copilot' (GitHub Copilot CLI)"
+ "enum": ["claude", "codex", "copilot", "gemini"],
+ "description": "AI engine identifier: 'claude' (Claude Code), 'codex' (OpenAI Codex CLI), 'copilot' (GitHub Copilot CLI), or 'gemini' (Google Gemini CLI - experimental)"
},
"version": {
"type": ["string", "number"],
diff --git a/pkg/workflow/agentic_engine.go b/pkg/workflow/agentic_engine.go
index c743bfc4ff..e9d6061970 100644
--- a/pkg/workflow/agentic_engine.go
+++ b/pkg/workflow/agentic_engine.go
@@ -325,6 +325,7 @@ func NewEngineRegistry() *EngineRegistry {
registry.Register(NewClaudeEngine())
registry.Register(NewCodexEngine())
registry.Register(NewCopilotEngine())
+ registry.Register(NewGeminiEngine())
agenticEngineLog.Printf("Registered %d engines", len(registry.engines))
return registry
diff --git a/pkg/workflow/agentic_engine_test.go b/pkg/workflow/agentic_engine_test.go
index b48e642bb7..b6db01e023 100644
--- a/pkg/workflow/agentic_engine_test.go
+++ b/pkg/workflow/agentic_engine_test.go
@@ -9,10 +9,21 @@ import (
func TestEngineRegistry(t *testing.T) {
registry := NewEngineRegistry()
- // Test that built-in engines are registered
+ // Test that built-in engines are registered - check for specific IDs rather than exact count
+ // to avoid brittleness when new engines are added
supportedEngines := registry.GetSupportedEngines()
- if len(supportedEngines) != 3 {
- t.Errorf("Expected 3 supported engines, got %d", len(supportedEngines))
+ expectedEngineIDs := []string{"claude", "codex", "copilot", "gemini"}
+ for _, engineID := range expectedEngineIDs {
+ found := false
+ for _, id := range supportedEngines {
+ if id == engineID {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected engine '%s' to be registered", engineID)
+ }
}
// Test getting engines by ID
diff --git a/pkg/workflow/domains.go b/pkg/workflow/domains.go
index 389571ff50..c95ed2a971 100644
--- a/pkg/workflow/domains.go
+++ b/pkg/workflow/domains.go
@@ -99,6 +99,16 @@ var ClaudeDefaultDomains = []string{
"ts-ocsp.ws.symantec.com",
}
+// GeminiDefaultDomains are the default egress domains required for Google Gemini CLI authentication and operation; they are merged into the AWF firewall allow-list (see GetGeminiAllowedDomainsWithToolsAndRuntimes)
+var GeminiDefaultDomains = []string{
+	"*.googleapis.com", // Google API endpoints -- NOTE(review): presumably covers Gemini auth/API; confirm wildcard semantics in AWF
+	"generativelanguage.googleapis.com", // Gemini generative language API -- kept explicitly even though the wildcard above may already match it
+	"github.com",
+	"host.docker.internal", // reach host-side services from Docker containers
+	"raw.githubusercontent.com",
+	"registry.npmjs.org", // npm registry, needed for CLI installation
+}
+
// PlaywrightDomains are the domains required for Playwright browser downloads
// These domains are needed when Playwright MCP server initializes in the Docker container
var PlaywrightDomains = []string{
@@ -510,6 +520,12 @@ func GetClaudeAllowedDomainsWithToolsAndRuntimes(network *NetworkPermissions, to
return mergeDomainsWithNetworkToolsAndRuntimes(ClaudeDefaultDomains, network, tools, runtimes)
}
+// GetGeminiAllowedDomainsWithToolsAndRuntimes merges the Gemini default domains with NetworkPermissions, HTTP MCP server domains, and runtime ecosystem domains.
+// It returns a deduplicated, sorted, comma-separated string suitable for AWF's --allow-domains flag.
+func GetGeminiAllowedDomainsWithToolsAndRuntimes(network *NetworkPermissions, tools map[string]any, runtimes map[string]any) string {
+	return mergeDomainsWithNetworkToolsAndRuntimes(GeminiDefaultDomains, network, tools, runtimes)
+}
+
// GetBlockedDomains returns the blocked domains from network permissions
// Returns empty slice if no network permissions configured or no domains blocked
// The returned list is sorted and deduplicated
diff --git a/pkg/workflow/gemini_engine.go b/pkg/workflow/gemini_engine.go
new file mode 100644
index 0000000000..7b05e9a651
--- /dev/null
+++ b/pkg/workflow/gemini_engine.go
@@ -0,0 +1,267 @@
+package workflow
+
+import (
+ "fmt"
+
+ "github.com/github/gh-aw/pkg/constants"
+ "github.com/github/gh-aw/pkg/logger"
+)
+
+var geminiLog = logger.New("workflow:gemini_engine")
+
+// GeminiEngine represents the Google Gemini CLI agentic engine
+type GeminiEngine struct {
+	BaseEngine
+}
+
+// NewGeminiEngine constructs the Gemini engine descriptor with its capability flags.
+func NewGeminiEngine() *GeminiEngine {
+	return &GeminiEngine{
+		BaseEngine: BaseEngine{
+			id: "gemini",
+			displayName: "Google Gemini CLI",
+			description: "Google Gemini CLI with headless mode and LLM gateway support",
+			experimental: true, // marked experimental while Gemini support matures
+			supportsToolsAllowlist: true,
+			supportsMaxTurns: false, // no max-turns flag is wired up for Gemini
+			supportsWebFetch: false,
+			supportsWebSearch: false,
+			supportsFirewall: true, // network egress can be restricted via AWF
+			supportsPlugins: false,
+			supportsLLMGateway: true, // Gemini supports LLM gateway on port 10003
+		},
+	}
+}
+
+// SupportsLLMGateway returns the LLM gateway port used by the Gemini engine (constants.GeminiLLMGatewayPort).
+func (e *GeminiEngine) SupportsLLMGateway() int {
+	return constants.GeminiLLMGatewayPort
+}
+
+// GetRequiredSecretNames returns the secret names the Gemini engine reads at run time.
+// GEMINI_API_KEY is always required; MCP gateway, GitHub MCP, HTTP MCP header, and safe-inputs secrets are added conditionally.
+func (e *GeminiEngine) GetRequiredSecretNames(workflowData *WorkflowData) []string {
+	geminiLog.Print("Collecting required secrets for Gemini engine")
+	secrets := []string{"GEMINI_API_KEY"}
+
+	// Add MCP gateway API key if MCP servers are present (gateway is always started with MCP servers)
+	if HasMCPServers(workflowData) {
+		geminiLog.Print("Adding MCP_GATEWAY_API_KEY secret")
+		secrets = append(secrets, "MCP_GATEWAY_API_KEY")
+	}
+
+	// Add GitHub token for GitHub MCP server if present
+	if hasGitHubTool(workflowData.ParsedTools) {
+		geminiLog.Print("Adding GITHUB_MCP_SERVER_TOKEN secret")
+		secrets = append(secrets, "GITHUB_MCP_SERVER_TOKEN")
+	}
+
+	// Add HTTP MCP header secret names
+	headerSecrets := collectHTTPMCPHeaderSecrets(workflowData.Tools)
+	for varName := range headerSecrets {
+		secrets = append(secrets, varName)
+	}
+	if len(headerSecrets) > 0 {
+		geminiLog.Printf("Added %d HTTP MCP header secrets", len(headerSecrets))
+	}
+
+	// Add safe-inputs secret names
+	if IsSafeInputsEnabled(workflowData.SafeInputs, workflowData) {
+		safeInputsSecrets := collectSafeInputsSecrets(workflowData.SafeInputs)
+		for varName := range safeInputsSecrets {
+			secrets = append(secrets, varName)
+		}
+		if len(safeInputsSecrets) > 0 {
+			geminiLog.Printf("Added %d safe-inputs secrets", len(safeInputsSecrets))
+		}
+	}
+
+	return secrets
+}
+
+// GetInstallationSteps returns the GitHub Actions steps that validate secrets and install the Gemini CLI (plus AWF when the firewall is enabled).
+func (e *GeminiEngine) GetInstallationSteps(workflowData *WorkflowData) []GitHubActionStep {
+	geminiLog.Printf("Generating installation steps for Gemini engine: workflow=%s", workflowData.Name)
+
+	// Skip installation if custom command is specified
+	if workflowData.EngineConfig != nil && workflowData.EngineConfig.Command != "" {
+		geminiLog.Printf("Skipping installation steps: custom command specified (%s)", workflowData.EngineConfig.Command)
+		return []GitHubActionStep{}
+	}
+
+	var steps []GitHubActionStep
+
+	// Define engine configuration for shared validation
+	config := EngineInstallConfig{
+		Secrets: []string{"GEMINI_API_KEY"},
+		DocsURL: "https://geminicli.com/docs/get-started/authentication/",
+		NpmPackage: "@google/gemini-cli",
+		Version: string(constants.DefaultGeminiVersion),
+		Name: "Gemini CLI",
+		CliName: "gemini",
+		InstallStepName: "Install Gemini CLI",
+	}
+
+	// Add secret validation step
+	secretValidation := GenerateMultiSecretValidationStep(
+		config.Secrets,
+		config.Name,
+		config.DocsURL,
+	)
+	steps = append(steps, secretValidation)
+
+	// Determine Gemini version (workflow engine config overrides the default)
+	geminiVersion := config.Version
+	if workflowData.EngineConfig != nil && workflowData.EngineConfig.Version != "" {
+		geminiVersion = workflowData.EngineConfig.Version
+	}
+
+	// Add Node.js setup step first (before sandbox installation)
+	npmSteps := GenerateNpmInstallSteps(
+		config.NpmPackage,
+		geminiVersion,
+		config.InstallStepName,
+		config.CliName,
+		true, // Include Node.js setup
+	)
+
+	if len(npmSteps) > 0 {
+		steps = append(steps, npmSteps[0]) // Setup Node.js step
+	}
+
+	// Add AWF installation if firewall is enabled
+	if isFirewallEnabled(workflowData) {
+		// Install AWF after Node.js setup but before Gemini CLI installation
+		firewallConfig := getFirewallConfig(workflowData)
+		agentConfig := getAgentConfig(workflowData)
+		var awfVersion string
+		if firewallConfig != nil {
+			awfVersion = firewallConfig.Version
+		}
+
+		// Install AWF binary (generateAWFInstallationStep may return an empty step, which is skipped)
+		awfInstall := generateAWFInstallationStep(awfVersion, agentConfig)
+		if len(awfInstall) > 0 {
+			steps = append(steps, awfInstall)
+		}
+	}
+
+	// Add Gemini CLI installation step after sandbox installation
+	if len(npmSteps) > 1 {
+		steps = append(steps, npmSteps[1:]...) // Install Gemini CLI and subsequent steps
+	}
+
+	return steps
+}
+
+// GetDeclaredOutputFiles returns the output files that Gemini may produce (currently none)
+func (e *GeminiEngine) GetDeclaredOutputFiles() []string {
+	return []string{}
+}
+
+// GetExecutionSteps returns the GitHub Actions steps for executing Gemini, wrapping the command in AWF when the firewall is enabled.
+func (e *GeminiEngine) GetExecutionSteps(workflowData *WorkflowData, logFile string) []GitHubActionStep {
+	geminiLog.Printf("Generating execution steps for Gemini engine: workflow=%s, firewall=%v", workflowData.Name, isFirewallEnabled(workflowData))
+
+	// Handle custom steps if they exist in engine config
+	steps := InjectCustomEngineSteps(workflowData, e.convertStepToYAML)
+
+	// Build gemini CLI arguments based on configuration
+	var geminiArgs []string
+
+	// Add model if specified
+	if workflowData.EngineConfig != nil && workflowData.EngineConfig.Model != "" {
+		geminiArgs = append(geminiArgs, "--model", workflowData.EngineConfig.Model)
+	}
+
+	// Add MCP config if servers are present
+	if HasMCPServers(workflowData) {
+		geminiArgs = append(geminiArgs, "--mcp-config", "/tmp/gh-aw/mcp-config/mcp-servers.json")
+	}
+
+	// Add headless mode with JSON output
+	geminiArgs = append(geminiArgs, "--output-format", "json")
+
+	// Add prompt argument; the value is a pre-quoted command substitution -- NOTE(review): confirm shellJoinArgs does not re-escape the embedded quotes
+	geminiArgs = append(geminiArgs, "--prompt", "\"$(cat /tmp/gh-aw/aw-prompts/prompt.txt)\"")
+
+	// Build the command
+	commandName := "gemini"
+	if workflowData.EngineConfig != nil && workflowData.EngineConfig.Command != "" {
+		commandName = workflowData.EngineConfig.Command
+	}
+
+	geminiCommand := fmt.Sprintf("%s %s", commandName, shellJoinArgs(geminiArgs))
+
+	// Build the full command with AWF wrapping if enabled
+	var command string
+	firewallEnabled := isFirewallEnabled(workflowData)
+	if firewallEnabled {
+		allowedDomains := GetGeminiAllowedDomainsWithToolsAndRuntimes(
+			workflowData.NetworkPermissions,
+			workflowData.Tools,
+			workflowData.Runtimes,
+		)
+
+		npmPathSetup := GetNpmBinPathSetup()
+		geminiCommandWithPath := fmt.Sprintf("%s && %s", npmPathSetup, geminiCommand)
+
+		command = BuildAWFCommand(AWFCommandConfig{
+			EngineName: "gemini",
+			EngineCommand: geminiCommandWithPath,
+			LogFile: logFile,
+			WorkflowData: workflowData,
+			UsesTTY: false,
+			UsesAPIProxy: false,
+			AllowedDomains: allowedDomains,
+		})
+	} else {
+		command = fmt.Sprintf(`set -o pipefail
+%s 2>&1 | tee %s`, geminiCommand, logFile)
+	}
+
+	// Build environment variables
+	env := map[string]string{
+		"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
+		"GH_AW_PROMPT": "/tmp/gh-aw/aw-prompts/prompt.txt",
+		"GITHUB_WORKSPACE": "${{ github.workspace }}",
+	}
+
+	// Add MCP config env var if needed
+	if HasMCPServers(workflowData) {
+		env["GH_AW_MCP_CONFIG"] = "/tmp/gh-aw/mcp-config/mcp-servers.json"
+	}
+
+	// Add safe outputs env
+	applySafeOutputEnvToMap(env, workflowData)
+
+	// Add model env var if not explicitly configured
+	modelConfigured := workflowData.EngineConfig != nil && workflowData.EngineConfig.Model != ""
+	if !modelConfigured {
+		isDetectionJob := workflowData.SafeOutputs == nil // NOTE(review): detection jobs are inferred from absent SafeOutputs -- confirm this invariant
+		if isDetectionJob {
+			env[constants.EnvVarModelDetectionGemini] = fmt.Sprintf("${{ vars.%s || '' }}", constants.EnvVarModelDetectionGemini)
+		} else {
+			env[constants.EnvVarModelAgentGemini] = fmt.Sprintf("${{ vars.%s || '' }}", constants.EnvVarModelAgentGemini)
+		}
+	}
+
+	// Generate the execution step
+	stepLines := []string{
+		" - name: Run Gemini",
+		" id: agentic_execution",
+	}
+
+	// Filter environment variables for security
+	allowedSecrets := e.GetRequiredSecretNames(workflowData)
+	filteredEnv := FilterEnvForSecrets(env, allowedSecrets)
+
+	// Format step with command and env
+	stepLines = FormatStepWithCommandAndEnv(stepLines, command, filteredEnv)
+
+	steps = append(steps, GitHubActionStep(stepLines))
+	return steps
+}
+
+// convertStepToYAML converts a single custom engine step map to its YAML representation via the shared helper
+func (e *GeminiEngine) convertStepToYAML(stepMap map[string]any) (string, error) {
+	return ConvertStepToYAML(stepMap)
+}
diff --git a/pkg/workflow/gemini_engine_test.go b/pkg/workflow/gemini_engine_test.go
new file mode 100644
index 0000000000..2eb271a01e
--- /dev/null
+++ b/pkg/workflow/gemini_engine_test.go
@@ -0,0 +1,295 @@
+//go:build !integration
+
+package workflow
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestGeminiEngine covers engine identity, capability flags, secret requirements, and declared outputs.
+func TestGeminiEngine(t *testing.T) {
+	engine := NewGeminiEngine() // shared, read-only across subtests
+
+	t.Run("engine identity", func(t *testing.T) {
+		assert.Equal(t, "gemini", engine.GetID(), "Engine ID should be 'gemini'")
+		assert.Equal(t, "Google Gemini CLI", engine.GetDisplayName(), "Display name should be 'Google Gemini CLI'")
+		assert.NotEmpty(t, engine.GetDescription(), "Description should not be empty")
+		assert.True(t, engine.IsExperimental(), "Gemini engine should be experimental")
+	})
+
+	t.Run("capabilities", func(t *testing.T) {
+		assert.True(t, engine.SupportsToolsAllowlist(), "Should support tools allowlist")
+		assert.False(t, engine.SupportsMaxTurns(), "Should not support max turns")
+		assert.False(t, engine.SupportsWebFetch(), "Should not support built-in web fetch")
+		assert.False(t, engine.SupportsWebSearch(), "Should not support built-in web search")
+		assert.True(t, engine.SupportsFirewall(), "Should support firewall/AWF")
+		assert.False(t, engine.SupportsPlugins(), "Should not support plugins")
+		assert.Equal(t, 10003, engine.SupportsLLMGateway(), "Should support LLM gateway on port 10003") // pins constants.GeminiLLMGatewayPort
+	})
+
+	t.Run("required secrets", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test",
+			ParsedTools: &ToolsConfig{},
+			Tools: map[string]any{},
+		}
+		secrets := engine.GetRequiredSecretNames(workflowData)
+		assert.Contains(t, secrets, "GEMINI_API_KEY", "Should require GEMINI_API_KEY")
+	})
+
+	t.Run("required secrets with MCP servers", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test",
+			ParsedTools: &ToolsConfig{
+				GitHub: &GitHubToolConfig{},
+			},
+			Tools: map[string]any{
+				"github": map[string]any{},
+			},
+		}
+		secrets := engine.GetRequiredSecretNames(workflowData)
+		assert.Contains(t, secrets, "GEMINI_API_KEY", "Should require GEMINI_API_KEY")
+		assert.Contains(t, secrets, "MCP_GATEWAY_API_KEY", "Should require MCP_GATEWAY_API_KEY when MCP servers present")
+		assert.Contains(t, secrets, "GITHUB_MCP_SERVER_TOKEN", "Should require GITHUB_MCP_SERVER_TOKEN for GitHub tool")
+	})
+
+	t.Run("declared output files", func(t *testing.T) {
+		outputFiles := engine.GetDeclaredOutputFiles()
+		assert.Empty(t, outputFiles, "Should not declare any output files")
+	})
+}
+
+// TestGeminiEngineInstallation verifies the generated installation steps in the default, custom-command, and firewall cases.
+func TestGeminiEngineInstallation(t *testing.T) {
+	engine := NewGeminiEngine()
+
+	t.Run("standard installation", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+		}
+
+		steps := engine.GetInstallationSteps(workflowData)
+		require.NotEmpty(t, steps, "Should generate installation steps")
+
+		// Should have at least: Secret validation + Node.js setup + Install Gemini
+		assert.GreaterOrEqual(t, len(steps), 3, "Should have at least 3 installation steps")
+
+		// Verify first step is secret validation
+		if len(steps) > 0 && len(steps[0]) > 0 {
+			stepContent := strings.Join(steps[0], "\n")
+			assert.Contains(t, stepContent, "Validate GEMINI_API_KEY secret", "First step should validate GEMINI_API_KEY")
+		}
+
+		// Verify second step is Node.js setup
+		if len(steps) > 1 && len(steps[1]) > 0 {
+			stepContent := strings.Join(steps[1], "\n")
+			assert.Contains(t, stepContent, "Setup Node.js", "Second step should setup Node.js")
+		}
+
+		// Verify third step is Install Gemini CLI
+		if len(steps) > 2 && len(steps[2]) > 0 {
+			stepContent := strings.Join(steps[2], "\n")
+			assert.Contains(t, stepContent, "Install Gemini CLI", "Third step should install Gemini CLI")
+			assert.Contains(t, stepContent, "@google/gemini-cli", "Should install @google/gemini-cli package")
+		}
+	})
+
+	t.Run("custom command skips installation", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			EngineConfig: &EngineConfig{
+				Command: "/custom/gemini",
+			},
+		}
+
+		steps := engine.GetInstallationSteps(workflowData)
+		assert.Empty(t, steps, "Should skip installation when custom command is specified")
+	})
+
+	t.Run("with firewall", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			NetworkPermissions: &NetworkPermissions{
+				Allowed: []string{"defaults"},
+				Firewall: &FirewallConfig{
+					Enabled: true,
+				},
+			},
+		}
+
+		steps := engine.GetInstallationSteps(workflowData)
+		require.NotEmpty(t, steps, "Should generate installation steps")
+
+		// Should include AWF installation step (loose substring match on "awf"/"firewall")
+		hasAWFInstall := false
+		for _, step := range steps {
+			stepContent := strings.Join(step, "\n")
+			if strings.Contains(stepContent, "awf") || strings.Contains(stepContent, "firewall") {
+				hasAWFInstall = true
+				break
+			}
+		}
+		assert.True(t, hasAWFInstall, "Should include AWF installation step when firewall is enabled")
+	})
+}
+
+// TestGeminiEngineExecution verifies the generated execution step: CLI flags, env vars, custom command, and model env var selection.
+func TestGeminiEngineExecution(t *testing.T) {
+	engine := NewGeminiEngine()
+
+	t.Run("basic execution", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		assert.Contains(t, stepContent, "name: Run Gemini", "Should have correct step name")
+		assert.Contains(t, stepContent, "id: agentic_execution", "Should have agentic_execution ID")
+		assert.Contains(t, stepContent, "gemini", "Should invoke gemini command")
+		assert.Contains(t, stepContent, "--output-format json", "Should use JSON output format")
+		assert.Contains(t, stepContent, `--prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"`, "Should include prompt argument with correct shell quoting")
+		assert.Contains(t, stepContent, "/tmp/test.log", "Should include log file")
+		assert.Contains(t, stepContent, "GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}", "Should set GEMINI_API_KEY env var")
+	})
+
+	t.Run("with model", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			EngineConfig: &EngineConfig{
+				Model: "gemini-1.5-pro",
+			},
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		assert.Contains(t, stepContent, "--model gemini-1.5-pro", "Should include model flag")
+	})
+
+	t.Run("with MCP servers", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			ParsedTools: &ToolsConfig{
+				GitHub: &GitHubToolConfig{},
+			},
+			Tools: map[string]any{
+				"github": map[string]any{},
+			},
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		assert.Contains(t, stepContent, "--mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json", "Should include MCP config")
+		assert.Contains(t, stepContent, "GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json", "Should set MCP config env var")
+	})
+
+	t.Run("with custom command", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			EngineConfig: &EngineConfig{
+				Command: "/custom/gemini",
+			},
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		assert.Contains(t, stepContent, "/custom/gemini", "Should use custom command")
+	})
+
+	t.Run("environment variables", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		assert.Contains(t, stepContent, "GEMINI_API_KEY:", "Should include GEMINI_API_KEY")
+		assert.Contains(t, stepContent, "GH_AW_PROMPT:", "Should include GH_AW_PROMPT")
+		assert.Contains(t, stepContent, "GITHUB_WORKSPACE:", "Should include GITHUB_WORKSPACE")
+	})
+
+	t.Run("model environment variables", func(t *testing.T) {
+		// Detection job (SafeOutputs == nil selects the detection-model env var)
+		detectionWorkflow := &WorkflowData{
+			Name: "detection",
+			SafeOutputs: nil,
+		}
+
+		steps := engine.GetExecutionSteps(detectionWorkflow, "/tmp/test.log")
+		require.Len(t, steps, 1)
+		stepContent := strings.Join(steps[0], "\n")
+		assert.Contains(t, stepContent, "GH_AW_MODEL_DETECTION_GEMINI", "Should include detection model env var")
+
+		// Agent job (non-nil SafeOutputs selects the agent-model env var)
+		agentWorkflow := &WorkflowData{
+			Name: "agent",
+			SafeOutputs: &SafeOutputsConfig{},
+		}
+
+		steps = engine.GetExecutionSteps(agentWorkflow, "/tmp/test.log")
+		require.Len(t, steps, 1)
+		stepContent = strings.Join(steps[0], "\n")
+		assert.Contains(t, stepContent, "GH_AW_MODEL_AGENT_GEMINI", "Should include agent model env var")
+	})
+}
+
+// TestGeminiEngineFirewallIntegration checks AWF wrapping of the execution command for both firewall states.
+func TestGeminiEngineFirewallIntegration(t *testing.T) {
+	engine := NewGeminiEngine()
+
+	t.Run("firewall enabled", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			NetworkPermissions: &NetworkPermissions{
+				Allowed: []string{"defaults"},
+				Firewall: &FirewallConfig{
+					Enabled: true,
+				},
+			},
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		// Should use AWF command
+		assert.Contains(t, stepContent, "awf", "Should use AWF when firewall is enabled")
+		assert.Contains(t, stepContent, "--allow-domains", "Should include allow-domains flag")
+	})
+
+	t.Run("firewall disabled", func(t *testing.T) {
+		workflowData := &WorkflowData{
+			Name: "test-workflow",
+			NetworkPermissions: &NetworkPermissions{
+				Firewall: &FirewallConfig{
+					Enabled: false,
+				},
+			},
+		}
+
+		steps := engine.GetExecutionSteps(workflowData, "/tmp/test.log")
+		require.Len(t, steps, 1, "Should generate one execution step")
+
+		stepContent := strings.Join(steps[0], "\n")
+
+		// Should use simple command without AWF
+		assert.Contains(t, stepContent, "set -o pipefail", "Should use simple command with pipefail")
+		assert.NotContains(t, stepContent, "awf", "Should not use AWF when firewall is disabled") // NOTE(review): substring check; any incidental "awf" text would trip this
+	})
+}
diff --git a/pkg/workflow/gemini_logs.go b/pkg/workflow/gemini_logs.go
new file mode 100644
index 0000000000..5a073fda53
--- /dev/null
+++ b/pkg/workflow/gemini_logs.go
@@ -0,0 +1,106 @@
+package workflow
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/github/gh-aw/pkg/logger"
+)
+
+var geminiLogsLog = logger.New("workflow:gemini_logs")
+
+// GeminiResponse represents the JSON structure returned by Gemini CLI
+type GeminiResponse struct {
+	Response string                 `json:"response"` // final response text from the model
+	Stats    map[string]interface{} `json:"stats"`    // usage stats; ParseLogMetrics reads "models" (token counts) and "tools" keys
+}
+
+// ParseLogMetrics parses Gemini CLI log output and extracts metrics.
+// Gemini CLI outputs a single JSON response when using --output-format json.
+// Every valid JSON line is parsed; token usage and tool calls are aggregated across all of them.
+func (e *GeminiEngine) ParseLogMetrics(logContent string, verbose bool) LogMetrics {
+	geminiLogsLog.Printf("Parsing Gemini log metrics: log_size=%d bytes, verbose=%v", len(logContent), verbose)
+
+	metrics := LogMetrics{
+		Turns: 0,
+		TokenUsage: 0,
+		ToolCalls: []ToolCallInfo{},
+	}
+
+	// Aggregate tool calls in a map to deduplicate across multiple JSON lines
+	toolCallCounts := make(map[string]int)
+
+	// Try to parse the JSON response from Gemini
+	lines := strings.Split(logContent, "\n")
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+
+		// Try to parse as JSON; non-JSON lines (plain log output) are skipped
+		var response GeminiResponse
+		if err := json.Unmarshal([]byte(line), &response); err != nil {
+			continue
+		}
+
+		// Any non-empty parsed response counts as (at least) one turn
+		if response.Response != "" {
+			metrics.Turns = 1 // At least one turn if we got a response
+		}
+
+		// Extract token usage from stats if available
+		if response.Stats != nil {
+			if models, ok := response.Stats["models"].(map[string]interface{}); ok {
+				for _, modelStats := range models {
+					if stats, ok := modelStats.(map[string]interface{}); ok {
+						if inputTokens, ok := stats["input_tokens"].(float64); ok {
+							metrics.TokenUsage += int(inputTokens)
+						}
+						if outputTokens, ok := stats["output_tokens"].(float64); ok {
+							metrics.TokenUsage += int(outputTokens)
+						}
+					}
+				}
+			}
+
+			// Aggregate tool calls using a map to avoid duplicates
+			if tools, ok := response.Stats["tools"].(map[string]interface{}); ok {
+				for toolName := range tools {
+					toolCallCounts[toolName]++
+				}
+			}
+		}
+
+		geminiLogsLog.Printf("Parsed JSON response: response_len=%d, stats_present=%v", len(response.Response), response.Stats != nil)
+	}
+
+	// Convert tool call map to slice
+	for toolName, count := range toolCallCounts {
+		metrics.ToolCalls = append(metrics.ToolCalls, ToolCallInfo{
+			Name: toolName,
+			CallCount: count,
+		})
+	}
+
+	geminiLogsLog.Printf("Parsed metrics: turns=%d, token_usage=%d, tool_calls=%d",
+		metrics.Turns, metrics.TokenUsage, len(metrics.ToolCalls))
+
+	return metrics
+}
+
+// GetLogParserScriptId returns the identifier of the bundled script used to parse Gemini logs
+func (e *GeminiEngine) GetLogParserScriptId() string {
+	return "parse_gemini_log"
+}
+
+// GetLogFileForParsing returns the agent stdio log path that the log parser consumes
+func (e *GeminiEngine) GetLogFileForParsing() string {
+	return "/tmp/gh-aw/agent-stdio.log"
+}
+
+// GetDefaultDetectionModel returns the default model for threat detection.
+// Gemini does not specify a default detection model yet, so the empty string defers to configuration.
+func (e *GeminiEngine) GetDefaultDetectionModel() string {
+	return ""
+}
diff --git a/pkg/workflow/gemini_mcp.go b/pkg/workflow/gemini_mcp.go
new file mode 100644
index 0000000000..a4f7d666b0
--- /dev/null
+++ b/pkg/workflow/gemini_mcp.go
@@ -0,0 +1,71 @@
+package workflow
+
+import (
+ "strings"
+
+ "github.com/github/gh-aw/pkg/logger"
+)
+
+var geminiMCPLog = logger.New("workflow:gemini_mcp")
+
+// RenderMCPConfig renders MCP server configuration for Gemini CLI into the workflow YAML
+func (e *GeminiEngine) RenderMCPConfig(yaml *strings.Builder, tools map[string]any, mcpTools []string, workflowData *WorkflowData) {
+	geminiMCPLog.Printf("Rendering MCP config for Gemini: tool_count=%d, mcp_tool_count=%d", len(tools), len(mcpTools))
+
+	// Create unified renderer with Gemini-specific options
+	createRenderer := func(isLast bool) *MCPConfigRendererUnified {
+		return NewMCPConfigRenderer(MCPRendererOptions{
+			IncludeCopilotFields: false,
+			InlineArgs: false,
+			Format: "json", // Gemini uses JSON format like Claude/Codex
+			IsLast: isLast,
+			ActionMode: GetActionModeFromWorkflowData(workflowData),
+		})
+	}
+
+	// Use shared JSON MCP config renderer; the returned error is deliberately discarded -- NOTE(review): consider logging it
+	_ = RenderJSONMCPConfig(yaml, tools, mcpTools, workflowData, JSONMCPConfigOptions{
+		ConfigPath: "/tmp/gh-aw/mcp-config/mcp-servers.json",
+		GatewayConfig: buildMCPGatewayConfig(workflowData),
+		Renderers: MCPToolRenderers{
+			RenderGitHub: func(yaml *strings.Builder, githubTool any, isLast bool, workflowData *WorkflowData) {
+				renderer := createRenderer(isLast)
+				renderer.RenderGitHubMCP(yaml, githubTool, workflowData)
+			},
+			RenderPlaywright: func(yaml *strings.Builder, playwrightTool any, isLast bool) {
+				renderer := createRenderer(isLast)
+				renderer.RenderPlaywrightMCP(yaml, playwrightTool)
+			},
+			RenderSerena: func(yaml *strings.Builder, serenaTool any, isLast bool) {
+				renderer := createRenderer(isLast)
+				renderer.RenderSerenaMCP(yaml, serenaTool)
+			},
+			RenderCacheMemory: e.renderCacheMemoryMCPConfig,
+			RenderAgenticWorkflows: func(yaml *strings.Builder, isLast bool) {
+				renderer := createRenderer(isLast)
+				renderer.RenderAgenticWorkflowsMCP(yaml)
+			},
+			RenderSafeOutputs: func(yaml *strings.Builder, isLast bool, workflowData *WorkflowData) {
+				renderer := createRenderer(isLast)
+				renderer.RenderSafeOutputsMCP(yaml, workflowData)
+			},
+			RenderSafeInputs: func(yaml *strings.Builder, safeInputs *SafeInputsConfig, isLast bool) {
+				renderer := createRenderer(isLast)
+				renderer.RenderSafeInputsMCP(yaml, safeInputs, workflowData)
+			},
+			RenderWebFetch: func(yaml *strings.Builder, isLast bool) {
+				renderMCPFetchServerConfig(yaml, "json", " ", isLast, false)
+			},
+			RenderCustomMCPConfig: func(yaml *strings.Builder, toolName string, toolConfig map[string]any, isLast bool) error {
+				return renderCustomMCPConfigWrapperWithContext(yaml, toolName, toolConfig, isLast, workflowData)
+			},
+		},
+	})
+}
+
+// renderCacheMemoryMCPConfig handles the cache-memory tool for Gemini; it intentionally emits nothing (all parameters are unused)
+func (e *GeminiEngine) renderCacheMemoryMCPConfig(yaml *strings.Builder, isLast bool, workflowData *WorkflowData) {
+	// Cache-memory is a simple file share, not an MCP server
+	// No MCP configuration needed
+	geminiMCPLog.Print("Cache-memory tool detected, no MCP config needed")
+}
diff --git a/pkg/workflow/wasm_golden_test.go b/pkg/workflow/wasm_golden_test.go
index 3afcc90b32..0a17ae3387 100644
--- a/pkg/workflow/wasm_golden_test.go
+++ b/pkg/workflow/wasm_golden_test.go
@@ -257,7 +257,7 @@ func TestWasmGolden_NativeVsStringAPI(t *testing.T) {
}
}
-// TestWasmGolden_AllEngines verifies that all three engine types compile correctly
+// TestWasmGolden_AllEngines verifies that all engine types compile correctly
// via the string API and produce valid YAML output.
func TestWasmGolden_AllEngines(t *testing.T) {
engines := []struct {
@@ -268,6 +268,7 @@ func TestWasmGolden_AllEngines(t *testing.T) {
{"copilot", "copilot", ""},
{"claude", "claude", "network:\n allowed:\n - defaults"},
{"codex", "codex", "network:\n allowed:\n - defaults"},
+ {"gemini", "gemini", "network:\n allowed:\n - defaults"},
}
for _, eng := range engines {