diff --git a/.github/workflows/test-claude-add-issue-comment.lock.yml b/.github/workflows/test-claude-add-issue-comment.lock.yml index a79169bc81..b628cd3859 100644 --- a/.github/workflows/test-claude-add-issue-comment.lock.yml +++ b/.github/workflows/test-claude-add-issue-comment.lock.yml @@ -380,7 +380,7 @@ jobs: --- - ## Adding a Comment to an Issue or Pull Request + ## Adding a Comment to an Issue or Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-add-issue-labels.lock.yml b/.github/workflows/test-claude-add-issue-labels.lock.yml index fcff070c68..9e3642cd79 100644 --- a/.github/workflows/test-claude-add-issue-labels.lock.yml +++ b/.github/workflows/test-claude-add-issue-labels.lock.yml @@ -380,7 +380,7 @@ jobs: --- - ## Adding Labels to Issues or Pull Requests + ## Adding Labels to Issues or Pull Requests, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-command.lock.yml b/.github/workflows/test-claude-command.lock.yml index 3de9a7f193..3b9867fb2a 100644 --- a/.github/workflows/test-claude-command.lock.yml +++ b/.github/workflows/test-claude-command.lock.yml @@ -643,7 +643,7 @@ jobs: --- - ## Adding a Comment to an Issue or Pull Request + ## Adding a Comment to an Issue or Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. @@ -660,9 +660,22 @@ jobs: ``` 2. After you write to that file, read it as JSONL and check it is valid. If it isn't, make any necessary corrections to it to fix it up + **Reporting Missing Tools or Functionality** + + If you need to use a tool or functionality that is not available to complete your task: + 1. Write an entry to "${{ env.GITHUB_AW_SAFE_OUTPUTS }}": + ```json + {"type": "missing-tool", "tool": "tool-name", "reason": "Why this tool is needed", "alternatives": "Suggested alternatives or workarounds"} + ``` + 2. The `tool` field should specify the name or type of missing functionality + 3. The `reason` field should explain why this tool/functionality is required to complete the task + 4. The `alternatives` field is optional but can suggest workarounds or alternative approaches + 5. After you write to that file, read it as JSONL and check it is valid. 
If it isn't, make any necessary corrections to it to fix it up + **Example JSONL file content:** ``` {"type": "add-issue-comment", "body": "This is related to the issue above."} + {"type": "missing-tool", "tool": "docker", "reason": "Need Docker to build container images", "alternatives": "Could use GitHub Actions build instead"} ``` **Important Notes:** @@ -818,7 +831,7 @@ jobs: uses: actions/github-script@v7 env: GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true}}" + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"missing-tool\":{\"enabled\":true}}" with: script: | async function main() { @@ -1966,3 +1979,87 @@ jobs: } await main(); + missing_tool: + needs: test-claude-command + if: ${{ always() }} + runs-on: ubuntu-latest + permissions: + contents: read + timeout-minutes: 5 + outputs: + tools_reported: ${{ steps.missing_tool.outputs.tools_reported }} + total_count: ${{ steps.missing_tool.outputs.total_count }} + steps: + - name: Record Missing Tool + id: missing_tool + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-claude-command.outputs.output }} + with: + script: | + const fs = require('fs'); + const path = require('path'); + // Get environment variables + const agentOutput = process.env.GITHUB_AW_AGENT_OUTPUT || ''; + const maxReports = process.env.GITHUB_AW_MISSING_TOOL_MAX ? parseInt(process.env.GITHUB_AW_MISSING_TOOL_MAX) : null; + console.log('Processing missing-tool reports...'); + console.log('Agent output length:', agentOutput.length); + if (maxReports) { + console.log('Maximum reports allowed:', maxReports); + } + const missingTools = []; + if (agentOutput.trim()) { + const lines = agentOutput.split('\n').filter(line => line.trim()); + for (const line of lines) { + try { + const entry = JSON.parse(line); + if (entry.type === 'missing-tool') { + // Validate required fields + if (!entry.tool) { + console.log('Warning: missing-tool entry missing "tool" field:', line); + continue; + } + if (!entry.reason) { + console.log('Warning: missing-tool entry missing "reason" field:', line); + continue; + } + const missingTool = { + tool: entry.tool, + reason: entry.reason, + alternatives: entry.alternatives || null, + timestamp: new Date().toISOString() + }; + missingTools.push(missingTool); + console.log('Recorded missing tool:', missingTool.tool); + // Check max limit + if (maxReports && missingTools.length >= maxReports) { + console.log('Reached maximum number of missing tool reports (${maxReports})'); + break; + } + } + } catch (error) { + console.log('Warning: Failed to parse line as JSON:', line); + console.log('Parse error:', error.message); + } + } + } + console.log('Total missing tools reported:', missingTools.length); + // Output results + core.setOutput('tools_reported', JSON.stringify(missingTools)); + core.setOutput('total_count', missingTools.length.toString()); + // Log details for debugging + if (missingTools.length > 0) { + console.log('Missing tools summary:'); + missingTools.forEach((tool, index) => { + console.log('${index + 1}. 
Tool: ${tool.tool}'); + console.log(' Reason: ${tool.reason}'); + if (tool.alternatives) { + console.log(' Alternatives: ${tool.alternatives}'); + } + console.log(' Reported at: ${tool.timestamp}'); + console.log(''); + }); + } else { + console.log('No missing tools reported in this workflow execution.'); + } + diff --git a/.github/workflows/test-claude-command.md b/.github/workflows/test-claude-command.md index 8733b0dbf5..b2d0a36eaf 100644 --- a/.github/workflows/test-claude-command.md +++ b/.github/workflows/test-claude-command.md @@ -9,6 +9,7 @@ engine: safe-outputs: add-issue-comment: + missing-tool: --- Add a reply comment to issue #${{ github.event.issue.number }} answering the question "${{ needs.task.outputs.text }}" given the context of the repo, starting with saying you're Claude. If there is no command write out a haiku about the repo. diff --git a/.github/workflows/test-claude-create-issue.lock.yml b/.github/workflows/test-claude-create-issue.lock.yml index 37193fd916..a2abd3a9a0 100644 --- a/.github/workflows/test-claude-create-issue.lock.yml +++ b/.github/workflows/test-claude-create-issue.lock.yml @@ -190,7 +190,7 @@ jobs: --- - ## Creating an Issue + ## Creating an Issue, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml b/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml index c5dbbf0dc2..bf4c99c2ed 100644 --- a/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml +++ b/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml @@ -403,7 +403,7 @@ jobs: --- - ## + ## Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-create-pull-request.lock.yml b/.github/workflows/test-claude-create-pull-request.lock.yml index a15f3d4b9e..3452372725 100644 --- a/.github/workflows/test-claude-create-pull-request.lock.yml +++ b/.github/workflows/test-claude-create-pull-request.lock.yml @@ -194,7 +194,7 @@ jobs: --- - ## Creating a Pull Request + ## Creating a Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them.
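The `missing_tool` job above validates each JSONL line and enforces an optional report limit. As a rough standalone illustration of that logic — not the generated step itself; the `MissingTool` struct and `collectMissingTools` function below are invented for the sketch — the same filtering can be expressed in Go:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

// MissingTool mirrors the fields the missing_tool job records (illustrative only).
type MissingTool struct {
	Type         string `json:"type"`
	Tool         string `json:"tool"`
	Reason       string `json:"reason"`
	Alternatives string `json:"alternatives,omitempty"`
}

// collectMissingTools scans JSONL agent output and keeps only missing-tool entries
// that carry the required tool and reason fields, capped at maxReports (0 = unlimited).
func collectMissingTools(agentOutput string, maxReports int) []MissingTool {
	var tools []MissingTool
	scanner := bufio.NewScanner(strings.NewReader(agentOutput))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		var entry MissingTool
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			fmt.Printf("Warning: failed to parse line as JSON: %s\n", line)
			continue
		}
		if entry.Type != "missing-tool" {
			continue
		}
		if entry.Tool == "" || entry.Reason == "" {
			fmt.Printf("Warning: missing-tool entry missing required fields: %s\n", line)
			continue
		}
		tools = append(tools, entry)
		if maxReports > 0 && len(tools) >= maxReports {
			break
		}
	}
	return tools
}

func main() {
	output := `{"type": "add-issue-comment", "body": "Example comment."}
{"type": "missing-tool", "tool": "docker", "reason": "Need Docker to build container images"}`
	for _, t := range collectMissingTools(output, 0) {
		fmt.Printf("Missing tool: %s (%s)\n", t.Tool, t.Reason)
	}
}
```

Entries of other types, such as `add-issue-comment`, are simply skipped, mirroring the JavaScript collector's behavior.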
diff --git a/.github/workflows/test-claude-mcp.lock.yml b/.github/workflows/test-claude-mcp.lock.yml index f61259f85d..5a0f6faaf6 100644 --- a/.github/workflows/test-claude-mcp.lock.yml +++ b/.github/workflows/test-claude-mcp.lock.yml @@ -401,7 +401,7 @@ jobs: --- - ## Creating an Issue + ## Creating an Issue, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-push-to-branch.lock.yml b/.github/workflows/test-claude-push-to-branch.lock.yml index d402c6c69a..e5b2fecff9 100644 --- a/.github/workflows/test-claude-push-to-branch.lock.yml +++ b/.github/workflows/test-claude-push-to-branch.lock.yml @@ -282,7 +282,7 @@ jobs: --- - ## Pushing Changes to Branch + ## Pushing Changes to Branch, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-claude-update-issue.lock.yml b/.github/workflows/test-claude-update-issue.lock.yml index 11b273519a..dabba5bc0a 100644 --- a/.github/workflows/test-claude-update-issue.lock.yml +++ b/.github/workflows/test-claude-update-issue.lock.yml @@ -384,7 +384,7 @@ jobs: --- - ## Updating Issues + ## Updating Issues, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-add-issue-comment.lock.yml b/.github/workflows/test-codex-add-issue-comment.lock.yml index 6120e39251..fc16477a54 100644 --- a/.github/workflows/test-codex-add-issue-comment.lock.yml +++ b/.github/workflows/test-codex-add-issue-comment.lock.yml @@ -275,7 +275,7 @@ jobs: --- - ## Adding a Comment to an Issue or Pull Request + ## Adding a Comment to an Issue or Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them.
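On the producing side, the prompts above ask the agent to append one JSON object per line to the file named by `GITHUB_AW_SAFE_OUTPUTS`. A minimal Go sketch of that JSONL append pattern; the `appendSafeOutput` helper and the local fallback path are illustrative assumptions, not part of the workflows:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// appendSafeOutput writes one JSON object per line (JSONL) to the safe-outputs file.
func appendSafeOutput(path string, entry map[string]any) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	line, err := json.Marshal(entry)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintln(f, string(line))
	return err
}

func main() {
	path := os.Getenv("GITHUB_AW_SAFE_OUTPUTS")
	if path == "" {
		path = "safe-outputs.jsonl" // illustrative fallback for local experimentation
	}
	_ = appendSafeOutput(path, map[string]any{
		"type": "add-issue-comment",
		"body": "This is related to the issue above.",
	})
	_ = appendSafeOutput(path, map[string]any{
		"type":         "missing-tool",
		"tool":         "docker",
		"reason":       "Need Docker to build container images",
		"alternatives": "Could use GitHub Actions build instead",
	})
}
```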
diff --git a/.github/workflows/test-codex-add-issue-labels.lock.yml b/.github/workflows/test-codex-add-issue-labels.lock.yml index ef429ef26a..dc971150b2 100644 --- a/.github/workflows/test-codex-add-issue-labels.lock.yml +++ b/.github/workflows/test-codex-add-issue-labels.lock.yml @@ -275,7 +275,7 @@ jobs: --- - ## Adding Labels to Issues or Pull Requests + ## Adding Labels to Issues or Pull Requests, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-command.lock.yml b/.github/workflows/test-codex-command.lock.yml index 9ea2778a4d..5de0bcbbe6 100644 --- a/.github/workflows/test-codex-command.lock.yml +++ b/.github/workflows/test-codex-command.lock.yml @@ -643,7 +643,7 @@ jobs: --- - ## Adding a Comment to an Issue or Pull Request + ## Adding a Comment to an Issue or Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. @@ -660,9 +660,22 @@ jobs: ``` 2. After you write to that file, read it as JSONL and check it is valid. If it isn't, make any necessary corrections to it to fix it up + **Reporting Missing Tools or Functionality** + + If you need to use a tool or functionality that is not available to complete your task: + 1. Write an entry to "${{ env.GITHUB_AW_SAFE_OUTPUTS }}": + ```json + {"type": "missing-tool", "tool": "tool-name", "reason": "Why this tool is needed", "alternatives": "Suggested alternatives or workarounds"} + ``` + 2. The `tool` field should specify the name or type of missing functionality + 3. The `reason` field should explain why this tool/functionality is required to complete the task + 4. The `alternatives` field is optional but can suggest workarounds or alternative approaches + 5. After you write to that file, read it as JSONL and check it is valid. 
If it isn't, make any necessary corrections to it to fix it up + **Example JSONL file content:** ``` {"type": "add-issue-comment", "body": "This is related to the issue above."} + {"type": "missing-tool", "tool": "docker", "reason": "Need Docker to build container images", "alternatives": "Could use GitHub Actions build instead"} ``` **Important Notes:** @@ -818,7 +831,7 @@ jobs: uses: actions/github-script@v7 env: GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true}}" + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"missing-tool\":{\"enabled\":true}}" with: script: | async function main() { @@ -1966,3 +1979,87 @@ jobs: } await main(); + missing_tool: + needs: test-codex-command + if: ${{ always() }} + runs-on: ubuntu-latest + permissions: + contents: read + timeout-minutes: 5 + outputs: + tools_reported: ${{ steps.missing_tool.outputs.tools_reported }} + total_count: ${{ steps.missing_tool.outputs.total_count }} + steps: + - name: Record Missing Tool + id: missing_tool + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-codex-command.outputs.output }} + with: + script: | + const fs = require('fs'); + const path = require('path'); + // Get environment variables + const agentOutput = process.env.GITHUB_AW_AGENT_OUTPUT || ''; + const maxReports = process.env.GITHUB_AW_MISSING_TOOL_MAX ? parseInt(process.env.GITHUB_AW_MISSING_TOOL_MAX) : null; + console.log('Processing missing-tool reports...'); + console.log('Agent output length:', agentOutput.length); + if (maxReports) { + console.log('Maximum reports allowed:', maxReports); + } + const missingTools = []; + if (agentOutput.trim()) { + const lines = agentOutput.split('\n').filter(line => line.trim()); + for (const line of lines) { + try { + const entry = JSON.parse(line); + if (entry.type === 'missing-tool') { + // Validate required fields + if (!entry.tool) { + console.log('Warning: missing-tool entry missing "tool" field:', line); + continue; + } + if (!entry.reason) { + console.log('Warning: missing-tool entry missing "reason" field:', line); + continue; + } + const missingTool = { + tool: entry.tool, + reason: entry.reason, + alternatives: entry.alternatives || null, + timestamp: new Date().toISOString() + }; + missingTools.push(missingTool); + console.log('Recorded missing tool:', missingTool.tool); + // Check max limit + if (maxReports && missingTools.length >= maxReports) { + console.log('Reached maximum number of missing tool reports (${maxReports})'); + break; + } + } + } catch (error) { + console.log('Warning: Failed to parse line as JSON:', line); + console.log('Parse error:', error.message); + } + } + } + console.log('Total missing tools reported:', missingTools.length); + // Output results + core.setOutput('tools_reported', JSON.stringify(missingTools)); + core.setOutput('total_count', missingTools.length.toString()); + // Log details for debugging + if (missingTools.length > 0) { + console.log('Missing tools summary:'); + missingTools.forEach((tool, index) => { + console.log('${index + 1}. 
Tool: ${tool.tool}'); + console.log(' Reason: ${tool.reason}'); + if (tool.alternatives) { + console.log(' Alternatives: ${tool.alternatives}'); + } + console.log(' Reported at: ${tool.timestamp}'); + console.log(''); + }); + } else { + console.log('No missing tools reported in this workflow execution.'); + } + diff --git a/.github/workflows/test-codex-command.md b/.github/workflows/test-codex-command.md index 3c22490ec4..b8b8bedc27 100644 --- a/.github/workflows/test-codex-command.md +++ b/.github/workflows/test-codex-command.md @@ -9,6 +9,7 @@ engine: safe-outputs: add-issue-comment: + missing-tool: --- Add a reply comment to issue #${{ github.event.issue.number }} answering the question "${{ needs.task.outputs.text }}" given the context of the repo, starting with saying you're Codex. If there is no command write out a haiku about the repo. diff --git a/.github/workflows/test-codex-create-issue.lock.yml b/.github/workflows/test-codex-create-issue.lock.yml index 8960572381..4be58e103f 100644 --- a/.github/workflows/test-codex-create-issue.lock.yml +++ b/.github/workflows/test-codex-create-issue.lock.yml @@ -85,7 +85,7 @@ jobs: --- - ## Creating an Issue + ## Creating an Issue, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml b/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml index 17a9b44c33..cbebbc4b1d 100644 --- a/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml +++ b/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml @@ -298,7 +298,7 @@ jobs: --- - ## + ## Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-create-pull-request.lock.yml b/.github/workflows/test-codex-create-pull-request.lock.yml index 699416921d..e013ba6bf4 100644 --- a/.github/workflows/test-codex-create-pull-request.lock.yml +++ b/.github/workflows/test-codex-create-pull-request.lock.yml @@ -89,7 +89,7 @@ jobs: --- - ## Creating a Pull Request + ## Creating a Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them.
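The compiled workflows pass the output collector a `GITHUB_AW_SAFE_OUTPUTS_CONFIG` JSON value such as `{"add-issue-comment":{"enabled":true},"missing-tool":{"enabled":true}}`. A hedged sketch of how such a config could be decoded and checked in Go; the `safeOutputConfig` struct shape, including the `max` field, is an assumption about the schema rather than something confirmed by this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// safeOutputConfig is an assumed shape for the per-type entries in
// GITHUB_AW_SAFE_OUTPUTS_CONFIG; the real collector may carry additional fields.
type safeOutputConfig struct {
	Enabled bool `json:"enabled"`
	Max     int  `json:"max,omitempty"`
}

func main() {
	raw := os.Getenv("GITHUB_AW_SAFE_OUTPUTS_CONFIG")
	if raw == "" {
		raw = `{"add-issue-comment":{"enabled":true},"missing-tool":{"enabled":true}}`
	}
	cfg := map[string]safeOutputConfig{}
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		fmt.Fprintln(os.Stderr, "invalid safe-outputs config:", err)
		os.Exit(1)
	}
	if c, ok := cfg["missing-tool"]; ok && c.Enabled {
		fmt.Println("missing-tool reporting is enabled; max =", c.Max)
	} else {
		fmt.Println("missing-tool reporting is not enabled")
	}
}
```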
diff --git a/.github/workflows/test-codex-mcp.lock.yml b/.github/workflows/test-codex-mcp.lock.yml index ddecaeb909..c3d4c41b1c 100644 --- a/.github/workflows/test-codex-mcp.lock.yml +++ b/.github/workflows/test-codex-mcp.lock.yml @@ -294,7 +294,7 @@ jobs: --- - ## Creating an Issue + ## Creating an Issue, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-push-to-branch.lock.yml b/.github/workflows/test-codex-push-to-branch.lock.yml index 44a312d120..e6a15f4b04 100644 --- a/.github/workflows/test-codex-push-to-branch.lock.yml +++ b/.github/workflows/test-codex-push-to-branch.lock.yml @@ -179,7 +179,7 @@ jobs: --- - ## Pushing Changes to Branch + ## Pushing Changes to Branch, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-codex-update-issue.lock.yml b/.github/workflows/test-codex-update-issue.lock.yml index 047ea9c714..5fef4244b1 100644 --- a/.github/workflows/test-codex-update-issue.lock.yml +++ b/.github/workflows/test-codex-update-issue.lock.yml @@ -279,7 +279,7 @@ jobs: --- - ## Updating Issues + ## Updating Issues, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. diff --git a/.github/workflows/test-proxy.lock.yml b/.github/workflows/test-proxy.lock.yml index 19f98af3a6..e605234eb7 100644 --- a/.github/workflows/test-proxy.lock.yml +++ b/.github/workflows/test-proxy.lock.yml @@ -358,7 +358,7 @@ jobs: --- - ## Adding a Comment to an Issue or Pull Request + ## Adding a Comment to an Issue or Pull Request, Reporting Missing Tools or Functionality **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them.
diff --git a/docs/frontmatter.md b/docs/frontmatter.md index ba74dca817..02c2c33601 100644 --- a/docs/frontmatter.md +++ b/docs/frontmatter.md @@ -164,6 +164,10 @@ engine: version: beta # Optional: version of the action model: claude-3-5-sonnet-20241022 # Optional: specific LLM model max-turns: 5 # Optional: maximum chat iterations per run + env: # Optional: custom environment variables + AWS_REGION: us-west-2 + CUSTOM_API_ENDPOINT: https://api.example.com + DEBUG_MODE: "true" ``` **Fields:** @@ -171,6 +175,7 @@ engine: - **`version`** (optional): Action version (`beta`, `stable`) - **`model`** (optional): Specific LLM model to use - **`max-turns`** (optional): Maximum number of chat iterations per run (cost-control option) +- **`env`** (optional): Custom environment variables to pass to the agentic engine as key-value pairs **Model Defaults:** - **Claude**: Uses the default model from the claude-code-base-action (typically latest Claude model) @@ -194,6 +199,36 @@ engine: 3. Helps prevent runaway chat loops and control costs 4. Only applies to engines that support turn limiting (currently Claude) +**Custom Environment Variables (`env`):** + +The `env` option allows you to pass custom environment variables to the agentic engine: + +```yaml +engine: + id: claude + env: + - "AWS_REGION=us-west-2" + - "CUSTOM_API_ENDPOINT: https://api.example.com" + - "DEBUG_MODE: true" +``` + +**Format Options:** +- `KEY=value` - Standard environment variable format +- `KEY: value` - YAML-style format + +**Behavior:** +1. Custom environment variables are added to the built-in engine variables +2. For Claude: Variables are passed via the `claude_env` input and GitHub Actions `env` section +3. For Codex: Variables are added to the command-based execution environment +4. Supports secrets and GitHub context variables: `"API_KEY: ${{ secrets.MY_SECRET }}"` +5. Useful for custom configurations like Claude on Amazon Vertex AI + +**Use Cases:** +- Configure cloud provider regions: `AWS_REGION=us-west-2` +- Set custom API endpoints: `API_ENDPOINT: https://vertex-ai.googleapis.com` +- Pass authentication tokens: `API_TOKEN: ${{ secrets.CUSTOM_TOKEN }}` +- Enable debug modes: `DEBUG_MODE: true` + ## Network Permissions (`network:`) > This is only supported by the claude engine today. diff --git a/docs/safe-outputs.md b/docs/safe-outputs.md index 1d514971ef..e1f45520d3 100644 --- a/docs/safe-outputs.md +++ b/docs/safe-outputs.md @@ -12,6 +12,7 @@ One of the primary security features of GitHub Agentic Workflows is "safe output | **Label Addition** | `add-issue-label:` | Add labels to issues or pull requests | 3 | | **Issue Updates** | `update-issue:` | Update issue status, title, or body | 1 | | **Push to Branch** | `push-to-branch:` | Push changes directly to a branch | 1 | +| **Missing Tool Reporting** | `missing-tool:` | Report missing tools or functionality needed to complete tasks | unlimited | ## Overview (`safe-outputs:`) @@ -345,6 +346,43 @@ Analyze the pull request and make necessary code improvements. When `create-pull-request` or `push-to-branch` are enabled in the `safe-outputs` configuration, the system automatically adds the following additional Claude tools to enable file editing and pull request creation: +### Missing Tool Reporting (`missing-tool:`) + +**Note:** Missing tool reporting is optional and must be explicitly configured in the `safe-outputs:` section if you want workflows to report when they encounter limitations or need tools that aren't available. 
+ +**Basic Configuration:** +```yaml +safe-outputs: + missing-tool: # Enable missing-tool reporting +``` + +**With Configuration:** +```yaml +safe-outputs: + missing-tool: + max: 10 # Optional: maximum number of missing tool reports (default: unlimited) +``` + +The agentic part of your workflow can report missing tools or functionality that prevents it from completing its task. + +**Example natural language to generate the output:** + +```markdown +# Development Task Agent + +Analyze the repository and implement the requested feature. If you encounter missing tools, capabilities, or permissions that prevent completion, report them so the user can address these limitations. +``` + +The compiled workflow will have additional prompting describing that, to report missing tools, it should write the tool information to a special file. + +**Safety Features:** + +- No write permissions required - only logs missing functionality +- Optional configuration to help users understand workflow limitations when enabled +- Reports are structured with tool name, reason, and optional alternatives +- Maximum count can be configured to prevent excessive reporting +- All missing tool data is captured in workflow artifacts for review + ## Automatically Added Tools When `create-pull-request` or `push-to-branch` are configured, these Claude tools are automatically added: diff --git a/pkg/parser/integration_test.go b/pkg/parser/integration_test.go new file mode 100644 index 0000000000..7defb85fca --- /dev/null +++ b/pkg/parser/integration_test.go @@ -0,0 +1,156 @@ +package parser + +import ( + "os" + "strings" + "testing" +) + +func TestFrontmatterLocationIntegration(t *testing.T) { + // Create a temporary file with frontmatter that has additional properties + tempFile := "/tmp/test_frontmatter_location.md" + content := `--- +name: Test Workflow +on: push +permissions: + contents: read +invalid_property: value +another_bad_prop: bad_value +engine: claude +--- + +This is a test workflow with invalid additional properties in frontmatter. 
+` + + err := os.WriteFile(tempFile, []byte(content), 0644) + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tempFile) + + // Create a schema that doesn't allow additional properties + schemaJSON := `{ + "type": "object", + "properties": { + "name": {"type": "string"}, + "on": {"type": ["string", "object"]}, + "permissions": {"type": "object"}, + "engine": {"type": "string"} + }, + "additionalProperties": false + }` + + // Parse frontmatter + frontmatterResult, err := ExtractFrontmatterFromContent(content) + if err != nil { + t.Fatalf("Failed to extract frontmatter: %v", err) + } + + // Validate with location information + err = validateWithSchemaAndLocation(frontmatterResult.Frontmatter, schemaJSON, "test workflow", tempFile) + + if err == nil { + t.Fatal("Expected validation error for additional properties, got nil") + } + + errorMessage := err.Error() + t.Logf("Error message: %s", errorMessage) + + // Verify the error points to the correct location + expectedPatterns := []string{ + tempFile + ":", // File path + "6:1:", // Line 6 column 1 (where invalid_property is) + "invalid_property", // The property name in the message + } + + for _, pattern := range expectedPatterns { + if !strings.Contains(errorMessage, pattern) { + t.Errorf("Error message should contain '%s' but got: %s", pattern, errorMessage) + } + } +} + +func TestFrontmatterOffsetCalculation(t *testing.T) { + // Test frontmatter at the beginning of the file + tempFile := "/tmp/test_frontmatter_offset.md" + content := `--- +name: Test Workflow +invalid_prop: bad +--- + +# This is content after frontmatter + + +Content here. +` + + err := os.WriteFile(tempFile, []byte(content), 0644) + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tempFile) + + schemaJSON := `{ + "type": "object", + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }` + + frontmatterResult, err := ExtractFrontmatterFromContent(content) + if err != nil { + t.Fatalf("Failed to extract frontmatter: %v", err) + } + + err = validateWithSchemaAndLocation(frontmatterResult.Frontmatter, schemaJSON, "test workflow", tempFile) + + if err == nil { + t.Fatal("Expected validation error for additional properties, got nil") + } + + errorMessage := err.Error() + t.Logf("Error message with offset: %s", errorMessage) + + // The invalid_prop should be on line 3 (1-based: ---, name, invalid_prop) + expectedPatterns := []string{ + tempFile + ":", + "3:", // Line 3 where invalid_prop appears + "invalid_prop", + } + + for _, pattern := range expectedPatterns { + if !strings.Contains(errorMessage, pattern) { + t.Errorf("Error message should contain '%s' but got: %s", pattern, errorMessage) + } + } +} + +func TestImprovementComparison(t *testing.T) { + yamlContent := `name: Test +engine: claude +invalid_prop: bad_value +another_invalid: also_bad` + + // Simulate the error message we get from jsonschema + errorMessage := "at '': additional properties 'invalid_prop', 'another_invalid' not allowed" + + // Test old behavior + oldLocation := LocateJSONPathInYAML(yamlContent, "") + + // Test new behavior + newLocation := LocateJSONPathInYAMLWithAdditionalProperties(yamlContent, "", errorMessage) + + // The old behavior should point to line 1, column 1 + if oldLocation.Line != 1 || oldLocation.Column != 1 { + t.Errorf("Old behavior expected Line=1, Column=1, got Line=%d, Column=%d", oldLocation.Line, oldLocation.Column) + } + + // The new behavior should point to line 3, column 
1 (where invalid_prop is) + if newLocation.Line != 3 || newLocation.Column != 1 { + t.Errorf("New behavior expected Line=3, Column=1, got Line=%d, Column=%d", newLocation.Line, newLocation.Column) + } + + t.Logf("Improvement demonstrated: Old=(Line:%d, Column:%d) -> New=(Line:%d, Column:%d)", + oldLocation.Line, oldLocation.Column, newLocation.Line, newLocation.Column) +} diff --git a/pkg/parser/json_path_locator.go b/pkg/parser/json_path_locator.go index 9b8c29f0d0..2ad39c3ecb 100644 --- a/pkg/parser/json_path_locator.go +++ b/pkg/parser/json_path_locator.go @@ -67,13 +67,36 @@ func LocateJSONPathInYAML(yamlContent string, jsonPath string) JSONPathLocation return JSONPathLocation{Line: 1, Column: 1, Found: true} } - // For now, use a simple line-by-line approach to find the path - // This is less precise than using the YAML parser's position info, - // but will work as a starting point + // Use a more sophisticated line-by-line approach to find the path location := findPathInYAMLLines(yamlContent, pathSegments) return location } +// LocateJSONPathInYAMLWithAdditionalProperties finds the line/column position of a JSON path in YAML source +// with special handling for additional properties errors +func LocateJSONPathInYAMLWithAdditionalProperties(yamlContent string, jsonPath string, errorMessage string) JSONPathLocation { + if jsonPath == "" { + // This might be an additional properties error - try to extract property names + propertyNames := extractAdditionalPropertyNames(errorMessage) + if len(propertyNames) > 0 { + // Find the first additional property in the YAML + return findFirstAdditionalProperty(yamlContent, propertyNames) + } + // Fallback to root level error + return JSONPathLocation{Line: 1, Column: 1, Found: true} + } + + // Check if this is an additional properties error even with a non-empty path + propertyNames := extractAdditionalPropertyNames(errorMessage) + if len(propertyNames) > 0 { + // Find the additional property within the nested context + return findAdditionalPropertyInNestedContext(yamlContent, jsonPath, propertyNames) + } + + // For non-empty paths without additional properties, use the regular logic + return LocateJSONPathInYAML(yamlContent, jsonPath) +} + // findPathInYAMLLines finds a JSON path in YAML content using line-by-line analysis func findPathInYAMLLines(yamlContent string, pathSegments []PathSegment) JSONPathLocation { lines := strings.Split(yamlContent, "\n") @@ -187,3 +210,225 @@ type PathSegment struct { Value string // The raw value Index int // Parsed index for array elements } + +// extractAdditionalPropertyNames extracts property names from additional properties error messages +// Example: "additional properties 'invalid_prop', 'another_invalid' not allowed" -> ["invalid_prop", "another_invalid"] +func extractAdditionalPropertyNames(errorMessage string) []string { + // Look for the pattern: additional properties ... not allowed + // Use regex to match the full property list section + re := regexp.MustCompile(`additional propert(?:y|ies) (.+?) 
not allowed`) + match := re.FindStringSubmatch(errorMessage) + + if len(match) < 2 { + return []string{} + } + + // Extract all quoted property names from the matched string + propPattern := regexp.MustCompile(`'([^']+)'`) + propMatches := propPattern.FindAllStringSubmatch(match[1], -1) + + var properties []string + for _, propMatch := range propMatches { + if len(propMatch) > 1 { + prop := strings.TrimSpace(propMatch[1]) + if prop != "" { + properties = append(properties, prop) + } + } + } + + return properties +} + +// findFirstAdditionalProperty finds the first occurrence of any of the given property names in YAML +func findFirstAdditionalProperty(yamlContent string, propertyNames []string) JSONPathLocation { + lines := strings.Split(yamlContent, "\n") + + for lineNum, line := range lines { + trimmedLine := strings.TrimSpace(line) + + // Skip empty lines and comments + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "#") { + continue + } + + // Check if this line contains any of the additional properties + for _, propName := range propertyNames { + // Look for "propName:" pattern at the start of the trimmed line + keyPattern := regexp.MustCompile(`^` + regexp.QuoteMeta(propName) + `\s*:`) + if keyPattern.MatchString(trimmedLine) { + // Found the property - return position of the property name + propIndex := strings.Index(line, propName) + if propIndex != -1 { + return JSONPathLocation{ + Line: lineNum + 1, // 1-based line numbers + Column: propIndex + 1, // 1-based column numbers + Found: true, + } + } + } + } + } + + // If we can't find any of the properties, return the default location + return JSONPathLocation{Line: 1, Column: 1, Found: false} +} + +// findAdditionalPropertyInNestedContext finds additional properties within a specific nested JSON path context +// It extracts the sub-YAML content for the JSON path and searches within it for better efficiency +func findAdditionalPropertyInNestedContext(yamlContent string, jsonPath string, propertyNames []string) JSONPathLocation { + // Parse the path segments to understand the nesting structure + pathSegments := parseJSONPath(jsonPath) + if len(pathSegments) == 0 { + // If no path segments, search globally + return findFirstAdditionalProperty(yamlContent, propertyNames) + } + + // Find the nested section that corresponds to the JSON path + nestedSection := findNestedSection(yamlContent, pathSegments) + if nestedSection.startLine == -1 { + // If we can't find the nested section, fall back to global search + return findFirstAdditionalProperty(yamlContent, propertyNames) + } + + // Extract the sub-YAML content for the identified nested section + lines := strings.Split(yamlContent, "\n") + subYAMLLines := make([]string, 0, nestedSection.endLine-nestedSection.startLine+1) + + // Extract lines from the nested section, maintaining relative indentation + var baseIndent = -1 + for lineNum := nestedSection.startLine; lineNum <= nestedSection.endLine && lineNum < len(lines); lineNum++ { + line := lines[lineNum] + + // Skip the section header line (e.g., "on:") + if lineNum == nestedSection.startLine { + continue + } + + // Calculate the indentation and normalize it + lineIndent := len(line) - len(strings.TrimLeft(line, " \t")) + if baseIndent == -1 && strings.TrimSpace(line) != "" { + baseIndent = lineIndent + } + + // Create normalized line by removing the base indentation + var normalizedLine string + if lineIndent >= baseIndent && baseIndent > 0 { + normalizedLine = line[baseIndent:] + } else { + normalizedLine = line + } + + 
subYAMLLines = append(subYAMLLines, normalizedLine) + } + + // Create the sub-YAML content + subYAMLContent := strings.Join(subYAMLLines, "\n") + + // Search for additional properties within the extracted sub-YAML content + subLocation := findFirstAdditionalProperty(subYAMLContent, propertyNames) + + if !subLocation.Found { + // If we can't find the additional properties in the sub-YAML, + // fall back to a global search + return findFirstAdditionalProperty(yamlContent, propertyNames) + } + + // Map the location back to the original YAML coordinates + // subLocation.Line is 1-based, so we need to adjust it + originalLine := nestedSection.startLine + subLocation.Line // +1 to skip section header, -1 for 0-based indexing + originalColumn := subLocation.Column + + // If we had base indentation, we need to adjust the column position + if baseIndent > 0 { + originalColumn += baseIndent + } + + return JSONPathLocation{ + Line: originalLine + 1, // Convert back to 1-based line numbers + Column: originalColumn, + Found: true, + } +} + +// NestedSection represents a section of YAML content that corresponds to a nested object +type NestedSection struct { + startLine int // 0-based start line + endLine int // 0-based end line (inclusive) + baseIndentLevel int // The indentation level of properties within this section +} + +// findNestedSection locates the section of YAML that corresponds to the given JSON path +func findNestedSection(yamlContent string, pathSegments []PathSegment) NestedSection { + lines := strings.Split(yamlContent, "\n") + + // Start from the beginning and traverse the path + currentLevel := 0 + var foundLine = -1 + var baseIndentLevel = 0 + + for lineNum, line := range lines { + trimmedLine := strings.TrimSpace(line) + + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "#") { + continue + } + + // Calculate indentation level + lineLevel := (len(line) - len(strings.TrimLeft(line, " \t"))) / 2 + + // Check if we're looking for a key at the current path level + if currentLevel < len(pathSegments) { + segment := pathSegments[currentLevel] + + if segment.Type == "key" { + // Look for "key:" pattern + keyPattern := regexp.MustCompile(`^` + regexp.QuoteMeta(segment.Value) + `\s*:`) + if keyPattern.MatchString(trimmedLine) && lineLevel == currentLevel*2 { + // Found a matching key at the correct indentation level + if currentLevel == len(pathSegments)-1 { + // This is the final segment - we found our target + foundLine = lineNum + baseIndentLevel = lineLevel + 2 // Properties inside this object should be indented further + break + } else { + // Move to the next level + currentLevel++ + } + } + } + } + } + + if foundLine == -1 { + return NestedSection{startLine: -1, endLine: -1, baseIndentLevel: 0} + } + + // Find the end of this nested section by looking for the next line at the same or lower indentation + endLine := len(lines) - 1 // Default to end of file + targetLevel := baseIndentLevel - 2 // The level of the key we found + + for lineNum := foundLine + 1; lineNum < len(lines); lineNum++ { + line := lines[lineNum] + trimmedLine := strings.TrimSpace(line) + + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "#") { + continue + } + + lineLevel := (len(line) - len(strings.TrimLeft(line, " \t"))) / 2 + + // If we find a line at the same or lower level than our target, + // the nested section ends at the previous line + if lineLevel <= targetLevel { + endLine = lineNum - 1 + break + } + } + + return NestedSection{ + startLine: foundLine, + endLine: endLine, + baseIndentLevel: 
baseIndentLevel, + } +} diff --git a/pkg/parser/json_path_locator_improvements_test.go b/pkg/parser/json_path_locator_improvements_test.go new file mode 100644 index 0000000000..8b5a916c47 --- /dev/null +++ b/pkg/parser/json_path_locator_improvements_test.go @@ -0,0 +1,497 @@ +package parser + +import ( + "strings" + "testing" +) + +func TestExtractAdditionalPropertyNames(t *testing.T) { + tests := []struct { + name string + errorMessage string + expected []string + }{ + { + name: "single additional property", + errorMessage: "at '': additional properties 'invalid_key' not allowed", + expected: []string{"invalid_key"}, + }, + { + name: "multiple additional properties", + errorMessage: "at '': additional properties 'invalid_prop', 'another_invalid' not allowed", + expected: []string{"invalid_prop", "another_invalid"}, + }, + { + name: "single property with different format", + errorMessage: "additional property 'bad_field' not allowed", + expected: []string{"bad_field"}, + }, + { + name: "no additional properties in message", + errorMessage: "at '/age': got string, want number", + expected: []string{}, + }, + { + name: "empty message", + errorMessage: "", + expected: []string{}, + }, + { + name: "complex property names", + errorMessage: "additional properties 'invalid-prop', 'another_bad_one', 'third.prop' not allowed", + expected: []string{"invalid-prop", "another_bad_one", "third.prop"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := extractAdditionalPropertyNames(tt.errorMessage) + + if len(result) != len(tt.expected) { + t.Errorf("Expected %d properties, got %d: %v", len(tt.expected), len(result), result) + return + } + + for i, expected := range tt.expected { + if i >= len(result) || result[i] != expected { + t.Errorf("Expected property %d to be '%s', got '%s'", i, expected, result[i]) + } + } + }) + } +} + +func TestFindFirstAdditionalProperty(t *testing.T) { + yamlContent := `name: John Doe +age: 30 +invalid_prop: value +tools: + - name: tool1 +another_bad: value2 +permissions: + read: true + invalid_perm: write` + + tests := []struct { + name string + propertyNames []string + expectedLine int + expectedCol int + shouldFind bool + }{ + { + name: "find first property", + propertyNames: []string{"invalid_prop", "another_bad"}, + expectedLine: 3, + expectedCol: 1, + shouldFind: true, + }, + { + name: "find second property when first not found", + propertyNames: []string{"not_exist", "another_bad"}, + expectedLine: 6, + expectedCol: 1, + shouldFind: true, + }, + { + name: "property not found", + propertyNames: []string{"nonexistent", "also_missing"}, + expectedLine: 1, + expectedCol: 1, + shouldFind: false, + }, + { + name: "nested property found", + propertyNames: []string{"invalid_perm"}, + expectedLine: 9, + expectedCol: 3, // Indented + shouldFind: true, + }, + { + name: "empty property list", + propertyNames: []string{}, + expectedLine: 1, + expectedCol: 1, + shouldFind: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := findFirstAdditionalProperty(yamlContent, tt.propertyNames) + + if location.Found != tt.shouldFind { + t.Errorf("Expected Found=%v, got Found=%v", tt.shouldFind, location.Found) + } + + if location.Line != tt.expectedLine { + t.Errorf("Expected Line=%d, got Line=%d", tt.expectedLine, location.Line) + } + + if location.Column != tt.expectedCol { + t.Errorf("Expected Column=%d, got Column=%d", tt.expectedCol, location.Column) + } + }) + } +} + +func 
TestLocateJSONPathInYAMLWithAdditionalProperties(t *testing.T) { + yamlContent := `name: John +age: 25 +invalid_prop: value +another_invalid: value2` + + tests := []struct { + name string + jsonPath string + errorMessage string + expectedLine int + expectedCol int + shouldFind bool + }{ + { + name: "empty path with additional properties", + jsonPath: "", + errorMessage: "at '': additional properties 'invalid_prop', 'another_invalid' not allowed", + expectedLine: 3, + expectedCol: 1, + shouldFind: true, + }, + { + name: "empty path with single additional property", + jsonPath: "", + errorMessage: "at '': additional properties 'another_invalid' not allowed", + expectedLine: 4, + expectedCol: 1, + shouldFind: true, + }, + { + name: "empty path without additional properties message", + jsonPath: "", + errorMessage: "some other error", + expectedLine: 1, + expectedCol: 1, + shouldFind: true, + }, + { + name: "non-empty path should use regular logic", + jsonPath: "/name", + errorMessage: "any message", + expectedLine: 1, + expectedCol: 6, // After "name:" + shouldFind: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := LocateJSONPathInYAMLWithAdditionalProperties(yamlContent, tt.jsonPath, tt.errorMessage) + + if location.Found != tt.shouldFind { + t.Errorf("Expected Found=%v, got Found=%v", tt.shouldFind, location.Found) + } + + if location.Line != tt.expectedLine { + t.Errorf("Expected Line=%d, got Line=%d", tt.expectedLine, location.Line) + } + + if location.Column != tt.expectedCol { + t.Errorf("Expected Column=%d, got Column=%d", tt.expectedCol, location.Column) + } + }) + } +} + +// TestLocateJSONPathInYAMLWithAdditionalPropertiesNested tests the new functionality for nested additional properties +func TestLocateJSONPathInYAMLWithAdditionalPropertiesNested(t *testing.T) { + yamlContent := `name: Test Workflow +on: + push: + branches: [main] + foobar: invalid +permissions: + contents: read + invalid_perm: write +nested: + deeply: + more_nested: true + bad_prop: invalid` + + tests := []struct { + name string + jsonPath string + errorMessage string + expectedLine int + expectedCol int + shouldFind bool + }{ + { + name: "nested additional property under 'on'", + jsonPath: "/on", + errorMessage: "at '/on': additional properties 'foobar' not allowed", + expectedLine: 5, + expectedCol: 3, // Position of 'foobar' + shouldFind: true, + }, + { + name: "nested additional property under 'permissions'", + jsonPath: "/permissions", + errorMessage: "at '/permissions': additional properties 'invalid_perm' not allowed", + expectedLine: 8, + expectedCol: 3, // Position of 'invalid_perm' + shouldFind: true, + }, + { + name: "deeply nested additional property", + jsonPath: "/nested/deeply", + errorMessage: "at '/nested/deeply': additional properties 'bad_prop' not allowed", + expectedLine: 12, + expectedCol: 5, // Position of 'bad_prop' (indented further) + shouldFind: true, + }, + { + name: "multiple additional properties - should find first", + jsonPath: "/on", + errorMessage: "at '/on': additional properties 'foobar', 'another_prop' not allowed", + expectedLine: 5, + expectedCol: 3, // Position of 'foobar' (first one found) + shouldFind: true, + }, + { + name: "non-existent path with additional properties", + jsonPath: "/nonexistent", + errorMessage: "at '/nonexistent': additional properties 'some_prop' not allowed", + expectedLine: 1, // Falls back to global search, which won't find 'some_prop' + expectedCol: 1, + shouldFind: false, + }, + { + name: "nested 
path without additional properties error", + jsonPath: "/on/push", + errorMessage: "at '/on/push': some other validation error", + expectedLine: 3, // Should find the 'push' key location using regular logic + expectedCol: 8, // After "push:" + shouldFind: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := LocateJSONPathInYAMLWithAdditionalProperties(yamlContent, tt.jsonPath, tt.errorMessage) + + if location.Found != tt.shouldFind { + t.Errorf("Expected Found=%v, got Found=%v", tt.shouldFind, location.Found) + } + + if location.Line != tt.expectedLine { + t.Errorf("Expected Line=%d, got Line=%d", tt.expectedLine, location.Line) + } + + if location.Column != tt.expectedCol { + t.Errorf("Expected Column=%d, got Column=%d", tt.expectedCol, location.Column) + } + }) + } +} + +// TestNestedSearchOptimization demonstrates the improved approach of searching within sub-YAML content +func TestNestedSearchOptimization(t *testing.T) { + // Create a complex YAML with many sections to demonstrate the optimization benefit + yamlContent := `name: Complex Workflow +version: "1.0" +# Many top-level properties that should be ignored when searching in nested contexts +global_prop1: value1 +global_prop2: value2 +global_prop3: value3 +global_prop4: value4 +global_prop5: value5 +on: + push: + branches: [main, develop] + pull_request: + branches: [main] + # This is the problematic additional property within the 'on' context + invalid_trigger: not_allowed + workflow_dispatch: {} +permissions: + contents: read + issues: write + # Another additional property within the 'permissions' context + invalid_permission: write +workflow: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 +deeply: + nested: + structure: + with: + many: levels + # Additional property deep in the structure + bad_prop: invalid + valid_prop: good +# More global properties that should be ignored +footer_prop1: value1 +footer_prop2: value2` + + tests := []struct { + name string + jsonPath string + errorMessage string + expectedLine int + expectedCol int + shouldFind bool + }{ + { + name: "find additional property in 'on' section - should not find global properties", + jsonPath: "/on", + errorMessage: "at '/on': additional properties 'invalid_trigger' not allowed", + expectedLine: 15, // Line where 'invalid_trigger' is located + expectedCol: 3, // Column position of 'invalid_trigger' (indented) + shouldFind: true, + }, + { + name: "find additional property in 'permissions' section - should not find on.invalid_trigger", + jsonPath: "/permissions", + errorMessage: "at '/permissions': additional properties 'invalid_permission' not allowed", + expectedLine: 21, // Line where 'invalid_permission' is located + expectedCol: 3, // Column position of 'invalid_permission' (indented) + shouldFind: true, + }, + { + name: "find additional property in deeply nested structure", + jsonPath: "/deeply/nested/structure/with", + errorMessage: "at '/deeply/nested/structure/with': additional properties 'bad_prop' not allowed", + expectedLine: 32, // Line where 'bad_prop' is located + expectedCol: 9, // Column position accounting for deep indentation (4 levels * 2 spaces + 1) + shouldFind: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := LocateJSONPathInYAMLWithAdditionalProperties(yamlContent, tt.jsonPath, tt.errorMessage) + + if location.Found != tt.shouldFind { + t.Errorf("Expected Found=%v, got Found=%v", tt.shouldFind, location.Found) + } + + if location.Line 
!= tt.expectedLine { + t.Errorf("Expected Line=%d, got Line=%d", tt.expectedLine, location.Line) + } + + if location.Column != tt.expectedCol { + t.Errorf("Expected Column=%d, got Column=%d", tt.expectedCol, location.Column) + } + + // Verify that the optimization correctly identified the target property + // by checking that the found location actually contains the expected property name + lines := strings.Split(yamlContent, "\n") + if location.Found && location.Line > 0 && location.Line <= len(lines) { + foundLine := lines[location.Line-1] // Convert to 0-based index + propertyNames := extractAdditionalPropertyNames(tt.errorMessage) + if len(propertyNames) > 0 { + expectedProperty := propertyNames[0] + if !strings.Contains(foundLine, expectedProperty) { + t.Errorf("Found line '%s' does not contain expected property '%s'", + strings.TrimSpace(foundLine), expectedProperty) + } + } + } + }) + } +} + +func TestFindFrontmatterBounds(t *testing.T) { + tests := []struct { + name string + lines []string + expectedStartIdx int + expectedEndIdx int + expectedFrontmatterLines int + }{ + { + name: "normal frontmatter", + lines: []string{ + "---", + "name: test", + "age: 30", + "---", + "# Markdown content", + }, + expectedStartIdx: 0, + expectedEndIdx: 3, + expectedFrontmatterLines: 2, + }, + { + name: "frontmatter with comments before", + lines: []string{ + "# Comment at top", + "", + "---", + "name: test", + "---", + "Content", + }, + expectedStartIdx: 2, + expectedEndIdx: 4, + expectedFrontmatterLines: 1, + }, + { + name: "no frontmatter", + lines: []string{ + "# Just a markdown file", + "Some content", + }, + expectedStartIdx: -1, + expectedEndIdx: -1, + expectedFrontmatterLines: 0, + }, + { + name: "incomplete frontmatter (no closing)", + lines: []string{ + "---", + "name: test", + "Some content without closing", + }, + expectedStartIdx: -1, + expectedEndIdx: -1, + expectedFrontmatterLines: 0, + }, + { + name: "empty frontmatter", + lines: []string{ + "---", + "---", + "Content", + }, + expectedStartIdx: 0, + expectedEndIdx: 1, + expectedFrontmatterLines: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + startIdx, endIdx, frontmatterContent := findFrontmatterBounds(tt.lines) + + if startIdx != tt.expectedStartIdx { + t.Errorf("Expected startIdx=%d, got startIdx=%d", tt.expectedStartIdx, startIdx) + } + + if endIdx != tt.expectedEndIdx { + t.Errorf("Expected endIdx=%d, got endIdx=%d", tt.expectedEndIdx, endIdx) + } + + // Count the lines in frontmatterContent + actualLines := 0 + if frontmatterContent != "" { + actualLines = len(strings.Split(frontmatterContent, "\n")) + } + + if actualLines != tt.expectedFrontmatterLines { + t.Errorf("Expected %d frontmatter lines, got %d", tt.expectedFrontmatterLines, actualLines) + } + }) + } +} diff --git a/pkg/parser/schema.go b/pkg/parser/schema.go index d10fe5e20e..697919b6b9 100644 --- a/pkg/parser/schema.go +++ b/pkg/parser/schema.go @@ -135,27 +135,20 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte if filePath != "" { if content, readErr := os.ReadFile(filePath); readErr == nil { lines := strings.Split(string(content), "\n") - // Look for frontmatter section - if len(lines) > 0 && strings.TrimSpace(lines[0]) == "---" { - // Find the end of frontmatter - endIdx := 1 - for i := 1; i < len(lines); i++ { - if strings.TrimSpace(lines[i]) == "---" { - endIdx = i - break - } - } - // Extract frontmatter content for path resolution - frontmatterLines := lines[1:endIdx] - frontmatterContent = 
strings.Join(frontmatterLines, "\n") - frontmatterStart = 2 // Frontmatter content starts at line 2 - - // Use the frontmatter lines as context (first few lines) - maxLines := min(5, endIdx) - for i := 0; i < maxLines; i++ { - if i < len(lines) { - contextLines = append(contextLines, lines[i]) - } + + // Look for frontmatter section with improved detection + frontmatterStartIdx, frontmatterEndIdx, actualFrontmatterContent := findFrontmatterBounds(lines) + + if frontmatterStartIdx >= 0 && frontmatterEndIdx > frontmatterStartIdx { + frontmatterContent = actualFrontmatterContent + frontmatterStart = frontmatterStartIdx + 2 // +2 because we skip the opening "---" and use 1-based indexing + + // Use the frontmatter section plus a bit of context as context lines + contextStart := max(0, frontmatterStartIdx) + contextEnd := min(len(lines), frontmatterEndIdx+1) + + for i := contextStart; i < contextEnd; i++ { + contextLines = append(contextLines, lines[i]) } } } @@ -175,7 +168,7 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte if len(jsonPaths) > 0 && frontmatterContent != "" { // Use the first error path for the primary error location primaryPath := jsonPaths[0] - location := LocateJSONPathInYAML(frontmatterContent, primaryPath.Path) + location := LocateJSONPathInYAMLWithAdditionalProperties(frontmatterContent, primaryPath.Path, primaryPath.Message) if location.Found { // Adjust line number to account for frontmatter position in file @@ -300,3 +293,48 @@ func validateEngineSpecificRules(frontmatter map[string]any) error { return nil } + +// findFrontmatterBounds finds the start and end indices of frontmatter in file lines +// Returns: startIdx (-1 if not found), endIdx (-1 if not found), frontmatterContent +func findFrontmatterBounds(lines []string) (startIdx int, endIdx int, frontmatterContent string) { + startIdx = -1 + endIdx = -1 + + // Look for the opening "---" + for i, line := range lines { + trimmed := strings.TrimSpace(line) + if trimmed == "---" { + startIdx = i + break + } + // Skip empty lines and comments at the beginning + if trimmed != "" && !strings.HasPrefix(trimmed, "#") { + // Found non-empty, non-comment line before "---" - no frontmatter + return -1, -1, "" + } + } + + if startIdx == -1 { + return -1, -1, "" + } + + // Look for the closing "---" + for i := startIdx + 1; i < len(lines); i++ { + trimmed := strings.TrimSpace(lines[i]) + if trimmed == "---" { + endIdx = i + break + } + } + + if endIdx == -1 { + // No closing "---" found + return -1, -1, "" + } + + // Extract frontmatter content between the markers + frontmatterLines := lines[startIdx+1 : endIdx] + frontmatterContent = strings.Join(frontmatterLines, "\n") + + return startIdx, endIdx, frontmatterContent +} diff --git a/pkg/parser/schemas/main_workflow_schema.json b/pkg/parser/schemas/main_workflow_schema.json index 7a3dc06072..b75876f687 100644 --- a/pkg/parser/schemas/main_workflow_schema.json +++ b/pkg/parser/schemas/main_workflow_schema.json @@ -43,7 +43,7 @@ "additionalProperties": false, "properties": { "branches": { - "type": "array", + "type": "array", "description": "Branches to filter on", "items": { "type": "string" @@ -98,7 +98,7 @@ } }, "branches": { - "type": "array", + "type": "array", "description": "Branches to filter on", "items": { "type": "string" @@ -142,7 +142,24 @@ "description": "Types of issue events", "items": { "type": "string", - "enum": ["opened", "edited", "deleted", "transferred", "pinned", "unpinned", "closed", "reopened", "assigned", "unassigned", 
"labeled", "unlabeled", "locked", "unlocked", "milestoned", "demilestoned"] + "enum": [ + "opened", + "edited", + "deleted", + "transferred", + "pinned", + "unpinned", + "closed", + "reopened", + "assigned", + "unassigned", + "labeled", + "unlabeled", + "locked", + "unlocked", + "milestoned", + "demilestoned" + ] } } } @@ -157,7 +174,11 @@ "description": "Types of issue comment events", "items": { "type": "string", - "enum": ["created", "edited", "deleted"] + "enum": [ + "created", + "edited", + "deleted" + ] } } } @@ -173,7 +194,9 @@ "description": "Cron expression for schedule" } }, - "required": ["cron"], + "required": [ + "cron" + ], "additionalProperties": false } }, @@ -209,7 +232,11 @@ }, "type": { "type": "string", - "enum": ["string", "choice", "boolean"], + "enum": [ + "string", + "choice", + "boolean" + ], "description": "Input type" }, "options": { @@ -243,7 +270,10 @@ "description": "Types of workflow run events", "items": { "type": "string", - "enum": ["completed", "requested"] + "enum": [ + "completed", + "requested" + ] } }, "branches": { @@ -272,7 +302,15 @@ "description": "Types of release events", "items": { "type": "string", - "enum": ["published", "unpublished", "created", "edited", "deleted", "prereleased", "released"] + "enum": [ + "published", + "unpublished", + "created", + "edited", + "deleted", + "prereleased", + "released" + ] } } } @@ -287,7 +325,11 @@ "description": "Types of pull request review comment events", "items": { "type": "string", - "enum": ["created", "edited", "deleted"] + "enum": [ + "created", + "edited", + "deleted" + ] } } } @@ -298,7 +340,16 @@ }, "reaction": { "type": "string", - "enum": ["+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", "eyes"], + "enum": [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes" + ], "description": "AI reaction to add/remove on triggering item (one of: +1, -1, laugh, confused, heart, hooray, rocket, eyes). Defaults to 'eyes' if not specified." 
} }, @@ -311,7 +362,12 @@ "oneOf": [ { "type": "string", - "enum": ["read-all", "write-all", "read", "write"], + "enum": [ + "read-all", + "write-all", + "read", + "write" + ], "description": "Simple permissions string" }, { @@ -321,63 +377,122 @@ "properties": { "actions": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "attestations": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "checks": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "contents": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "deployments": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "discussions": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "id-token": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "issues": { - "type": "string", - "enum": ["read", "write", "none"] + "type": "string", + "enum": [ + "read", + "write", + "none" + ] }, "models": { "type": "string", - "enum": ["read", "none"] + "enum": [ + "read", + "none" + ] }, "packages": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "pages": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "pull-requests": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "repository-projects": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "security-events": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] }, "statuses": { "type": "string", - "enum": ["read", "write", "none"] + "enum": [ + "read", + "write", + "none" + ] } } } @@ -577,7 +692,9 @@ "description": "Cancel in-progress jobs in the same concurrency group" } }, - "required": ["group"] + "required": [ + "group" + ] } ] }, @@ -600,7 +717,9 @@ "oneOf": [ { "type": "string", - "enum": ["defaults"], + "enum": [ + "defaults" + ], "description": "Use default network permissions (currently full network access, will change later)" }, { @@ -675,7 +794,10 @@ "oneOf": [ { "type": "string", - "enum": ["claude", "codex"], + "enum": [ + "claude", + "codex" + ], "description": "Simple engine name (claude or codex)" }, { @@ -684,7 +806,10 @@ "properties": { "id": { "type": "string", - "enum": ["claude", "codex"], + "enum": [ + "claude", + "codex" + ], "description": "Agent CLI identifier (claude or codex)" }, "version": { @@ -692,15 +817,24 @@ "description": "Optional version of the action" }, "model": { - "type": "string", + "type": "string", "description": "Optional LLM model to use" }, "max-turns": { "type": "integer", "description": "Maximum number of chat iterations per run" + }, + "env": { + "type": "object", + "description": "Custom environment variables to pass to the agentic engine", + "additionalProperties": { + "type": "string" + } } }, - "required": ["id"], + "required": [ + "id" + ], "additionalProperties": false } ] @@ -912,7 +1046,10 @@ "description": "If true, only checks if cache entry exists and skips download" } }, - "required": ["key", "path"], + "required": [ + "key", + "path" + ], "additionalProperties": false }, { @@ -968,7 +1105,10 @@ 
"description": "If true, only checks if cache entry exists and skips download" } }, - "required": ["key", "path"], + "required": [ + "key", + "path" + ], "additionalProperties": false } } @@ -1116,7 +1256,10 @@ "side": { "type": "string", "description": "Side of the diff for comments: 'LEFT' or 'RIGHT' (default: 'RIGHT')", - "enum": ["LEFT", "RIGHT"] + "enum": [ + "LEFT", + "RIGHT" + ] } }, "additionalProperties": false @@ -1214,6 +1357,26 @@ "additionalProperties": false } ] + }, + "missing-tool": { + "oneOf": [ + { + "type": "object", + "description": "Configuration for reporting missing tools from agentic workflow output", + "properties": { + "max": { + "type": "integer", + "description": "Maximum number of missing tool reports (default: unlimited)", + "minimum": 1 + } + }, + "additionalProperties": false + }, + { + "type": "null", + "description": "Enable missing tool reporting with default configuration" + } + ] } }, "additionalProperties": false diff --git a/pkg/workflow/claude_engine.go b/pkg/workflow/claude_engine.go index a4cfc8f9bc..29f293c928 100644 --- a/pkg/workflow/claude_engine.go +++ b/pkg/workflow/claude_engine.go @@ -65,12 +65,22 @@ func (e *ClaudeEngine) GetExecutionConfig(workflowName string, logFile string, e actionVersion = engineConfig.Version } - // Build claude_env based on hasOutput parameter + // Build claude_env based on hasOutput parameter and custom env vars claudeEnv := "" if hasOutput { claudeEnv += " GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}" } + // Add custom environment variables from engine config + if engineConfig != nil && len(engineConfig.Env) > 0 { + for key, value := range engineConfig.Env { + if claudeEnv != "" { + claudeEnv += "\n" + } + claudeEnv += " " + key + ": " + value + } + } + inputs := map[string]string{ "prompt_file": "/tmp/aw-prompts/prompt.txt", "anthropic_api_key": "${{ secrets.ANTHROPIC_API_KEY }}", diff --git a/pkg/workflow/codex_engine.go b/pkg/workflow/codex_engine.go index 2e6b4d0190..3c44053b3b 100644 --- a/pkg/workflow/codex_engine.go +++ b/pkg/workflow/codex_engine.go @@ -75,6 +75,13 @@ codex exec \ env["GITHUB_AW_SAFE_OUTPUTS"] = "${{ env.GITHUB_AW_SAFE_OUTPUTS }}" } + // Add custom environment variables from engine config + if engineConfig != nil && len(engineConfig.Env) > 0 { + for key, value := range engineConfig.Env { + env[key] = value + } + } + return ExecutionConfig{ StepName: "Run Codex", Command: command, diff --git a/pkg/workflow/compiler.go b/pkg/workflow/compiler.go index 42d4168219..e01328e529 100644 --- a/pkg/workflow/compiler.go +++ b/pkg/workflow/compiler.go @@ -152,6 +152,7 @@ type SafeOutputsConfig struct { AddIssueLabels *AddIssueLabelsConfig `yaml:"add-issue-label,omitempty"` UpdateIssues *UpdateIssuesConfig `yaml:"update-issue,omitempty"` PushToBranch *PushToBranchConfig `yaml:"push-to-branch,omitempty"` + MissingTool *MissingToolConfig `yaml:"missing-tool,omitempty"` // Optional for reporting missing functionality AllowedDomains []string `yaml:"allowed-domains,omitempty"` } @@ -215,6 +216,11 @@ type PushToBranchConfig struct { Target string `yaml:"target,omitempty"` // Target for push-to-branch: like add-issue-comment but for pull requests } +// MissingToolConfig holds configuration for reporting missing tools or functionality +type MissingToolConfig struct { + Max int `yaml:"max,omitempty"` // Maximum number of missing tool reports (default: unlimited) +} + // CompileWorkflow converts a markdown workflow to GitHub Actions YAML func (c *Compiler) CompileWorkflow(markdownPath string) error 
{ @@ -1799,6 +1805,17 @@ func (c *Compiler) buildJobs(data *WorkflowData) error { return fmt.Errorf("failed to add push_to_branch job: %w", err) } } + + // Build missing_tool job (always enabled when SafeOutputs exists) + if data.SafeOutputs.MissingTool != nil { + missingToolJob, err := c.buildCreateOutputMissingToolJob(data, jobName) + if err != nil { + return fmt.Errorf("failed to build missing_tool job: %w", err) + } + if err := c.jobManager.AddJob(missingToolJob); err != nil { + return fmt.Errorf("failed to add missing_tool job: %w", err) + } + } } // Build additional custom jobs from frontmatter jobs section if err := c.buildCustomJobs(data); err != nil { @@ -2759,7 +2776,15 @@ func (c *Compiler) generatePrompt(yaml *strings.Builder, data *WorkflowData, eng yaml.WriteString(", ") } yaml.WriteString("Pushing Changes to Branch") + written = true + } + + // Missing-tool is always available + if written { + yaml.WriteString(", ") } + yaml.WriteString("Reporting Missing Tools or Functionality") + yaml.WriteString("\n") yaml.WriteString(" \n") yaml.WriteString(" **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file \"${{ env.GITHUB_AW_SAFE_OUTPUTS }}\". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them.\n") @@ -2867,6 +2892,22 @@ func (c *Compiler) generatePrompt(yaml *strings.Builder, data *WorkflowData, eng yaml.WriteString(" \n") } + // Missing-tool instructions are only included when configured + if data.SafeOutputs.MissingTool != nil { + yaml.WriteString(" **Reporting Missing Tools or Functionality**\n") + yaml.WriteString(" \n") + yaml.WriteString(" If you need to use a tool or functionality that is not available to complete your task:\n") + yaml.WriteString(" 1. Write an entry to \"${{ env.GITHUB_AW_SAFE_OUTPUTS }}\":\n") + yaml.WriteString(" ```json\n") + yaml.WriteString(" {\"type\": \"missing-tool\", \"tool\": \"tool-name\", \"reason\": \"Why this tool is needed\", \"alternatives\": \"Suggested alternatives or workarounds\"}\n") + yaml.WriteString(" ```\n") + yaml.WriteString(" 2. The `tool` field should specify the name or type of missing functionality\n") + yaml.WriteString(" 3. The `reason` field should explain why this tool/functionality is required to complete the task\n") + yaml.WriteString(" 4. The `alternatives` field is optional but can suggest workarounds or alternative approaches\n") + yaml.WriteString(" 5. After you write to that file, read it as JSONL and check it is valid. 
If it isn't, make any necessary corrections to it to fix it up\n") + yaml.WriteString(" \n") + } + yaml.WriteString(" **Example JSONL file content:**\n") yaml.WriteString(" ```\n") @@ -2893,6 +2934,12 @@ func (c *Compiler) generatePrompt(yaml *strings.Builder, data *WorkflowData, eng exampleCount++ } + // Include missing-tool example only when configured + if data.SafeOutputs.MissingTool != nil { + yaml.WriteString(" {\"type\": \"missing-tool\", \"tool\": \"docker\", \"reason\": \"Need Docker to build container images\", \"alternatives\": \"Could use GitHub Actions build instead\"}\n") + exampleCount++ + } + // If no SafeOutputs are enabled, show a generic example if exampleCount == 0 { yaml.WriteString(" # No safe outputs configured for this workflow\n") @@ -2950,9 +2997,11 @@ func (c *Compiler) extractJobsFromFrontmatter(frontmatter map[string]any) map[st // extractSafeOutputsConfig extracts output configuration from frontmatter func (c *Compiler) extractSafeOutputsConfig(frontmatter map[string]any) *SafeOutputsConfig { + var config *SafeOutputsConfig + if output, exists := frontmatter["safe-outputs"]; exists { if outputMap, ok := output.(map[string]any); ok { - config := &SafeOutputsConfig{} + config = &SafeOutputsConfig{} // Handle create-issue issuesConfig := c.parseIssuesConfig(outputMap) @@ -3058,10 +3107,15 @@ func (c *Compiler) extractSafeOutputsConfig(frontmatter map[string]any) *SafeOut config.PushToBranch = pushToBranchConfig } - return config + // Handle missing-tool (parse configuration if present) + missingToolConfig := c.parseMissingToolConfig(outputMap) + if missingToolConfig != nil { + config.MissingTool = missingToolConfig + } } } - return nil + + return config } // parseIssuesConfig handles create-issue configuration @@ -3335,6 +3389,48 @@ func (c *Compiler) parsePushToBranchConfig(outputMap map[string]any) *PushToBran return nil } +// parseMissingToolConfig handles missing-tool configuration +func (c *Compiler) parseMissingToolConfig(outputMap map[string]any) *MissingToolConfig { + if configData, exists := outputMap["missing-tool"]; exists { + missingToolConfig := &MissingToolConfig{} // Default: no max limit + + // Handle the case where configData is nil (missing-tool: with no value) + if configData == nil { + return missingToolConfig + } + + if configMap, ok := configData.(map[string]any); ok { + // Parse max (optional) + if max, exists := configMap["max"]; exists { + // Handle different numeric types that YAML parsers might return + var maxInt int + var validMax bool + switch v := max.(type) { + case int: + maxInt = v + validMax = true + case int64: + maxInt = int(v) + validMax = true + case uint64: + maxInt = int(v) + validMax = true + case float64: + maxInt = int(v) + validMax = true + } + if validMax { + missingToolConfig.Max = maxInt + } + } + } + + return missingToolConfig + } + + return nil +} + // buildCustomJobs creates custom jobs defined in the frontmatter jobs section func (c *Compiler) buildCustomJobs(data *WorkflowData) error { for jobName, jobConfig := range data.Jobs { @@ -3509,10 +3605,36 @@ func (c *Compiler) generateEngineExecutionSteps(yaml *strings.Builder, data *Wor } } } - // Add environment section to pass GITHUB_AW_SAFE_OUTPUTS to the action only if safe-outputs feature is used - if data.SafeOutputs != nil { + // Add environment section for safe-outputs and custom env vars + hasEnvSection := data.SafeOutputs != nil || (data.EngineConfig != nil && len(data.EngineConfig.Env) > 0) + if hasEnvSection { yaml.WriteString(" env:\n") - 
yaml.WriteString(" GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}\n") + + // Add GITHUB_AW_SAFE_OUTPUTS if safe-outputs feature is used + if data.SafeOutputs != nil { + yaml.WriteString(" GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}\n") + } + + // Add custom environment variables from engine config + if data.EngineConfig != nil && len(data.EngineConfig.Env) > 0 { + for _, envVar := range data.EngineConfig.Env { + // Parse environment variable in format "KEY=value" or "KEY: value" + parts := strings.SplitN(envVar, "=", 2) + if len(parts) == 2 { + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + fmt.Fprintf(yaml, " %s: %s\n", key, value) + } else { + // Try "KEY: value" format + parts = strings.SplitN(envVar, ":", 2) + if len(parts) == 2 { + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + fmt.Fprintf(yaml, " %s: %s\n", key, value) + } + } + } + } } yaml.WriteString(" - name: Capture Agentic Action logs\n") yaml.WriteString(" if: always()\n") @@ -3649,6 +3771,15 @@ func (c *Compiler) generateOutputCollectionStep(yaml *strings.Builder, data *Wor } safeOutputsConfig["push-to-branch"] = pushToBranchConfig } + if data.SafeOutputs.MissingTool != nil { + missingToolConfig := map[string]interface{}{ + "enabled": true, + } + if data.SafeOutputs.MissingTool.Max > 0 { + missingToolConfig["max"] = data.SafeOutputs.MissingTool.Max + } + safeOutputsConfig["missing-tool"] = missingToolConfig + } // Convert to JSON string for environment variable configJSON, _ := json.Marshal(safeOutputsConfig) diff --git a/pkg/workflow/compiler_test.go b/pkg/workflow/compiler_test.go index 8aee28a231..69a4d5073a 100644 --- a/pkg/workflow/compiler_test.go +++ b/pkg/workflow/compiler_test.go @@ -3546,7 +3546,7 @@ Test workflow with reaction. } } - // Verify three jobs are created (task, add_reaction, main) + // Verify two jobs are created (add_reaction, main) - missing_tool is not auto-created jobCount := strings.Count(yamlContent, "runs-on: ubuntu-latest") if jobCount != 2 { t.Errorf("Expected 2 jobs (add_reaction, main), found %d", jobCount) @@ -3618,10 +3618,10 @@ Test workflow without explicit reaction (should not create reaction action). 
} } - // Verify only two jobs are created (task and main, no add_reaction) + // Verify only one job is created (main) - missing_tool is not auto-created jobCount := strings.Count(yamlContent, "runs-on: ubuntu-latest") if jobCount != 1 { - t.Errorf("Expected 1 jobs (main), found %d", jobCount) + t.Errorf("Expected 1 job (main), found %d", jobCount) } } diff --git a/pkg/workflow/engine.go b/pkg/workflow/engine.go index a3528f3dde..adaae223a1 100644 --- a/pkg/workflow/engine.go +++ b/pkg/workflow/engine.go @@ -10,6 +10,7 @@ type EngineConfig struct { Version string Model string MaxTurns string + Env map[string]string } // NetworkPermissions represents network access permissions @@ -68,6 +69,18 @@ func (c *Compiler) extractEngineConfig(frontmatter map[string]any) (string, *Eng } } + // Extract optional 'env' field (object/map of strings) + if env, hasEnv := engineObj["env"]; hasEnv { + if envMap, ok := env.(map[string]any); ok { + config.Env = make(map[string]string) + for key, value := range envMap { + if valueStr, ok := value.(string); ok { + config.Env[key] = valueStr + } + } + } + } + // Return the ID as the engineSetting for backwards compatibility return config.ID, config } diff --git a/pkg/workflow/engine_config_test.go b/pkg/workflow/engine_config_test.go index 36709c5eba..5ec9d1bcf7 100644 --- a/pkg/workflow/engine_config_test.go +++ b/pkg/workflow/engine_config_test.go @@ -102,6 +102,37 @@ func TestExtractEngineConfig(t *testing.T) { expectedEngineSetting: "claude", expectedConfig: &EngineConfig{ID: "claude", Version: "beta", Model: "claude-3-5-sonnet-20241022", MaxTurns: "10"}, }, + { + name: "object format - with env vars", + frontmatter: map[string]any{ + "engine": map[string]any{ + "id": "claude", + "env": map[string]any{ + "CUSTOM_VAR": "value1", + "ANOTHER_VAR": "${{ secrets.SECRET_VAR }}", + }, + }, + }, + expectedEngineSetting: "claude", + expectedConfig: &EngineConfig{ID: "claude", Env: map[string]string{"CUSTOM_VAR": "value1", "ANOTHER_VAR": "${{ secrets.SECRET_VAR }}"}}, + }, + { + name: "object format - complete with env vars", + frontmatter: map[string]any{ + "engine": map[string]any{ + "id": "claude", + "version": "beta", + "model": "claude-3-5-sonnet-20241022", + "max-turns": 5, + "env": map[string]any{ + "AWS_REGION": "us-west-2", + "API_ENDPOINT": "https://api.example.com", + }, + }, + }, + expectedEngineSetting: "claude", + expectedConfig: &EngineConfig{ID: "claude", Version: "beta", Model: "claude-3-5-sonnet-20241022", MaxTurns: "5", Env: map[string]string{"AWS_REGION": "us-west-2", "API_ENDPOINT": "https://api.example.com"}}, + }, { name: "object format - missing id", frontmatter: map[string]any{ @@ -148,6 +179,18 @@ func TestExtractEngineConfig(t *testing.T) { if config.MaxTurns != test.expectedConfig.MaxTurns { t.Errorf("Expected config.MaxTurns '%s', got '%s'", test.expectedConfig.MaxTurns, config.MaxTurns) } + + if len(config.Env) != len(test.expectedConfig.Env) { + t.Errorf("Expected config.Env length %d, got %d", len(test.expectedConfig.Env), len(config.Env)) + } else { + for key, expectedValue := range test.expectedConfig.Env { + if actualValue, exists := config.Env[key]; !exists { + t.Errorf("Expected config.Env to contain key '%s'", key) + } else if actualValue != expectedValue { + t.Errorf("Expected config.Env['%s'] = '%s', got '%s'", key, expectedValue, actualValue) + } + } + } } }) } @@ -321,6 +364,85 @@ func TestEngineConfigurationWithModel(t *testing.T) { } } +func TestEngineConfigurationWithCustomEnvVars(t *testing.T) { + tests := []struct { + name 
string + engine AgenticEngine + engineConfig *EngineConfig + hasOutput bool + }{ + { + name: "Claude with custom env vars", + engine: NewClaudeEngine(), + engineConfig: &EngineConfig{ + ID: "claude", + Env: map[string]string{"AWS_REGION": "us-west-2", "CUSTOM_VAR": "${{ secrets.MY_SECRET }}"}, + }, + hasOutput: false, + }, + { + name: "Claude with custom env vars and output", + engine: NewClaudeEngine(), + engineConfig: &EngineConfig{ + ID: "claude", + Env: map[string]string{"API_ENDPOINT": "https://api.example.com", "DEBUG_MODE": "true"}, + }, + hasOutput: true, + }, + { + name: "Codex with custom env vars", + engine: NewCodexEngine(), + engineConfig: &EngineConfig{ + ID: "codex", + Env: map[string]string{"CUSTOM_API_KEY": "test123", "PROXY_URL": "http://proxy.example.com"}, + }, + hasOutput: false, + }, + { + name: "Codex with custom env vars and output", + engine: NewCodexEngine(), + engineConfig: &EngineConfig{ + ID: "codex", + Env: map[string]string{"ENVIRONMENT": "production", "LOG_LEVEL": "debug"}, + }, + hasOutput: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := tt.engine.GetExecutionConfig("test-workflow", "test-log", tt.engineConfig, nil, tt.hasOutput) + + switch tt.engine.GetID() { + case "claude": + // For Claude, custom env vars should be in claude_env input + if claudeEnv, exists := config.Inputs["claude_env"]; exists { + for key, value := range tt.engineConfig.Env { + expectedEntry := key + ": " + value + if !strings.Contains(claudeEnv, expectedEntry) { + t.Errorf("Expected claude_env to contain '%s', got: %s", expectedEntry, claudeEnv) + } + } + } else if len(tt.engineConfig.Env) > 0 { + t.Error("Expected claude_env input to be present when custom env vars are defined") + } + + case "codex": + // For Codex, custom env vars should be in Environment field + if tt.engineConfig != nil && len(tt.engineConfig.Env) > 0 { + for key, expectedValue := range tt.engineConfig.Env { + if actualValue, exists := config.Environment[key]; !exists { + t.Errorf("Expected Environment to contain key '%s'", key) + } else if actualValue != expectedValue { + t.Errorf("Expected Environment['%s'] to be '%s', got '%s'", key, expectedValue, actualValue) + } + } + } + } + }) + } +} + func TestNilEngineConfig(t *testing.T) { engines := []AgenticEngine{ NewClaudeEngine(), diff --git a/pkg/workflow/output_missing_tool.go b/pkg/workflow/output_missing_tool.go new file mode 100644 index 0000000000..5e1b699c53 --- /dev/null +++ b/pkg/workflow/output_missing_tool.go @@ -0,0 +1,135 @@ +package workflow + +import ( + "fmt" +) + +// buildCreateOutputMissingToolJob creates the missing_tool job +func (c *Compiler) buildCreateOutputMissingToolJob(data *WorkflowData, mainJobName string) (*Job, error) { + if data.SafeOutputs == nil || data.SafeOutputs.MissingTool == nil { + return nil, fmt.Errorf("safe-outputs.missing-tool configuration is required") + } + + var steps []string + steps = append(steps, " - name: Record Missing Tool\n") + steps = append(steps, " id: missing_tool\n") + steps = append(steps, " uses: actions/github-script@v7\n") + + // Add environment variables + steps = append(steps, " env:\n") + // Pass the agent output content from the main job + steps = append(steps, fmt.Sprintf(" GITHUB_AW_AGENT_OUTPUT: ${{ needs.%s.outputs.output }}\n", mainJobName)) + + // Pass the max configuration if set + if data.SafeOutputs.MissingTool.Max > 0 { + steps = append(steps, fmt.Sprintf(" GITHUB_AW_MISSING_TOOL_MAX: %d\n", data.SafeOutputs.MissingTool.Max)) + } + + steps 
= append(steps, " with:\n") + steps = append(steps, " script: |\n") + + // Add each line of the script with proper indentation + formattedScript := FormatJavaScriptForYAML(missingToolScript) + steps = append(steps, formattedScript...) + + // Create outputs for the job + outputs := map[string]string{ + "tools_reported": "${{ steps.missing_tool.outputs.tools_reported }}", + "total_count": "${{ steps.missing_tool.outputs.total_count }}", + } + + // Create the job + job := &Job{ + Name: "missing_tool", + RunsOn: "runs-on: ubuntu-latest", + If: "if: ${{ always() }}", // Always run to capture missing tools + Permissions: "permissions:\n contents: read", // Only needs read access for logging + TimeoutMinutes: 5, // Short timeout since it's just processing output + Steps: steps, + Outputs: outputs, + Depends: []string{mainJobName}, // Depend on the main workflow job + } + + return job, nil +} + +// missingToolScript is the JavaScript code that processes missing-tool output +const missingToolScript = ` +const fs = require('fs'); +const path = require('path'); + +// Get environment variables +const agentOutput = process.env.GITHUB_AW_AGENT_OUTPUT || ''; +const maxReports = process.env.GITHUB_AW_MISSING_TOOL_MAX ? parseInt(process.env.GITHUB_AW_MISSING_TOOL_MAX) : null; + +console.log('Processing missing-tool reports...'); +console.log('Agent output length:', agentOutput.length); +if (maxReports) { + console.log('Maximum reports allowed:', maxReports); +} + +const missingTools = []; + +if (agentOutput.trim()) { + const lines = agentOutput.split('\n').filter(line => line.trim()); + + for (const line of lines) { + try { + const entry = JSON.parse(line); + + if (entry.type === 'missing-tool') { + // Validate required fields + if (!entry.tool) { + console.log('Warning: missing-tool entry missing "tool" field:', line); + continue; + } + if (!entry.reason) { + console.log('Warning: missing-tool entry missing "reason" field:', line); + continue; + } + + const missingTool = { + tool: entry.tool, + reason: entry.reason, + alternatives: entry.alternatives || null, + timestamp: new Date().toISOString() + }; + + missingTools.push(missingTool); + console.log('Recorded missing tool:', missingTool.tool); + + // Check max limit + if (maxReports && missingTools.length >= maxReports) { + console.log('Reached maximum number of missing tool reports (${maxReports})'); + break; + } + } + } catch (error) { + console.log('Warning: Failed to parse line as JSON:', line); + console.log('Parse error:', error.message); + } + } +} + +console.log('Total missing tools reported:', missingTools.length); + +// Output results +core.setOutput('tools_reported', JSON.stringify(missingTools)); +core.setOutput('total_count', missingTools.length.toString()); + +// Log details for debugging +if (missingTools.length > 0) { + console.log('Missing tools summary:'); + missingTools.forEach((tool, index) => { + console.log('${index + 1}. 
Tool: ' + tool.tool);
+    console.log('   Reason: ' + tool.reason);
+    if (tool.alternatives) {
+      console.log('   Alternatives: ' + tool.alternatives);
+    }
+    console.log('   Reported at: ' + tool.timestamp);
+    console.log('');
+  });
+} else {
+  console.log('No missing tools reported in this workflow execution.');
+}
+`
diff --git a/pkg/workflow/output_missing_tool_test.go b/pkg/workflow/output_missing_tool_test.go
new file mode 100644
index 0000000000..40d5610af4
--- /dev/null
+++ b/pkg/workflow/output_missing_tool_test.go
@@ -0,0 +1,247 @@
+package workflow
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestMissingToolSafeOutput(t *testing.T) {
+	tests := []struct {
+		name         string
+		frontmatter  map[string]any
+		expectConfig bool
+		expectJob    bool
+		expectMax    int
+	}{
+		{
+			name:         "No safe-outputs config should NOT enable missing-tool by default",
+			frontmatter:  map[string]any{"name": "Test"},
+			expectConfig: false,
+			expectJob:    false,
+			expectMax:    0,
+		},
+		{
+			name: "Explicit missing-tool config with max",
+			frontmatter: map[string]any{
+				"name": "Test",
+				"safe-outputs": map[string]any{
+					"missing-tool": map[string]any{
+						"max": 5,
+					},
+				},
+			},
+			expectConfig: true,
+			expectJob:    true,
+			expectMax:    5,
+		},
+		{
+			name: "Missing-tool with other safe outputs",
+			frontmatter: map[string]any{
+				"name": "Test",
+				"safe-outputs": map[string]any{
+					"create-issue": nil,
+					"missing-tool": nil,
+				},
+			},
+			expectConfig: true,
+			expectJob:    true,
+			expectMax:    0,
+		},
+		{
+			name: "Empty missing-tool config",
+			frontmatter: map[string]any{
+				"name": "Test",
+				"safe-outputs": map[string]any{
+					"missing-tool": nil,
+				},
+			},
+			expectConfig: true,
+			expectJob:    true,
+			expectMax:    0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			compiler := NewCompiler(false, "", "test")
+
+			// Extract safe outputs config
+			safeOutputs := compiler.extractSafeOutputsConfig(tt.frontmatter)
+
+			// Verify config expectations
+			if tt.expectConfig {
+				if safeOutputs == nil {
+					t.Fatal("Expected SafeOutputsConfig to be created, but it was nil")
+				}
+				if safeOutputs.MissingTool == nil {
+					t.Fatal("Expected MissingTool config to be enabled, but it was nil")
+				}
+				if safeOutputs.MissingTool.Max != tt.expectMax {
+					t.Errorf("Expected max to be %d, got %d", tt.expectMax, safeOutputs.MissingTool.Max)
+				}
+			} else {
+				if safeOutputs != nil && safeOutputs.MissingTool != nil {
+					t.Error("Expected MissingTool config to be nil, but it was not")
+				}
+			}
+
+			// Test job creation
+			if tt.expectJob {
+				if safeOutputs == nil || safeOutputs.MissingTool == nil {
+					t.Error("Expected SafeOutputs and MissingTool config to exist for job creation test")
+				} else {
+					job, err := compiler.buildCreateOutputMissingToolJob(&WorkflowData{
+						SafeOutputs: safeOutputs,
+					}, "main-job")
+					if err != nil {
+						t.Errorf("Failed to build missing tool job: %v", err)
+					}
+					if job == nil {
+						t.Error("Expected job to be created, but it was nil")
+					}
+					if job != nil {
+						if job.Name != "missing_tool" {
+							t.Errorf("Expected job name to be 'missing_tool', got '%s'", job.Name)
+						}
+						if len(job.Depends) != 1 || job.Depends[0] != "main-job" {
+							t.Errorf("Expected job to depend on 'main-job', got %v", job.Depends)
+						}
+					}
+				}
+			}
+		})
+	}
+}
+
+func TestMissingToolPromptGeneration(t *testing.T) {
+	compiler := NewCompiler(false, "", "test")
+
+	// Create workflow data with missing-tool enabled
+	data := &WorkflowData{
+		MarkdownContent: "Test workflow content",
+		SafeOutputs: &SafeOutputsConfig{
+			MissingTool: &MissingToolConfig{Max: 10},
+		},
+	}
+
+	var 
yaml strings.Builder + compiler.generatePrompt(&yaml, data, &ClaudeEngine{}) + + output := yaml.String() + + // Check that missing-tool is mentioned in the header + if !strings.Contains(output, "Reporting Missing Tools or Functionality") { + t.Error("Expected 'Reporting Missing Tools or Functionality' in prompt header") + } + + // Check that missing-tool instructions are present + if !strings.Contains(output, "**Reporting Missing Tools or Functionality**") { + t.Error("Expected missing-tool instructions section") + } + + // Check for JSON format example + if !strings.Contains(output, `"type": "missing-tool"`) { + t.Error("Expected missing-tool JSON example") + } + + // Check for required fields documentation + if !strings.Contains(output, `"tool":`) { + t.Error("Expected tool field documentation") + } + if !strings.Contains(output, `"reason":`) { + t.Error("Expected reason field documentation") + } + if !strings.Contains(output, `"alternatives":`) { + t.Error("Expected alternatives field documentation") + } + + // Check that the example is included in JSONL examples + if !strings.Contains(output, `{"type": "missing-tool", "tool": "docker"`) { + t.Error("Expected missing-tool example in JSONL section") + } +} + +func TestMissingToolNotEnabledByDefault(t *testing.T) { + compiler := NewCompiler(false, "", "test") + + // Test with completely empty frontmatter + emptyFrontmatter := map[string]any{} + safeOutputs := compiler.extractSafeOutputsConfig(emptyFrontmatter) + + if safeOutputs != nil && safeOutputs.MissingTool != nil { + t.Error("Expected MissingTool to not be enabled by default with empty frontmatter") + } + + // Test with frontmatter that has other content but no safe-outputs + frontmatterWithoutSafeOutputs := map[string]any{ + "name": "Test Workflow", + "on": map[string]any{"workflow_dispatch": nil}, + } + safeOutputs = compiler.extractSafeOutputsConfig(frontmatterWithoutSafeOutputs) + + if safeOutputs != nil && safeOutputs.MissingTool != nil { + t.Error("Expected MissingTool to not be enabled by default without safe-outputs section") + } +} + +func TestMissingToolConfigParsing(t *testing.T) { + compiler := NewCompiler(false, "", "test") + + tests := []struct { + name string + configData map[string]any + expectMax int + expectError bool + }{ + { + name: "Empty config", + configData: map[string]any{"missing-tool": nil}, + expectMax: 0, + }, + { + name: "Config with max as int", + configData: map[string]any{ + "missing-tool": map[string]any{"max": 5}, + }, + expectMax: 5, + }, + { + name: "Config with max as float64 (from YAML)", + configData: map[string]any{ + "missing-tool": map[string]any{"max": float64(10)}, + }, + expectMax: 10, + }, + { + name: "Config with max as int64", + configData: map[string]any{ + "missing-tool": map[string]any{"max": int64(15)}, + }, + expectMax: 15, + }, + { + name: "No missing-tool key", + configData: map[string]any{}, + expectMax: -1, // Indicates nil config + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := compiler.parseMissingToolConfig(tt.configData) + + if tt.expectMax == -1 { + if config != nil { + t.Error("Expected nil config when missing-tool key is absent") + } + } else { + if config == nil { + t.Fatal("Expected non-nil config") + } + if config.Max != tt.expectMax { + t.Errorf("Expected max %d, got %d", tt.expectMax, config.Max) + } + } + }) + } +} diff --git a/pkg/workflow/output_test.go b/pkg/workflow/output_test.go index 086604a3ec..f4729d8ce6 100644 --- a/pkg/workflow/output_test.go +++ 
b/pkg/workflow/output_test.go @@ -109,9 +109,9 @@ This workflow has no output configuration. t.Fatalf("Unexpected error parsing workflow without output config: %v", err) } - // Verify output configuration is nil + // Verify output configuration is nil when not specified if workflowData.SafeOutputs != nil { - t.Error("Expected output configuration to be nil when not specified") + t.Error("Expected SafeOutputs to be nil when not configured") } }
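
For reference, a minimal sketch of the workflow frontmatter that would exercise the two new features introduced above: `engine.env` (a string map passed through to the agentic engine) and `safe-outputs.missing-tool` (with an optional `max` cap). The keys follow the schema and parsing code in this diff; the concrete values are illustrative assumptions only, mirroring the test fixtures.

```yaml
engine:
  id: claude
  env:
    AWS_REGION: us-west-2   # example custom variable forwarded to the engine step
safe-outputs:
  missing-tool:
    max: 5                  # optional: cap the number of missing-tool reports recorded
```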