From 30d7557a7ecd9b0044e4e2a82bb9076c93ee9836 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 4 Sep 2025 11:18:02 -0700 Subject: [PATCH] Add create-security-report safe output with configurable SARIF generation, workflow name defaults, GitHub Code Scanning integration, custom rule ID support, and test workflows (#54) * Initial plan * Implement create-security-report safe output feature Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Final implementation with schema fix, formatting, and validation Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Implement PR feedback: configurable driver, workflow filename rule IDs, and optional column support Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Default security report driver to agentic workflow name from frontmatter Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Add support for optional ruleIdSuffix in security reports Allow LLMs to provide custom rule ID suffixes in security reports via the ruleIdSuffix field. When not provided, defaults to the existing number scheme. 
- Add ruleIdSuffix validation (alphanumeric, hyphens, underscores only) - Update rule ID generation to use custom suffix when available - Add comprehensive tests for custom and default rule ID scenarios - Update documentation to describe new functionality - Maintain backward compatibility with existing workflows Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Add test agentic workflows for create-security-report safe output Added Claude and Codex test workflows to validate the new create-security-report safe output functionality Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> * Update test workflows to use workflow_dispatch trigger instead of issues Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> Co-authored-by: Peli de Halleux --- ...est-claude-create-security-report.lock.yml | 1827 +++++++++++++++++ .../test-claude-create-security-report.md | 33 + ...test-codex-create-security-report.lock.yml | 1589 ++++++++++++++ .../test-codex-create-security-report.md | 33 + docs/safe-outputs.md | 56 + pkg/parser/schemas/main_workflow_schema.json | 24 + pkg/workflow/compiler.go | 166 +- pkg/workflow/compiler_test.go | 8 +- pkg/workflow/js.go | 3 + pkg/workflow/js/create_security_report.cjs | 297 +++ .../js/create_security_report.test.cjs | 604 ++++++ pkg/workflow/security_reports_test.go | 319 +++ 12 files changed, 4951 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/test-claude-create-security-report.lock.yml create mode 100644 .github/workflows/test-claude-create-security-report.md create mode 100644 .github/workflows/test-codex-create-security-report.lock.yml create mode 100644 .github/workflows/test-codex-create-security-report.md create mode 100644 pkg/workflow/js/create_security_report.cjs create mode 100644 
pkg/workflow/js/create_security_report.test.cjs create mode 100644 pkg/workflow/security_reports_test.go diff --git a/.github/workflows/test-claude-create-security-report.lock.yml b/.github/workflows/test-claude-create-security-report.lock.yml new file mode 100644 index 0000000000..11b817b80b --- /dev/null +++ b/.github/workflows/test-claude-create-security-report.lock.yml @@ -0,0 +1,1827 @@ +# This file was automatically generated by gh-aw. DO NOT EDIT. +# To update this file, edit the corresponding .md file and run: +# gh aw compile + +name: "Security Analysis with Claude" +"on": + workflow_dispatch: null + +permissions: {} + +concurrency: + group: "gh-aw-${{ github.workflow }}" + +run-name: "Security Analysis with Claude" + +jobs: + add_reaction: + if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_comment' || github.event_name == 'pull_request_review_comment' || (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.full_name == github.repository) + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + outputs: + reaction_id: ${{ steps.react.outputs.reaction-id }} + steps: + - name: Add eyes reaction to the triggering item + id: react + uses: actions/github-script@v7 + env: + GITHUB_AW_REACTION: eyes + with: + script: | + async function main() { + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; + const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows + const runId = context.runId; + const runUrl = context.payload.repository + ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); + // Validate reaction type + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; + if (!validReactions.includes(reaction)) { + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); + return; + } + // Determine the API endpoint based on the event type + let reactionEndpoint; + let commentUpdateEndpoint; + let shouldEditComment = false; + const eventName = context.eventName; + const owner = context.repo.owner; + const repo = context.repo.repo; + try { + switch (eventName) { + case "issues": + const issueNumber = context.payload?.issue?.number; + if (!issueNumber) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; + // Don't edit issue bodies for now - this might be more complex + shouldEditComment = false; + break; + case "issue_comment": + const commentId = context.payload?.comment?.id; + if (!commentId) { + core.setFailed("Comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? 
true : false; + break; + case "pull_request": + const prNumber = context.payload?.pull_request?.number; + if (!prNumber) { + core.setFailed("Pull request number not found in event payload"); + return; + } + // PRs are "issues" for the reactions endpoint + reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; + // Don't edit PR bodies for now - this might be more complex + shouldEditComment = false; + break; + case "pull_request_review_comment": + const reviewCommentId = context.payload?.comment?.id; + if (!reviewCommentId) { + core.setFailed("Review comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? true : false; + break; + default: + core.setFailed(`Unsupported event type: ${eventName}`); + return; + } + console.log("Reaction API endpoint:", reactionEndpoint); + // Add reaction first + await addReaction(reactionEndpoint, reaction); + // Then edit comment if applicable and if it's a comment event + if (shouldEditComment && commentUpdateEndpoint) { + console.log("Comment update endpoint:", commentUpdateEndpoint); + await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); + } else { + if (!alias && commentUpdateEndpoint) { + console.log( + "Skipping comment edit - only available for alias workflows" + ); + } else { + console.log("Skipping comment edit for event type:", eventName); + } + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); + } + } + /** + * Add a reaction to a GitHub issue, PR, or comment + * @param {string} endpoint - The GitHub API endpoint to add the reaction to + * @param {string} reaction - The reaction type to add + */ + async function addReaction(endpoint, reaction) { + const response = await github.request("POST " + endpoint, { + content: reaction, + headers: { + Accept: "application/vnd.github+json", + }, + }); + const reactionId = response.data?.id; + if (reactionId) { + console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId.toString()); + } else { + console.log(`Successfully added reaction: ${reaction}`); + core.setOutput("reaction-id", ""); + } + } + /** + * Edit a comment to add a workflow run link + * @param {string} endpoint - The GitHub API endpoint to update the comment + * @param {string} runUrl - The URL of the workflow run + */ + async function editCommentWithWorkflowLink(endpoint, runUrl) { + try { + // First, get the current comment content + const getResponse = await github.request("GET " + endpoint, { + headers: { + Accept: "application/vnd.github+json", + }, + }); + const originalBody = getResponse.data.body || ""; + const workflowLinkText = `\n\n---\n*šŸ¤– [Workflow run](${runUrl}) triggered by this comment*`; + // Check if we've already added a workflow link to avoid duplicates + if (originalBody.includes("*šŸ¤– [Workflow run](")) { + console.log( + "Comment already contains a workflow run link, skipping edit" + ); + return; + } + const updatedBody = originalBody + workflowLinkText; + // Update the comment + const updateResponse = await github.request("PATCH " + endpoint, { + body: updatedBody, + headers: { + Accept: "application/vnd.github+json", + }, + }); + console.log(`Successfully updated 
comment with workflow link`); + console.log(`Comment ID: ${updateResponse.data.id}`); + } catch (error) { + // Don't fail the entire job if comment editing fails - just log it + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); + } + } + await main(); + + security-analysis-with-claude: + runs-on: ubuntu-latest + permissions: read-all + outputs: + output: ${{ steps.collect_output.outputs.output }} + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Generate Network Permissions Hook + run: | + mkdir -p .claude/hooks + cat > .claude/hooks/network_permissions.py << 'EOF' + #!/usr/bin/env python3 + """ + Network permissions validator for Claude Code engine. + Generated by gh-aw from engine network permissions configuration. + """ + + import json + import sys + import urllib.parse + import re + + # Domain whitelist (populated during generation) + ALLOWED_DOMAINS = 
["crl3.digicert.com","crl4.digicert.com","ocsp.digicert.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","crl.geotrust.com","ocsp.geotrust.com","crl.thawte.com","ocsp.thawte.com","crl.verisign.com","ocsp.verisign.com","crl.globalsign.com","ocsp.globalsign.com","crls.ssl.com","ocsp.ssl.com","crl.identrust.com","ocsp.identrust.com","crl.sectigo.com","ocsp.sectigo.com","crl.usertrust.com","ocsp.usertrust.com","s.symcb.com","s.symcd.com","ghcr.io","registry.hub.docker.com","*.docker.io","*.docker.com","production.cloudflare.docker.com","dl.k8s.io","pkgs.k8s.io","quay.io","mcr.microsoft.com","gcr.io","auth.docker.io","nuget.org","dist.nuget.org","api.nuget.org","nuget.pkg.github.com","dotnet.microsoft.com","pkgs.dev.azure.com","builds.dotnet.microsoft.com","dotnetcli.blob.core.windows.net","nugetregistryv2prod.blob.core.windows.net","azuresearch-usnc.nuget.org","azuresearch-ussc.nuget.org","dc.services.visualstudio.com","dot.net","ci.dot.net","www.microsoft.com","oneocsp.microsoft.com","pub.dev","pub.dartlang.org","*.githubusercontent.com","raw.githubusercontent.com","objects.githubusercontent.com","lfs.github.com","github-cloud.githubusercontent.com","github-cloud.s3.amazonaws.com","codeload.github.com","go.dev","golang.org","proxy.golang.org","sum.golang.org","pkg.go.dev","goproxy.io","releases.hashicorp.com","apt.releases.hashicorp.com","yum.releases.hashicorp.com","registry.terraform.io","haskell.org","*.hackage.haskell.org","get-ghcup.haskell.org","downloads.haskell.org","www.java.com","jdk.java.net","api.adoptium.net","adoptium.net","repo.maven.apache.org","maven.apache.org","repo1.maven.org","maven.pkg.github.com","maven.oracle.com","repo.spring.io","gradle.org","services.gradle.org","plugins.gradle.org","plugins-artifacts.gradle.org","repo.grails.org","download.eclipse.org","download.oracle.com","jcenter.bintray.com","json-schema.org","json.schemastore.org","archive.ubuntu.com","security.ubuntu.com","ppa.launchpad.net","keyserver.ubuntu.com","azure.archive
.ubuntu.com","api.snapcraft.io","deb.debian.org","security.debian.org","keyring.debian.org","packages.debian.org","debian.map.fastlydns.net","apt.llvm.org","dl.fedoraproject.org","mirrors.fedoraproject.org","download.fedoraproject.org","mirror.centos.org","vault.centos.org","dl-cdn.alpinelinux.org","pkg.alpinelinux.org","mirror.archlinux.org","archlinux.org","download.opensuse.org","cdn.redhat.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","npmjs.org","npmjs.com","registry.npmjs.com","registry.npmjs.org","skimdb.npmjs.com","npm.pkg.github.com","api.npms.io","nodejs.org","yarnpkg.com","registry.yarnpkg.com","repo.yarnpkg.com","deb.nodesource.com","get.pnpm.io","bun.sh","deno.land","registry.bower.io","cpan.org","www.cpan.org","metacpan.org","cpan.metacpan.org","repo.packagist.org","packagist.org","getcomposer.org","playwright.download.prss.microsoft.com","cdn.playwright.dev","pypi.python.org","pypi.org","pip.pypa.io","*.pythonhosted.org","files.pythonhosted.org","bootstrap.pypa.io","conda.binstar.org","conda.anaconda.org","binstar.org","anaconda.org","repo.continuum.io","repo.anaconda.com","rubygems.org","api.rubygems.org","rubygems.pkg.github.com","bundler.rubygems.org","gems.rubyforge.org","gems.rubyonrails.org","index.rubygems.org","cache.ruby-lang.org","*.rvm.io","crates.io","index.crates.io","static.crates.io","sh.rustup.rs","static.rust-lang.org","download.swift.org","swift.org","cocoapods.org","cdn.cocoapods.org"] + + def extract_domain(url_or_query): + """Extract domain from URL or search query.""" + if not url_or_query: + return None + + if url_or_query.startswith(('http://', 'https://')): + return urllib.parse.urlparse(url_or_query).netloc.lower() + + # Check for domain patterns in search queries + match = re.search(r'site:([a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', url_or_query) + if match: + return match.group(1).lower() + + return None + + def is_domain_allowed(domain): + """Check if domain is allowed.""" + if not domain: + # If no 
domain detected, allow only if not under deny-all policy + return bool(ALLOWED_DOMAINS) # False if empty list (deny-all), True if has domains + + # Empty allowed domains means deny all + if not ALLOWED_DOMAINS: + return False + + for pattern in ALLOWED_DOMAINS: + regex = pattern.replace('.', r'\.').replace('*', '.*') + if re.match(f'^{regex}$', domain): + return True + return False + + # Main logic + try: + data = json.load(sys.stdin) + tool_name = data.get('tool_name', '') + tool_input = data.get('tool_input', {}) + + if tool_name not in ['WebFetch', 'WebSearch']: + sys.exit(0) # Allow other tools + + target = tool_input.get('url') or tool_input.get('query', '') + domain = extract_domain(target) + + # For WebSearch, apply domain restrictions consistently + # If no domain detected in search query, check if restrictions are in place + if tool_name == 'WebSearch' and not domain: + # Since this hook is only generated when network permissions are configured, + # empty ALLOWED_DOMAINS means deny-all policy + if not ALLOWED_DOMAINS: # Empty list means deny all + print(f"Network access blocked: deny-all policy in effect", file=sys.stderr) + print(f"No domains are allowed for WebSearch", file=sys.stderr) + sys.exit(2) # Block under deny-all policy + else: + print(f"Network access blocked for WebSearch: no specific domain detected", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block general searches when domain allowlist is configured + + if not is_domain_allowed(domain): + print(f"Network access blocked for domain: {domain}", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block with feedback to Claude + + sys.exit(0) # Allow + + except Exception as e: + print(f"Network validation error: {e}", file=sys.stderr) + sys.exit(2) # Block on errors + + EOF + chmod +x .claude/hooks/network_permissions.py + - name: Generate Claude Settings + run: | + cat > 
.claude/settings.json << 'EOF' + { + "hooks": { + "PreToolUse": [ + { + "matcher": "WebFetch|WebSearch", + "hooks": [ + { + "type": "command", + "command": ".claude/hooks/network_permissions.py" + } + ] + } + ] + } + } + EOF + - name: Setup agent output + id: setup_agent_output + uses: actions/github-script@v7 + with: + script: | + function main() { + const fs = require("fs"); + const crypto = require("crypto"); + // Generate a random filename for the output file + const randomId = crypto.randomBytes(8).toString("hex"); + const outputFile = `/tmp/aw_output_${randomId}.txt`; + // Ensure the /tmp directory exists and create empty output file + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); + // Verify the file was created and is writable + if (!fs.existsSync(outputFile)) { + throw new Error(`Failed to create output file: ${outputFile}`); + } + // Set the environment variable for subsequent steps + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); + // Also set as step output for reference + core.setOutput("output_file", outputFile); + } + main(); + - name: Setup MCPs + run: | + mkdir -p /tmp/mcp-config + cat > /tmp/mcp-config/mcp-servers.json << 'EOF' + { + "mcpServers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server:sha-09deac4" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}" + } + } + } + } + EOF + - name: Create prompt + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + mkdir -p /tmp/aw-prompts + cat > /tmp/aw-prompts/prompt.txt << 'EOF' + # Security Analysis with Claude + + Analyze the repository codebase for security vulnerabilities and create security reports. 
+ + For each security finding you identify, specify: + - The file path relative to the repository root + - The line number where the issue occurs + - Optional column number for precise location + - The severity level (error, warning, info, or note) + - A detailed description of the security issue + - Optionally, a custom rule ID suffix for meaningful SARIF rule identifiers + + Focus on common security issues like: + - Hardcoded secrets or credentials + - SQL injection vulnerabilities + - Cross-site scripting (XSS) issues + - Insecure file operations + - Authentication bypasses + - Input validation problems + + + --- + + ## Reporting Missing Tools or Functionality + + **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. + + **Format**: Write one JSON object per line. Each object must have a `type` field specifying the action type. 
+ + ### Available Output Types: + + **Example JSONL file content:** + ``` + # No safe outputs configured for this workflow + ``` + + **Important Notes:** + - Do NOT attempt to use MCP tools, `gh`, or the GitHub API for these actions + - Each JSON object must be on its own line + - Only include output types that are configured for this workflow + - The content of this file will be automatically processed and executed + + EOF + - name: Print prompt to step summary + run: | + echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````markdown' >> $GITHUB_STEP_SUMMARY + cat /tmp/aw-prompts/prompt.txt >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Generate agentic run info + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "claude", + engine_name: "Claude Code", + model: "", + version: "", + workflow_name: "Security Analysis with Claude", + experimental: false, + supports_tools_whitelist: true, + supports_http_transport: true, + run_id: context.runId, + run_number: context.runNumber, + run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + created_at: new Date().toISOString() + }; + + // Write to /tmp directory to avoid inclusion in PR + const tmpPath = '/tmp/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + - name: Upload agentic run info + if: always() + uses: actions/upload-artifact@v4 + with: + name: aw_info.json + path: /tmp/aw_info.json + if-no-files-found: warn + - name: Execute Claude Code Action + id: agentic_execution + uses: anthropics/claude-code-base-action@v0.0.56 + with: + # Allowed tools (sorted): + # - ExitPlanMode + # - Glob + # - Grep + # - LS + # 
- NotebookRead + # - Read + # - Task + # - TodoWrite + # - Write + # - mcp__github__download_workflow_run_artifact + # - mcp__github__get_code_scanning_alert + # - mcp__github__get_commit + # - mcp__github__get_dependabot_alert + # - mcp__github__get_discussion + # - mcp__github__get_discussion_comments + # - mcp__github__get_file_contents + # - mcp__github__get_issue + # - mcp__github__get_issue_comments + # - mcp__github__get_job_logs + # - mcp__github__get_me + # - mcp__github__get_notification_details + # - mcp__github__get_pull_request + # - mcp__github__get_pull_request_comments + # - mcp__github__get_pull_request_diff + # - mcp__github__get_pull_request_files + # - mcp__github__get_pull_request_reviews + # - mcp__github__get_pull_request_status + # - mcp__github__get_secret_scanning_alert + # - mcp__github__get_tag + # - mcp__github__get_workflow_run + # - mcp__github__get_workflow_run_logs + # - mcp__github__get_workflow_run_usage + # - mcp__github__list_branches + # - mcp__github__list_code_scanning_alerts + # - mcp__github__list_commits + # - mcp__github__list_dependabot_alerts + # - mcp__github__list_discussion_categories + # - mcp__github__list_discussions + # - mcp__github__list_issues + # - mcp__github__list_notifications + # - mcp__github__list_pull_requests + # - mcp__github__list_secret_scanning_alerts + # - mcp__github__list_tags + # - mcp__github__list_workflow_jobs + # - mcp__github__list_workflow_run_artifacts + # - mcp__github__list_workflow_runs + # - mcp__github__list_workflows + # - mcp__github__search_code + # - mcp__github__search_issues + # - mcp__github__search_orgs + # - mcp__github__search_pull_requests + # - mcp__github__search_repositories + # - mcp__github__search_users + allowed_tools: 
"ExitPlanMode,Glob,Grep,LS,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__get_job_logs,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issues,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_secret_scanning_alerts,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users" + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + claude_env: | + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + mcp_config: /tmp/mcp-config/mcp-servers.json + prompt_file: /tmp/aw-prompts/prompt.txt + settings: .claude/settings.json + timeout_minutes: 5 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + - name: Capture Agentic Action logs + if: always() + run: | + # Copy the detailed execution file from Agentic Action if available + if [ -n "${{ 
steps.agentic_execution.outputs.execution_file }}" ] && [ -f "${{ steps.agentic_execution.outputs.execution_file }}" ]; then + cp ${{ steps.agentic_execution.outputs.execution_file }} /tmp/security-analysis-with-claude.log + else + echo "No execution file output found from Agentic Action" >> /tmp/security-analysis-with-claude.log + fi + + # Ensure log file exists + touch /tmp/security-analysis-with-claude.log + - name: Check if workflow-complete.txt exists, if so upload it + id: check_file + run: | + if [ -f workflow-complete.txt ]; then + echo "File exists" + echo "upload=true" >> $GITHUB_OUTPUT + else + echo "File does not exist" + echo "upload=false" >> $GITHUB_OUTPUT + fi + - name: Upload workflow-complete.txt + if: steps.check_file.outputs.upload == 'true' + uses: actions/upload-artifact@v4 + with: + name: workflow-complete + path: workflow-complete.txt + - name: Collect agent output + id: collect_output + uses: actions/github-script@v7 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{}" + with: + script: | + async function main() { + const fs = require("fs"); + /** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ + function sanitizeContent(content) { + if (!content || typeof content !== "string") { + return ""; + } + // Read allowed domains from environment variable + const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = [ + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", + ]; + const allowedDomains = allowedDomainsEnv + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) + : defaultAllowedDomains; + let sanitized = content; + // Neutralize @mentions to prevent unintended notifications + sanitized = neutralizeMentions(sanitized); + // Remove control characters (except newlines and tabs) + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); + // XML character escaping + sanitized = sanitized + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); + // URI filtering - replace non-https protocols with "(redacted)" + sanitized = sanitizeUrlProtocols(sanitized); + // Domain filtering for HTTPS URIs + sanitized = sanitizeUrlDomains(sanitized); + // Limit total length to prevent DoS (0.5MB max) + const maxLength = 524288; + if (sanitized.length > maxLength) { + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; + } + // Limit number of lines to prevent log flooding (65k max) + const lines = sanitized.split("\n"); + const maxLines = 65000; + if (lines.length > maxLines) { + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; + } + // Remove ANSI escape sequences + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); + // Neutralize common bot trigger phrases + sanitized = neutralizeBotTriggers(sanitized); + // Trim excessive whitespace + return sanitized.trim(); + /** + * Remove unknown domains + * @param {string} s - The string to process + * @returns {string} The string with unknown domains redacted + */ + function sanitizeUrlDomains(s) { + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain 
=> { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); + } + /** + * Remove unknown protocols except https + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ + function sanitizeUrlProtocols(s) { + // Match both protocol:// and protocol: patterns + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); + } + /** + * Neutralizes @mentions by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ + function neutralizeMentions(s) { + // Replace @name or @org/team outside code with `@name` + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); + } + /** + * Neutralizes bot trigger phrases by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized bot triggers + */ + function neutralizeBotTriggers(s) { + // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
+ return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); + } + } + /** + * Gets the maximum allowed count for a given output type + * @param {string} itemType - The output item type + * @param {Object} config - The safe-outputs configuration + * @returns {number} The maximum allowed count + */ + function getMaxAllowedForType(itemType, config) { + // Check if max is explicitly specified in config + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { + return config[itemType].max; + } + // Use default limits for plural-supported types + switch (itemType) { + case "create-issue": + return 1; // Only one issue allowed + case "add-issue-comment": + return 1; // Only one comment allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed + default: + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + 
if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } 
catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!outputFile) { + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); + return; + } + console.log("Raw output content length:", outputContent.length); + // Parse the safe-outputs configuration + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + expectedOutputTypes = JSON.parse(safeOutputsConfig); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); + } catch (error) { + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); + } + } + // Parse JSONL content + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; // Skip empty lines + try { + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Validate against expected output types + const itemType = item.type; + if (!expectedOutputTypes[itemType]) { + 
errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); + continue; + } + // Check for too many items of the same type + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.` + ); + continue; + } + // Basic validation based on type + switch (itemType) { + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.body = sanitizeContent(item.body); + break; + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize branch name if present + if (item.branch && typeof item.branch === "string") { + item.branch = sanitizeContent(item.branch); + } + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-label": + if (!item.labels || !Array.isArray(item.labels)) { + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); + continue; + } + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); + continue; + } + // Sanitize label strings + item.labels = item.labels.map(label => sanitizeContent(label)); + break; + case "update-issue": + // Check that at least one updateable field is provided + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; + if (!hasValidField) { + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); + continue; + } + // Validate status if provided + if (item.status !== undefined) { + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); + continue; + } + } + // Validate title if provided + if (item.title !== undefined) { + if (typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); + continue; + } + item.title = sanitizeContent(item.title); + } + // Validate body if provided + if (item.body !== undefined) { + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); + continue; + } + item.body = sanitizeContent(item.body); + } + // Validate issue_number if provided (for target "*") + if (item.issue_number !== undefined) { + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); + continue; + } + } + break; + case "push-to-branch": + // Validate message if provided 
(optional) + if (item.message !== undefined) { + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); + continue; + } + item.message = sanitizeContent(item.message); + } + // Validate pull_request_number if provided (for target "*") + if (item.pull_request_number !== undefined) { + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + 
); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; + default: + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + console.log(`Line ${i + 1}: Valid ${itemType} item`); + parsedItems.push(item); + } catch (error) { + errors.push(`Line ${i + 1}: Invalid JSON - ${error.message}`); + } + } + // Report validation results + if (errors.length > 0) { + console.log("Validation errors found:"); + errors.forEach(error => console.log(` - ${error}`)); + // For now, we'll continue with valid items but log the errors + // In the future, we might want to fail the workflow for invalid items + } + console.log(`Successfully parsed ${parsedItems.length} valid output items`); + // Set the parsed and validated items as output + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + } + // Call the main function + await main(); + - name: Print agent output to step summary + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + cat ${{ env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY + # Ensure there's a newline after the file content if it doesn't end with one + if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + fi + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Upload agentic output file + if: always() && steps.collect_output.outputs.output != '' + uses: actions/upload-artifact@v4 + with: + name: aw_output.txt + path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + if-no-files-found: warn + - name: Upload engine output files + uses: actions/upload-artifact@v4 + with: + name: agent_outputs + path: | 
+ output.txt + if-no-files-found: ignore + - name: Clean up engine output files + run: | + rm -f output.txt + - name: Parse agent logs for step summary + if: always() + uses: actions/github-script@v7 + env: + AGENT_LOG_FILE: /tmp/security-analysis-with-claude.log + with: + script: | + function main() { + const fs = require("fs"); + try { + // Get the log file path from environment + const logFile = process.env.AGENT_LOG_FILE; + if (!logFile) { + console.log("No agent log file specified"); + return; + } + if (!fs.existsSync(logFile)) { + console.log(`Log file not found: ${logFile}`); + return; + } + const logContent = fs.readFileSync(logFile, "utf8"); + const markdown = parseClaudeLog(logContent); + // Append to GitHub step summary + core.summary.addRaw(markdown).write(); + } catch (error) { + console.error("Error parsing Claude log:", error.message); + core.setFailed(error.message); + } + } + function parseClaudeLog(logContent) { + try { + const logEntries = JSON.parse(logContent); + if (!Array.isArray(logEntries)) { + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; + } + let markdown = "## šŸ¤– Commands and Tools\n\n"; + const toolUsePairs = new Map(); // Map tool_use_id to tool_result + const commandSummary = []; // For the succinct summary + // First pass: collect tool results by tool_use_id + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + // Collect all tool uses for summary + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + const input = content.input || {}; + // Skip internal tools - only show external commands and API calls + if ( + [ + 
"Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { + continue; // Skip internal file operations and searches + } + // Find the corresponding tool result to get status + const toolResult = toolUsePairs.get(content.id); + let statusIcon = "ā“"; + if (toolResult) { + statusIcon = toolResult.is_error === true ? "āŒ" : "āœ…"; + } + // Add to command summary (only external tools) + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } else if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); + } else { + // Handle other external tools (if any) + commandSummary.push(`* ${statusIcon} ${toolName}`); + } + } + } + } + } + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + markdown += `${cmd}\n`; + } + } else { + markdown += "No commands or tools used.\n"; + } + // Add Information section from the last entry with result metadata + markdown += "\n## šŸ“Š Information\n\n"; + // Find the last entry with metadata + const lastEntry = logEntries[logEntries.length - 1]; + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { + if (lastEntry.num_turns) { + markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; + } + if (lastEntry.duration_ms) { + const durationSec = Math.round(lastEntry.duration_ms / 1000); + const minutes = Math.floor(durationSec / 60); + const seconds = durationSec % 60; + markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; + } + if (lastEntry.total_cost_usd) { + markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; + } + if (lastEntry.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + markdown += `**Token Usage:**\n`; 
+ if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; + } + } + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { + markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; + } + } + markdown += "\n## šŸ¤– Reasoning\n\n"; + // Second pass: process assistant messages in sequence + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "text" && content.text) { + // Add reasoning text directly (no header) + const text = content.text.trim(); + if (text && text.length > 0) { + markdown += text + "\n\n"; + } + } else if (content.type === "tool_use") { + // Process tool use with its result + const toolResult = toolUsePairs.get(content.id); + const toolMarkdown = formatToolUse(content, toolResult); + if (toolMarkdown) { + markdown += toolMarkdown; + } + } + } + } + } + return markdown; + } catch (error) { + return `## Agent Log Summary\n\nError parsing Claude log: ${error.message}\n`; + } + } + function formatToolUse(toolUse, toolResult) { + const toolName = toolUse.name; + const input = toolUse.input || {}; + // Skip TodoWrite except the very last one (we'll handle this separately) + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one + } + // Helper function to determine status icon + function getStatusIcon() { + if (toolResult) { + return toolResult.is_error === true ? 
"āŒ" : "āœ…"; + } + return "ā“"; // Unknown by default + } + let markdown = ""; + const statusIcon = getStatusIcon(); + switch (toolName) { + case "Bash": + const command = input.command || ""; + const description = input.description || ""; + // Format the command to be single line + const formattedCommand = formatBashCommand(command); + if (description) { + markdown += `${description}:\n\n`; + } + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; + break; + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix + markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; + break; + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; + break; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; + markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; + break; + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; + break; + default: + // Handle MCP calls and other tools + if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + const params = formatMcpParameters(input); + markdown += `${statusIcon} ${mcpName}(${params})\n\n`; + } else { + // Generic tool formatting - show the tool name and main parameters + const keys = Object.keys(input); + if (keys.length > 0) { + // Try to find the most important parameter + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = 
String(input[mainParam] || ""); + if (value) { + markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } + } + return markdown; + } + function formatMcpName(toolName) { + // Convert mcp__github__search_issues to github::search_issues + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); + if (parts.length >= 3) { + const provider = parts[1]; // github, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. + return `${provider}::${method}`; + } + } + return toolName; + } + function formatMcpParameters(input) { + const keys = Object.keys(input); + if (keys.length === 0) return ""; + const paramStrs = []; + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); + paramStrs.push(`${key}: ${truncateString(value, 40)}`); + } + if (keys.length > 4) { + paramStrs.push("..."); + } + return paramStrs.join(", "); + } + function formatBashCommand(command) { + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) + const maxLength = 80; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + return formatted; + } + function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, 
maxLength) + "..."; + } + // Export for testing + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; + } + main(); + - name: Upload agent logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: security-analysis-with-claude.log + path: /tmp/security-analysis-with-claude.log + if-no-files-found: warn + + create_security_report: + needs: security-analysis-with-claude + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + actions: read + timeout-minutes: 10 + outputs: + artifact_uploaded: ${{ steps.create_security_report.outputs.artifact_uploaded }} + codeql_uploaded: ${{ steps.create_security_report.outputs.codeql_uploaded }} + findings_count: ${{ steps.create_security_report.outputs.findings_count }} + sarif_file: ${{ steps.create_security_report.outputs.sarif_file }} + steps: + - name: Create Security Report + id: create_security_report + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.security-analysis-with-claude.outputs.output }} + GITHUB_AW_SECURITY_REPORT_MAX: 10 + GITHUB_AW_SECURITY_REPORT_DRIVER: Test Claude Security Report + GITHUB_AW_WORKFLOW_FILENAME: test-claude-create-security-report + with: + script: | + async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + console.log("Agent output content length:", outputContent.length); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + // Find all create-security-report items + const securityItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-security-report" + ); + if (securityItems.length === 0) { + console.log("No create-security-report items found in agent output"); + return; + } + console.log(`Found ${securityItems.length} create-security-report item(s)`); + // Get the max configuration from environment variable + const maxFindings = process.env.GITHUB_AW_SECURITY_REPORT_MAX + ? parseInt(process.env.GITHUB_AW_SECURITY_REPORT_MAX) + : 0; // 0 means unlimited + console.log( + `Max findings configuration: ${maxFindings === 0 ? "unlimited" : maxFindings}` + ); + // Get the driver configuration from environment variable + const driverName = + process.env.GITHUB_AW_SECURITY_REPORT_DRIVER || + "GitHub Agentic Workflows Security Scanner"; + console.log(`Driver name: ${driverName}`); + // Get the workflow filename for rule ID prefix + const workflowFilename = + process.env.GITHUB_AW_WORKFLOW_FILENAME || "workflow"; + console.log(`Workflow filename for rule ID prefix: ${workflowFilename}`); + const validFindings = []; + // Process each security item and validate the findings + for (let i = 0; i < securityItems.length; i++) { + const securityItem = securityItems[i]; + console.log( + `Processing create-security-report item ${i + 1}/${securityItems.length}:`, + { + file: securityItem.file, + line: securityItem.line, + severity: securityItem.severity, + messageLength: securityItem.message + ? 
securityItem.message.length + : "undefined", + ruleIdSuffix: securityItem.ruleIdSuffix || "not specified", + } + ); + // Validate required fields + if (!securityItem.file) { + console.log('Missing required field "file" in security report item'); + continue; + } + if ( + !securityItem.line || + (typeof securityItem.line !== "number" && + typeof securityItem.line !== "string") + ) { + console.log( + 'Missing or invalid required field "line" in security report item' + ); + continue; + } + if (!securityItem.severity || typeof securityItem.severity !== "string") { + console.log( + 'Missing or invalid required field "severity" in security report item' + ); + continue; + } + if (!securityItem.message || typeof securityItem.message !== "string") { + console.log( + 'Missing or invalid required field "message" in security report item' + ); + continue; + } + // Parse line number + const line = parseInt(securityItem.line, 10); + if (isNaN(line) || line <= 0) { + console.log(`Invalid line number: ${securityItem.line}`); + continue; + } + // Parse optional column number + let column = 1; // Default to column 1 + if (securityItem.column !== undefined) { + if ( + typeof securityItem.column !== "number" && + typeof securityItem.column !== "string" + ) { + console.log( + 'Invalid field "column" in security report item (must be number or string)' + ); + continue; + } + const parsedColumn = parseInt(securityItem.column, 10); + if (isNaN(parsedColumn) || parsedColumn <= 0) { + console.log(`Invalid column number: ${securityItem.column}`); + continue; + } + column = parsedColumn; + } + // Parse optional rule ID suffix + let ruleIdSuffix = null; + if (securityItem.ruleIdSuffix !== undefined) { + if (typeof securityItem.ruleIdSuffix !== "string") { + console.log( + 'Invalid field "ruleIdSuffix" in security report item (must be string)' + ); + continue; + } + // Validate that the suffix doesn't contain invalid characters + const trimmedSuffix = securityItem.ruleIdSuffix.trim(); + if 
(trimmedSuffix.length === 0) { + console.log( + 'Invalid field "ruleIdSuffix" in security report item (cannot be empty)' + ); + continue; + } + // Check for characters that would be problematic in rule IDs + if (!/^[a-zA-Z0-9_-]+$/.test(trimmedSuffix)) { + console.log( + `Invalid ruleIdSuffix "${trimmedSuffix}" (must contain only alphanumeric characters, hyphens, and underscores)` + ); + continue; + } + ruleIdSuffix = trimmedSuffix; + } + // Validate severity level and map to SARIF level + const severityMap = { + error: "error", + warning: "warning", + info: "note", + note: "note", + }; + const normalizedSeverity = securityItem.severity.toLowerCase(); + if (!severityMap[normalizedSeverity]) { + console.log( + `Invalid severity level: ${securityItem.severity} (must be error, warning, info, or note)` + ); + continue; + } + const sarifLevel = severityMap[normalizedSeverity]; + // Create a valid finding object + validFindings.push({ + file: securityItem.file.trim(), + line: line, + column: column, + severity: normalizedSeverity, + sarifLevel: sarifLevel, + message: securityItem.message.trim(), + ruleIdSuffix: ruleIdSuffix, + }); + // Check if we've reached the max limit + if (maxFindings > 0 && validFindings.length >= maxFindings) { + console.log(`Reached maximum findings limit: ${maxFindings}`); + break; + } + } + if (validFindings.length === 0) { + console.log("No valid security findings to report"); + return; + } + console.log(`Processing ${validFindings.length} valid security finding(s)`); + // Generate SARIF file + const sarifContent = { + $schema: + "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", + version: "2.1.0", + runs: [ + { + tool: { + driver: { + name: driverName, + version: "1.0.0", + informationUri: "https://github.com/githubnext/gh-aw-copilots", + }, + }, + results: validFindings.map((finding, index) => ({ + ruleId: finding.ruleIdSuffix + ? 
`${workflowFilename}-${finding.ruleIdSuffix}` + : `${workflowFilename}-security-finding-${index + 1}`, + message: { text: finding.message }, + level: finding.sarifLevel, + locations: [ + { + physicalLocation: { + artifactLocation: { uri: finding.file }, + region: { + startLine: finding.line, + startColumn: finding.column, + }, + }, + }, + ], + })), + }, + ], + }; + // Write SARIF file to filesystem + const fs = require("fs"); + const path = require("path"); + const sarifFileName = "security-report.sarif"; + const sarifFilePath = path.join(process.cwd(), sarifFileName); + try { + fs.writeFileSync(sarifFilePath, JSON.stringify(sarifContent, null, 2)); + console.log(`āœ“ Created SARIF file: ${sarifFilePath}`); + console.log(`SARIF file size: ${fs.statSync(sarifFilePath).size} bytes`); + // Set outputs for the GitHub Action + core.setOutput("sarif_file", sarifFilePath); + core.setOutput("findings_count", validFindings.length); + core.setOutput("artifact_uploaded", "pending"); + core.setOutput("codeql_uploaded", "pending"); + // Write summary with findings + let summaryContent = "\n\n## Security Report\n"; + summaryContent += `Found **${validFindings.length}** security finding(s):\n\n`; + for (const finding of validFindings) { + const emoji = + finding.severity === "error" + ? "šŸ”“" + : finding.severity === "warning" + ? "🟔" + : "šŸ”µ"; + summaryContent += `${emoji} **${finding.severity.toUpperCase()}** in \`${finding.file}:${finding.line}\`: ${finding.message}\n`; + } + summaryContent += `\nšŸ“„ SARIF file created: \`${sarifFileName}\`\n`; + summaryContent += `šŸ” Findings will be uploaded to GitHub Code Scanning\n`; + await core.summary.addRaw(summaryContent).write(); + } catch (error) { + console.error( + `āœ— Failed to create SARIF file:`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + console.log( + `Successfully created security report with ${validFindings.length} finding(s)` + ); + return { + sarifFile: sarifFilePath, + findingsCount: validFindings.length, + findings: validFindings, + }; + } + await main(); + - name: Upload SARIF artifact + if: steps.create_security_report.outputs.sarif_file + uses: actions/upload-artifact@v4 + with: + name: security-report.sarif + path: ${{ steps.create_security_report.outputs.sarif_file }} + - name: Upload SARIF to GitHub Security + if: steps.create_security_report.outputs.sarif_file + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: ${{ steps.create_security_report.outputs.sarif_file }} + diff --git a/.github/workflows/test-claude-create-security-report.md b/.github/workflows/test-claude-create-security-report.md new file mode 100644 index 0000000000..36a7484e87 --- /dev/null +++ b/.github/workflows/test-claude-create-security-report.md @@ -0,0 +1,33 @@ +--- +name: Test Claude Security Report +on: + workflow_dispatch: + reaction: eyes + +engine: + id: claude + +safe-outputs: + create-security-report: + max: 10 +--- + +# Security Analysis with Claude + +Analyze the repository codebase for security vulnerabilities and create security reports. 
+ +For each security finding you identify, specify: +- The file path relative to the repository root +- The line number where the issue occurs +- Optional column number for precise location +- The severity level (error, warning, info, or note) +- A detailed description of the security issue +- Optionally, a custom rule ID suffix for meaningful SARIF rule identifiers + +Focus on common security issues like: +- Hardcoded secrets or credentials +- SQL injection vulnerabilities +- Cross-site scripting (XSS) issues +- Insecure file operations +- Authentication bypasses +- Input validation problems diff --git a/.github/workflows/test-codex-create-security-report.lock.yml b/.github/workflows/test-codex-create-security-report.lock.yml new file mode 100644 index 0000000000..652230fc34 --- /dev/null +++ b/.github/workflows/test-codex-create-security-report.lock.yml @@ -0,0 +1,1589 @@ +# This file was automatically generated by gh-aw. DO NOT EDIT. +# To update this file, edit the corresponding .md file and run: +# gh aw compile + +name: "Security Analysis with Codex" +"on": + workflow_dispatch: null + +permissions: {} + +concurrency: + group: "gh-aw-${{ github.workflow }}" + +run-name: "Security Analysis with Codex" + +jobs: + add_reaction: + if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_comment' || github.event_name == 'pull_request_review_comment' || (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.full_name == github.repository) + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + outputs: + reaction_id: ${{ steps.react.outputs.reaction-id }} + steps: + - name: Add eyes reaction to the triggering item + id: react + uses: actions/github-script@v7 + env: + GITHUB_AW_REACTION: eyes + with: + script: | + async function main() { + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; + const alias = 
process.env.GITHUB_AW_ALIAS; // Only present for alias workflows + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); + // Validate reaction type + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; + if (!validReactions.includes(reaction)) { + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); + return; + } + // Determine the API endpoint based on the event type + let reactionEndpoint; + let commentUpdateEndpoint; + let shouldEditComment = false; + const eventName = context.eventName; + const owner = context.repo.owner; + const repo = context.repo.repo; + try { + switch (eventName) { + case "issues": + const issueNumber = context.payload?.issue?.number; + if (!issueNumber) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; + // Don't edit issue bodies for now - this might be more complex + shouldEditComment = false; + break; + case "issue_comment": + const commentId = context.payload?.comment?.id; + if (!commentId) { + core.setFailed("Comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? 
true : false; + break; + case "pull_request": + const prNumber = context.payload?.pull_request?.number; + if (!prNumber) { + core.setFailed("Pull request number not found in event payload"); + return; + } + // PRs are "issues" for the reactions endpoint + reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; + // Don't edit PR bodies for now - this might be more complex + shouldEditComment = false; + break; + case "pull_request_review_comment": + const reviewCommentId = context.payload?.comment?.id; + if (!reviewCommentId) { + core.setFailed("Review comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? true : false; + break; + default: + core.setFailed(`Unsupported event type: ${eventName}`); + return; + } + console.log("Reaction API endpoint:", reactionEndpoint); + // Add reaction first + await addReaction(reactionEndpoint, reaction); + // Then edit comment if applicable and if it's a comment event + if (shouldEditComment && commentUpdateEndpoint) { + console.log("Comment update endpoint:", commentUpdateEndpoint); + await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); + } else { + if (!alias && commentUpdateEndpoint) { + console.log( + "Skipping comment edit - only available for alias workflows" + ); + } else { + console.log("Skipping comment edit for event type:", eventName); + } + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); + } + } + /** + * Add a reaction to a GitHub issue, PR, or comment + * @param {string} endpoint - The GitHub API endpoint to add the reaction to + * @param {string} reaction - The reaction type to add + */ + async function addReaction(endpoint, reaction) { + const response = await github.request("POST " + endpoint, { + content: reaction, + headers: { + Accept: "application/vnd.github+json", + }, + }); + const reactionId = response.data?.id; + if (reactionId) { + console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId.toString()); + } else { + console.log(`Successfully added reaction: ${reaction}`); + core.setOutput("reaction-id", ""); + } + } + /** + * Edit a comment to add a workflow run link + * @param {string} endpoint - The GitHub API endpoint to update the comment + * @param {string} runUrl - The URL of the workflow run + */ + async function editCommentWithWorkflowLink(endpoint, runUrl) { + try { + // First, get the current comment content + const getResponse = await github.request("GET " + endpoint, { + headers: { + Accept: "application/vnd.github+json", + }, + }); + const originalBody = getResponse.data.body || ""; + const workflowLinkText = `\n\n---\n*šŸ¤– [Workflow run](${runUrl}) triggered by this comment*`; + // Check if we've already added a workflow link to avoid duplicates + if (originalBody.includes("*šŸ¤– [Workflow run](")) { + console.log( + "Comment already contains a workflow run link, skipping edit" + ); + return; + } + const updatedBody = originalBody + workflowLinkText; + // Update the comment + const updateResponse = await github.request("PATCH " + endpoint, { + body: updatedBody, + headers: { + Accept: "application/vnd.github+json", + }, + }); + console.log(`Successfully updated 
comment with workflow link`); + console.log(`Comment ID: ${updateResponse.data.id}`); + } catch (error) { + // Don't fail the entire job if comment editing fails - just log it + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); + } + } + await main(); + + security-analysis-with-codex: + runs-on: ubuntu-latest + permissions: read-all + outputs: + output: ${{ steps.collect_output.outputs.output }} + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + - name: Install Codex + run: npm install -g @openai/codex + - name: Setup agent output + id: setup_agent_output + uses: actions/github-script@v7 + with: + script: | + function main() { + const fs = require("fs"); + const crypto = require("crypto"); + // Generate a random filename for the output file + const randomId = crypto.randomBytes(8).toString("hex"); + const outputFile = `/tmp/aw_output_${randomId}.txt`; + // Ensure the /tmp directory exists and create empty output file + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); + // Verify the file was created and is writable + if (!fs.existsSync(outputFile)) { + throw new Error(`Failed to create output file: ${outputFile}`); + } + // Set the environment variable for subsequent steps + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); + // Also set as step output for reference + core.setOutput("output_file", outputFile); + } + main(); + - name: Setup MCPs + run: | + mkdir -p /tmp/mcp-config + cat > /tmp/mcp-config/config.toml << EOF + [history] + persistence = "none" + + [mcp_servers.github] + command = "docker" + args = [ + "run", + "-i", + "--rm", + "-e", + 
"GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server:sha-09deac4" + ] + env = { "GITHUB_PERSONAL_ACCESS_TOKEN" = "${{ secrets.GITHUB_TOKEN }}" } + EOF + - name: Create prompt + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + mkdir -p /tmp/aw-prompts + cat > /tmp/aw-prompts/prompt.txt << 'EOF' + # Security Analysis with Codex + + Analyze the repository codebase for security vulnerabilities and create security reports. + + For each security finding you identify, specify: + - The file path relative to the repository root + - The line number where the issue occurs + - Optional column number for precise location + - The severity level (error, warning, info, or note) + - A detailed description of the security issue + - Optionally, a custom rule ID suffix for meaningful SARIF rule identifiers + + Focus on common security issues like: + - Hardcoded secrets or credentials + - SQL injection vulnerabilities + - Cross-site scripting (XSS) issues + - Insecure file operations + - Authentication bypasses + - Input validation problems + + + --- + + ## Reporting Missing Tools or Functionality + + **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. + + **Format**: Write one JSON object per line. Each object must have a `type` field specifying the action type. 
+ + ### Available Output Types: + + **Example JSONL file content:** + ``` + # No safe outputs configured for this workflow + ``` + + **Important Notes:** + - Do NOT attempt to use MCP tools, `gh`, or the GitHub API for these actions + - Each JSON object must be on its own line + - Only include output types that are configured for this workflow + - The content of this file will be automatically processed and executed + + EOF + - name: Print prompt to step summary + run: | + echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````markdown' >> $GITHUB_STEP_SUMMARY + cat /tmp/aw-prompts/prompt.txt >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Generate agentic run info + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "codex", + engine_name: "Codex", + model: "", + version: "", + workflow_name: "Security Analysis with Codex", + experimental: true, + supports_tools_whitelist: true, + supports_http_transport: false, + run_id: context.runId, + run_number: context.runNumber, + run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + created_at: new Date().toISOString() + }; + + // Write to /tmp directory to avoid inclusion in PR + const tmpPath = '/tmp/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + - name: Upload agentic run info + if: always() + uses: actions/upload-artifact@v4 + with: + name: aw_info.json + path: /tmp/aw_info.json + if-no-files-found: warn + - name: Run Codex + run: | + set -o pipefail + INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) + export CODEX_HOME=/tmp/mcp-config + + # Create log directory outside git repo + mkdir -p /tmp/aw-logs + + # Run 
codex with log capture - pipefail ensures codex exit code is preserved + codex exec \ + -c model=o4-mini \ + --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/security-analysis-with-codex.log + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + - name: Check if workflow-complete.txt exists, if so upload it + id: check_file + run: | + if [ -f workflow-complete.txt ]; then + echo "File exists" + echo "upload=true" >> $GITHUB_OUTPUT + else + echo "File does not exist" + echo "upload=false" >> $GITHUB_OUTPUT + fi + - name: Upload workflow-complete.txt + if: steps.check_file.outputs.upload == 'true' + uses: actions/upload-artifact@v4 + with: + name: workflow-complete + path: workflow-complete.txt + - name: Collect agent output + id: collect_output + uses: actions/github-script@v7 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{}" + with: + script: | + async function main() { + const fs = require("fs"); + /** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ + function sanitizeContent(content) { + if (!content || typeof content !== "string") { + return ""; + } + // Read allowed domains from environment variable + const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = [ + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", + ]; + const allowedDomains = allowedDomainsEnv + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) + : defaultAllowedDomains; + let sanitized = content; + // Neutralize @mentions to prevent unintended notifications + sanitized = neutralizeMentions(sanitized); + // Remove control characters (except newlines and tabs) + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); + // XML character escaping + sanitized = sanitized + .replace(/&/g, "&amp;") // Must be first to avoid double-escaping + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#x27;"); + // URI filtering - replace non-https protocols with "(redacted)" + sanitized = sanitizeUrlProtocols(sanitized); + // Domain filtering for HTTPS URIs + sanitized = sanitizeUrlDomains(sanitized); + // Limit total length to prevent DoS (0.5MB max) + const maxLength = 524288; + if (sanitized.length > maxLength) { + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; + } + // Limit number of lines to prevent log flooding (65k max) + const lines = sanitized.split("\n"); + const maxLines = 65000; + if (lines.length > maxLines) { + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; + } + // Remove ANSI escape sequences + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); + // Neutralize common bot trigger phrases + sanitized = neutralizeBotTriggers(sanitized); + // Trim excessive whitespace + return sanitized.trim(); + /** + * Remove unknown domains + * @param {string} s - The string to process + * @returns {string} The string with unknown domains redacted + */ + function sanitizeUrlDomains(s) { + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain
=> { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); + } + /** + * Remove unknown protocols except https + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ + function sanitizeUrlProtocols(s) { + // Match both protocol:// and protocol: patterns + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); + } + /** + * Neutralizes @mentions by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ + function neutralizeMentions(s) { + // Replace @name or @org/team outside code with `@name` + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); + } + /** + * Neutralizes bot trigger phrases by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized bot triggers + */ + function neutralizeBotTriggers(s) { + // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
+ return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); + } + } + /** + * Gets the maximum allowed count for a given output type + * @param {string} itemType - The output item type + * @param {Object} config - The safe-outputs configuration + * @returns {number} The maximum allowed count + */ + function getMaxAllowedForType(itemType, config) { + // Check if max is explicitly specified in config + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { + return config[itemType].max; + } + // Use default limits for plural-supported types + switch (itemType) { + case "create-issue": + return 1; // Only one issue allowed + case "add-issue-comment": + return 1; // Only one comment allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed + default: + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + 
if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } 
catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!outputFile) { + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); + return; + } + console.log("Raw output content length:", outputContent.length); + // Parse the safe-outputs configuration + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + expectedOutputTypes = JSON.parse(safeOutputsConfig); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); + } catch (error) { + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); + } + } + // Parse JSONL content + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; // Skip empty lines + try { + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Validate against expected output types + const itemType = item.type; + if (!expectedOutputTypes[itemType]) { + 
errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); + continue; + } + // Check for too many items of the same type + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.` + ); + continue; + } + // Basic validation based on type + switch (itemType) { + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.body = sanitizeContent(item.body); + break; + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize branch name if present + if (item.branch && typeof item.branch === "string") { + item.branch = sanitizeContent(item.branch); + } + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-label": + if (!item.labels || !Array.isArray(item.labels)) { + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); + continue; + } + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); + continue; + } + // Sanitize label strings + item.labels = item.labels.map(label => sanitizeContent(label)); + break; + case "update-issue": + // Check that at least one updateable field is provided + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; + if (!hasValidField) { + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); + continue; + } + // Validate status if provided + if (item.status !== undefined) { + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); + continue; + } + } + // Validate title if provided + if (item.title !== undefined) { + if (typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); + continue; + } + item.title = sanitizeContent(item.title); + } + // Validate body if provided + if (item.body !== undefined) { + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); + continue; + } + item.body = sanitizeContent(item.body); + } + // Validate issue_number if provided (for target "*") + if (item.issue_number !== undefined) { + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); + continue; + } + } + break; + case "push-to-branch": + // Validate message if provided 
(optional) + if (item.message !== undefined) { + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); + continue; + } + item.message = sanitizeContent(item.message); + } + // Validate pull_request_number if provided (for target "*") + if (item.pull_request_number !== undefined) { + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + 
); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; + default: + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + console.log(`Line ${i + 1}: Valid ${itemType} item`); + parsedItems.push(item); + } catch (error) { + errors.push(`Line ${i + 1}: Invalid JSON - ${error.message}`); + } + } + // Report validation results + if (errors.length > 0) { + console.log("Validation errors found:"); + errors.forEach(error => console.log(` - ${error}`)); + // For now, we'll continue with valid items but log the errors + // In the future, we might want to fail the workflow for invalid items + } + console.log(`Successfully parsed ${parsedItems.length} valid output items`); + // Set the parsed and validated items as output + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + } + // Call the main function + await main(); + - name: Print agent output to step summary + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + cat ${{ env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY + # Ensure there's a newline after the file content if it doesn't end with one + if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + fi + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Upload agentic output file + if: always() && steps.collect_output.outputs.output != '' + uses: actions/upload-artifact@v4 + with: + name: aw_output.txt + path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + if-no-files-found: warn + - name: Parse agent logs for step summary + if: always() + uses: actions/github-script@v7 + env: + 
AGENT_LOG_FILE: /tmp/security-analysis-with-codex.log + with: + script: | + function main() { + const fs = require("fs"); + try { + const logFile = process.env.AGENT_LOG_FILE; + if (!logFile) { + console.log("No agent log file specified"); + return; + } + if (!fs.existsSync(logFile)) { + console.log(`Log file not found: ${logFile}`); + return; + } + const content = fs.readFileSync(logFile, "utf8"); + const parsedLog = parseCodexLog(content); + if (parsedLog) { + core.summary.addRaw(parsedLog).write(); + console.log("Codex log parsed successfully"); + } else { + console.log("Failed to parse Codex log"); + } + } catch (error) { + core.setFailed(error.message); + } + } + function parseCodexLog(logContent) { + try { + const lines = logContent.split("\n"); + let markdown = "## šŸ¤– Commands and Tools\n\n"; + const commandSummary = []; + // First pass: collect commands for summary + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + // Detect tool usage and exec commands + if (line.includes("] tool ") && line.includes("(")) { + // Extract tool name + const toolMatch = line.match(/\] tool ([^(]+)\(/); + if (toolMatch) { + const toolName = toolMatch[1]; + // Look ahead to find the result status + let statusIcon = "ā“"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("success in")) { + statusIcon = "āœ…"; + break; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "āŒ"; + break; + } + } + if (toolName.includes(".")) { + // Format as provider::method + const parts = toolName.split("."); + const provider = parts[0]; + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); + } else { + commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); + } + } + } else if (line.includes("] exec ")) { + // Extract exec 
command + const execMatch = line.match(/exec (.+?) in/); + if (execMatch) { + const formattedCommand = formatBashCommand(execMatch[1]); + // Look ahead to find the result status + let statusIcon = "ā“"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("succeeded in")) { + statusIcon = "āœ…"; + break; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "āŒ"; + break; + } + } + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } + } + } + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + markdown += `${cmd}\n`; + } + } else { + markdown += "No commands or tools used.\n"; + } + // Add Information section + markdown += "\n## šŸ“Š Information\n\n"; + // Extract metadata from Codex logs + let totalTokens = 0; + const tokenMatches = logContent.match(/tokens used: (\d+)/g); + if (tokenMatches) { + for (const match of tokenMatches) { + const tokens = parseInt(match.match(/(\d+)/)[1]); + totalTokens += tokens; + } + } + if (totalTokens > 0) { + markdown += `**Total Tokens Used:** ${totalTokens.toLocaleString()}\n\n`; + } + // Count tool calls and exec commands + const toolCalls = (logContent.match(/\] tool /g) || []).length; + const execCommands = (logContent.match(/\] exec /g) || []).length; + if (toolCalls > 0) { + markdown += `**Tool Calls:** ${toolCalls}\n\n`; + } + if (execCommands > 0) { + markdown += `**Commands Executed:** ${execCommands}\n\n`; + } + markdown += "\n## šŸ¤– Reasoning\n\n"; + // Second pass: process full conversation flow with interleaved reasoning, tools, and commands + let inThinkingSection = false; + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + // Skip metadata lines + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + 
line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { + continue; + } + // Process thinking sections + if (line.includes("] thinking")) { + inThinkingSection = true; + continue; + } + // Process tool calls + if (line.includes("] tool ") && line.includes("(")) { + inThinkingSection = false; + const toolMatch = line.match(/\] tool ([^(]+)\(/); + if (toolMatch) { + const toolName = toolMatch[1]; + // Look ahead to find the result status + let statusIcon = "ā“"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("success in")) { + statusIcon = "āœ…"; + break; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "āŒ"; + break; + } + } + if (toolName.includes(".")) { + const parts = toolName.split("."); + const provider = parts[0]; + const method = parts.slice(1).join("_"); + markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; + } else { + markdown += `${statusIcon} ${toolName}(...)\n\n`; + } + } + continue; + } + // Process exec commands + if (line.includes("] exec ")) { + inThinkingSection = false; + const execMatch = line.match(/exec (.+?) 
in/); + if (execMatch) { + const formattedCommand = formatBashCommand(execMatch[1]); + // Look ahead to find the result status + let statusIcon = "ā“"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("succeeded in")) { + statusIcon = "āœ…"; + break; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "āŒ"; + break; + } + } + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; + } + continue; + } + // Process thinking content + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { + const trimmed = line.trim(); + // Add thinking content directly + markdown += `${trimmed}\n\n`; + } + } + return markdown; + } catch (error) { + console.error("Error parsing Codex log:", error); + return "## šŸ¤– Commands and Tools\n\nError parsing log content.\n\n## šŸ¤– Reasoning\n\nUnable to parse reasoning from log.\n\n"; + } + } + function formatBashCommand(command) { + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) + const maxLength = 80; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + return formatted; + } + function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; + } + // Export for testing + if (typeof 
module !== "undefined" && module.exports) { + module.exports = { parseCodexLog, formatBashCommand, truncateString }; + } + main(); + - name: Upload agent logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: security-analysis-with-codex.log + path: /tmp/security-analysis-with-codex.log + if-no-files-found: warn + + create_security_report: + needs: security-analysis-with-codex + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + actions: read + timeout-minutes: 10 + outputs: + artifact_uploaded: ${{ steps.create_security_report.outputs.artifact_uploaded }} + codeql_uploaded: ${{ steps.create_security_report.outputs.codeql_uploaded }} + findings_count: ${{ steps.create_security_report.outputs.findings_count }} + sarif_file: ${{ steps.create_security_report.outputs.sarif_file }} + steps: + - name: Create Security Report + id: create_security_report + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.security-analysis-with-codex.outputs.output }} + GITHUB_AW_SECURITY_REPORT_MAX: 10 + GITHUB_AW_SECURITY_REPORT_DRIVER: Test Codex Security Report + GITHUB_AW_WORKFLOW_FILENAME: test-codex-create-security-report + with: + script: | + async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + console.log("Agent output content length:", outputContent.length); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + // Find all create-security-report items + const securityItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-security-report" + ); + if (securityItems.length === 0) { + console.log("No create-security-report items found in agent output"); + return; + } + console.log(`Found ${securityItems.length} create-security-report item(s)`); + // Get the max configuration from environment variable + const maxFindings = process.env.GITHUB_AW_SECURITY_REPORT_MAX + ? parseInt(process.env.GITHUB_AW_SECURITY_REPORT_MAX) + : 0; // 0 means unlimited + console.log( + `Max findings configuration: ${maxFindings === 0 ? "unlimited" : maxFindings}` + ); + // Get the driver configuration from environment variable + const driverName = + process.env.GITHUB_AW_SECURITY_REPORT_DRIVER || + "GitHub Agentic Workflows Security Scanner"; + console.log(`Driver name: ${driverName}`); + // Get the workflow filename for rule ID prefix + const workflowFilename = + process.env.GITHUB_AW_WORKFLOW_FILENAME || "workflow"; + console.log(`Workflow filename for rule ID prefix: ${workflowFilename}`); + const validFindings = []; + // Process each security item and validate the findings + for (let i = 0; i < securityItems.length; i++) { + const securityItem = securityItems[i]; + console.log( + `Processing create-security-report item ${i + 1}/${securityItems.length}:`, + { + file: securityItem.file, + line: securityItem.line, + severity: securityItem.severity, + messageLength: securityItem.message + ? 
securityItem.message.length + : "undefined", + ruleIdSuffix: securityItem.ruleIdSuffix || "not specified", + } + ); + // Validate required fields + if (!securityItem.file) { + console.log('Missing required field "file" in security report item'); + continue; + } + if ( + !securityItem.line || + (typeof securityItem.line !== "number" && + typeof securityItem.line !== "string") + ) { + console.log( + 'Missing or invalid required field "line" in security report item' + ); + continue; + } + if (!securityItem.severity || typeof securityItem.severity !== "string") { + console.log( + 'Missing or invalid required field "severity" in security report item' + ); + continue; + } + if (!securityItem.message || typeof securityItem.message !== "string") { + console.log( + 'Missing or invalid required field "message" in security report item' + ); + continue; + } + // Parse line number + const line = parseInt(securityItem.line, 10); + if (isNaN(line) || line <= 0) { + console.log(`Invalid line number: ${securityItem.line}`); + continue; + } + // Parse optional column number + let column = 1; // Default to column 1 + if (securityItem.column !== undefined) { + if ( + typeof securityItem.column !== "number" && + typeof securityItem.column !== "string" + ) { + console.log( + 'Invalid field "column" in security report item (must be number or string)' + ); + continue; + } + const parsedColumn = parseInt(securityItem.column, 10); + if (isNaN(parsedColumn) || parsedColumn <= 0) { + console.log(`Invalid column number: ${securityItem.column}`); + continue; + } + column = parsedColumn; + } + // Parse optional rule ID suffix + let ruleIdSuffix = null; + if (securityItem.ruleIdSuffix !== undefined) { + if (typeof securityItem.ruleIdSuffix !== "string") { + console.log( + 'Invalid field "ruleIdSuffix" in security report item (must be string)' + ); + continue; + } + // Validate that the suffix doesn't contain invalid characters + const trimmedSuffix = securityItem.ruleIdSuffix.trim(); + if 
(trimmedSuffix.length === 0) { + console.log( + 'Invalid field "ruleIdSuffix" in security report item (cannot be empty)' + ); + continue; + } + // Check for characters that would be problematic in rule IDs + if (!/^[a-zA-Z0-9_-]+$/.test(trimmedSuffix)) { + console.log( + `Invalid ruleIdSuffix "${trimmedSuffix}" (must contain only alphanumeric characters, hyphens, and underscores)` + ); + continue; + } + ruleIdSuffix = trimmedSuffix; + } + // Validate severity level and map to SARIF level + const severityMap = { + error: "error", + warning: "warning", + info: "note", + note: "note", + }; + const normalizedSeverity = securityItem.severity.toLowerCase(); + if (!severityMap[normalizedSeverity]) { + console.log( + `Invalid severity level: ${securityItem.severity} (must be error, warning, info, or note)` + ); + continue; + } + const sarifLevel = severityMap[normalizedSeverity]; + // Create a valid finding object + validFindings.push({ + file: securityItem.file.trim(), + line: line, + column: column, + severity: normalizedSeverity, + sarifLevel: sarifLevel, + message: securityItem.message.trim(), + ruleIdSuffix: ruleIdSuffix, + }); + // Check if we've reached the max limit + if (maxFindings > 0 && validFindings.length >= maxFindings) { + console.log(`Reached maximum findings limit: ${maxFindings}`); + break; + } + } + if (validFindings.length === 0) { + console.log("No valid security findings to report"); + return; + } + console.log(`Processing ${validFindings.length} valid security finding(s)`); + // Generate SARIF file + const sarifContent = { + $schema: + "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", + version: "2.1.0", + runs: [ + { + tool: { + driver: { + name: driverName, + version: "1.0.0", + informationUri: "https://github.com/githubnext/gh-aw-copilots", + }, + }, + results: validFindings.map((finding, index) => ({ + ruleId: finding.ruleIdSuffix + ? 
`${workflowFilename}-${finding.ruleIdSuffix}` + : `${workflowFilename}-security-finding-${index + 1}`, + message: { text: finding.message }, + level: finding.sarifLevel, + locations: [ + { + physicalLocation: { + artifactLocation: { uri: finding.file }, + region: { + startLine: finding.line, + startColumn: finding.column, + }, + }, + }, + ], + })), + }, + ], + }; + // Write SARIF file to filesystem + const fs = require("fs"); + const path = require("path"); + const sarifFileName = "security-report.sarif"; + const sarifFilePath = path.join(process.cwd(), sarifFileName); + try { + fs.writeFileSync(sarifFilePath, JSON.stringify(sarifContent, null, 2)); + console.log(`āœ“ Created SARIF file: ${sarifFilePath}`); + console.log(`SARIF file size: ${fs.statSync(sarifFilePath).size} bytes`); + // Set outputs for the GitHub Action + core.setOutput("sarif_file", sarifFilePath); + core.setOutput("findings_count", validFindings.length); + core.setOutput("artifact_uploaded", "pending"); + core.setOutput("codeql_uploaded", "pending"); + // Write summary with findings + let summaryContent = "\n\n## Security Report\n"; + summaryContent += `Found **${validFindings.length}** security finding(s):\n\n`; + for (const finding of validFindings) { + const emoji = + finding.severity === "error" + ? "šŸ”“" + : finding.severity === "warning" + ? "🟔" + : "šŸ”µ"; + summaryContent += `${emoji} **${finding.severity.toUpperCase()}** in \`${finding.file}:${finding.line}\`: ${finding.message}\n`; + } + summaryContent += `\nšŸ“„ SARIF file created: \`${sarifFileName}\`\n`; + summaryContent += `šŸ” Findings will be uploaded to GitHub Code Scanning\n`; + await core.summary.addRaw(summaryContent).write(); + } catch (error) { + console.error( + `āœ— Failed to create SARIF file:`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + console.log( + `Successfully created security report with ${validFindings.length} finding(s)` + ); + return { + sarifFile: sarifFilePath, + findingsCount: validFindings.length, + findings: validFindings, + }; + } + await main(); + - name: Upload SARIF artifact + if: steps.create_security_report.outputs.sarif_file + uses: actions/upload-artifact@v4 + with: + name: security-report.sarif + path: ${{ steps.create_security_report.outputs.sarif_file }} + - name: Upload SARIF to GitHub Security + if: steps.create_security_report.outputs.sarif_file + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: ${{ steps.create_security_report.outputs.sarif_file }} + diff --git a/.github/workflows/test-codex-create-security-report.md b/.github/workflows/test-codex-create-security-report.md new file mode 100644 index 0000000000..0eac73d41c --- /dev/null +++ b/.github/workflows/test-codex-create-security-report.md @@ -0,0 +1,33 @@ +--- +name: Test Codex Security Report +on: + workflow_dispatch: + reaction: eyes + +engine: + id: codex + +safe-outputs: + create-security-report: + max: 10 +--- + +# Security Analysis with Codex + +Analyze the repository codebase for security vulnerabilities and create security reports. 
+ +For each security finding you identify, specify: +- The file path relative to the repository root +- The line number where the issue occurs +- Optional column number for precise location +- The severity level (error, warning, info, or note) +- A detailed description of the security issue +- Optionally, a custom rule ID suffix for meaningful SARIF rule identifiers + +Focus on common security issues like: +- Hardcoded secrets or credentials +- SQL injection vulnerabilities +- Cross-site scripting (XSS) issues +- Insecure file operations +- Authentication bypasses +- Input validation problems diff --git a/docs/safe-outputs.md b/docs/safe-outputs.md index e1f45520d3..856fc1f71d 100644 --- a/docs/safe-outputs.md +++ b/docs/safe-outputs.md @@ -9,6 +9,8 @@ One of the primary security features of GitHub Agentic Workflows is "safe output | **New Issue Creation** | `create-issue:` | Create GitHub issues based on workflow output | 1 | | **Issue Comments** | `add-issue-comment:` | Post comments on issues or pull requests | 1 | | **Pull Request Creation** | `create-pull-request:` | Create pull requests with code changes | 1 | +| **Pull Request Review Comments** | `create-pull-request-review-comment:` | Create review comments on specific lines of code | 1 | +| **Security Reports** | `create-security-report:` | Generate SARIF security reports and upload to GitHub Code Scanning | unlimited | | **Label Addition** | `add-issue-label:` | Add labels to issues or pull requests | 3 | | **Issue Updates** | `update-issue:` | Update issue status, title, or body | 1 | | **Push to Branch** | `push-to-branch:` | Push changes directly to a branch | 1 | @@ -219,6 +221,60 @@ The compiled workflow will have additional prompting describing that, to create - Comments are automatically positioned on the correct side of the diff - Maximum comment limits prevent spam +### Security Report Creation (`create-security-report:`) + +Adding `create-security-report:` to the `safe-outputs:` section declares 
that the workflow should conclude with creating security reports in SARIF format based on the workflow's security analysis findings. The SARIF file is uploaded as an artifact and submitted to GitHub Code Scanning. + +**Basic Configuration:** +```yaml +safe-outputs: + create-security-report: +``` + +**With Configuration:** +```yaml +safe-outputs: + create-security-report: + max: 50 # Optional: maximum number of security findings (default: unlimited) +``` + +The agentic part of your workflow should describe the security findings it wants reported with specific file paths, line numbers, severity levels, and descriptions. + +**Example natural language to generate the output:** + +```markdown +# Security Analysis Agent + +Analyze the codebase for security vulnerabilities and create security reports. +Create security reports with your analysis findings. For each security finding, specify: +- The file path relative to the repository root +- The line number where the issue occurs +- The severity level (error, warning, info, or note) +- A detailed description of the security issue + +Security findings will be formatted as SARIF and uploaded to GitHub Code Scanning. 
+``` + +The compiled workflow will have additional prompting describing that, to create security reports, it should write the security findings to a special file with the following structure: +- `file`: The file path relative to the repository root +- `line`: The line number where the security issue occurs +- `column`: Optional column number where the security issue occurs (defaults to 1) +- `severity`: The severity level ("error", "warning", "info", or "note") +- `message`: The detailed description of the security issue +- `ruleIdSuffix`: Optional custom suffix for the SARIF rule ID (must contain only alphanumeric characters, hyphens, and underscores) + +**Key Features:** +- Generates SARIF (Static Analysis Results Interchange Format) reports +- Automatically uploads reports as GitHub Actions artifacts +- Integrates with GitHub Code Scanning for security dashboard visibility +- Supports standard severity levels (error, warning, info, note) +- Works in any workflow context (not limited to pull requests) +- Maximum findings limit prevents overwhelming reports +- Validates all required fields before generating SARIF +- Supports optional column specification for precise location +- Customizable rule IDs via optional ruleIdSuffix field +- Rule IDs default to `{workflow-filename}-security-finding-{index}` format when no custom suffix is provided + ### Label Addition (`add-issue-label:`) Adding `add-issue-label:` to the `safe-outputs:` section of your workflow declares that the workflow should conclude with adding labels to the current issue or pull request based on the coding agent's analysis. 
diff --git a/pkg/parser/schemas/main_workflow_schema.json b/pkg/parser/schemas/main_workflow_schema.json index b75876f687..8afc9e3613 100644 --- a/pkg/parser/schemas/main_workflow_schema.json +++ b/pkg/parser/schemas/main_workflow_schema.json @@ -1270,6 +1270,30 @@ } ] }, + "create-security-report": { + "oneOf": [ + { + "type": "object", + "description": "Configuration for creating security reports (SARIF format) from agentic workflow output", + "properties": { + "max": { + "type": "integer", + "description": "Maximum number of security findings to include (default: unlimited)", + "minimum": 1 + }, + "driver": { + "type": "string", + "description": "Driver name for SARIF tool.driver.name field (default: 'GitHub Agentic Workflows Security Scanner')" + } + }, + "additionalProperties": false + }, + { + "type": "null", + "description": "Enable security report creation with default configuration (unlimited findings)" + } + ] + }, "add-issue-label": { "oneOf": [ { diff --git a/pkg/workflow/compiler.go b/pkg/workflow/compiler.go index e01328e529..538c2aecad 100644 --- a/pkg/workflow/compiler.go +++ b/pkg/workflow/compiler.go @@ -115,6 +115,7 @@ func NewCompilerWithCustomOutput(verbose bool, engineOverride string, customOutp // WorkflowData holds all the data needed to generate a GitHub Actions workflow type WorkflowData struct { Name string + FrontmatterName string // name field from frontmatter (for security report driver default) On string Permissions string Network string // top-level network permissions configuration @@ -149,6 +150,7 @@ type SafeOutputsConfig struct { AddIssueComments *AddIssueCommentsConfig `yaml:"add-issue-comment,omitempty"` CreatePullRequests *CreatePullRequestsConfig `yaml:"create-pull-request,omitempty"` CreatePullRequestReviewComments *CreatePullRequestReviewCommentsConfig `yaml:"create-pull-request-review-comment,omitempty"` + CreateSecurityReports *CreateSecurityReportsConfig `yaml:"create-security-report,omitempty"` AddIssueLabels 
*AddIssueLabelsConfig `yaml:"add-issue-label,omitempty"` UpdateIssues *UpdateIssuesConfig `yaml:"update-issue,omitempty"` PushToBranch *PushToBranchConfig `yaml:"push-to-branch,omitempty"` @@ -195,6 +197,12 @@ type CreatePullRequestReviewCommentsConfig struct { Side string `yaml:"side,omitempty"` // Side of the diff: "LEFT" or "RIGHT" (default: "RIGHT") } +// CreateSecurityReportsConfig holds configuration for creating security reports (SARIF format) from agent output +type CreateSecurityReportsConfig struct { + Max int `yaml:"max,omitempty"` // Maximum number of security findings to include (default: unlimited) + Driver string `yaml:"driver,omitempty"` // Driver name for SARIF tool.driver.name field (default: "GitHub Agentic Workflows Security Scanner") +} + // AddIssueLabelsConfig holds configuration for adding labels to issues/PRs from agent output type AddIssueLabelsConfig struct { Allowed []string `yaml:"allowed,omitempty"` // Optional list of allowed labels. If omitted, any labels are allowed (including creating new ones). 
@@ -294,7 +302,7 @@ func (c *Compiler) CompileWorkflow(markdownPath string) error { if c.verbose { fmt.Println(console.FormatInfoMessage("Generating GitHub Actions YAML...")) } - yamlContent, err := c.generateYAML(workflowData) + yamlContent, err := c.generateYAML(workflowData, markdownPath) if err != nil { formattedErr := console.FormatError(console.CompilerError{ Position: console.ErrorPosition{ @@ -619,6 +627,7 @@ func (c *Compiler) parseWorkflowFile(markdownPath string) (*WorkflowData, error) // Build workflow data workflowData := &WorkflowData{ Name: workflowName, + FrontmatterName: c.extractStringValue(result.Frontmatter, "name"), Tools: tools, MarkdownContent: markdownContent, AI: engineSetting, @@ -816,6 +825,20 @@ func (c *Compiler) extractTopLevelYAMLSection(frontmatter map[string]any, key st return yamlStr } +// extractStringValue extracts a string value from the frontmatter map +func (c *Compiler) extractStringValue(frontmatter map[string]any, key string) string { + value, exists := frontmatter[key] + if !exists { + return "" + } + + if strValue, ok := value.(string); ok { + return strValue + } + + return "" +} + // commentOutDraftInOnSection comments out draft fields in pull_request sections within the YAML string // The draft field is processed separately by applyPullRequestDraftFilter and should be commented for documentation func (c *Compiler) commentOutDraftInOnSection(yamlStr string) string { @@ -1618,12 +1641,12 @@ func (c *Compiler) indentYAMLLines(yamlContent, indent string) string { } // generateYAML generates the complete GitHub Actions YAML content -func (c *Compiler) generateYAML(data *WorkflowData) (string, error) { +func (c *Compiler) generateYAML(data *WorkflowData, markdownPath string) (string, error) { // Reset job manager for this compilation c.jobManager = NewJobManager() // Build all jobs - if err := c.buildJobs(data); err != nil { + if err := c.buildJobs(data, markdownPath); err != nil { return "", fmt.Errorf("failed to build jobs: 
%w", err) } @@ -1680,7 +1703,7 @@ func (c *Compiler) isTaskJobNeeded(data *WorkflowData) bool { } // buildJobs creates all jobs for the workflow and adds them to the job manager -func (c *Compiler) buildJobs(data *WorkflowData) error { +func (c *Compiler) buildJobs(data *WorkflowData, markdownPath string) error { // Generate job name from workflow name jobName := c.generateJobName(data.Name) @@ -1762,6 +1785,19 @@ func (c *Compiler) buildJobs(data *WorkflowData) error { } } + // Build create_security_report job if output.create-security-report is configured + if data.SafeOutputs.CreateSecurityReports != nil { + // Extract the workflow filename without extension for rule ID prefix + workflowFilename := strings.TrimSuffix(filepath.Base(markdownPath), ".md") + createSecurityReportJob, err := c.buildCreateOutputSecurityReportJob(data, jobName, workflowFilename) + if err != nil { + return fmt.Errorf("failed to build create_security_report job: %w", err) + } + if err := c.jobManager.AddJob(createSecurityReportJob); err != nil { + return fmt.Errorf("failed to add create_security_report job: %w", err) + } + } + // Build create_pull_request job if output.create-pull-request is configured if data.SafeOutputs.CreatePullRequests != nil { createPullRequestJob, err := c.buildCreateOutputPullRequestJob(data, jobName) @@ -2207,6 +2243,94 @@ func (c *Compiler) buildCreateOutputPullRequestReviewCommentJob(data *WorkflowDa return job, nil } +// buildCreateOutputSecurityReportJob creates the create_security_report job +func (c *Compiler) buildCreateOutputSecurityReportJob(data *WorkflowData, mainJobName string, workflowFilename string) (*Job, error) { + if data.SafeOutputs == nil || data.SafeOutputs.CreateSecurityReports == nil { + return nil, fmt.Errorf("safe-outputs.create-security-report configuration is required") + } + + var steps []string + steps = append(steps, " - name: Create Security Report\n") + steps = append(steps, " id: create_security_report\n") + steps = append(steps, 
" uses: actions/github-script@v7\n") + + // Add environment variables + steps = append(steps, " env:\n") + // Pass the agent output content from the main job + steps = append(steps, fmt.Sprintf(" GITHUB_AW_AGENT_OUTPUT: ${{ needs.%s.outputs.output }}\n", mainJobName)) + // Pass the max configuration + if data.SafeOutputs.CreateSecurityReports.Max > 0 { + steps = append(steps, fmt.Sprintf(" GITHUB_AW_SECURITY_REPORT_MAX: %d\n", data.SafeOutputs.CreateSecurityReports.Max)) + } + // Pass the driver configuration, defaulting to frontmatter name + driverName := data.SafeOutputs.CreateSecurityReports.Driver + if driverName == "" { + if data.FrontmatterName != "" { + driverName = data.FrontmatterName + } else { + driverName = data.Name // fallback to H1 header name + } + } + steps = append(steps, fmt.Sprintf(" GITHUB_AW_SECURITY_REPORT_DRIVER: %s\n", driverName)) + // Pass the workflow filename for rule ID prefix + steps = append(steps, fmt.Sprintf(" GITHUB_AW_WORKFLOW_FILENAME: %s\n", workflowFilename)) + + steps = append(steps, " with:\n") + steps = append(steps, " script: |\n") + + // Add each line of the script with proper indentation + formattedScript := FormatJavaScriptForYAML(createSecurityReportScript) + steps = append(steps, formattedScript...) 
+ + // Add step to upload SARIF artifact + steps = append(steps, " - name: Upload SARIF artifact\n") + steps = append(steps, " if: steps.create_security_report.outputs.sarif_file\n") + steps = append(steps, " uses: actions/upload-artifact@v4\n") + steps = append(steps, " with:\n") + steps = append(steps, " name: security-report.sarif\n") + steps = append(steps, " path: ${{ steps.create_security_report.outputs.sarif_file }}\n") + + // Add step to upload SARIF to GitHub Code Scanning + steps = append(steps, " - name: Upload SARIF to GitHub Security\n") + steps = append(steps, " if: steps.create_security_report.outputs.sarif_file\n") + steps = append(steps, " uses: github/codeql-action/upload-sarif@v3\n") + steps = append(steps, " with:\n") + steps = append(steps, " sarif_file: ${{ steps.create_security_report.outputs.sarif_file }}\n") + + // Create outputs for the job + outputs := map[string]string{ + "sarif_file": "${{ steps.create_security_report.outputs.sarif_file }}", + "findings_count": "${{ steps.create_security_report.outputs.findings_count }}", + "artifact_uploaded": "${{ steps.create_security_report.outputs.artifact_uploaded }}", + "codeql_uploaded": "${{ steps.create_security_report.outputs.codeql_uploaded }}", + } + + // Build job condition - security reports can run in any context unlike PR review comments + var jobCondition string + if data.Command != "" { + // Build the command trigger condition + commandCondition := buildCommandOnlyCondition(data.Command) + commandConditionStr := commandCondition.Render() + jobCondition = fmt.Sprintf("if: %s", commandConditionStr) + } else { + // No specific condition needed - security reports can run anytime + jobCondition = "" + } + + job := &Job{ + Name: "create_security_report", + If: jobCondition, + RunsOn: "runs-on: ubuntu-latest", + Permissions: "permissions:\n contents: read\n security-events: write\n actions: read", // Need security-events:write for SARIF upload + TimeoutMinutes: 10, // 10-minute timeout + 
Steps: steps, + Outputs: outputs, + Depends: []string{mainJobName}, // Depend on the main workflow job + } + + return job, nil +} + // buildCreateOutputPullRequestJob creates the create_pull_request job func (c *Compiler) buildCreateOutputPullRequestJob(data *WorkflowData, mainJobName string) (*Job, error) { if data.SafeOutputs == nil || data.SafeOutputs.CreatePullRequests == nil { @@ -3033,6 +3157,12 @@ func (c *Compiler) extractSafeOutputsConfig(frontmatter map[string]any) *SafeOut config.CreatePullRequestReviewComments = prReviewCommentsConfig } + // Handle create-security-report + securityReportsConfig := c.parseSecurityReportsConfig(outputMap) + if securityReportsConfig != nil { + config.CreateSecurityReports = securityReportsConfig + } + // Parse allowed-domains configuration if allowedDomains, exists := outputMap["allowed-domains"]; exists { if domainsArray, ok := allowedDomains.([]any); ok { @@ -3295,6 +3425,34 @@ func (c *Compiler) parsePullRequestReviewCommentsConfig(outputMap map[string]any return prReviewCommentsConfig } +// parseSecurityReportsConfig handles create-security-report configuration +func (c *Compiler) parseSecurityReportsConfig(outputMap map[string]any) *CreateSecurityReportsConfig { + if _, exists := outputMap["create-security-report"]; !exists { + return nil + } + + configData := outputMap["create-security-report"] + securityReportsConfig := &CreateSecurityReportsConfig{Max: 0} // Default max is 0 (unlimited) + + if configMap, ok := configData.(map[string]any); ok { + // Parse max + if max, exists := configMap["max"]; exists { + if maxInt, ok := c.parseIntValue(max); ok { + securityReportsConfig.Max = maxInt + } + } + + // Parse driver + if driver, exists := configMap["driver"]; exists { + if driverStr, ok := driver.(string); ok { + securityReportsConfig.Driver = driverStr + } + } + } + + return securityReportsConfig +} + // parseIntValue safely parses various numeric types to int func (c *Compiler) parseIntValue(value any) (int, bool) { 
switch v := value.(type) { diff --git a/pkg/workflow/compiler_test.go b/pkg/workflow/compiler_test.go index 431e56b369..37872c9047 100644 --- a/pkg/workflow/compiler_test.go +++ b/pkg/workflow/compiler_test.go @@ -3528,7 +3528,7 @@ Test workflow with reaction. } // Generate YAML and verify it contains reaction jobs - yamlContent, err := compiler.generateYAML(workflowData) + yamlContent, err := compiler.generateYAML(workflowData, "test-workflow.md") if err != nil { t.Fatalf("Failed to generate YAML: %v", err) } @@ -3600,7 +3600,7 @@ Test workflow without explicit reaction (should not create reaction action). } // Generate YAML and verify it does NOT contain reaction jobs - yamlContent, err := compiler.generateYAML(workflowData) + yamlContent, err := compiler.generateYAML(workflowData, "test-workflow.md") if err != nil { t.Fatalf("Failed to generate YAML: %v", err) } @@ -3673,7 +3673,7 @@ Test workflow with reaction and comment editing. } // Generate YAML and verify it contains the enhanced reaction script - yamlContent, err := compiler.generateYAML(workflowData) + yamlContent, err := compiler.generateYAML(workflowData, "test-workflow.md") if err != nil { t.Fatalf("Failed to generate YAML: %v", err) } @@ -3756,7 +3756,7 @@ Test command workflow with reaction and comment editing. 
/**
 * Converts validated "create-security-report" items from the agent output
 * into a SARIF 2.1.0 file, writes it to the workspace, sets action outputs
 * (sarif_file, findings_count, artifact_uploaded, codeql_uploaded), and
 * appends a findings summary to the step summary.
 *
 * Environment variables:
 *   GITHUB_AW_AGENT_OUTPUT           - JSON {items:[...]} from the agent (required)
 *   GITHUB_AW_SECURITY_REPORT_MAX    - optional findings cap (0/unset = unlimited)
 *   GITHUB_AW_SECURITY_REPORT_DRIVER - optional SARIF tool.driver.name
 *   GITHUB_AW_WORKFLOW_FILENAME      - optional rule-ID prefix (default "workflow")
 */
async function main() {
  const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT;
  if (!outputContent) {
    console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found");
    return;
  }

  if (outputContent.trim() === "") {
    console.log("Agent output content is empty");
    return;
  }

  console.log("Agent output content length:", outputContent.length);

  // Parse the validated output JSON.
  let validatedOutput;
  try {
    validatedOutput = JSON.parse(outputContent);
  } catch (error) {
    console.log(
      "Error parsing agent output JSON:",
      error instanceof Error ? error.message : String(error)
    );
    return;
  }

  if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) {
    console.log("No valid items found in agent output");
    return;
  }

  // Collect all create-security-report items.
  const securityItems = validatedOutput.items.filter(
    /** @param {any} item */ item => item.type === "create-security-report"
  );
  if (securityItems.length === 0) {
    console.log("No create-security-report items found in agent output");
    return;
  }

  console.log(`Found ${securityItems.length} create-security-report item(s)`);

  // 0 means unlimited. Explicit radix avoids parseInt surprises.
  const maxFindings = process.env.GITHUB_AW_SECURITY_REPORT_MAX
    ? parseInt(process.env.GITHUB_AW_SECURITY_REPORT_MAX, 10)
    : 0;
  console.log(
    `Max findings configuration: ${maxFindings === 0 ? "unlimited" : maxFindings}`
  );

  const driverName =
    process.env.GITHUB_AW_SECURITY_REPORT_DRIVER ||
    "GitHub Agentic Workflows Security Scanner";
  console.log(`Driver name: ${driverName}`);

  const workflowFilename =
    process.env.GITHUB_AW_WORKFLOW_FILENAME || "workflow";
  console.log(`Workflow filename for rule ID prefix: ${workflowFilename}`);

  // Maps accepted severities to SARIF result levels.
  const severityMap = {
    error: "error",
    warning: "warning",
    info: "note",
    note: "note",
  };

  const validFindings = [];

  // Validate each item; invalid items are logged and skipped.
  for (let i = 0; i < securityItems.length; i++) {
    const securityItem = securityItems[i];
    console.log(
      `Processing create-security-report item ${i + 1}/${securityItems.length}:`,
      {
        file: securityItem.file,
        line: securityItem.line,
        severity: securityItem.severity,
        messageLength: securityItem.message
          ? securityItem.message.length
          : "undefined",
        ruleIdSuffix: securityItem.ruleIdSuffix || "not specified",
      }
    );

    // "file" must be a non-empty string; a bare truthiness check would let a
    // numeric value through and crash on .trim() below.
    if (!securityItem.file || typeof securityItem.file !== "string") {
      console.log(
        'Missing or invalid required field "file" in security report item'
      );
      continue;
    }

    if (
      !securityItem.line ||
      (typeof securityItem.line !== "number" &&
        typeof securityItem.line !== "string")
    ) {
      console.log(
        'Missing or invalid required field "line" in security report item'
      );
      continue;
    }

    if (!securityItem.severity || typeof securityItem.severity !== "string") {
      console.log(
        'Missing or invalid required field "severity" in security report item'
      );
      continue;
    }

    if (!securityItem.message || typeof securityItem.message !== "string") {
      console.log(
        'Missing or invalid required field "message" in security report item'
      );
      continue;
    }

    // Line must parse to a positive integer.
    const line = parseInt(securityItem.line, 10);
    if (isNaN(line) || line <= 0) {
      console.log(`Invalid line number: ${securityItem.line}`);
      continue;
    }

    // Optional column; defaults to 1 when absent.
    let column = 1;
    if (securityItem.column !== undefined) {
      if (
        typeof securityItem.column !== "number" &&
        typeof securityItem.column !== "string"
      ) {
        console.log(
          'Invalid field "column" in security report item (must be number or string)'
        );
        continue;
      }
      const parsedColumn = parseInt(securityItem.column, 10);
      if (isNaN(parsedColumn) || parsedColumn <= 0) {
        console.log(`Invalid column number: ${securityItem.column}`);
        continue;
      }
      column = parsedColumn;
    }

    // Optional rule ID suffix: non-empty, alphanumeric/hyphen/underscore only.
    let ruleIdSuffix = null;
    if (securityItem.ruleIdSuffix !== undefined) {
      if (typeof securityItem.ruleIdSuffix !== "string") {
        console.log(
          'Invalid field "ruleIdSuffix" in security report item (must be string)'
        );
        continue;
      }
      const trimmedSuffix = securityItem.ruleIdSuffix.trim();
      if (trimmedSuffix.length === 0) {
        console.log(
          'Invalid field "ruleIdSuffix" in security report item (cannot be empty)'
        );
        continue;
      }
      if (!/^[a-zA-Z0-9_-]+$/.test(trimmedSuffix)) {
        console.log(
          `Invalid ruleIdSuffix "${trimmedSuffix}" (must contain only alphanumeric characters, hyphens, and underscores)`
        );
        continue;
      }
      ruleIdSuffix = trimmedSuffix;
    }

    const normalizedSeverity = securityItem.severity.toLowerCase();
    if (!severityMap[normalizedSeverity]) {
      console.log(
        `Invalid severity level: ${securityItem.severity} (must be error, warning, info, or note)`
      );
      continue;
    }

    validFindings.push({
      file: securityItem.file.trim(),
      line: line,
      column: column,
      severity: normalizedSeverity,
      sarifLevel: severityMap[normalizedSeverity],
      message: securityItem.message.trim(),
      ruleIdSuffix: ruleIdSuffix,
    });

    // Stop once the configured cap is reached (cap counts valid findings).
    if (maxFindings > 0 && validFindings.length >= maxFindings) {
      console.log(`Reached maximum findings limit: ${maxFindings}`);
      break;
    }
  }

  if (validFindings.length === 0) {
    console.log("No valid security findings to report");
    return;
  }

  console.log(`Processing ${validFindings.length} valid security finding(s)`);

  // Build the SARIF 2.1.0 document. Rule IDs are prefixed with the workflow
  // filename; a custom suffix is used when provided, otherwise a 1-based
  // sequence number.
  const sarifContent = {
    $schema:
      "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
    version: "2.1.0",
    runs: [
      {
        tool: {
          driver: {
            name: driverName,
            version: "1.0.0",
            informationUri: "https://github.com/githubnext/gh-aw-copilots",
          },
        },
        results: validFindings.map((finding, index) => ({
          ruleId: finding.ruleIdSuffix
            ? `${workflowFilename}-${finding.ruleIdSuffix}`
            : `${workflowFilename}-security-finding-${index + 1}`,
          message: { text: finding.message },
          level: finding.sarifLevel,
          locations: [
            {
              physicalLocation: {
                artifactLocation: { uri: finding.file },
                region: {
                  startLine: finding.line,
                  startColumn: finding.column,
                },
              },
            },
          ],
        })),
      },
    ],
  };

  // Write the SARIF file into the working directory.
  const fs = require("fs");
  const path = require("path");
  const sarifFileName = "security-report.sarif";
  const sarifFilePath = path.join(process.cwd(), sarifFileName);

  try {
    fs.writeFileSync(sarifFilePath, JSON.stringify(sarifContent, null, 2));
    console.log(`āœ“ Created SARIF file: ${sarifFilePath}`);
    console.log(`SARIF file size: ${fs.statSync(sarifFilePath).size} bytes`);

    // Expose results to subsequent workflow steps.
    core.setOutput("sarif_file", sarifFilePath);
    core.setOutput("findings_count", validFindings.length);
    core.setOutput("artifact_uploaded", "pending");
    core.setOutput("codeql_uploaded", "pending");

    // Human-readable step summary.
    let summaryContent = "\n\n## Security Report\n";
    summaryContent += `Found **${validFindings.length}** security finding(s):\n\n`;

    for (const finding of validFindings) {
      const emoji =
        finding.severity === "error"
          ? "šŸ”“"
          : finding.severity === "warning"
            ? "🟔"
            : "šŸ”µ";
      summaryContent += `${emoji} **${finding.severity.toUpperCase()}** in \`${finding.file}:${finding.line}\`: ${finding.message}\n`;
    }

    summaryContent += `\nšŸ“„ SARIF file created: \`${sarifFileName}\`\n`;
    summaryContent += `šŸ” Findings will be uploaded to GitHub Code Scanning\n`;

    await core.summary.addRaw(summaryContent).write();
  } catch (error) {
    console.error(
      `āœ— Failed to create SARIF file:`,
      error instanceof Error ? error.message : String(error)
    );
    throw error;
  }

  console.log(
    `Successfully created security report with ${validFindings.length} finding(s)`
  );
  return {
    sarifFile: sarifFilePath,
    findingsCount: validFindings.length,
    findings: validFindings,
  };
}
await main();
describe("main function", () => { + it("should handle missing environment variable", async () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); + + consoleSpy.mockRestore(); + }); + + it("should handle empty agent output", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); + + consoleSpy.mockRestore(); + }); + + it("should handle invalid JSON", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = "invalid json"; + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "Error parsing agent output JSON:", + expect.any(String) + ); + + consoleSpy.mockRestore(); + }); + + it("should handle missing items array", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + status: "success", + }); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "No valid items found in agent output" + ); + + consoleSpy.mockRestore(); + }); + + it("should handle no security report items", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [{ type: "create-issue", title: "Test Issue" }], + }); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "No create-security-report items found in agent output" + ); + + consoleSpy.mockRestore(); + }); + 
+ it("should create SARIF file for valid security findings", async () => { + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + severity: "error", + message: "SQL injection vulnerability detected", + }, + { + type: "create-security-report", + file: "src/utils.js", + line: 15, + severity: "warning", + message: "Potential XSS vulnerability", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + // Check that SARIF file was created + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + expect(fs.existsSync(sarifFile)).toBe(true); + + // Check SARIF content + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + expect(sarifContent.version).toBe("2.1.0"); + expect(sarifContent.runs).toHaveLength(1); + expect(sarifContent.runs[0].results).toHaveLength(2); + + // Check first finding + const firstResult = sarifContent.runs[0].results[0]; + expect(firstResult.message.text).toBe( + "SQL injection vulnerability detected" + ); + expect(firstResult.level).toBe("error"); + expect( + firstResult.locations[0].physicalLocation.artifactLocation.uri + ).toBe("src/app.js"); + expect(firstResult.locations[0].physicalLocation.region.startLine).toBe( + 42 + ); + + // Check second finding + const secondResult = sarifContent.runs[0].results[1]; + expect(secondResult.message.text).toBe("Potential XSS vulnerability"); + expect(secondResult.level).toBe("warning"); + expect( + secondResult.locations[0].physicalLocation.artifactLocation.uri + ).toBe("src/utils.js"); + expect(secondResult.locations[0].physicalLocation.region.startLine).toBe( + 15 + ); + + // Check outputs were set + expect(mockCore.setOutput).toHaveBeenCalledWith("sarif_file", sarifFile); + expect(mockCore.setOutput).toHaveBeenCalledWith("findings_count", 
2); + + // Check summary was written + expect(mockCore.summary.addRaw).toHaveBeenCalled(); + expect(mockCore.summary.write).toHaveBeenCalled(); + + consoleSpy.mockRestore(); + }); + + it("should respect max findings limit", async () => { + process.env.GITHUB_AW_SECURITY_REPORT_MAX = "1"; + + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + severity: "error", + message: "First finding", + }, + { + type: "create-security-report", + file: "src/utils.js", + line: 15, + severity: "warning", + message: "Second finding", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + // Check that SARIF file was created with only 1 finding + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + expect(fs.existsSync(sarifFile)).toBe(true); + + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + expect(sarifContent.runs[0].results).toHaveLength(1); + expect(sarifContent.runs[0].results[0].message.text).toBe( + "First finding" + ); + + // Check output reflects the limit + expect(mockCore.setOutput).toHaveBeenCalledWith("findings_count", 1); + + consoleSpy.mockRestore(); + }); + + it("should validate and filter invalid security findings", async () => { + const mixedFindings = { + items: [ + { + type: "create-security-report", + file: "src/valid.js", + line: 10, + severity: "error", + message: "Valid finding", + }, + { + type: "create-security-report", + // Missing file + line: 20, + severity: "error", + message: "Invalid - no file", + }, + { + type: "create-security-report", + file: "src/invalid.js", + // Missing line + severity: "error", + message: "Invalid - no line", + }, + { + type: "create-security-report", + file: "src/invalid2.js", + line: "not-a-number", + severity: "error", + message: "Invalid - bad 
line", + }, + { + type: "create-security-report", + file: "src/invalid3.js", + line: 30, + severity: "invalid-severity", + message: "Invalid - bad severity", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(mixedFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + // Check that SARIF file was created with only the 1 valid finding + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + expect(fs.existsSync(sarifFile)).toBe(true); + + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + expect(sarifContent.runs[0].results).toHaveLength(1); + expect(sarifContent.runs[0].results[0].message.text).toBe( + "Valid finding" + ); + + // Check outputs + expect(mockCore.setOutput).toHaveBeenCalledWith("findings_count", 1); + + consoleSpy.mockRestore(); + }); + + it("should use custom driver name when configured", async () => { + process.env.GITHUB_AW_SECURITY_REPORT_DRIVER = "Custom Security Scanner"; + process.env.GITHUB_AW_WORKFLOW_FILENAME = "security-scan"; + + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + severity: "error", + message: "Security issue found", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + + // Check driver name + expect(sarifContent.runs[0].tool.driver.name).toBe( + "Custom Security Scanner" + ); + + // Check rule ID includes workflow filename + expect(sarifContent.runs[0].results[0].ruleId).toBe( + "security-scan-security-finding-1" + ); + + consoleSpy.mockRestore(); + }); + + it("should use default driver name 
when not configured", async () => { + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + severity: "error", + message: "Security issue found", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + + // Check default driver name + expect(sarifContent.runs[0].tool.driver.name).toBe( + "GitHub Agentic Workflows Security Scanner" + ); + + // Check rule ID includes default workflow filename + expect(sarifContent.runs[0].results[0].ruleId).toBe( + "workflow-security-finding-1" + ); + + consoleSpy.mockRestore(); + }); + + it("should support optional column specification", async () => { + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + column: 15, + severity: "error", + message: "Security issue with column info", + }, + { + type: "create-security-report", + file: "src/utils.js", + line: 25, + // No column specified - should default to 1 + severity: "warning", + message: "Security issue without column", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + + // Check first result has custom column + expect( + sarifContent.runs[0].results[0].locations[0].physicalLocation.region + .startColumn + ).toBe(15); + + // Check second result has default column + expect( + 
sarifContent.runs[0].results[1].locations[0].physicalLocation.region + .startColumn + ).toBe(1); + + consoleSpy.mockRestore(); + }); + + it("should validate column numbers", async () => { + const invalidFindings = { + items: [ + { + type: "create-security-report", + file: "src/valid.js", + line: 10, + column: 5, + severity: "error", + message: "Valid with column", + }, + { + type: "create-security-report", + file: "src/invalid1.js", + line: 20, + column: "not-a-number", + severity: "error", + message: "Invalid column - not a number", + }, + { + type: "create-security-report", + file: "src/invalid2.js", + line: 30, + column: 0, + severity: "error", + message: "Invalid column - zero", + }, + { + type: "create-security-report", + file: "src/invalid3.js", + line: 40, + column: -1, + severity: "error", + message: "Invalid column - negative", + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(invalidFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + // Only the first valid finding should be processed + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + expect(sarifContent.runs[0].results).toHaveLength(1); + expect(sarifContent.runs[0].results[0].message.text).toBe( + "Valid with column" + ); + expect( + sarifContent.runs[0].results[0].locations[0].physicalLocation.region + .startColumn + ).toBe(5); + + consoleSpy.mockRestore(); + }); + + it("should support optional ruleIdSuffix specification", async () => { + process.env.GITHUB_AW_WORKFLOW_FILENAME = "security-scan"; + + const securityFindings = { + items: [ + { + type: "create-security-report", + file: "src/app.js", + line: 42, + severity: "error", + message: "Custom rule ID finding", + ruleIdSuffix: "sql-injection", + }, + { + type: "create-security-report", + file: "src/utils.js", + line: 25, + severity: 
"warning", + message: "Another custom rule ID", + ruleIdSuffix: "xss-vulnerability", + }, + { + type: "create-security-report", + file: "src/config.js", + line: 10, + severity: "info", + message: "Standard numbered finding", + // No ruleIdSuffix - should use default numbering + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(securityFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + + // Check first result has custom rule ID + expect(sarifContent.runs[0].results[0].ruleId).toBe( + "security-scan-sql-injection" + ); + + // Check second result has custom rule ID + expect(sarifContent.runs[0].results[1].ruleId).toBe( + "security-scan-xss-vulnerability" + ); + + // Check third result uses default numbering + expect(sarifContent.runs[0].results[2].ruleId).toBe( + "security-scan-security-finding-3" + ); + + consoleSpy.mockRestore(); + }); + + it("should validate ruleIdSuffix values", async () => { + const invalidFindings = { + items: [ + { + type: "create-security-report", + file: "src/valid.js", + line: 10, + severity: "error", + message: "Valid with valid ruleIdSuffix", + ruleIdSuffix: "valid-rule-id_123", + }, + { + type: "create-security-report", + file: "src/invalid1.js", + line: 20, + severity: "error", + message: "Invalid ruleIdSuffix - empty string", + ruleIdSuffix: "", + }, + { + type: "create-security-report", + file: "src/invalid2.js", + line: 30, + severity: "error", + message: "Invalid ruleIdSuffix - whitespace only", + ruleIdSuffix: " ", + }, + { + type: "create-security-report", + file: "src/invalid3.js", + line: 40, + severity: "error", + message: "Invalid ruleIdSuffix - special characters", + ruleIdSuffix: "rule@id!", + }, + { + type: "create-security-report", + file: "src/invalid4.js", + 
line: 50, + severity: "error", + message: "Invalid ruleIdSuffix - not a string", + ruleIdSuffix: 123, + }, + ], + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(invalidFindings); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + await eval(`(async () => { ${securityReportScript} })()`); + + // Only the first valid finding should be processed + const sarifFile = path.join(process.cwd(), "security-report.sarif"); + const sarifContent = JSON.parse(fs.readFileSync(sarifFile, "utf8")); + expect(sarifContent.runs[0].results).toHaveLength(1); + expect(sarifContent.runs[0].results[0].message.text).toBe( + "Valid with valid ruleIdSuffix" + ); + expect(sarifContent.runs[0].results[0].ruleId).toBe( + "workflow-valid-rule-id_123" + ); + + consoleSpy.mockRestore(); + }); + }); +}); diff --git a/pkg/workflow/security_reports_test.go b/pkg/workflow/security_reports_test.go new file mode 100644 index 0000000000..10ea48537a --- /dev/null +++ b/pkg/workflow/security_reports_test.go @@ -0,0 +1,319 @@ +package workflow + +import ( + "strings" + "testing" +) + +// TestSecurityReportsConfig tests the parsing of create-security-report configuration +func TestSecurityReportsConfig(t *testing.T) { + compiler := NewCompiler(false, "", "test-version") + + tests := []struct { + name string + frontmatter map[string]any + expectedConfig *CreateSecurityReportsConfig + }{ + { + name: "basic security report configuration", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-security-report": nil, + }, + }, + expectedConfig: &CreateSecurityReportsConfig{Max: 0}, // 0 means unlimited + }, + { + name: "security report with max configuration", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-security-report": map[string]any{ + "max": 50, + }, + }, + }, + expectedConfig: &CreateSecurityReportsConfig{Max: 50}, + }, + { + name: "security report with driver configuration", + frontmatter: map[string]any{ + 
"safe-outputs": map[string]any{ + "create-security-report": map[string]any{ + "driver": "Custom Security Scanner", + }, + }, + }, + expectedConfig: &CreateSecurityReportsConfig{Max: 0, Driver: "Custom Security Scanner"}, + }, + { + name: "security report with max and driver configuration", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-security-report": map[string]any{ + "max": 25, + "driver": "Advanced Scanner", + }, + }, + }, + expectedConfig: &CreateSecurityReportsConfig{Max: 25, Driver: "Advanced Scanner"}, + }, + { + name: "no security report configuration", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-issue": nil, + }, + }, + expectedConfig: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := compiler.extractSafeOutputsConfig(tt.frontmatter) + + if tt.expectedConfig == nil { + if config == nil || config.CreateSecurityReports == nil { + return // Expected no config + } + t.Errorf("Expected no CreateSecurityReports config, but got: %+v", config.CreateSecurityReports) + return + } + + if config == nil || config.CreateSecurityReports == nil { + t.Errorf("Expected CreateSecurityReports config, but got nil") + return + } + + if config.CreateSecurityReports.Max != tt.expectedConfig.Max { + t.Errorf("Expected Max=%d, got Max=%d", tt.expectedConfig.Max, config.CreateSecurityReports.Max) + } + + if config.CreateSecurityReports.Driver != tt.expectedConfig.Driver { + t.Errorf("Expected Driver=%s, got Driver=%s", tt.expectedConfig.Driver, config.CreateSecurityReports.Driver) + } + }) + } +} + +// TestBuildCreateOutputSecurityReportJob tests the creation of security report job +func TestBuildCreateOutputSecurityReportJob(t *testing.T) { + compiler := NewCompiler(false, "", "test-version") + + // Test valid configuration + data := &WorkflowData{ + SafeOutputs: &SafeOutputsConfig{ + CreateSecurityReports: &CreateSecurityReportsConfig{Max: 0}, + }, + } + + job, err := 
compiler.buildCreateOutputSecurityReportJob(data, "main_job", "test-workflow") + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if job.Name != "create_security_report" { + t.Errorf("Expected job name 'create_security_report', got '%s'", job.Name) + } + + if job.TimeoutMinutes != 10 { + t.Errorf("Expected timeout 10 minutes, got %d", job.TimeoutMinutes) + } + + if len(job.Depends) != 1 || job.Depends[0] != "main_job" { + t.Errorf("Expected dependency on 'main_job', got %v", job.Depends) + } + + // Check that job has necessary permissions + if !strings.Contains(job.Permissions, "security-events: write") { + t.Errorf("Expected security-events: write permission in job, got: %s", job.Permissions) + } + + // Check that steps include SARIF upload + stepsStr := strings.Join(job.Steps, "") + if !strings.Contains(stepsStr, "Upload SARIF") { + t.Errorf("Expected SARIF upload steps in job") + } + + if !strings.Contains(stepsStr, "codeql-action/upload-sarif") { + t.Errorf("Expected CodeQL SARIF upload action in job") + } + + // Test with max configuration + dataWithMax := &WorkflowData{ + SafeOutputs: &SafeOutputsConfig{ + CreateSecurityReports: &CreateSecurityReportsConfig{Max: 25}, + }, + } + + jobWithMax, err := compiler.buildCreateOutputSecurityReportJob(dataWithMax, "main_job", "test-workflow") + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + stepsWithMaxStr := strings.Join(jobWithMax.Steps, "") + if !strings.Contains(stepsWithMaxStr, "GITHUB_AW_SECURITY_REPORT_MAX: 25") { + t.Errorf("Expected max configuration in environment variables") + } + + // Test with driver configuration + dataWithDriver := &WorkflowData{ + Name: "My Security Workflow", + FrontmatterName: "My Security Workflow", + SafeOutputs: &SafeOutputsConfig{ + CreateSecurityReports: &CreateSecurityReportsConfig{Driver: "Custom Scanner"}, + }, + } + + jobWithDriver, err := compiler.buildCreateOutputSecurityReportJob(dataWithDriver, "main_job", "my-workflow") + if 
err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + stepsWithDriverStr := strings.Join(jobWithDriver.Steps, "") + if !strings.Contains(stepsWithDriverStr, "GITHUB_AW_SECURITY_REPORT_DRIVER: Custom Scanner") { + t.Errorf("Expected driver configuration in environment variables") + } + + // Test with no driver configuration - should default to frontmatter name + dataNoDriver := &WorkflowData{ + Name: "Security Analysis Workflow", + FrontmatterName: "Security Analysis Workflow", + SafeOutputs: &SafeOutputsConfig{ + CreateSecurityReports: &CreateSecurityReportsConfig{Max: 0}, // No driver specified + }, + } + + jobNoDriver, err := compiler.buildCreateOutputSecurityReportJob(dataNoDriver, "main_job", "security-analysis") + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + stepsNoDriverStr := strings.Join(jobNoDriver.Steps, "") + if !strings.Contains(stepsNoDriverStr, "GITHUB_AW_SECURITY_REPORT_DRIVER: Security Analysis Workflow") { + t.Errorf("Expected frontmatter name as default driver in environment variables, got: %s", stepsNoDriverStr) + } + + // Test with no driver and no frontmatter name - should fallback to H1 name + dataFallback := &WorkflowData{ + Name: "Security Analysis", + FrontmatterName: "", // No frontmatter name + SafeOutputs: &SafeOutputsConfig{ + CreateSecurityReports: &CreateSecurityReportsConfig{Max: 0}, // No driver specified + }, + } + + jobFallback, err := compiler.buildCreateOutputSecurityReportJob(dataFallback, "main_job", "security-analysis") + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + stepsFallbackStr := strings.Join(jobFallback.Steps, "") + if !strings.Contains(stepsFallbackStr, "GITHUB_AW_SECURITY_REPORT_DRIVER: Security Analysis") { + t.Errorf("Expected H1 name as fallback driver in environment variables, got: %s", stepsFallbackStr) + } + + // Check that workflow filename is passed + if !strings.Contains(stepsWithDriverStr, "GITHUB_AW_WORKFLOW_FILENAME: my-workflow") { + 
t.Errorf("Expected workflow filename in environment variables") + } + + // Test error case - no configuration + dataNoConfig := &WorkflowData{SafeOutputs: nil} + _, err = compiler.buildCreateOutputSecurityReportJob(dataNoConfig, "main_job", "test-workflow") + if err == nil { + t.Errorf("Expected error when no SafeOutputs config provided") + } +} + +// TestParseSecurityReportsConfig tests the parsing function directly +func TestParseSecurityReportsConfig(t *testing.T) { + compiler := NewCompiler(false, "", "test-version") + + tests := []struct { + name string + outputMap map[string]any + expectedMax int + expectedDriver string + expectNil bool + }{ + { + name: "basic configuration", + outputMap: map[string]any{ + "create-security-report": nil, + }, + expectedMax: 0, + expectedDriver: "", + expectNil: false, + }, + { + name: "configuration with max", + outputMap: map[string]any{ + "create-security-report": map[string]any{ + "max": 100, + }, + }, + expectedMax: 100, + expectedDriver: "", + expectNil: false, + }, + { + name: "configuration with driver", + outputMap: map[string]any{ + "create-security-report": map[string]any{ + "driver": "Test Security Scanner", + }, + }, + expectedMax: 0, + expectedDriver: "Test Security Scanner", + expectNil: false, + }, + { + name: "configuration with max and driver", + outputMap: map[string]any{ + "create-security-report": map[string]any{ + "max": 50, + "driver": "Combined Scanner", + }, + }, + expectedMax: 50, + expectedDriver: "Combined Scanner", + expectNil: false, + }, + { + name: "no configuration", + outputMap: map[string]any{ + "other-config": nil, + }, + expectedMax: 0, + expectedDriver: "", + expectNil: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := compiler.parseSecurityReportsConfig(tt.outputMap) + + if tt.expectNil { + if config != nil { + t.Errorf("Expected nil config, got: %+v", config) + } + return + } + + if config == nil { + t.Errorf("Expected config, got nil") + 
return + } + + if config.Max != tt.expectedMax { + t.Errorf("Expected Max=%d, got Max=%d", tt.expectedMax, config.Max) + } + + if config.Driver != tt.expectedDriver { + t.Errorf("Expected Driver=%s, got Driver=%s", tt.expectedDriver, config.Driver) + } + }) + } +}