diff --git a/.gitattributes b/.gitattributes
index 63a07cc3e0..33e102d50b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -5,5 +5,6 @@
pkg/cli/workflows/*.lock.yml linguist-generated=true merge=ours
pkg/workflow/js/*.js linguist-generated=true
actions/*/index.js linguist-generated=true
+actions/setup/js/*.cjs linguist-generated=true
.github/workflows/*.campaign.g.md linguist-generated=true merge=ours
diff --git a/.github/workflows/issue-classifier.lock.yml b/.github/workflows/issue-classifier.lock.yml
index f894328e7a..72300cabfc 100644
--- a/.github/workflows/issue-classifier.lock.yml
+++ b/.github/workflows/issue-classifier.lock.yml
@@ -2241,7 +2241,7 @@ jobs:
path: /tmp/gh-aw/aw_info.json
if-no-files-found: warn
- name: Run AI Inference
- uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v1
+ uses: actions/ai-inference@334892bb203895caaed82ec52d23c1ed9385151e # v2.0.4
env:
GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
diff --git a/.github/workflows/release.lock.yml b/.github/workflows/release.lock.yml
index f9d876b923..7505a380e0 100644
--- a/.github/workflows/release.lock.yml
+++ b/.github/workflows/release.lock.yml
@@ -6056,13 +6056,13 @@ jobs:
- name: Download Go modules
run: go mod download
- name: Generate SBOM (SPDX format)
- uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10
+ uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11
with:
artifact-name: sbom.spdx.json
format: spdx-json
output-file: sbom.spdx.json
- name: Generate SBOM (CycloneDX format)
- uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.10
+ uses: anchore/sbom-action@43a17d6e7add2b5535efe4dcae9952337c479a93 # v0.20.11
with:
artifact-name: sbom.cdx.json
format: cyclonedx-json
@@ -6261,7 +6261,7 @@ jobs:
fetch-depth: 0
persist-credentials: false
- name: Release with gh-extension-precompile
- uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2
+ uses: cli/gh-extension-precompile@9e2237c30f869ad3bcaed6a4be2cd43564dd421b # v2.1.0
with:
build_script_override: scripts/build-release.sh
go_version_file: go.mod
diff --git a/.github/workflows/stale-repo-identifier.lock.yml b/.github/workflows/stale-repo-identifier.lock.yml
index 4962f3b743..a5c11ba3c5 100644
--- a/.github/workflows/stale-repo-identifier.lock.yml
+++ b/.github/workflows/stale-repo-identifier.lock.yml
@@ -176,7 +176,7 @@ jobs:
ORGANIZATION: ${{ env.ORGANIZATION }}
id: stale-repos
name: Run stale_repos tool
- uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3
+ uses: github/stale-repos@a21e55567b83cf3c3f3f9085d3038dc6cee02598 # v3.0.2
- env:
INACTIVE_REPOS: ${{ steps.stale-repos.outputs.inactiveRepos }}
name: Save stale repos output
diff --git a/.github/workflows/super-linter.lock.yml b/.github/workflows/super-linter.lock.yml
index 4c5f0068e1..2f4fa9739c 100644
--- a/.github/workflows/super-linter.lock.yml
+++ b/.github/workflows/super-linter.lock.yml
@@ -7100,7 +7100,7 @@ jobs:
persist-credentials: false
- name: Super-linter
id: super-linter
- uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.2.1
+ uses: super-linter/super-linter@47984f49b4e87383eed97890fe2dca6063bbd9c3 # v8.3.1
env:
CREATE_LOG_FILE: "true"
DEFAULT_BRANCH: main
diff --git a/.gitignore b/.gitignore
index ddd11f04f4..ca60997b55 100644
--- a/.gitignore
+++ b/.gitignore
@@ -126,9 +126,13 @@ gosec-results.sarif
govulncheck-results.sarif
trivy-results.sarif
-# Generated action files
+# Generated action files - kept in .gitignore as build artifacts
+# Note: If workflows fail due to missing js/ files, these may need to be committed
+# The js/ directories contain compiled JavaScript from pkg/workflow/js/*.cjs
+# and are generated by 'make actions-build'
actions/setup-safe-outputs/js/
-actions/setup/js/
+# Temporarily removed from .gitignore to fix workflow failures:
+# actions/setup/js/
# License compliance reports
licenses.csv
diff --git a/actions/setup/README.md b/actions/setup/README.md
index 5b6db7b08d..9995726746 100644
--- a/actions/setup/README.md
+++ b/actions/setup/README.md
@@ -6,6 +6,8 @@ This action copies workflow script files to the agent environment.
This action runs in all workflow jobs to provide JavaScript scripts that can be required instead of being inlined in the workflow. This includes scripts for activation jobs, agent jobs, and safe-output jobs.
+The action copies 117 `.cjs` JavaScript files from the `js/` directory to a destination directory (default: `/tmp/gh-aw/actions`). These files are generated by running `make actions-build` and are committed to the repository.
+
## Usage
```yaml
@@ -13,8 +15,8 @@ This action runs in all workflow jobs to provide JavaScript scripts that can be
uses: ./actions/setup
with:
# Destination directory for script files
- # Default: /tmp/gh-aw/actions/activation
- destination: /tmp/gh-aw/actions/activation
+ # Default: /tmp/gh-aw/actions
+ destination: /tmp/gh-aw/actions
```
## Inputs
@@ -23,35 +25,61 @@ This action runs in all workflow jobs to provide JavaScript scripts that can be
**Optional** Destination directory for script files.
-Default: `/tmp/gh-aw/actions/activation`
+Default: `/tmp/gh-aw/actions`
## Outputs
### `files-copied`
-The number of files copied to the destination directory.
+The number of files copied to the destination directory (should be 117).
## Example
```yaml
steps:
- uses: actions/checkout@v4
+ with:
+ sparse-checkout: |
+ actions
- name: Setup Scripts
uses: ./actions/setup
with:
- destination: /tmp/gh-aw/actions/activation
+ destination: /tmp/gh-aw/actions
```
## Files Included
-This action copies all .cjs files from the script registry, including:
+This action copies 117 `.cjs` files from `actions/setup/js/`, including:
+
+- Activation job scripts (check_stop_time, check_skip_if_match, check_command_position, etc.)
+- Agent job scripts (compute_text, create_issue, create_pull_request, etc.)
+- Safe output scripts (safe_outputs_*, safe_inputs_*, messages, etc.)
+- Utility scripts (sanitize_*, validate_*, generate_*, etc.)
+
+All files are copied from the committed `js/` directory which is populated by running `make actions-build` during development.
+
+## Development
+
+The `js/` directory contains generated JavaScript files created by `make actions-build`. These files are committed to the repository so that workflows using sparse checkout can access them without needing to rebuild.
+
+To update the JavaScript files after modifying source files in `pkg/workflow/js/`:
+
+```bash
+make actions-build
+git add actions/setup/js/
+git commit -m "Update action JavaScript files"
+```
+
+## Testing Locally
+
+You can test this action locally using the provided test script:
+
+```bash
+./test-setup-local.sh
+```
-- `check_stop_time.cjs` - Check stop-time limit script
-- `check_skip_if_match.cjs` - Check skip-if-match query script
-- `check_command_position.cjs` - Check command position script
-- `check_workflow_timestamp_api.cjs` - Check workflow file timestamps script
-- `lock-issue.cjs` - Lock issue for agent workflow script
-- `compute_text.cjs` - Compute current body text script (bundled with dependencies)
-- `add_reaction_and_edit_comment.cjs` - Add reaction and edit comment script (bundled with dependencies)
-- And all other registered .cjs files (82 total)
+This script will:
+1. Check if `js/` directory exists (runs `make actions-build` if needed)
+2. Run setup.sh with test configuration
+3. Verify all files are copied correctly
diff --git a/actions/setup/js/add_comment.cjs b/actions/setup/js/add_comment.cjs
new file mode 100644
index 0000000000..bfb7e51177
--- /dev/null
+++ b/actions/setup/js/add_comment.cjs
@@ -0,0 +1,568 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateFooterWithMessages } = require("./messages_footer.cjs");
+const { getRepositoryUrl } = require("./get_repository_url.cjs");
+const { replaceTemporaryIdReferences, loadTemporaryIdMap } = require("./temporary_id.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+
+/**
+ * Hide/minimize a comment using the GraphQL API
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} nodeId - Comment node ID
+ * @param {string} reason - Reason for hiding (default: outdated)
+ * @returns {Promise<{id: string, isMinimized: boolean}>}
+ */
+async function minimizeComment(github, nodeId, reason = "outdated") {
+ const query = /* GraphQL */ `
+ mutation ($nodeId: ID!, $classifier: ReportedContentClassifiers!) {
+ minimizeComment(input: { subjectId: $nodeId, classifier: $classifier }) {
+ minimizedComment {
+ isMinimized
+ }
+ }
+ }
+ `;
+
+ const result = await github.graphql(query, { nodeId, classifier: reason });
+
+ return {
+ id: nodeId,
+ isMinimized: result.minimizeComment.minimizedComment.isMinimized,
+ };
+}
+
+/**
+ * Find comments on an issue/PR with a specific tracker-id
+ * @param {any} github - GitHub REST API instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} issueNumber - Issue/PR number
+ * @param {string} workflowId - Workflow ID to search for
+ * @returns {Promise<Array<{id: number, node_id: string, body: string}>>}
+ */
+async function findCommentsWithTrackerId(github, owner, repo, issueNumber, workflowId) {
+ const comments = [];
+ let page = 1;
+ const perPage = 100;
+
+ // Paginate through all comments
+ while (true) {
+ const { data } = await github.rest.issues.listComments({
+ owner,
+ repo,
+ issue_number: issueNumber,
+ per_page: perPage,
+ page,
+ });
+
+ if (data.length === 0) {
+ break;
+ }
+
+ // Filter comments that contain the workflow-id and are NOT reaction comments
+    const filteredComments = data.filter(comment => comment.body?.includes(`<!-- gh-aw-workflow-id: ${workflowId} -->`) && !comment.body.includes(`<!-- gh-aw-reaction -->`)).map(({ id, node_id, body }) => ({ id, node_id, body }));
+
+ comments.push(...filteredComments);
+
+ if (data.length < perPage) {
+ break;
+ }
+
+ page++;
+ }
+
+ return comments;
+}
+
+/**
+ * Find comments on a discussion with a specific workflow ID
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} discussionNumber - Discussion number
+ * @param {string} workflowId - Workflow ID to search for
+ * @returns {Promise<Array<{id: string, body: string}>>}
+ */
+async function findDiscussionCommentsWithTrackerId(github, owner, repo, discussionNumber, workflowId) {
+ const query = /* GraphQL */ `
+ query ($owner: String!, $repo: String!, $num: Int!, $cursor: String) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ comments(first: 100, after: $cursor) {
+ nodes {
+ id
+ body
+ }
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ }
+ }
+ }
+ }
+ `;
+
+ const comments = [];
+ let cursor = null;
+
+ while (true) {
+ const result = await github.graphql(query, { owner, repo, num: discussionNumber, cursor });
+
+ if (!result.repository?.discussion?.comments?.nodes) {
+ break;
+ }
+
+ const filteredComments = result.repository.discussion.comments.nodes
+      .filter(comment => comment.body?.includes(`<!-- gh-aw-workflow-id: ${workflowId} -->`) && !comment.body.includes(`<!-- gh-aw-reaction -->`))
+ .map(({ id, body }) => ({ id, body }));
+
+ comments.push(...filteredComments);
+
+ if (!result.repository.discussion.comments.pageInfo.hasNextPage) {
+ break;
+ }
+
+ cursor = result.repository.discussion.comments.pageInfo.endCursor;
+ }
+
+ return comments;
+}
+
+/**
+ * Hide all previous comments from the same workflow
+ * @param {any} github - GitHub API instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} itemNumber - Issue/PR/Discussion number
+ * @param {string} workflowId - Workflow ID to match
+ * @param {boolean} isDiscussion - Whether this is a discussion
+ * @param {string} reason - Reason for hiding (default: outdated)
+ * @param {string[] | null} allowedReasons - List of allowed reasons (default: null for all)
+ * @returns {Promise<number>} Number of comments hidden
+ */
+async function hideOlderComments(github, owner, repo, itemNumber, workflowId, isDiscussion, reason = "outdated", allowedReasons = null) {
+ if (!workflowId) {
+ core.info("No workflow ID available, skipping hide-older-comments");
+ return 0;
+ }
+
+ // Normalize reason to uppercase for GitHub API
+ const normalizedReason = reason.toUpperCase();
+
+ // Validate reason against allowed reasons if specified (case-insensitive)
+ if (allowedReasons && allowedReasons.length > 0) {
+ const normalizedAllowedReasons = allowedReasons.map(r => r.toUpperCase());
+ if (!normalizedAllowedReasons.includes(normalizedReason)) {
+ core.warning(`Reason "${reason}" is not in allowed-reasons list [${allowedReasons.join(", ")}]. Skipping hide-older-comments.`);
+ return 0;
+ }
+ }
+
+ core.info(`Searching for previous comments with workflow ID: ${workflowId}`);
+
+ let comments;
+ if (isDiscussion) {
+ comments = await findDiscussionCommentsWithTrackerId(github, owner, repo, itemNumber, workflowId);
+ } else {
+ comments = await findCommentsWithTrackerId(github, owner, repo, itemNumber, workflowId);
+ }
+
+ if (comments.length === 0) {
+ core.info("No previous comments found with matching workflow ID");
+ return 0;
+ }
+
+ core.info(`Found ${comments.length} previous comment(s) to hide with reason: ${normalizedReason}`);
+
+ let hiddenCount = 0;
+ for (const comment of comments) {
+ // TypeScript can't narrow the union type here, but we know it's safe due to isDiscussion check
+ // @ts-expect-error - comment has node_id when not a discussion
+ const nodeId = isDiscussion ? String(comment.id) : comment.node_id;
+ core.info(`Hiding comment: ${nodeId}`);
+
+ const result = await minimizeComment(github, nodeId, normalizedReason);
+ hiddenCount++;
+    core.info(`✓ Hidden comment: ${nodeId}`);
+ }
+
+ core.info(`Successfully hidden ${hiddenCount} comment(s)`);
+ return hiddenCount;
+}
+
+/**
+ * Comment on a GitHub Discussion using GraphQL
+ * @param {any} github - GitHub REST API instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} discussionNumber - Discussion number
+ * @param {string} message - Comment body
+ * @param {string|undefined} replyToId - Optional comment node ID to reply to (for threaded comments)
+ * @returns {Promise<{id: string, html_url: string, discussion_url: string}>} Comment details
+ */
+async function commentOnDiscussion(github, owner, repo, discussionNumber, message, replyToId) {
+ // 1. Retrieve discussion node ID
+ const { repository } = await github.graphql(
+ `
+ query($owner: String!, $repo: String!, $num: Int!) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ id
+ url
+ }
+ }
+ }`,
+ { owner, repo, num: discussionNumber }
+ );
+
+ if (!repository || !repository.discussion) {
+ throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`);
+ }
+
+ const discussionId = repository.discussion.id;
+ const discussionUrl = repository.discussion.url;
+
+ // 2. Add comment (with optional replyToId for threading)
+ const mutation = replyToId
+ ? `mutation($dId: ID!, $body: String!, $replyToId: ID!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body, replyToId: $replyToId }) {
+ comment {
+ id
+ body
+ createdAt
+ url
+ }
+ }
+ }`
+ : `mutation($dId: ID!, $body: String!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body }) {
+ comment {
+ id
+ body
+ createdAt
+ url
+ }
+ }
+ }`;
+
+ const variables = replyToId ? { dId: discussionId, body: message, replyToId } : { dId: discussionId, body: message };
+
+ const result = await github.graphql(mutation, variables);
+
+ const comment = result.addDiscussionComment.comment;
+
+ return {
+ id: comment.id,
+ html_url: comment.url,
+ discussion_url: discussionUrl,
+ };
+}
+
+async function main() {
+ // Check if we're in staged mode
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+ const isDiscussionExplicit = process.env.GITHUB_AW_COMMENT_DISCUSSION === "true";
+ const hideOlderCommentsEnabled = process.env.GH_AW_HIDE_OLDER_COMMENTS === "true";
+
+ // Load the temporary ID map from create_issue job
+ const temporaryIdMap = loadTemporaryIdMap();
+ if (temporaryIdMap.size > 0) {
+ core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`);
+ }
+
+ const result = loadAgentOutput();
+ if (!result.success) {
+ return;
+ }
+
+ // Find all add-comment items
+ const commentItems = result.items.filter(/** @param {any} item */ item => item.type === "add_comment");
+ if (commentItems.length === 0) {
+ core.info("No add-comment items found in agent output");
+ return;
+ }
+
+ core.info(`Found ${commentItems.length} add-comment item(s)`);
+
+ // Helper function to get the target number (issue, discussion, or pull request)
+ function getTargetNumber(item) {
+ return item.item_number;
+ }
+
+ // Get the target configuration from environment variable
+ const commentTarget = process.env.GH_AW_COMMENT_TARGET || "triggering";
+ core.info(`Comment target configuration: ${commentTarget}`);
+
+ // Check if we're in an issue, pull request, or discussion context
+ const isIssueContext = context.eventName === "issues" || context.eventName === "issue_comment";
+ const isPRContext = context.eventName === "pull_request" || context.eventName === "pull_request_review" || context.eventName === "pull_request_review_comment";
+ const isDiscussionContext = context.eventName === "discussion" || context.eventName === "discussion_comment";
+ const isDiscussion = isDiscussionContext || isDiscussionExplicit;
+
+ // Get workflow ID for hiding older comments
+ // Use GITHUB_WORKFLOW environment variable which is automatically set by GitHub Actions
+ const workflowId = process.env.GITHUB_WORKFLOW || "";
+
+ // Parse allowed reasons from environment variable
+ const allowedReasons = process.env.GH_AW_ALLOWED_REASONS
+ ? (() => {
+ try {
+ const parsed = JSON.parse(process.env.GH_AW_ALLOWED_REASONS);
+ core.info(`Allowed reasons for hiding: [${parsed.join(", ")}]`);
+ return parsed;
+ } catch (error) {
+ core.warning(`Failed to parse GH_AW_ALLOWED_REASONS: ${error instanceof Error ? error.message : String(error)}`);
+ return null;
+ }
+ })()
+ : null;
+
+ if (hideOlderCommentsEnabled) {
+ core.info(`Hide-older-comments is enabled with workflow ID: ${workflowId || "(none)"}`);
+ }
+
+ // If in staged mode, emit step summary instead of creating comments
+ if (isStaged) {
+    let summaryContent = "## 📝 Staged Mode: Add Comments Preview\n\n";
+ summaryContent += "The following comments would be added if staged mode was disabled:\n\n";
+
+ // Show created items references if available
+ const createdIssueUrl = process.env.GH_AW_CREATED_ISSUE_URL;
+ const createdIssueNumber = process.env.GH_AW_CREATED_ISSUE_NUMBER;
+ const createdDiscussionUrl = process.env.GH_AW_CREATED_DISCUSSION_URL;
+ const createdDiscussionNumber = process.env.GH_AW_CREATED_DISCUSSION_NUMBER;
+ const createdPullRequestUrl = process.env.GH_AW_CREATED_PULL_REQUEST_URL;
+ const createdPullRequestNumber = process.env.GH_AW_CREATED_PULL_REQUEST_NUMBER;
+
+ if (createdIssueUrl || createdDiscussionUrl || createdPullRequestUrl) {
+ summaryContent += "#### Related Items\n\n";
+ if (createdIssueUrl && createdIssueNumber) {
+ summaryContent += `- Issue: [#${createdIssueNumber}](${createdIssueUrl})\n`;
+ }
+ if (createdDiscussionUrl && createdDiscussionNumber) {
+ summaryContent += `- Discussion: [#${createdDiscussionNumber}](${createdDiscussionUrl})\n`;
+ }
+ if (createdPullRequestUrl && createdPullRequestNumber) {
+ summaryContent += `- Pull Request: [#${createdPullRequestNumber}](${createdPullRequestUrl})\n`;
+ }
+ summaryContent += "\n";
+ }
+
+ for (let i = 0; i < commentItems.length; i++) {
+ const item = commentItems[i];
+ summaryContent += `### Comment ${i + 1}\n`;
+ const targetNumber = getTargetNumber(item);
+ if (targetNumber) {
+ const repoUrl = getRepositoryUrl();
+ if (isDiscussion) {
+ const discussionUrl = `${repoUrl}/discussions/${targetNumber}`;
+ summaryContent += `**Target Discussion:** [#${targetNumber}](${discussionUrl})\n\n`;
+ } else {
+ const issueUrl = `${repoUrl}/issues/${targetNumber}`;
+ summaryContent += `**Target Issue:** [#${targetNumber}](${issueUrl})\n\n`;
+ }
+ } else {
+ if (isDiscussion) {
+ summaryContent += `**Target:** Current discussion\n\n`;
+ } else {
+ summaryContent += `**Target:** Current issue/PR\n\n`;
+ }
+ }
+ summaryContent += `**Body:**\n${item.body || "No content provided"}\n\n`;
+ summaryContent += "---\n\n";
+ }
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+    core.info("📝 Comment creation preview written to step summary");
+ return;
+ }
+
+ // Validate context based on target configuration
+ if (commentTarget === "triggering" && !isIssueContext && !isPRContext && !isDiscussionContext) {
+ core.info('Target is "triggering" but not running in issue, pull request, or discussion context, skipping comment creation');
+ return;
+ }
+
+ // Extract triggering context for footer generation
+ const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined;
+ const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined);
+ const triggeringDiscussionNumber = context.payload?.discussion?.number;
+
+ const createdComments = [];
+
+ // Process each comment item
+ for (let i = 0; i < commentItems.length; i++) {
+ const commentItem = commentItems[i];
+ core.info(`Processing add-comment item ${i + 1}/${commentItems.length}: bodyLength=${commentItem.body.length}`);
+
+ // Determine the issue/PR number and comment endpoint for this comment
+ let itemNumber;
+ let commentEndpoint;
+
+ if (commentTarget === "*") {
+ // For target "*", we need an explicit number from the comment item
+ const targetNumber = getTargetNumber(commentItem);
+ if (targetNumber) {
+ itemNumber = parseInt(targetNumber, 10);
+ if (isNaN(itemNumber) || itemNumber <= 0) {
+ core.info(`Invalid target number specified: ${targetNumber}`);
+ continue;
+ }
+ commentEndpoint = isDiscussion ? "discussions" : "issues";
+ } else {
+ core.info(`Target is "*" but no number specified in comment item`);
+ continue;
+ }
+ } else if (commentTarget && commentTarget !== "triggering") {
+ // Explicit number specified in target configuration
+ itemNumber = parseInt(commentTarget, 10);
+ if (isNaN(itemNumber) || itemNumber <= 0) {
+ core.info(`Invalid target number in target configuration: ${commentTarget}`);
+ continue;
+ }
+ commentEndpoint = isDiscussion ? "discussions" : "issues";
+ } else {
+ // Default behavior: use triggering issue/PR/discussion
+ if (isIssueContext) {
+ itemNumber = context.payload.issue?.number || context.payload.pull_request?.number || context.payload.discussion?.number;
+ if (context.payload.issue) {
+ commentEndpoint = "issues";
+ } else {
+ core.info("Issue context detected but no issue found in payload");
+ continue;
+ }
+ } else if (isPRContext) {
+ itemNumber = context.payload.pull_request?.number || context.payload.issue?.number || context.payload.discussion?.number;
+ if (context.payload.pull_request) {
+ commentEndpoint = "issues"; // PR comments use the issues API endpoint
+ } else {
+ core.info("Pull request context detected but no pull request found in payload");
+ continue;
+ }
+ } else if (isDiscussionContext) {
+ itemNumber = context.payload.discussion?.number || context.payload.issue?.number || context.payload.pull_request?.number;
+ if (context.payload.discussion) {
+ commentEndpoint = "discussions"; // Discussion comments use GraphQL via commentOnDiscussion
+ } else {
+ core.info("Discussion context detected but no discussion found in payload");
+ continue;
+ }
+ }
+ }
+
+ if (!itemNumber) {
+ core.info("Could not determine issue, pull request, or discussion number");
+ continue;
+ }
+
+ // Extract body from the JSON item and replace temporary ID references
+ let body = replaceTemporaryIdReferences(commentItem.body.trim(), temporaryIdMap);
+
+ // Append references to created issues, discussions, and pull requests if they exist
+ const createdIssueUrl = process.env.GH_AW_CREATED_ISSUE_URL;
+ const createdIssueNumber = process.env.GH_AW_CREATED_ISSUE_NUMBER;
+ const createdDiscussionUrl = process.env.GH_AW_CREATED_DISCUSSION_URL;
+ const createdDiscussionNumber = process.env.GH_AW_CREATED_DISCUSSION_NUMBER;
+ const createdPullRequestUrl = process.env.GH_AW_CREATED_PULL_REQUEST_URL;
+ const createdPullRequestNumber = process.env.GH_AW_CREATED_PULL_REQUEST_NUMBER;
+
+ // Add references section if any URLs are available
+ const references = [
+ createdIssueUrl && createdIssueNumber && `- Issue: [#${createdIssueNumber}](${createdIssueUrl})`,
+ createdDiscussionUrl && createdDiscussionNumber && `- Discussion: [#${createdDiscussionNumber}](${createdDiscussionUrl})`,
+ createdPullRequestUrl && createdPullRequestNumber && `- Pull Request: [#${createdPullRequestNumber}](${createdPullRequestUrl})`,
+ ].filter(Boolean);
+
+ if (references.length > 0) {
+ body += `\n\n#### Related Items\n\n${references.join("\n")}\n`;
+ }
+
+ // Add AI disclaimer with workflow name and run url
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || "";
+ const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || "";
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ // Add workflow ID comment marker if present
+ if (workflowId) {
+    body += `\n\n<!-- gh-aw-workflow-id: ${workflowId} -->`;
+ }
+
+ // Add tracker-id comment if present
+ const trackerIDComment = getTrackerID("markdown");
+ if (trackerIDComment) {
+ body += trackerIDComment;
+ }
+
+ // Add comment type marker to identify this as an add-comment
+  body += `\n\n<!-- gh-aw-comment-type: add-comment -->`;
+
+ body += generateFooterWithMessages(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber);
+
+ // Hide older comments from the same workflow if enabled
+ if (hideOlderCommentsEnabled && workflowId) {
+ core.info("Hide-older-comments is enabled, searching for previous comments to hide");
+ await hideOlderComments(github, context.repo.owner, context.repo.repo, itemNumber, workflowId, commentEndpoint === "discussions", "outdated", allowedReasons);
+ }
+
+ let comment;
+
+ // Use GraphQL API for discussions, REST API for issues/PRs
+ if (commentEndpoint === "discussions") {
+ core.info(`Creating comment on discussion #${itemNumber}`);
+ core.info(`Comment content length: ${body.length}`);
+
+ // For discussion_comment events, extract the comment node_id to create a threaded reply
+ const replyToId = context.eventName === "discussion_comment" && context.payload?.comment?.node_id ? context.payload.comment.node_id : undefined;
+
+ if (replyToId) {
+ core.info(`Creating threaded reply to comment ${replyToId}`);
+ }
+
+ // Create discussion comment using GraphQL
+ comment = await commentOnDiscussion(github, context.repo.owner, context.repo.repo, itemNumber, body, replyToId);
+ core.info("Created discussion comment #" + comment.id + ": " + comment.html_url);
+
+ // Add discussion_url to the comment object for consistency
+ comment.discussion_url = comment.discussion_url;
+ } else {
+ core.info(`Creating comment on ${commentEndpoint} #${itemNumber}`);
+ core.info(`Comment content length: ${body.length}`);
+
+ // Create regular issue/PR comment using REST API
+ const { data: restComment } = await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: itemNumber,
+ body: body,
+ });
+
+ comment = restComment;
+ core.info("Created comment #" + comment.id + ": " + comment.html_url);
+ }
+
+ createdComments.push(comment);
+
+ // Set output for the last created comment (for backward compatibility)
+ if (i === commentItems.length - 1) {
+ core.setOutput("comment_id", comment.id);
+ core.setOutput("comment_url", comment.html_url);
+ }
+ }
+
+ // Write summary for all created comments
+ if (createdComments.length > 0) {
+ const summaryContent = "\n\n## GitHub Comments\n" + createdComments.map(c => `- Comment #${c.id}: [View Comment](${c.html_url})`).join("\n");
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully created ${createdComments.length} comment(s)`);
+ return createdComments;
+}
+await main();
diff --git a/actions/setup/js/add_copilot_reviewer.cjs b/actions/setup/js/add_copilot_reviewer.cjs
new file mode 100644
index 0000000000..40281e4bd8
--- /dev/null
+++ b/actions/setup/js/add_copilot_reviewer.cjs
@@ -0,0 +1,63 @@
+// @ts-check
+///
+
+/**
+ * Add Copilot as a reviewer to a pull request.
+ *
+ * This script is used to add the GitHub Copilot pull request reviewer bot
+ * to a pull request. It uses the `github` object from actions/github-script
+ * instead of the `gh api` CLI command.
+ *
+ * Environment variables:
+ * - PR_NUMBER: The pull request number to add the reviewer to
+ */
+
+// GitHub Copilot reviewer bot username
+const COPILOT_REVIEWER_BOT = "copilot-pull-request-reviewer[bot]";
+
+async function main() {
+ // Validate required environment variables
+ const prNumberStr = process.env.PR_NUMBER;
+
+ if (!prNumberStr || prNumberStr.trim() === "") {
+ core.setFailed("PR_NUMBER environment variable is required but not set");
+ return;
+ }
+
+ const prNumber = parseInt(prNumberStr.trim(), 10);
+ if (isNaN(prNumber) || prNumber <= 0) {
+ core.setFailed(`Invalid PR_NUMBER: ${prNumberStr}. Must be a positive integer.`);
+ return;
+ }
+
+ core.info(`Adding Copilot as reviewer to PR #${prNumber}`);
+
+ try {
+ await github.rest.pulls.requestReviewers({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ pull_number: prNumber,
+ reviewers: [COPILOT_REVIEWER_BOT],
+ });
+
+ core.info(`Successfully added Copilot as reviewer to PR #${prNumber}`);
+
+ await core.summary
+ .addRaw(
+ `
+## Copilot Reviewer Added
+
+Successfully added Copilot as a reviewer to PR #${prNumber}.
+`
+ )
+ .write();
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to add Copilot as reviewer: ${errorMessage}`);
+ core.setFailed(`Failed to add Copilot as reviewer to PR #${prNumber}: ${errorMessage}`);
+ }
+}
+
+main().catch(error => {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+});
diff --git a/actions/setup/js/add_labels.cjs b/actions/setup/js/add_labels.cjs
new file mode 100644
index 0000000000..48f20ec479
--- /dev/null
+++ b/actions/setup/js/add_labels.cjs
@@ -0,0 +1,125 @@
+// @ts-check
+///
+
+const { processSafeOutput } = require("./safe_output_processor.cjs");
+const { validateLabels } = require("./safe_output_validator.cjs");
+
+async function main() {
+ // Use shared processor for common steps
+ const result = await processSafeOutput(
+ {
+ itemType: "add_labels",
+ configKey: "add_labels",
+ displayName: "Labels",
+ itemTypeName: "label addition",
+ supportsPR: true,
+ supportsIssue: true,
+ envVars: {
+ allowed: "GH_AW_LABELS_ALLOWED",
+ maxCount: "GH_AW_LABELS_MAX_COUNT",
+ target: "GH_AW_LABELS_TARGET",
+ },
+ },
+ {
+ title: "Add Labels",
+ description: "The following labels would be added if staged mode was disabled:",
+ renderItem: item => {
+ let content = "";
+ if (item.item_number) {
+ content += `**Target Issue:** #${item.item_number}\n\n`;
+ } else {
+ content += `**Target:** Current issue/PR\n\n`;
+ }
+ if (item.labels && item.labels.length > 0) {
+ content += `**Labels to add:** ${item.labels.join(", ")}\n\n`;
+ }
+ return content;
+ },
+ }
+ );
+
+ if (!result.success) {
+ return;
+ }
+
+ // @ts-ignore - TypeScript doesn't narrow properly after success check
+ const { item: labelsItem, config, targetResult } = result;
+ if (!config || !targetResult || targetResult.number === undefined) {
+ core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined");
+ return;
+ }
+ const { allowed: allowedLabels, maxCount } = config;
+ const itemNumber = targetResult.number;
+ const { contextType } = targetResult;
+
+ const requestedLabels = labelsItem.labels || [];
+ core.info(`Requested labels: ${JSON.stringify(requestedLabels)}`);
+
+ // Use validation helper to sanitize and validate labels
+ const labelsResult = validateLabels(requestedLabels, allowedLabels, maxCount);
+ if (!labelsResult.valid) {
+ // If no valid labels, log info and return gracefully instead of failing
+ if (labelsResult.error && labelsResult.error.includes("No valid labels")) {
+ core.info("No labels to add");
+ core.setOutput("labels_added", "");
+ await core.summary
+ .addRaw(
+ `
+## Label Addition
+
+No labels were added (no valid labels found in agent output).
+`
+ )
+ .write();
+ return;
+ }
+ // For other validation errors, fail the workflow
+ core.setFailed(labelsResult.error || "Invalid labels");
+ return;
+ }
+
+ const uniqueLabels = labelsResult.value || [];
+
+ if (uniqueLabels.length === 0) {
+ core.info("No labels to add");
+ core.setOutput("labels_added", "");
+ await core.summary
+ .addRaw(
+ `
+## Label Addition
+
+No labels were added (no valid labels found in agent output).
+`
+ )
+ .write();
+ return;
+ }
+ core.info(`Adding ${uniqueLabels.length} labels to ${contextType} #${itemNumber}: ${JSON.stringify(uniqueLabels)}`);
+ try {
+ await github.rest.issues.addLabels({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: itemNumber,
+ labels: uniqueLabels,
+ });
+ core.info(`Successfully added ${uniqueLabels.length} labels to ${contextType} #${itemNumber}`);
+ core.setOutput("labels_added", uniqueLabels.join("\n"));
+ const labelsListMarkdown = uniqueLabels.map(label => `- \`${label}\``).join("\n");
+ await core.summary
+ .addRaw(
+ `
+## Label Addition
+
+Successfully added ${uniqueLabels.length} label(s) to ${contextType} #${itemNumber}:
+
+${labelsListMarkdown}
+`
+ )
+ .write();
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to add labels: ${errorMessage}`);
+ core.setFailed(`Failed to add labels: ${errorMessage}`);
+ }
+}
+await main();
diff --git a/actions/setup/js/add_reaction_and_edit_comment.cjs b/actions/setup/js/add_reaction_and_edit_comment.cjs
new file mode 100644
index 0000000000..8be362426e
--- /dev/null
+++ b/actions/setup/js/add_reaction_and_edit_comment.cjs
@@ -0,0 +1,465 @@
+// @ts-check
+///
+
+const { getRunStartedMessage } = require("./messages_run_status.cjs");
+
+async function main() {
+ // Read inputs from environment variables
+ const reaction = process.env.GH_AW_REACTION || "eyes";
+ const command = process.env.GH_AW_COMMAND; // Only present for command workflows
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ core.info(`Reaction type: ${reaction}`);
+ core.info(`Command name: ${command || "none"}`);
+ core.info(`Run ID: ${runId}`);
+ core.info(`Run URL: ${runUrl}`);
+
+ // Validate reaction type
+ const validReactions = ["+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", "eyes"];
+ if (!validReactions.includes(reaction)) {
+ core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}`);
+ return;
+ }
+
+ // Determine the API endpoint based on the event type
+ let reactionEndpoint;
+ let commentUpdateEndpoint;
+ let shouldCreateComment = false;
+ const eventName = context.eventName;
+ const owner = context.repo.owner;
+ const repo = context.repo.repo;
+
+ try {
+ switch (eventName) {
+ case "issues":
+ const issueNumber = context.payload?.issue?.number;
+ if (!issueNumber) {
+ core.setFailed("Issue number not found in event payload");
+ return;
+ }
+ reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`;
+ commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/comments`;
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ case "issue_comment":
+ const commentId = context.payload?.comment?.id;
+ const issueNumberForComment = context.payload?.issue?.number;
+ if (!commentId) {
+ core.setFailed("Comment ID not found in event payload");
+ return;
+ }
+ if (!issueNumberForComment) {
+ core.setFailed("Issue number not found in event payload");
+ return;
+ }
+ reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`;
+ // Create new comment on the issue itself, not on the comment
+ commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${issueNumberForComment}/comments`;
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ case "pull_request":
+ const prNumber = context.payload?.pull_request?.number;
+ if (!prNumber) {
+ core.setFailed("Pull request number not found in event payload");
+ return;
+ }
+ // PRs are "issues" for the reactions endpoint
+ reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`;
+ commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/comments`;
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ case "pull_request_review_comment":
+ const reviewCommentId = context.payload?.comment?.id;
+ const prNumberForReviewComment = context.payload?.pull_request?.number;
+ if (!reviewCommentId) {
+ core.setFailed("Review comment ID not found in event payload");
+ return;
+ }
+ if (!prNumberForReviewComment) {
+ core.setFailed("Pull request number not found in event payload");
+ return;
+ }
+ reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`;
+ // Create new comment on the PR itself (using issues endpoint since PRs are issues)
+ commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/${prNumberForReviewComment}/comments`;
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ case "discussion":
+ const discussionNumber = context.payload?.discussion?.number;
+ if (!discussionNumber) {
+ core.setFailed("Discussion number not found in event payload");
+ return;
+ }
+ // Discussions use GraphQL API - get the node ID
+ const discussion = await getDiscussionId(owner, repo, discussionNumber);
+ reactionEndpoint = discussion.id; // Store node ID for GraphQL
+ commentUpdateEndpoint = `discussion:${discussionNumber}`; // Special format to indicate discussion
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ case "discussion_comment":
+ const discussionCommentNumber = context.payload?.discussion?.number;
+ const discussionCommentId = context.payload?.comment?.id;
+ if (!discussionCommentNumber || !discussionCommentId) {
+ core.setFailed("Discussion or comment information not found in event payload");
+ return;
+ }
+ // Get the comment node ID from the payload
+ const commentNodeId = context.payload?.comment?.node_id;
+ if (!commentNodeId) {
+ core.setFailed("Discussion comment node ID not found in event payload");
+ return;
+ }
+ reactionEndpoint = commentNodeId; // Store node ID for GraphQL
+ commentUpdateEndpoint = `discussion_comment:${discussionCommentNumber}:${discussionCommentId}`; // Special format
+ // Create comments for all workflows using reactions
+ shouldCreateComment = true;
+ break;
+
+ default:
+ core.setFailed(`Unsupported event type: ${eventName}`);
+ return;
+ }
+
+ core.info(`Reaction API endpoint: ${reactionEndpoint}`);
+
+ // Add reaction first
+ // For discussions, reactionEndpoint is a node ID (GraphQL), otherwise it's a REST API path
+ const isDiscussionEvent = eventName === "discussion" || eventName === "discussion_comment";
+ if (isDiscussionEvent) {
+ await addDiscussionReaction(reactionEndpoint, reaction);
+ } else {
+ await addReaction(reactionEndpoint, reaction);
+ }
+
+ // Then add comment if applicable
+ if (shouldCreateComment && commentUpdateEndpoint) {
+ core.info(`Comment endpoint: ${commentUpdateEndpoint}`);
+ await addCommentWithWorkflowLink(commentUpdateEndpoint, runUrl, eventName);
+ } else {
+ core.info(`Skipping comment for event type: ${eventName}`);
+ }
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to process reaction and comment creation: ${errorMessage}`);
+ core.setFailed(`Failed to process reaction and comment creation: ${errorMessage}`);
+ }
+}
+
+/**
+ * Add a reaction to a GitHub issue, PR, or comment using REST API
+ * @param {string} endpoint - The GitHub API endpoint to add the reaction to
+ * @param {string} reaction - The reaction type to add
+ */
+async function addReaction(endpoint, reaction) {
+ const response = await github.request("POST " + endpoint, {
+ content: reaction,
+ headers: {
+ Accept: "application/vnd.github+json",
+ },
+ });
+
+ const reactionId = response.data?.id;
+ if (reactionId) {
+ core.info(`Successfully added reaction: ${reaction} (id: ${reactionId})`);
+ core.setOutput("reaction-id", reactionId.toString());
+ } else {
+ core.info(`Successfully added reaction: ${reaction}`);
+ core.setOutput("reaction-id", "");
+ }
+}
+
+/**
+ * Add a reaction to a GitHub discussion or discussion comment using GraphQL
+ * @param {string} subjectId - The node ID of the discussion or comment
+ * @param {string} reaction - The reaction type to add (mapped to GitHub's ReactionContent enum)
+ */
+async function addDiscussionReaction(subjectId, reaction) {
+ // Map reaction names to GitHub's GraphQL ReactionContent enum
+ const reactionMap = {
+ "+1": "THUMBS_UP",
+ "-1": "THUMBS_DOWN",
+ laugh: "LAUGH",
+ confused: "CONFUSED",
+ heart: "HEART",
+ hooray: "HOORAY",
+ rocket: "ROCKET",
+ eyes: "EYES",
+ };
+
+ const reactionContent = reactionMap[reaction];
+ if (!reactionContent) {
+ throw new Error(`Invalid reaction type for GraphQL: ${reaction}`);
+ }
+
+ const result = await github.graphql(
+ `
+ mutation($subjectId: ID!, $content: ReactionContent!) {
+ addReaction(input: { subjectId: $subjectId, content: $content }) {
+ reaction {
+ id
+ content
+ }
+ }
+ }`,
+ { subjectId, content: reactionContent }
+ );
+
+ const reactionId = result.addReaction.reaction.id;
+ core.info(`Successfully added reaction: ${reaction} (id: ${reactionId})`);
+ core.setOutput("reaction-id", reactionId);
+}
+
+/**
+ * Get the node ID for a discussion
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} discussionNumber - Discussion number
+ * @returns {Promise<{id: string, url: string}>} Discussion details
+ */
+async function getDiscussionId(owner, repo, discussionNumber) {
+ const { repository } = await github.graphql(
+ `
+ query($owner: String!, $repo: String!, $num: Int!) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ id
+ url
+ }
+ }
+ }`,
+ { owner, repo, num: discussionNumber }
+ );
+
+ if (!repository || !repository.discussion) {
+ throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`);
+ }
+
+ return {
+ id: repository.discussion.id,
+ url: repository.discussion.url,
+ };
+}
+
+/**
+ * Get the node ID for a discussion comment.
+ *
+ * NOTE(review): this helper does not appear to be called anywhere in this
+ * file — confirm a caller exists elsewhere before keeping it.
+ *
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} discussionNumber - Discussion number
+ * @param {number} commentId - Comment ID (database ID, not node ID)
+ * @returns {Promise<{id: string, url: string}>} Comment details
+ * @throws {Error} When the comment's node ID is missing from the event payload
+ */
+async function getDiscussionCommentId(owner, repo, discussionNumber, commentId) {
+  // Fetch the discussion up-front; its URL serves as a fallback for the
+  // returned `url` when the payload comment has no html_url.
+  const discussion = await getDiscussionId(owner, repo, discussionNumber);
+  if (!discussion) throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`);
+
+  // Then fetch the comment by traversing discussion comments
+  // Note: GitHub's GraphQL API doesn't provide a direct way to query comment by database ID
+  // We need to use the comment's node ID from the event payload if available
+  // For now, we'll use a simplified approach - the commentId from context.payload.comment.node_id
+
+  // If the event payload provides node_id, we can use it directly
+  // Otherwise, this would need to fetch all comments and find the matching one
+  const nodeId = context.payload?.comment?.node_id;
+  if (nodeId) {
+    return {
+      id: nodeId,
+      url: context.payload.comment?.html_url || discussion?.url,
+    };
+  }
+
+  throw new Error(`Discussion comment node ID not found in event payload for comment ${commentId}`);
+}
+
+/**
+ * Add a comment with a workflow run link
+ * @param {string} endpoint - The GitHub API endpoint to create the comment (or special format for discussions)
+ * @param {string} runUrl - The URL of the workflow run
+ * @param {string} eventName - The event type (to determine the comment text)
+ */
+async function addCommentWithWorkflowLink(endpoint, runUrl, eventName) {
+ try {
+ // Get workflow name from environment variable
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+
+ // Determine the event type description
+ let eventTypeDescription;
+ switch (eventName) {
+ case "issues":
+ eventTypeDescription = "issue";
+ break;
+ case "pull_request":
+ eventTypeDescription = "pull request";
+ break;
+ case "issue_comment":
+ eventTypeDescription = "issue comment";
+ break;
+ case "pull_request_review_comment":
+ eventTypeDescription = "pull request review comment";
+ break;
+ case "discussion":
+ eventTypeDescription = "discussion";
+ break;
+ case "discussion_comment":
+ eventTypeDescription = "discussion comment";
+ break;
+ default:
+ eventTypeDescription = "event";
+ }
+
+ // Use getRunStartedMessage for the workflow link text (supports custom messages)
+ const workflowLinkText = getRunStartedMessage({
+ workflowName: workflowName,
+ runUrl: runUrl,
+ eventType: eventTypeDescription,
+ });
+
+ // Add workflow-id and tracker-id markers for hide-older-comments feature
+ const workflowId = process.env.GITHUB_WORKFLOW || "";
+ const trackerId = process.env.GH_AW_TRACKER_ID || "";
+
+ let commentBody = workflowLinkText;
+
+ // Add lock notice if lock-for-agent is enabled for issues or issue_comment
+ const lockForAgent = process.env.GH_AW_LOCK_FOR_AGENT === "true";
+ if (lockForAgent && (eventName === "issues" || eventName === "issue_comment")) {
+ commentBody += "\n\nš This issue has been locked while the workflow is running to prevent concurrent modifications.";
+ }
+
+ // Add workflow-id marker if available
+ if (workflowId) {
+ commentBody += `\n\n`;
+ }
+
+ // Add tracker-id marker if available (for backwards compatibility)
+ if (trackerId) {
+ commentBody += `\n\n`;
+ }
+
+ // Add comment type marker to identify this as a reaction comment
+ // This prevents it from being hidden by hide-older-comments
+ commentBody += `\n\n`;
+
+ // Handle discussion events specially
+ if (eventName === "discussion") {
+ // Parse discussion number from special format: "discussion:NUMBER"
+ const discussionNumber = parseInt(endpoint.split(":")[1], 10);
+
+ // Create a new comment on the discussion using GraphQL
+ const { repository } = await github.graphql(
+ `
+ query($owner: String!, $repo: String!, $num: Int!) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ id
+ }
+ }
+ }`,
+ { owner: context.repo.owner, repo: context.repo.repo, num: discussionNumber }
+ );
+
+ const discussionId = repository.discussion.id;
+
+ const result = await github.graphql(
+ `
+ mutation($dId: ID!, $body: String!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body }) {
+ comment {
+ id
+ url
+ }
+ }
+ }`,
+ { dId: discussionId, body: commentBody }
+ );
+
+ const comment = result.addDiscussionComment.comment;
+ core.info(`Successfully created discussion comment with workflow link`);
+ core.info(`Comment ID: ${comment.id}`);
+ core.info(`Comment URL: ${comment.url}`);
+ core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`);
+ core.setOutput("comment-id", comment.id);
+ core.setOutput("comment-url", comment.url);
+ core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`);
+ return;
+ } else if (eventName === "discussion_comment") {
+ // Parse discussion number from special format: "discussion_comment:NUMBER:COMMENT_ID"
+ const discussionNumber = parseInt(endpoint.split(":")[1], 10);
+
+ // Create a new comment on the discussion using GraphQL
+ const { repository } = await github.graphql(
+ `
+ query($owner: String!, $repo: String!, $num: Int!) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ id
+ }
+ }
+ }`,
+ { owner: context.repo.owner, repo: context.repo.repo, num: discussionNumber }
+ );
+
+ const discussionId = repository.discussion.id;
+
+ // Get the comment node ID to use as the parent for threading
+ const commentNodeId = context.payload?.comment?.node_id;
+
+ const result = await github.graphql(
+ `
+ mutation($dId: ID!, $body: String!, $replyToId: ID!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body, replyToId: $replyToId }) {
+ comment {
+ id
+ url
+ }
+ }
+ }`,
+ { dId: discussionId, body: commentBody, replyToId: commentNodeId }
+ );
+
+ const comment = result.addDiscussionComment.comment;
+ core.info(`Successfully created discussion comment with workflow link`);
+ core.info(`Comment ID: ${comment.id}`);
+ core.info(`Comment URL: ${comment.url}`);
+ core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`);
+ core.setOutput("comment-id", comment.id);
+ core.setOutput("comment-url", comment.url);
+ core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`);
+ return;
+ }
+
+ // Create a new comment for non-discussion events
+ const createResponse = await github.request("POST " + endpoint, {
+ body: commentBody,
+ headers: {
+ Accept: "application/vnd.github+json",
+ },
+ });
+
+ core.info(`Successfully created comment with workflow link`);
+ core.info(`Comment ID: ${createResponse.data.id}`);
+ core.info(`Comment URL: ${createResponse.data.html_url}`);
+ core.info(`Comment Repo: ${context.repo.owner}/${context.repo.repo}`);
+ core.setOutput("comment-id", createResponse.data.id.toString());
+ core.setOutput("comment-url", createResponse.data.html_url);
+ core.setOutput("comment-repo", `${context.repo.owner}/${context.repo.repo}`);
+ } catch (error) {
+ // Don't fail the entire job if comment creation fails - just log it
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.warning("Failed to create comment with workflow link (This is not critical - the reaction was still added successfully): " + errorMessage);
+ }
+}
+
+await main();
diff --git a/actions/setup/js/add_reviewer.cjs b/actions/setup/js/add_reviewer.cjs
new file mode 100644
index 0000000000..f140ca8c75
--- /dev/null
+++ b/actions/setup/js/add_reviewer.cjs
@@ -0,0 +1,132 @@
+// @ts-check
+///
+
+const { processSafeOutput, processItems } = require("./safe_output_processor.cjs");
+
+// Login of the GitHub Copilot pull-request reviewer bot. Requested in its own
+// API call inside main() so a failure adding Copilot is downgraded to a
+// warning instead of failing the whole step.
+const COPILOT_REVIEWER_BOT = "copilot-pull-request-reviewer[bot]";
+
+/**
+ * Process an "add_reviewer" safe-output item: resolve the target PR, filter
+ * the requested reviewers against the allow-list/max-count config, then
+ * request them on the PR. "copilot" is requested in a separate API call via
+ * its bot login, and its failure only warns rather than failing the step.
+ */
+async function main() {
+  // Use shared processor for common steps
+  const result = await processSafeOutput(
+    {
+      itemType: "add_reviewer",
+      configKey: "add_reviewer",
+      displayName: "Reviewers",
+      itemTypeName: "reviewer addition",
+      supportsPR: false, // PR-only: supportsPR=false means ONLY PR context (not issues)
+      supportsIssue: false,
+      envVars: {
+        allowed: "GH_AW_REVIEWERS_ALLOWED",
+        maxCount: "GH_AW_REVIEWERS_MAX_COUNT",
+        target: "GH_AW_REVIEWERS_TARGET",
+      },
+    },
+    {
+      // Staged-mode preview: rendered instead of calling the API.
+      title: "Add Reviewers",
+      description: "The following reviewers would be added if staged mode was disabled:",
+      renderItem: item => {
+        let content = "";
+        if (item.pull_request_number) {
+          content += `**Target Pull Request:** #${item.pull_request_number}\n\n`;
+        } else {
+          content += `**Target:** Current pull request\n\n`;
+        }
+        if (item.reviewers && item.reviewers.length > 0) {
+          content += `**Reviewers to add:** ${item.reviewers.join(", ")}\n\n`;
+        }
+        return content;
+      },
+    }
+  );
+
+  if (!result.success) {
+    return;
+  }
+
+  // @ts-ignore - TypeScript doesn't narrow properly after success check
+  const { item: reviewerItem, config, targetResult } = result;
+  if (!config || !targetResult || targetResult.number === undefined) {
+    core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined");
+    return;
+  }
+  const { allowed: allowedReviewers, maxCount } = config;
+  const prNumber = targetResult.number;
+
+  const requestedReviewers = reviewerItem.reviewers || [];
+  core.info(`Requested reviewers: ${JSON.stringify(requestedReviewers)}`);
+
+  // Use shared helper to filter, sanitize, dedupe, and limit
+  const uniqueReviewers = processItems(requestedReviewers, allowedReviewers, maxCount);
+
+  // Nothing valid to request: emit empty output and an explanatory summary.
+  if (uniqueReviewers.length === 0) {
+    core.info("No reviewers to add");
+    core.setOutput("reviewers_added", "");
+    await core.summary
+      .addRaw(
+        `
+## Reviewer Addition
+
+No reviewers were added (no valid reviewers found in agent output).
+`
+      )
+      .write();
+    return;
+  }
+
+  core.info(`Adding ${uniqueReviewers.length} reviewers to PR #${prNumber}: ${JSON.stringify(uniqueReviewers)}`);
+
+  try {
+    // Special handling for "copilot" reviewer - separate it from other reviewers in a single pass
+    const hasCopilot = uniqueReviewers.includes("copilot");
+    const otherReviewers = hasCopilot ? uniqueReviewers.filter(r => r !== "copilot") : uniqueReviewers;
+
+    // Add non-copilot reviewers first
+    if (otherReviewers.length > 0) {
+      await github.rest.pulls.requestReviewers({
+        owner: context.repo.owner,
+        repo: context.repo.repo,
+        pull_number: prNumber,
+        reviewers: otherReviewers,
+      });
+      core.info(`Successfully added ${otherReviewers.length} reviewer(s) to PR #${prNumber}`);
+    }
+
+    // Add copilot reviewer separately if requested; "copilot" is mapped to the
+    // bot's real login (COPILOT_REVIEWER_BOT) for the API call.
+    if (hasCopilot) {
+      try {
+        await github.rest.pulls.requestReviewers({
+          owner: context.repo.owner,
+          repo: context.repo.repo,
+          pull_number: prNumber,
+          reviewers: [COPILOT_REVIEWER_BOT],
+        });
+        core.info(`Successfully added copilot as reviewer to PR #${prNumber}`);
+      } catch (copilotError) {
+        core.warning(`Failed to add copilot as reviewer: ${copilotError instanceof Error ? copilotError.message : String(copilotError)}`);
+        // Don't fail the whole step if copilot reviewer fails
+      }
+    }
+
+    // Note: the output lists all requested reviewers, including "copilot" even
+    // if the copilot request above failed (only a warning was emitted).
+    core.setOutput("reviewers_added", uniqueReviewers.join("\n"));
+
+    const reviewersListMarkdown = uniqueReviewers.map(reviewer => `- \`${reviewer}\``).join("\n");
+    await core.summary
+      .addRaw(
+        `
+## Reviewer Addition
+
+Successfully added ${uniqueReviewers.length} reviewer(s) to PR #${prNumber}:
+
+${reviewersListMarkdown}
+`
+      )
+      .write();
+  } catch (error) {
+    const errorMessage = error instanceof Error ? error.message : String(error);
+    core.error(`Failed to add reviewers: ${errorMessage}`);
+    core.setFailed(`Failed to add reviewers: ${errorMessage}`);
+  }
+}
+
+await main();
diff --git a/actions/setup/js/assign_agent_helpers.cjs b/actions/setup/js/assign_agent_helpers.cjs
new file mode 100644
index 0000000000..9a40fabda2
--- /dev/null
+++ b/actions/setup/js/assign_agent_helpers.cjs
@@ -0,0 +1,419 @@
+// @ts-check
+///
+
+/**
+ * Shared helper functions for assigning coding agents (like Copilot) to issues
+ * These functions use GraphQL to properly assign bot actors that cannot be assigned via gh CLI
+ *
+ * NOTE: All functions use the built-in `github` global object for authentication.
+ * The token must be set at the step level via the `github-token` parameter in GitHub Actions.
+ * This approach is required for compatibility with actions/github-script@v8.
+ */
+
+/**
+ * Map agent names to their GitHub bot login names.
+ * NOTE(review): the generic parameters were missing from this annotation
+ * (likely stripped in processing); restored as string -> string.
+ * @type {Record<string, string>}
+ */
+const AGENT_LOGIN_NAMES = {
+  copilot: "copilot-swe-agent",
+};
+
+/**
+ * Check if an assignee is a known coding agent (bot)
+ * @param {string} assignee - Assignee name (may include @ prefix)
+ * @returns {string|null} Agent name if it's a known agent, null otherwise
+ */
+function getAgentName(assignee) {
+ // Normalize: remove @ prefix if present
+ const normalized = assignee.startsWith("@") ? assignee.slice(1) : assignee;
+
+ // Check if it's a known agent
+ if (AGENT_LOGIN_NAMES[normalized]) {
+ return normalized;
+ }
+
+ return null;
+}
+
+/**
+ * Return list of coding agent bot login names that are currently available as assignable actors
+ * (intersection of suggestedActors and known AGENT_LOGIN_NAMES values)
+ * @param {string} owner
+ * @param {string} repo
+ * @returns {Promise}
+ */
+async function getAvailableAgentLogins(owner, repo) {
+ const query = `
+ query($owner: String!, $repo: String!) {
+ repository(owner: $owner, name: $repo) {
+ suggestedActors(first: 100, capabilities: CAN_BE_ASSIGNED) {
+ nodes { ... on Bot { login __typename } }
+ }
+ }
+ }
+ `;
+ try {
+ const response = await github.graphql(query, { owner, repo });
+ const actors = response.repository?.suggestedActors?.nodes || [];
+ const knownValues = Object.values(AGENT_LOGIN_NAMES);
+ const available = [];
+ for (const actor of actors) {
+ if (actor && actor.login && knownValues.includes(actor.login)) {
+ available.push(actor.login);
+ }
+ }
+ return available.sort();
+ } catch (e) {
+ const msg = e instanceof Error ? e.message : String(e);
+ core.debug(`Failed to list available agent logins: ${msg}`);
+ return [];
+ }
+}
+
+/**
+ * Find an agent in repository's suggested actors using GraphQL
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {string} agentName - Agent name (copilot)
+ * @returns {Promise} Agent ID or null if not found
+ */
+async function findAgent(owner, repo, agentName) {
+ const query = `
+ query($owner: String!, $repo: String!) {
+ repository(owner: $owner, name: $repo) {
+ suggestedActors(first: 100, capabilities: CAN_BE_ASSIGNED) {
+ nodes {
+ ... on Bot {
+ id
+ login
+ __typename
+ }
+ }
+ }
+ }
+ }
+ `;
+
+ try {
+ const response = await github.graphql(query, { owner, repo });
+ const actors = response.repository.suggestedActors.nodes;
+
+ const loginName = AGENT_LOGIN_NAMES[agentName];
+ if (!loginName) {
+ core.error(`Unknown agent: ${agentName}. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`);
+ return null;
+ }
+
+ for (const actor of actors) {
+ if (actor.login === loginName) {
+ return actor.id;
+ }
+ }
+
+ const available = actors.filter(a => a && a.login && Object.values(AGENT_LOGIN_NAMES).includes(a.login)).map(a => a.login);
+
+ core.warning(`${agentName} coding agent (${loginName}) is not available as an assignee for this repository`);
+ if (available.length > 0) {
+ core.info(`Available assignable coding agents: ${available.join(", ")}`);
+ } else {
+ core.info("No coding agents are currently assignable in this repository.");
+ }
+ if (agentName === "copilot") {
+ core.info("Please visit https://docs.github.com/en/copilot/using-github-copilot/using-copilot-coding-agent-to-work-on-tasks/about-assigning-tasks-to-copilot");
+ }
+ return null;
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to find ${agentName} agent: ${errorMessage}`);
+ return null;
+ }
+}
+
+/**
+ * Get issue details (ID and current assignees) using GraphQL
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} issueNumber - Issue number
+ * @returns {Promise<{issueId: string, currentAssignees: string[]}|null>}
+ */
+async function getIssueDetails(owner, repo, issueNumber) {
+ const query = `
+ query($owner: String!, $repo: String!, $issueNumber: Int!) {
+ repository(owner: $owner, name: $repo) {
+ issue(number: $issueNumber) {
+ id
+ assignees(first: 100) {
+ nodes {
+ id
+ }
+ }
+ }
+ }
+ }
+ `;
+
+ try {
+ const response = await github.graphql(query, { owner, repo, issueNumber });
+ const issue = response.repository.issue;
+
+ if (!issue || !issue.id) {
+ core.error("Could not get issue data");
+ return null;
+ }
+
+ const currentAssignees = issue.assignees.nodes.map(assignee => assignee.id);
+
+ return {
+ issueId: issue.id,
+ currentAssignees: currentAssignees,
+ };
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to get issue details: ${errorMessage}`);
+ return null;
+ }
+}
+
+/**
+ * Assign agent to issue using GraphQL replaceActorsForAssignable mutation
+ * @param {string} issueId - GitHub issue ID
+ * @param {string} agentId - Agent ID
+ * @param {string[]} currentAssignees - List of current assignee IDs
+ * @param {string} agentName - Agent name for error messages
+ * @returns {Promise} True if successful
+ */
async function assignAgentToIssue(issueId, agentId, currentAssignees, agentName) {
  // Build actor IDs array - include agent and preserve other assignees.
  // replaceActorsForAssignable REPLACES the full assignee set, so existing
  // assignees must be re-sent or they would be silently dropped.
  const actorIds = [agentId];
  for (const assigneeId of currentAssignees) {
    if (assigneeId !== agentId) {
      actorIds.push(assigneeId);
    }
  }

  // __typename is requested only as a minimal field to confirm the mutation ran.
  const mutation = `
    mutation($assignableId: ID!, $actorIds: [ID!]!) {
      replaceActorsForAssignable(input: {
        assignableId: $assignableId,
        actorIds: $actorIds
      }) {
        __typename
      }
    }
  `;

  try {
    core.info("Using built-in github object for mutation");

    core.debug(`GraphQL mutation with variables: assignableId=${issueId}, actorIds=${JSON.stringify(actorIds)}`);
    const response = await github.graphql(mutation, {
      assignableId: issueId,
      actorIds: actorIds,
    });

    // Success is detected by the presence of the mutation payload.
    if (response && response.replaceActorsForAssignable && response.replaceActorsForAssignable.__typename) {
      return true;
    } else {
      core.error("Unexpected response from GitHub API");
      return false;
    }
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);

    // Debug: surface the raw GraphQL error structure for troubleshooting fine-grained permission issues
    try {
      core.debug(`Raw GraphQL error message: ${errorMessage}`);
      if (error && typeof error === "object") {
        // Common GraphQL error shapes: error.errors (array), error.data, error.response
        const details = {};
        if (error.errors) details.errors = error.errors;
        // Some libraries wrap the payload under 'response' or 'response.data'
        if (error.response) details.response = error.response;
        if (error.data) details.data = error.data;
        // If GitHub returns an array of errors with 'type'/'message'
        if (Array.isArray(error.errors)) {
          details.compactMessages = error.errors.map(e => e.message).filter(Boolean);
        }
        // NOTE(review): the replacer here is an identity function (no-op).
        // Cyclic payloads would still throw, which is why this whole section
        // sits inside its own try/catch.
        const serialized = JSON.stringify(details, (_k, v) => v, 2);
        if (serialized && serialized !== "{}") {
          core.debug(`Raw GraphQL error details: ${serialized}`);
          // Also emit non-debug version so users without ACTIONS_STEP_DEBUG can see it
          core.error("Raw GraphQL error details (for troubleshooting):");
          // Split large JSON for readability
          for (const line of serialized.split(/\n/)) {
            if (line.trim()) core.error(line);
          }
        }
      }
    } catch (loggingErr) {
      // Never fail assignment because of debug logging
      core.debug(`Failed to serialize GraphQL error details: ${loggingErr instanceof Error ? loggingErr.message : String(loggingErr)}`);
    }

    // Check for permission-related errors
    if (errorMessage.includes("Resource not accessible by personal access token") || errorMessage.includes("Resource not accessible by integration") || errorMessage.includes("Insufficient permissions to assign")) {
      // Attempt fallback mutation addAssigneesToAssignable when replaceActorsForAssignable is forbidden.
      // addAssigneesToAssignable is additive, so only the agent's ID is sent;
      // existing assignees are left untouched.
      core.info("Primary mutation replaceActorsForAssignable forbidden. Attempting fallback addAssigneesToAssignable...");
      try {
        const fallbackMutation = `
          mutation($assignableId: ID!, $assigneeIds: [ID!]!) {
            addAssigneesToAssignable(input: {
              assignableId: $assignableId,
              assigneeIds: $assigneeIds
            }) {
              clientMutationId
            }
          }
        `;
        core.info("Using built-in github object for fallback mutation");
        core.debug(`Fallback GraphQL mutation with variables: assignableId=${issueId}, assigneeIds=[${agentId}]`);
        const fallbackResp = await github.graphql(fallbackMutation, {
          assignableId: issueId,
          assigneeIds: [agentId],
        });
        if (fallbackResp && fallbackResp.addAssigneesToAssignable) {
          core.info(`Fallback succeeded: agent '${agentName}' added via addAssigneesToAssignable.`);
          return true;
        } else {
          core.warning("Fallback mutation returned unexpected response; proceeding with permission guidance.");
        }
      } catch (fallbackError) {
        const fbMsg = fallbackError instanceof Error ? fallbackError.message : String(fallbackError);
        core.error(`Fallback addAssigneesToAssignable failed: ${fbMsg}`);
      }
      // Fallback failed or returned an unexpected shape: print actionable
      // permission guidance before reporting failure.
      logPermissionError(agentName);
    } else {
      core.error(`Failed to assign ${agentName}: ${errorMessage}`);
    }
    return false;
  }
}
+
+/**
+ * Log detailed permission error guidance
+ * @param {string} agentName - Agent name for error messages
+ */
function logPermissionError(agentName) {
  // All guidance lines are emitted through core.error so they stay visible
  // when the step ultimately fails; the doc link goes out via core.info.
  const guidanceLines = [
    `Failed to assign ${agentName}: Insufficient permissions`,
    "",
    "Assigning Copilot agents requires:",
    " 1. All four workflow permissions:",
    " - actions: write",
    " - contents: write",
    " - issues: write",
    " - pull-requests: write",
    "",
    " 2. A classic PAT with 'repo' scope OR fine-grained PAT with explicit Write permissions above:",
    " (Fine-grained PATs must grant repository access + write for Issues, Pull requests, Contents, Actions)",
    "",
    " 3. Repository settings:",
    " - Actions must have write permissions",
    " - Go to: Settings > Actions > General > Workflow permissions",
    " - Select: 'Read and write permissions'",
    "",
    " 4. Organization/Enterprise settings:",
    " - Check if your org restricts bot assignments",
    " - Verify Copilot is enabled for your repository",
    "",
  ];
  for (const line of guidanceLines) {
    core.error(line);
  }
  core.info("For more information, see: https://docs.github.com/en/copilot/how-tos/use-copilot-agents/coding-agent/create-a-pr");
}
+
+/**
+ * Generate permission error summary content for step summary
+ * @returns {string} Markdown content for permission error guidance
+ */
function generatePermissionErrorSummary() {
  // Fix: the heading/arrow/book characters were mojibake ("ā ļø", "ā", "š")
  // from a mis-encoded source; restore the intended ⚠️ / → / 📖 characters.
  let content = "\n### ⚠️ Permission Requirements\n\n";
  content += "Assigning Copilot agents requires **ALL** of these permissions:\n\n";
  content += "```yaml\n";
  content += "permissions:\n";
  content += " actions: write\n";
  content += " contents: write\n";
  content += " issues: write\n";
  content += " pull-requests: write\n";
  content += "```\n\n";
  content += "**Token capability note:**\n";
  content += "- Current token (PAT or GITHUB_TOKEN) lacks assignee mutation capability for this repository.\n";
  content += "- Both `replaceActorsForAssignable` and fallback `addAssigneesToAssignable` returned FORBIDDEN/Resource not accessible.\n";
  content += "- This typically means bot/user assignment requires an elevated OAuth or GitHub App installation token.\n\n";
  content += "**Recommended remediation paths:**\n";
  content += "1. Create & install a GitHub App with: Issues/Pull requests/Contents/Actions (write) → use installation token in job.\n";
  content += "2. Manual assignment: add the agent through the UI until broader token support is available.\n";
  content += "3. Open a support ticket referencing failing mutation `replaceActorsForAssignable` and repository slug.\n\n";
  content += "**Why this failed:** Fine-grained and classic PATs can update issue title (verified) but not modify assignees in this environment.\n\n";
  content += "📖 Reference: https://docs.github.com/en/copilot/how-tos/use-copilot-agents/coding-agent/create-a-pr (general agent docs)\n";
  return content;
}
+
+/**
+ * Assign an agent to an issue using GraphQL
+ * This is the main entry point for assigning agents from other scripts
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} issueNumber - Issue number
+ * @param {string} agentName - Agent name (e.g., "copilot")
+ * @returns {Promise<{success: boolean, error?: string}>}
+ */
async function assignAgentToIssueByName(owner, repo, issueNumber, agentName) {
  // Reject agents we do not have a login mapping for, up front.
  if (!AGENT_LOGIN_NAMES[agentName]) {
    const error = `Agent "${agentName}" is not supported. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`;
    core.warning(error);
    return { success: false, error };
  }

  try {
    // Resolve the agent's node ID using the github object authenticated via
    // the step-level github-token.
    core.info(`Looking for ${agentName} coding agent...`);
    const agentId = await findAgent(owner, repo, agentName);
    if (!agentId) {
      const baseError = `${agentName} coding agent is not available for this repository`;
      // Enrich the message with the agent logins that ARE available.
      const available = await getAvailableAgentLogins(owner, repo);
      const error = available.length > 0 ? `${baseError} (available agents: ${available.join(", ")})` : baseError;
      return { success: false, error };
    }
    core.info(`Found ${agentName} coding agent (ID: ${agentId})`);

    // Look up the issue's node ID and its current assignee IDs via GraphQL.
    core.info("Getting issue details...");
    const details = await getIssueDetails(owner, repo, issueNumber);
    if (!details) {
      return { success: false, error: "Failed to get issue details" };
    }

    core.info(`Issue ID: ${details.issueId}`);

    // Nothing to do if the agent is already among the assignees.
    if (details.currentAssignees.includes(agentId)) {
      core.info(`${agentName} is already assigned to issue #${issueNumber}`);
      return { success: true };
    }

    // Run the assignment mutation, preserving the existing assignees.
    core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`);
    const assigned = await assignAgentToIssue(details.issueId, agentId, details.currentAssignees, agentName);
    if (!assigned) {
      return { success: false, error: `Failed to assign ${agentName} via GraphQL` };
    }

    core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`);
    return { success: true };
  } catch (err) {
    return { success: false, error: err instanceof Error ? err.message : String(err) };
  }
}
+
// Shared helper surface consumed by the safe-output assignment scripts
// (required elsewhere in this diff via require("./assign_agent_helpers.cjs")).
module.exports = {
  AGENT_LOGIN_NAMES,
  getAgentName,
  getAvailableAgentLogins,
  findAgent,
  getIssueDetails,
  assignAgentToIssue,
  logPermissionError,
  generatePermissionErrorSummary,
  assignAgentToIssueByName,
};
diff --git a/actions/setup/js/assign_copilot_to_created_issues.cjs b/actions/setup/js/assign_copilot_to_created_issues.cjs
new file mode 100644
index 0000000000..f62d38ccff
--- /dev/null
+++ b/actions/setup/js/assign_copilot_to_created_issues.cjs
@@ -0,0 +1,159 @@
+// @ts-check
+///
+
+const { AGENT_LOGIN_NAMES, findAgent, getIssueDetails, assignAgentToIssue, generatePermissionErrorSummary } = require("./assign_agent_helpers.cjs");
+
+/**
+ * Assign copilot to issues created by create_issue job.
+ * This script reads the issues_to_assign_copilot output and assigns copilot to each issue.
+ * It uses the agent token (GH_AW_AGENT_TOKEN) for the GraphQL mutation.
+ */
+
async function main() {
  // Get the issues to assign from step output.
  // NOTE(review): the ${{ }} expression below is substituted by GitHub Actions
  // when this script is inlined into a workflow step; run standalone it would
  // remain a literal string — assumes workflow-inlined execution, confirm.
  const issuesToAssignStr = "${{ steps.create_issue.outputs.issues_to_assign_copilot }}";

  if (!issuesToAssignStr || issuesToAssignStr.trim() === "") {
    core.info("No issues to assign copilot to");
    return;
  }

  core.info(`Issues to assign copilot: ${issuesToAssignStr}`);

  // Parse the comma-separated list of repo:number entries
  const issueEntries = issuesToAssignStr.split(",").filter(entry => entry.trim() !== "");
  if (issueEntries.length === 0) {
    core.info("No valid issue entries found");
    return;
  }

  core.info(`Processing ${issueEntries.length} issue(s) for copilot assignment`);

  const agentName = "copilot";
  const results = [];
  // Fix: entries may reference DIFFERENT repositories ("owner/repo:number"),
  // and agent availability is checked per repo, so the resolved agent ID is
  // cached per repository slug instead of in a single shared variable.
  const agentIdByRepo = new Map();

  for (const entry of issueEntries) {
    // Parse repo:number format
    const parts = entry.split(":");
    if (parts.length !== 2) {
      core.warning(`Invalid issue entry format: ${entry}. Expected 'owner/repo:number'`);
      continue;
    }

    const repoSlug = parts[0];
    const issueNumber = parseInt(parts[1], 10);

    if (isNaN(issueNumber) || issueNumber <= 0) {
      core.warning(`Invalid issue number in entry: ${entry}`);
      continue;
    }

    // Parse owner/repo from repo slug
    const repoParts = repoSlug.split("/");
    if (repoParts.length !== 2) {
      core.warning(`Invalid repo format: ${repoSlug}. Expected 'owner/repo'`);
      continue;
    }

    const owner = repoParts[0];
    const repo = repoParts[1];

    try {
      // Find agent (cached per repository so repeated entries for the same
      // repo trigger only one lookup)
      let agentId = agentIdByRepo.get(repoSlug);
      if (!agentId) {
        core.info(`Looking for ${agentName} coding agent...`);
        agentId = await findAgent(owner, repo, agentName);
        if (!agentId) {
          throw new Error(`${agentName} coding agent is not available for this repository`);
        }
        agentIdByRepo.set(repoSlug, agentId);
        core.info(`Found ${agentName} coding agent (ID: ${agentId})`);
      }

      // Get issue details (node ID and current assignees)
      core.info(`Getting details for issue #${issueNumber} in ${repoSlug}...`);
      const issueDetails = await getIssueDetails(owner, repo, issueNumber);
      if (!issueDetails) {
        throw new Error("Failed to get issue details");
      }

      core.info(`Issue ID: ${issueDetails.issueId}`);

      // Check if agent is already assigned
      if (issueDetails.currentAssignees.includes(agentId)) {
        core.info(`${agentName} is already assigned to issue #${issueNumber}`);
        results.push({
          repo: repoSlug,
          issue_number: issueNumber,
          success: true,
          already_assigned: true,
        });
        continue;
      }

      // Assign agent using GraphQL mutation
      core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`);
      const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName);

      if (!success) {
        throw new Error(`Failed to assign ${agentName} via GraphQL`);
      }

      core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`);
      results.push({
        repo: repoSlug,
        issue_number: issueNumber,
        success: true,
      });
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.error(`Failed to assign ${agentName} to issue #${issueNumber} in ${repoSlug}: ${errorMessage}`);
      results.push({
        repo: repoSlug,
        issue_number: issueNumber,
        success: false,
        error: errorMessage,
      });
    }
  }

  // Generate step summary
  const successCount = results.filter(r => r.success).length;
  const failureCount = results.filter(r => !r.success).length;

  let summaryContent = "## Copilot Assignment for Created Issues\n\n";

  if (successCount > 0) {
    // Fix: the ✅ marker was mojibake split across two lines, which injected a
    // stray newline into the summary heading.
    summaryContent += `✅ Successfully assigned copilot to ${successCount} issue(s):\n\n`;
    for (const result of results.filter(r => r.success)) {
      const note = result.already_assigned ? " (already assigned)" : "";
      summaryContent += `- ${result.repo}#${result.issue_number}${note}\n`;
    }
    summaryContent += "\n";
  }

  if (failureCount > 0) {
    summaryContent += `❌ Failed to assign copilot to ${failureCount} issue(s):\n\n`;
    for (const result of results.filter(r => !r.success)) {
      summaryContent += `- ${result.repo}#${result.issue_number}: ${result.error}\n`;
    }

    // Check if any failures were permission-related
    const hasPermissionError = results.some(r => !r.success && r.error && (r.error.includes("Resource not accessible") || r.error.includes("Insufficient permissions")));

    if (hasPermissionError) {
      summaryContent += generatePermissionErrorSummary();
    }
  }

  await core.summary.addRaw(summaryContent).write();

  // Fail if any assignments failed
  if (failureCount > 0) {
    core.setFailed(`Failed to assign copilot to ${failureCount} issue(s)`);
  }
}

// .cjs modules cannot use top-level await, so run main via an async IIFE.
(async () => {
  await main();
})();
diff --git a/actions/setup/js/assign_issue.cjs b/actions/setup/js/assign_issue.cjs
new file mode 100644
index 0000000000..c94ef2a670
--- /dev/null
+++ b/actions/setup/js/assign_issue.cjs
@@ -0,0 +1,107 @@
+// @ts-check
+///
+
+const { getAgentName, getIssueDetails, findAgent, assignAgentToIssue } = require("./assign_agent_helpers.cjs");
+
+/**
+ * Assign an issue to a user or bot (including copilot)
+ * This script handles assigning issues after they are created
+ */
+
async function main() {
  // Validate required environment variables
  const ghToken = process.env.GH_TOKEN;
  const assignee = process.env.ASSIGNEE;
  const issueNumber = process.env.ISSUE_NUMBER;

  // Check if GH_TOKEN is present
  if (!ghToken || ghToken.trim() === "") {
    const docsUrl = "https://githubnext.github.io/gh-aw/reference/safe-outputs/#assigning-issues-to-copilot";
    core.setFailed(`GH_TOKEN environment variable is required but not set. ` + `This token is needed to assign issues. ` + `For more information on configuring Copilot tokens, see: ${docsUrl}`);
    return;
  }

  // Validate assignee
  if (!assignee || assignee.trim() === "") {
    core.setFailed("ASSIGNEE environment variable is required but not set");
    return;
  }

  // Validate issue number
  if (!issueNumber || issueNumber.trim() === "") {
    core.setFailed("ISSUE_NUMBER environment variable is required but not set");
    return;
  }

  const trimmedAssignee = assignee.trim();
  const trimmedIssueNumber = issueNumber.trim();
  const issueNum = parseInt(trimmedIssueNumber, 10);

  // Fix: the parsed number was previously used unvalidated; a non-numeric
  // ISSUE_NUMBER would have propagated NaN into the GraphQL lookup.
  if (isNaN(issueNum) || issueNum <= 0) {
    core.setFailed(`ISSUE_NUMBER must be a positive integer, got: ${trimmedIssueNumber}`);
    return;
  }

  core.info(`Assigning issue #${trimmedIssueNumber} to ${trimmedAssignee}`);

  try {
    // Check if the assignee is a known coding agent (e.g., copilot, @copilot)
    const agentName = getAgentName(trimmedAssignee);

    if (agentName) {
      // Use GraphQL API for agent assignment
      // The token is set at the step level via github-token parameter
      core.info(`Detected coding agent: ${agentName}. Using GraphQL API for assignment.`);

      // Get repository owner and repo from context
      const owner = context.repo.owner;
      const repo = context.repo.repo;

      // Find the agent in the repository
      const agentId = await findAgent(owner, repo, agentName);
      if (!agentId) {
        throw new Error(`${agentName} coding agent is not available for this repository`);
      }
      core.info(`Found ${agentName} coding agent (ID: ${agentId})`);

      // Get issue details
      const issueDetails = await getIssueDetails(owner, repo, issueNum);
      if (!issueDetails) {
        throw new Error("Failed to get issue details");
      }

      // Check if agent is already assigned
      if (issueDetails.currentAssignees.includes(agentId)) {
        core.info(`${agentName} is already assigned to issue #${trimmedIssueNumber}`);
      } else {
        // Assign agent using GraphQL mutation - uses built-in github object authenticated via github-token
        const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName);

        if (!success) {
          throw new Error(`Failed to assign ${agentName} via GraphQL`);
        }
      }
    } else {
      // Use gh CLI for regular user assignment
      await exec.exec("gh", ["issue", "edit", trimmedIssueNumber, "--add-assignee", trimmedAssignee], {
        env: { ...process.env, GH_TOKEN: ghToken },
      });
    }

    // Fix: the ✅ marker was mojibake split across two lines, which injected a
    // stray newline into this log message.
    core.info(`✅ Successfully assigned issue #${trimmedIssueNumber} to ${trimmedAssignee}`);

    // Write summary
    await core.summary
      .addRaw(
        `
## Issue Assignment

Successfully assigned issue #${trimmedIssueNumber} to \`${trimmedAssignee}\`.
`
      )
      .write();
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    core.error(`Failed to assign issue: ${errorMessage}`);
    core.setFailed(`Failed to assign issue #${trimmedIssueNumber} to ${trimmedAssignee}: ${errorMessage}`);
  }
}

main().catch(error => {
  core.setFailed(error instanceof Error ? error.message : String(error));
});
diff --git a/actions/setup/js/assign_milestone.cjs b/actions/setup/js/assign_milestone.cjs
new file mode 100644
index 0000000000..1853922cf0
--- /dev/null
+++ b/actions/setup/js/assign_milestone.cjs
@@ -0,0 +1,169 @@
+// @ts-check
+///
+
+const { processSafeOutput } = require("./safe_output_processor.cjs");
+
async function main() {
  // Use shared processor for common steps (agent-output parsing, staged
  // preview rendering, and env-var driven configuration)
  const result = await processSafeOutput(
    {
      itemType: "assign_milestone",
      configKey: "assign_milestone",
      displayName: "Milestone",
      itemTypeName: "milestone assignment",
      supportsPR: true,
      supportsIssue: true,
      findMultiple: true, // This processor finds multiple items
      envVars: {
        allowed: "GH_AW_MILESTONE_ALLOWED",
        maxCount: "GH_AW_MILESTONE_MAX_COUNT",
        target: "GH_AW_MILESTONE_TARGET",
      },
    },
    {
      title: "Assign Milestone",
      description: "The following milestone assignments would be made if staged mode was disabled:",
      renderItem: item => {
        let content = `**Issue:** #${item.issue_number}\n`;
        content += `**Milestone Number:** ${item.milestone_number}\n\n`;
        return content;
      },
    }
  );

  if (!result.success) {
    return;
  }

  // @ts-ignore - TypeScript doesn't narrow properly after success check
  const { items: milestoneItems, config } = result;
  if (!config || !milestoneItems) {
    core.setFailed("Internal error: config or milestoneItems is undefined");
    return;
  }
  const { allowed: allowedMilestones, maxCount } = config;

  // Limit items to max count
  const itemsToProcess = milestoneItems.slice(0, maxCount);
  if (milestoneItems.length > maxCount) {
    core.warning(`Found ${milestoneItems.length} milestone assignments, but max is ${maxCount}. Processing first ${maxCount}.`);
  }

  // Fetch all milestones to validate against the allowed list.
  // Fix: an EMPTY allowed list never restricts anything below (the validation
  // branch checks length > 0), so the fetch is skipped in that case instead of
  // wasting an API call.
  let allMilestones = [];
  if (allowedMilestones && allowedMilestones.length > 0) {
    try {
      // NOTE(review): only the first 100 milestones are fetched (no
      // pagination); repos with more would need github.paginate — confirm.
      const milestonesResponse = await github.rest.issues.listMilestones({
        owner: context.repo.owner,
        repo: context.repo.repo,
        state: "all",
        per_page: 100,
      });
      allMilestones = milestonesResponse.data;
      core.info(`Fetched ${allMilestones.length} milestones from repository`);
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.error(`Failed to fetch milestones: ${errorMessage}`);
      core.setFailed(`Failed to fetch milestones for validation: ${errorMessage}`);
      return;
    }
  }

  // Process each milestone assignment
  const results = [];
  for (const item of itemsToProcess) {
    const issueNumber = typeof item.issue_number === "number" ? item.issue_number : parseInt(String(item.issue_number), 10);
    const milestoneNumber = typeof item.milestone_number === "number" ? item.milestone_number : parseInt(String(item.milestone_number), 10);

    if (isNaN(issueNumber) || issueNumber <= 0) {
      core.error(`Invalid issue_number: ${item.issue_number}`);
      continue;
    }

    if (isNaN(milestoneNumber) || milestoneNumber <= 0) {
      core.error(`Invalid milestone_number: ${item.milestone_number}`);
      continue;
    }

    // Validate against allowed list if configured
    if (allowedMilestones && allowedMilestones.length > 0) {
      const milestone = allMilestones.find(m => m.number === milestoneNumber);

      if (!milestone) {
        core.warning(`Milestone #${milestoneNumber} not found in repository. Skipping.`);
        continue;
      }

      // Check if milestone title or number (as string) is in allowed list
      const isAllowed = allowedMilestones.includes(milestone.title) || allowedMilestones.includes(String(milestoneNumber));

      if (!isAllowed) {
        core.warning(`Milestone "${milestone.title}" (#${milestoneNumber}) is not in the allowed list. Skipping.`);
        continue;
      }
    }

    // Assign the milestone to the issue
    try {
      await github.rest.issues.update({
        owner: context.repo.owner,
        repo: context.repo.repo,
        issue_number: issueNumber,
        milestone: milestoneNumber,
      });

      core.info(`Successfully assigned milestone #${milestoneNumber} to issue #${issueNumber}`);
      results.push({
        issue_number: issueNumber,
        milestone_number: milestoneNumber,
        success: true,
      });
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.error(`Failed to assign milestone #${milestoneNumber} to issue #${issueNumber}: ${errorMessage}`);
      results.push({
        issue_number: issueNumber,
        milestone_number: milestoneNumber,
        success: false,
        error: errorMessage,
      });
    }
  }

  // Generate step summary
  const successCount = results.filter(r => r.success).length;
  const failureCount = results.filter(r => !r.success).length;

  let summaryContent = "## Milestone Assignment\n\n";

  if (successCount > 0) {
    // Fix: the ✅ marker was mojibake split across two lines, and the arrows
    // below were garbled ("ā") — restored to ✅ / → / ❌.
    summaryContent += `✅ Successfully assigned ${successCount} milestone(s):\n\n`;
    for (const result of results.filter(r => r.success)) {
      summaryContent += `- Issue #${result.issue_number} → Milestone #${result.milestone_number}\n`;
    }
    summaryContent += "\n";
  }

  if (failureCount > 0) {
    summaryContent += `❌ Failed to assign ${failureCount} milestone(s):\n\n`;
    for (const result of results.filter(r => !r.success)) {
      summaryContent += `- Issue #${result.issue_number} → Milestone #${result.milestone_number}: ${result.error}\n`;
    }
  }

  await core.summary.addRaw(summaryContent).write();

  // Set outputs
  const assignedMilestones = results
    .filter(r => r.success)
    .map(r => `${r.issue_number}:${r.milestone_number}`)
    .join("\n");
  core.setOutput("assigned_milestones", assignedMilestones);

  // Fail if any assignments failed
  if (failureCount > 0) {
    core.setFailed(`Failed to assign ${failureCount} milestone(s)`);
  }
}

// Fix: a bare top-level `await main();` is invalid in a standalone CommonJS
// (.cjs) module; use the async IIFE pattern the sibling scripts use.
(async () => {
  await main();
})();
diff --git a/actions/setup/js/assign_to_agent.cjs b/actions/setup/js/assign_to_agent.cjs
new file mode 100644
index 0000000000..94235ab486
--- /dev/null
+++ b/actions/setup/js/assign_to_agent.cjs
@@ -0,0 +1,218 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateStagedPreview } = require("./staged_preview.cjs");
+const { AGENT_LOGIN_NAMES, getAvailableAgentLogins, findAgent, getIssueDetails, assignAgentToIssue, generatePermissionErrorSummary } = require("./assign_agent_helpers.cjs");
+
async function main() {
  // Load and parse the agent's safe-output items
  const result = loadAgentOutput();
  if (!result.success) {
    return;
  }

  const assignItems = result.items.filter(item => item.type === "assign_to_agent");
  if (assignItems.length === 0) {
    core.info("No assign_to_agent items found in agent output");
    return;
  }

  core.info(`Found ${assignItems.length} assign_to_agent item(s)`);

  // Check if we're in staged mode
  if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") {
    await generateStagedPreview({
      title: "Assign to Agent",
      description: "The following agent assignments would be made if staged mode was disabled:",
      items: assignItems,
      renderItem: item => {
        let content = `**Issue:** #${item.issue_number}\n`;
        content += `**Agent:** ${item.agent || "copilot"}\n`;
        content += "\n";
        return content;
      },
    });
    return;
  }

  // Get default agent from configuration (falls back to "copilot")
  const defaultAgent = process.env.GH_AW_AGENT_DEFAULT?.trim() || "copilot";
  core.info(`Default agent: ${defaultAgent}`);

  // Get max count configuration (defaults to 1 when unset)
  const maxCountEnv = process.env.GH_AW_AGENT_MAX_COUNT;
  const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 1;
  if (isNaN(maxCount) || maxCount < 1) {
    core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`);
    return;
  }
  core.info(`Max count: ${maxCount}`);

  // Limit items to max count
  const itemsToProcess = assignItems.slice(0, maxCount);
  if (assignItems.length > maxCount) {
    core.warning(`Found ${assignItems.length} agent assignments, but max is ${maxCount}. Processing first ${maxCount}.`);
  }

  // Get target repository configuration
  const targetRepoEnv = process.env.GH_AW_TARGET_REPO?.trim();
  let targetOwner = context.repo.owner;
  let targetRepo = context.repo.repo;

  if (targetRepoEnv) {
    const parts = targetRepoEnv.split("/");
    if (parts.length === 2) {
      targetOwner = parts[0];
      targetRepo = parts[1];
      core.info(`Using target repository: ${targetOwner}/${targetRepo}`);
    } else {
      core.warning(`Invalid target-repo format: ${targetRepoEnv}. Expected owner/repo. Using current repository.`);
    }
  }

  // The github-token is set at the step level, so the built-in github object is authenticated
  // with the correct token (GH_AW_AGENT_TOKEN by default)

  // Cache agent IDs to avoid repeated lookups (all items target the single
  // resolved repository, so keying by agent name alone is sufficient here)
  const agentCache = {};

  // Process each agent assignment
  const results = [];
  for (const item of itemsToProcess) {
    const issueNumber = typeof item.issue_number === "number" ? item.issue_number : parseInt(String(item.issue_number), 10);
    const agentName = item.agent || defaultAgent;

    if (isNaN(issueNumber) || issueNumber <= 0) {
      core.error(`Invalid issue_number: ${item.issue_number}`);
      continue;
    }

    // Check if agent is supported
    if (!AGENT_LOGIN_NAMES[agentName]) {
      core.warning(`Agent "${agentName}" is not supported. Supported agents: ${Object.keys(AGENT_LOGIN_NAMES).join(", ")}`);
      results.push({
        issue_number: issueNumber,
        agent: agentName,
        success: false,
        error: `Unsupported agent: ${agentName}`,
      });
      continue;
    }

    // Assign the agent to the issue using GraphQL
    try {
      // Find agent (use cache if available) - uses built-in github object authenticated via github-token
      let agentId = agentCache[agentName];
      if (!agentId) {
        core.info(`Looking for ${agentName} coding agent...`);
        agentId = await findAgent(targetOwner, targetRepo, agentName);
        if (!agentId) {
          throw new Error(`${agentName} coding agent is not available for this repository`);
        }
        agentCache[agentName] = agentId;
        core.info(`Found ${agentName} coding agent (ID: ${agentId})`);
      }

      // Get issue details (ID and current assignees) via GraphQL
      core.info("Getting issue details...");
      const issueDetails = await getIssueDetails(targetOwner, targetRepo, issueNumber);
      if (!issueDetails) {
        throw new Error("Failed to get issue details");
      }

      core.info(`Issue ID: ${issueDetails.issueId}`);

      // Check if agent is already assigned
      if (issueDetails.currentAssignees.includes(agentId)) {
        core.info(`${agentName} is already assigned to issue #${issueNumber}`);
        results.push({
          issue_number: issueNumber,
          agent: agentName,
          success: true,
        });
        continue;
      }

      // Assign agent using GraphQL mutation - uses built-in github object authenticated via github-token
      core.info(`Assigning ${agentName} coding agent to issue #${issueNumber}...`);
      const success = await assignAgentToIssue(issueDetails.issueId, agentId, issueDetails.currentAssignees, agentName);

      if (!success) {
        throw new Error(`Failed to assign ${agentName} via GraphQL`);
      }

      core.info(`Successfully assigned ${agentName} coding agent to issue #${issueNumber}`);
      results.push({
        issue_number: issueNumber,
        agent: agentName,
        success: true,
      });
    } catch (error) {
      let errorMessage = error instanceof Error ? error.message : String(error);
      if (errorMessage.includes("coding agent is not available for this repository")) {
        // Enrich with available agent logins to aid troubleshooting - uses built-in github object
        try {
          const available = await getAvailableAgentLogins(targetOwner, targetRepo);
          if (available.length > 0) {
            errorMessage += ` (available agents: ${available.join(", ")})`;
          }
        } catch (e) {
          core.debug("Failed to enrich unavailable agent message with available list");
        }
      }
      core.error(`Failed to assign agent "${agentName}" to issue #${issueNumber}: ${errorMessage}`);
      results.push({
        issue_number: issueNumber,
        agent: agentName,
        success: false,
        error: errorMessage,
      });
    }
  }

  // Generate step summary
  const successCount = results.filter(r => r.success).length;
  const failureCount = results.filter(r => !r.success).length;

  let summaryContent = "## Agent Assignment\n\n";

  if (successCount > 0) {
    // Fix: the ✅ marker was mojibake split across two lines, and the arrows
    // below were garbled ("ā") — restored to ✅ / → / ❌.
    summaryContent += `✅ Successfully assigned ${successCount} agent(s):\n\n`;
    for (const result of results.filter(r => r.success)) {
      summaryContent += `- Issue #${result.issue_number} → Agent: ${result.agent}\n`;
    }
    summaryContent += "\n";
  }

  if (failureCount > 0) {
    summaryContent += `❌ Failed to assign ${failureCount} agent(s):\n\n`;
    for (const result of results.filter(r => !r.success)) {
      summaryContent += `- Issue #${result.issue_number} → Agent: ${result.agent}: ${result.error}\n`;
    }

    // Check if any failures were permission-related
    const hasPermissionError = results.some(r => !r.success && r.error && (r.error.includes("Resource not accessible") || r.error.includes("Insufficient permissions")));

    if (hasPermissionError) {
      summaryContent += generatePermissionErrorSummary();
    }
  }

  await core.summary.addRaw(summaryContent).write();

  // Set outputs
  const assignedAgents = results
    .filter(r => r.success)
    .map(r => `${r.issue_number}:${r.agent}`)
    .join("\n");
  core.setOutput("assigned_agents", assignedAgents);

  // Fail if any assignments failed
  if (failureCount > 0) {
    core.setFailed(`Failed to assign ${failureCount} agent(s)`);
  }
}

(async () => {
  await main();
})();
diff --git a/actions/setup/js/assign_to_user.cjs b/actions/setup/js/assign_to_user.cjs
new file mode 100644
index 0000000000..62f1520538
--- /dev/null
+++ b/actions/setup/js/assign_to_user.cjs
@@ -0,0 +1,131 @@
+// @ts-check
+///
+
+const { processSafeOutput, processItems } = require("./safe_output_processor.cjs");
+
+async function main() {
+ // Use shared processor for common steps
+ const result = await processSafeOutput(
+ {
+ itemType: "assign_to_user",
+ configKey: "assign_to_user",
+ displayName: "Assignees",
+ itemTypeName: "user assignment",
+ supportsPR: false, // Issue-only: not relevant for PRs
+ supportsIssue: true,
+ envVars: {
+ allowed: "GH_AW_ASSIGNEES_ALLOWED",
+ maxCount: "GH_AW_ASSIGNEES_MAX_COUNT",
+ target: "GH_AW_ASSIGNEES_TARGET",
+ },
+ },
+ {
+ title: "Assign to User",
+ description: "The following user assignments would be made if staged mode was disabled:",
+ renderItem: item => {
+ let content = "";
+ if (item.issue_number) {
+ content += `**Target Issue:** #${item.issue_number}\n\n`;
+ } else {
+ content += `**Target:** Current issue\n\n`;
+ }
+ if (item.assignees && item.assignees.length > 0) {
+ content += `**Users to assign:** ${item.assignees.join(", ")}\n\n`;
+ } else if (item.assignee) {
+ content += `**User to assign:** ${item.assignee}\n\n`;
+ }
+ return content;
+ },
+ }
+ );
+
+ if (!result.success) {
+ return;
+ }
+
+ // @ts-ignore - TypeScript doesn't narrow properly after success check
+ const { item: assignItem, config, targetResult } = result;
+ if (!config || !targetResult || targetResult.number === undefined) {
+ core.setFailed("Internal error: config, targetResult, or targetResult.number is undefined");
+ return;
+ }
+ const { allowed: allowedAssignees, maxCount } = config;
+ const issueNumber = targetResult.number;
+
+ // Support both singular "assignee" and plural "assignees" for flexibility
+ let requestedAssignees = [];
+ if (assignItem.assignees && Array.isArray(assignItem.assignees)) {
+ requestedAssignees = assignItem.assignees;
+ } else if (assignItem.assignee) {
+ requestedAssignees = [assignItem.assignee];
+ }
+
+ core.info(`Requested assignees: ${JSON.stringify(requestedAssignees)}`);
+
+ // Use shared helper to filter, sanitize, dedupe, and limit
+ const uniqueAssignees = processItems(requestedAssignees, allowedAssignees, maxCount);
+
+ if (uniqueAssignees.length === 0) {
+ core.info("No assignees to add");
+ core.setOutput("assigned_users", "");
+ await core.summary
+ .addRaw(
+ `
+## User Assignment
+
+No users were assigned (no valid assignees found in agent output).
+`
+ )
+ .write();
+ return;
+ }
+
+ core.info(`Assigning ${uniqueAssignees.length} users to issue #${issueNumber}: ${JSON.stringify(uniqueAssignees)}`);
+
+ try {
+ // Get target repository from environment or use current
+ const targetRepoEnv = process.env.GH_AW_TARGET_REPO_SLUG?.trim();
+ let targetOwner = context.repo.owner;
+ let targetRepo = context.repo.repo;
+
+ if (targetRepoEnv) {
+ const parts = targetRepoEnv.split("/");
+ if (parts.length === 2) {
+ targetOwner = parts[0];
+ targetRepo = parts[1];
+ core.info(`Using target repository: ${targetOwner}/${targetRepo}`);
+ }
+ }
+
+ // Add assignees to the issue
+ await github.rest.issues.addAssignees({
+ owner: targetOwner,
+ repo: targetRepo,
+ issue_number: issueNumber,
+ assignees: uniqueAssignees,
+ });
+
+ core.info(`Successfully assigned ${uniqueAssignees.length} user(s) to issue #${issueNumber}`);
+
+ core.setOutput("assigned_users", uniqueAssignees.join("\n"));
+
+ const assigneesListMarkdown = uniqueAssignees.map(assignee => `- \`${assignee}\``).join("\n");
+ await core.summary
+ .addRaw(
+ `
+## User Assignment
+
+Successfully assigned ${uniqueAssignees.length} user(s) to issue #${issueNumber}:
+
+${assigneesListMarkdown}
+`
+ )
+ .write();
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.error(`Failed to assign users: ${errorMessage}`);
+ core.setFailed(`Failed to assign users: ${errorMessage}`);
+ }
+}
+
+await main();
diff --git a/actions/setup/js/check_command_position.cjs b/actions/setup/js/check_command_position.cjs
new file mode 100644
index 0000000000..812e774e55
--- /dev/null
+++ b/actions/setup/js/check_command_position.cjs
@@ -0,0 +1,69 @@
+// @ts-check
+///
+
+/**
+ * Check if command is the first word in the triggering text
+ * This prevents accidental command triggers from words appearing later in content
+ */
+async function main() {
+ const command = process.env.GH_AW_COMMAND;
+
+ if (!command) {
+ core.setFailed("Configuration error: GH_AW_COMMAND not specified.");
+ return;
+ }
+
+ // Get the triggering text based on event type
+ let text = "";
+ const eventName = context.eventName;
+
+ try {
+ if (eventName === "issues") {
+ text = context.payload.issue?.body || "";
+ } else if (eventName === "pull_request") {
+ text = context.payload.pull_request?.body || "";
+ } else if (eventName === "issue_comment") {
+ text = context.payload.comment?.body || "";
+ } else if (eventName === "pull_request_review_comment") {
+ text = context.payload.comment?.body || "";
+ } else if (eventName === "discussion") {
+ text = context.payload.discussion?.body || "";
+ } else if (eventName === "discussion_comment") {
+ text = context.payload.comment?.body || "";
+ } else {
+ // For non-comment events, pass the check
+ core.info(`Event ${eventName} does not require command position check`);
+ core.setOutput("command_position_ok", "true");
+ return;
+ }
+
+ // Expected command format: /command
+ const expectedCommand = `/${command}`;
+
+ // If text is empty or doesn't contain the command at all, pass the check
+ if (!text || !text.includes(expectedCommand)) {
+ core.info(`No command '${expectedCommand}' found in text, passing check`);
+ core.setOutput("command_position_ok", "true");
+ return;
+ }
+
+ // Normalize whitespace and get the first word
+ const trimmedText = text.trim();
+ const firstWord = trimmedText.split(/\s+/)[0];
+
+ core.info(`Checking command position for: ${expectedCommand}`);
+ core.info(`First word in text: ${firstWord}`);
+
+ if (firstWord === expectedCommand) {
+      core.info(`✅ Command '${expectedCommand}' is at the start of the text`);
+ core.setOutput("command_position_ok", "true");
+ } else {
+      core.warning(`⚠️ Command '${expectedCommand}' is not the first word (found: '${firstWord}'). Workflow will be skipped.`);
+ core.setOutput("command_position_ok", "false");
+ }
+ } catch (error) {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+ }
+}
+
+await main();
diff --git a/actions/setup/js/check_membership.cjs b/actions/setup/js/check_membership.cjs
new file mode 100644
index 0000000000..a9192d554b
--- /dev/null
+++ b/actions/setup/js/check_membership.cjs
@@ -0,0 +1,99 @@
+// @ts-check
+///
+
+const { parseRequiredPermissions, parseAllowedBots, checkRepositoryPermission, checkBotStatus } = require("./check_permissions_utils.cjs");
+
+async function main() {
+ const { eventName } = context;
+ const actor = context.actor;
+ const { owner, repo } = context.repo;
+ const requiredPermissions = parseRequiredPermissions();
+ const allowedBots = parseAllowedBots();
+
+ // For workflow_dispatch, only skip check if "write" is in the allowed roles
+ // since workflow_dispatch can be triggered by users with write access
+ if (eventName === "workflow_dispatch") {
+ const hasWriteRole = requiredPermissions.includes("write");
+ if (hasWriteRole) {
+      core.info(`✅ Event ${eventName} does not require validation (write role allowed)`);
+ core.setOutput("is_team_member", "true");
+ core.setOutput("result", "safe_event");
+ return;
+ }
+ // If write is not allowed, continue with permission check
+ core.info(`Event ${eventName} requires validation (write role not allowed)`);
+ }
+
+ // skip check for other safe events
+ // workflow_run is intentionally excluded due to HIGH security risks:
+ // - Privilege escalation (inherits permissions from triggering workflow)
+ // - Branch protection bypass (can execute on protected branches)
+ // - Secret exposure (secrets available from untrusted code)
+ const safeEvents = ["schedule"];
+ if (safeEvents.includes(eventName)) {
+    core.info(`✅ Event ${eventName} does not require validation`);
+ core.setOutput("is_team_member", "true");
+ core.setOutput("result", "safe_event");
+ return;
+ }
+
+ if (!requiredPermissions || requiredPermissions.length === 0) {
+    core.warning("❌ Configuration error: Required permissions not specified. Contact repository administrator.");
+ core.setOutput("is_team_member", "false");
+ core.setOutput("result", "config_error");
+ core.setOutput("error_message", "Configuration error: Required permissions not specified");
+ return;
+ }
+
+ // Check if the actor has the required repository permissions
+ const result = await checkRepositoryPermission(actor, owner, repo, requiredPermissions);
+
+ if (result.error) {
+ core.setOutput("is_team_member", "false");
+ core.setOutput("result", "api_error");
+ core.setOutput("error_message", `Repository permission check failed: ${result.error}`);
+ return;
+ }
+
+ if (result.authorized) {
+ core.setOutput("is_team_member", "true");
+ core.setOutput("result", "authorized");
+ core.setOutput("user_permission", result.permission);
+ } else {
+ // User doesn't have required permissions, check if they're an allowed bot
+ if (allowedBots && allowedBots.length > 0) {
+ core.info(`Checking if actor '${actor}' is in allowed bots list: ${allowedBots.join(", ")}`);
+
+ if (allowedBots.includes(actor)) {
+ core.info(`Actor '${actor}' is in the allowed bots list`);
+
+ // Verify the bot is active/installed on the repository
+ const botStatus = await checkBotStatus(actor, owner, repo);
+
+ if (botStatus.isBot && botStatus.isActive) {
+          core.info(`✅ Bot '${actor}' is active on the repository and authorized`);
+ core.setOutput("is_team_member", "true");
+ core.setOutput("result", "authorized_bot");
+ core.setOutput("user_permission", "bot");
+ return;
+ } else if (botStatus.isBot && !botStatus.isActive) {
+ core.warning(`Bot '${actor}' is in the allowed list but not active/installed on ${owner}/${repo}`);
+ core.setOutput("is_team_member", "false");
+ core.setOutput("result", "bot_not_active");
+ core.setOutput("user_permission", result.permission);
+ core.setOutput("error_message", `Access denied: Bot '${actor}' is not active/installed on this repository`);
+ return;
+ } else {
+ core.info(`Actor '${actor}' is in allowed bots list but bot status check failed`);
+ }
+ }
+ }
+
+ // Not authorized by role or bot
+ core.setOutput("is_team_member", "false");
+ core.setOutput("result", "insufficient_permissions");
+ core.setOutput("user_permission", result.permission);
+ core.setOutput("error_message", `Access denied: User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}`);
+ }
+}
+await main();
diff --git a/actions/setup/js/check_permissions_utils.cjs b/actions/setup/js/check_permissions_utils.cjs
new file mode 100644
index 0000000000..21c1fea2c4
--- /dev/null
+++ b/actions/setup/js/check_permissions_utils.cjs
@@ -0,0 +1,118 @@
+// @ts-check
+///
+
+/**
+ * Shared utility for repository permission validation
+ * Used by both check_permissions.cjs and check_membership.cjs
+ */
+
+/**
+ * Parse required permissions from environment variable
+ * @returns {string[]} Array of required permission levels
+ */
+function parseRequiredPermissions() {
+ const requiredPermissionsEnv = process.env.GH_AW_REQUIRED_ROLES;
+ return requiredPermissionsEnv ? requiredPermissionsEnv.split(",").filter(p => p.trim() !== "") : [];
+}
+
+/**
+ * Parse allowed bot identifiers from environment variable
+ * @returns {string[]} Array of allowed bot identifiers
+ */
+function parseAllowedBots() {
+ const allowedBotsEnv = process.env.GH_AW_ALLOWED_BOTS;
+ return allowedBotsEnv ? allowedBotsEnv.split(",").filter(b => b.trim() !== "") : [];
+}
+
+/**
+ * Check if the actor is a bot and if it's active on the repository
+ * @param {string} actor - GitHub username to check
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @returns {Promise<{isBot: boolean, isActive: boolean, error?: string}>}
+ */
+async function checkBotStatus(actor, owner, repo) {
+ try {
+ // Check if the actor looks like a bot (ends with [bot])
+ const isBot = actor.endsWith("[bot]");
+
+ if (!isBot) {
+ return { isBot: false, isActive: false };
+ }
+
+ core.info(`Checking if bot '${actor}' is active on ${owner}/${repo}`);
+
+ // Try to get the bot's permission level to verify it's installed/active on the repo
+ // GitHub Apps/bots that are installed on a repository show up in the collaborators
+ try {
+ const botPermission = await github.rest.repos.getCollaboratorPermissionLevel({
+ owner: owner,
+ repo: repo,
+ username: actor,
+ });
+
+ core.info(`Bot '${actor}' is active with permission level: ${botPermission.data.permission}`);
+ return { isBot: true, isActive: true };
+ } catch (botError) {
+ // If we get a 404, the bot is not installed/active on this repository
+ if (typeof botError === "object" && botError !== null && "status" in botError && botError.status === 404) {
+ core.warning(`Bot '${actor}' is not active/installed on ${owner}/${repo}`);
+ return { isBot: true, isActive: false };
+ }
+ // For other errors, we'll treat as inactive to be safe
+ const errorMessage = botError instanceof Error ? botError.message : String(botError);
+ core.warning(`Failed to check bot status: ${errorMessage}`);
+ return { isBot: true, isActive: false, error: errorMessage };
+ }
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ core.warning(`Error checking bot status: ${errorMessage}`);
+ return { isBot: false, isActive: false, error: errorMessage };
+ }
+}
+
+/**
+ * Check if user has required repository permissions
+ * @param {string} actor - GitHub username to check
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {string[]} requiredPermissions - Array of required permission levels
+ * @returns {Promise<{authorized: boolean, permission?: string, error?: string}>}
+ */
+async function checkRepositoryPermission(actor, owner, repo, requiredPermissions) {
+ try {
+ core.info(`Checking if user '${actor}' has required permissions for ${owner}/${repo}`);
+ core.info(`Required permissions: ${requiredPermissions.join(", ")}`);
+
+ const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({
+ owner: owner,
+ repo: repo,
+ username: actor,
+ });
+
+ const permission = repoPermission.data.permission;
+ core.info(`Repository permission level: ${permission}`);
+
+ // Check if user has one of the required permission levels
+ for (const requiredPerm of requiredPermissions) {
+ if (permission === requiredPerm || (requiredPerm === "maintainer" && permission === "maintain")) {
+        core.info(`✅ User has ${permission} access to repository`);
+ return { authorized: true, permission: permission };
+ }
+ }
+
+ core.warning(`User permission '${permission}' does not meet requirements: ${requiredPermissions.join(", ")}`);
+ return { authorized: false, permission: permission };
+ } catch (repoError) {
+ const errorMessage = repoError instanceof Error ? repoError.message : String(repoError);
+ core.warning(`Repository permission check failed: ${errorMessage}`);
+ return { authorized: false, error: errorMessage };
+ }
+}
+
+module.exports = {
+ parseRequiredPermissions,
+ parseAllowedBots,
+ checkRepositoryPermission,
+ checkBotStatus,
+};
diff --git a/actions/setup/js/check_skip_if_match.cjs b/actions/setup/js/check_skip_if_match.cjs
new file mode 100644
index 0000000000..7290bc7d72
--- /dev/null
+++ b/actions/setup/js/check_skip_if_match.cjs
@@ -0,0 +1,60 @@
+// @ts-check
+///
+
+async function main() {
+ const skipQuery = process.env.GH_AW_SKIP_QUERY;
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME;
+ const maxMatchesStr = process.env.GH_AW_SKIP_MAX_MATCHES || "1";
+
+ if (!skipQuery) {
+ core.setFailed("Configuration error: GH_AW_SKIP_QUERY not specified.");
+ return;
+ }
+
+ if (!workflowName) {
+ core.setFailed("Configuration error: GH_AW_WORKFLOW_NAME not specified.");
+ return;
+ }
+
+ const maxMatches = parseInt(maxMatchesStr, 10);
+ if (isNaN(maxMatches) || maxMatches < 1) {
+ core.setFailed(`Configuration error: GH_AW_SKIP_MAX_MATCHES must be a positive integer, got "${maxMatchesStr}".`);
+ return;
+ }
+
+ core.info(`Checking skip-if-match query: ${skipQuery}`);
+ core.info(`Maximum matches threshold: ${maxMatches}`);
+
+ // Get repository information from context
+ const { owner, repo } = context.repo;
+
+ // Scope the query to the current repository
+ const scopedQuery = `${skipQuery} repo:${owner}/${repo}`;
+
+ core.info(`Scoped query: ${scopedQuery}`);
+
+ try {
+ // Search for issues and pull requests using the GitHub API
+ // We only need to know if the count reaches the threshold
+ const response = await github.rest.search.issuesAndPullRequests({
+ q: scopedQuery,
+ per_page: 1, // We only need the count, not the items
+ });
+
+ const totalCount = response.data.total_count;
+ core.info(`Search found ${totalCount} matching items`);
+
+ if (totalCount >= maxMatches) {
+      core.warning(`🚫 Skip condition matched (${totalCount} items found, threshold: ${maxMatches}). Workflow execution will be prevented by activation job.`);
+ core.setOutput("skip_check_ok", "false");
+ return;
+ }
+
+    core.info(`✅ Found ${totalCount} matches (below threshold of ${maxMatches}), workflow can proceed`);
+ core.setOutput("skip_check_ok", "true");
+ } catch (error) {
+ core.setFailed(`Failed to execute search query: ${error instanceof Error ? error.message : String(error)}`);
+ return;
+ }
+}
+await main();
diff --git a/actions/setup/js/check_stop_time.cjs b/actions/setup/js/check_stop_time.cjs
new file mode 100644
index 0000000000..165a42fe30
--- /dev/null
+++ b/actions/setup/js/check_stop_time.cjs
@@ -0,0 +1,40 @@
+// @ts-check
+///
+
+async function main() {
+ const stopTime = process.env.GH_AW_STOP_TIME;
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME;
+
+ if (!stopTime) {
+ core.setFailed("Configuration error: GH_AW_STOP_TIME not specified.");
+ return;
+ }
+
+ if (!workflowName) {
+ core.setFailed("Configuration error: GH_AW_WORKFLOW_NAME not specified.");
+ return;
+ }
+
+ core.info(`Checking stop-time limit: ${stopTime}`);
+
+ // Parse the stop time (format: "YYYY-MM-DD HH:MM:SS")
+ const stopTimeDate = new Date(stopTime);
+
+ if (isNaN(stopTimeDate.getTime())) {
+ core.setFailed(`Invalid stop-time format: ${stopTime}. Expected format: YYYY-MM-DD HH:MM:SS`);
+ return;
+ }
+
+ const currentTime = new Date();
+ core.info(`Current time: ${currentTime.toISOString()}`);
+ core.info(`Stop time: ${stopTimeDate.toISOString()}`);
+
+ if (currentTime >= stopTimeDate) {
+    core.warning(`⏰ Stop time reached. Workflow execution will be prevented by activation job.`);
+ core.setOutput("stop_time_ok", "false");
+ return;
+ }
+
+ core.setOutput("stop_time_ok", "true");
+}
+await main();
diff --git a/actions/setup/js/check_workflow_timestamp_api.cjs b/actions/setup/js/check_workflow_timestamp_api.cjs
new file mode 100644
index 0000000000..f88794ed89
--- /dev/null
+++ b/actions/setup/js/check_workflow_timestamp_api.cjs
@@ -0,0 +1,114 @@
+// @ts-check
+///
+
+/**
+ * Check workflow file timestamps using GitHub API to detect outdated lock files
+ * This script compares the last commit time of the source .md file
+ * with the compiled .lock.yml file and warns if recompilation is needed
+ */
+
+async function main() {
+ const workflowFile = process.env.GH_AW_WORKFLOW_FILE;
+
+ if (!workflowFile) {
+ core.setFailed("Configuration error: GH_AW_WORKFLOW_FILE not available.");
+ return;
+ }
+
+ // Construct file paths
+ const workflowBasename = workflowFile.replace(".lock.yml", "");
+ const workflowMdPath = `.github/workflows/${workflowBasename}.md`;
+ const lockFilePath = `.github/workflows/${workflowFile}`;
+
+ core.info(`Checking workflow timestamps using GitHub API:`);
+ core.info(` Source: ${workflowMdPath}`);
+ core.info(` Lock file: ${lockFilePath}`);
+
+ const { owner, repo } = context.repo;
+ const ref = context.sha;
+
+ // Helper function to get the last commit for a file
+ async function getLastCommitForFile(path) {
+ try {
+ const response = await github.rest.repos.listCommits({
+ owner,
+ repo,
+ path,
+ per_page: 1,
+ sha: ref,
+ });
+
+ if (response.data && response.data.length > 0) {
+ const commit = response.data[0];
+ return {
+ sha: commit.sha,
+ date: commit.commit.committer.date,
+ message: commit.commit.message,
+ };
+ }
+ return null;
+ } catch (error) {
+ core.info(`Could not fetch commit for ${path}: ${error.message}`);
+ return null;
+ }
+ }
+
+ // Fetch last commits for both files
+ const workflowCommit = await getLastCommitForFile(workflowMdPath);
+ const lockCommit = await getLastCommitForFile(lockFilePath);
+
+ // Handle cases where files don't exist
+ if (!workflowCommit) {
+ core.info(`Source file does not exist: ${workflowMdPath}`);
+ }
+
+ if (!lockCommit) {
+ core.info(`Lock file does not exist: ${lockFilePath}`);
+ }
+
+ if (!workflowCommit || !lockCommit) {
+ core.info("Skipping timestamp check - one or both files not found");
+ return;
+ }
+
+ // Parse dates for comparison
+ const workflowDate = new Date(workflowCommit.date);
+ const lockDate = new Date(lockCommit.date);
+
+ core.info(` Source last commit: ${workflowDate.toISOString()} (${workflowCommit.sha.substring(0, 7)})`);
+ core.info(` Lock last commit: ${lockDate.toISOString()} (${lockCommit.sha.substring(0, 7)})`);
+
+ // Check if workflow file is newer than lock file
+ if (workflowDate > lockDate) {
+ const warningMessage = `WARNING: Lock file '${lockFilePath}' is outdated! The workflow file '${workflowMdPath}' has been modified more recently. Run 'gh aw compile' to regenerate the lock file.`;
+
+ core.error(warningMessage);
+
+ // Format timestamps and commits for display
+ const workflowTimestamp = workflowDate.toISOString();
+ const lockTimestamp = lockDate.toISOString();
+
+ // Add summary to GitHub Step Summary
+ let summary = core.summary
+      .addRaw("### ⚠️ Workflow Lock File Warning\n\n")
+ .addRaw("**WARNING**: Lock file is outdated and needs to be regenerated.\n\n")
+ .addRaw("**Files:**\n")
+ .addRaw(`- Source: \`${workflowMdPath}\`\n`)
+ .addRaw(` - Last commit: ${workflowTimestamp}\n`)
+ .addRaw(` - Commit SHA: [\`${workflowCommit.sha.substring(0, 7)}\`](https://github.com/${owner}/${repo}/commit/${workflowCommit.sha})\n`)
+ .addRaw(`- Lock: \`${lockFilePath}\`\n`)
+ .addRaw(` - Last commit: ${lockTimestamp}\n`)
+ .addRaw(` - Commit SHA: [\`${lockCommit.sha.substring(0, 7)}\`](https://github.com/${owner}/${repo}/commit/${lockCommit.sha})\n\n`)
+ .addRaw("**Action Required:** Run `gh aw compile` to regenerate the lock file.\n\n");
+
+ await summary.write();
+ } else if (workflowCommit.sha === lockCommit.sha) {
+    core.info("✅ Lock file is up to date (same commit)");
+ } else {
+    core.info("✅ Lock file is up to date");
+ }
+}
+
+main().catch(error => {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+});
diff --git a/actions/setup/js/checkout_pr_branch.cjs b/actions/setup/js/checkout_pr_branch.cjs
new file mode 100644
index 0000000000..871162519b
--- /dev/null
+++ b/actions/setup/js/checkout_pr_branch.cjs
@@ -0,0 +1,47 @@
+// @ts-check
+///
+
+/**
+ * Checkout PR branch when PR context is available
+ * This script handles both pull_request events and comment events on PRs
+ */
+
+async function main() {
+ const eventName = context.eventName;
+ const pullRequest = context.payload.pull_request;
+
+ if (!pullRequest) {
+ core.info("No pull request context available, skipping checkout");
+ return;
+ }
+
+ core.info(`Event: ${eventName}`);
+ core.info(`Pull Request #${pullRequest.number}`);
+
+ try {
+ if (eventName === "pull_request") {
+ // For pull_request events, use the head ref directly
+ const branchName = pullRequest.head.ref;
+ core.info(`Checking out PR branch: ${branchName}`);
+
+ await exec.exec("git", ["fetch", "origin", branchName]);
+ await exec.exec("git", ["checkout", branchName]);
+
+      core.info(`✅ Successfully checked out branch: ${branchName}`);
+ } else {
+ // For comment events on PRs, use gh pr checkout with PR number
+ const prNumber = pullRequest.number;
+ core.info(`Checking out PR #${prNumber} using gh pr checkout`);
+
+ await exec.exec("gh", ["pr", "checkout", prNumber.toString()]);
+
+      core.info(`✅ Successfully checked out PR #${prNumber}`);
+ }
+ } catch (error) {
+ core.setFailed(`Failed to checkout PR branch: ${error instanceof Error ? error.message : String(error)}`);
+ }
+}
+
+main().catch(error => {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+});
diff --git a/actions/setup/js/close_discussion.cjs b/actions/setup/js/close_discussion.cjs
new file mode 100644
index 0000000000..20cad4bcf7
--- /dev/null
+++ b/actions/setup/js/close_discussion.cjs
@@ -0,0 +1,315 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateFooter } = require("./generate_footer.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+const { getRepositoryUrl } = require("./get_repository_url.cjs");
+
+/**
+ * Get discussion details using GraphQL
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} discussionNumber - Discussion number
+ * @returns {Promise<{id: string, title: string, category: {name: string}, labels: {nodes: Array<{name: string}>}, url: string}>} Discussion details
+ */
+async function getDiscussionDetails(github, owner, repo, discussionNumber) {
+ const { repository } = await github.graphql(
+ `
+ query($owner: String!, $repo: String!, $num: Int!) {
+ repository(owner: $owner, name: $repo) {
+ discussion(number: $num) {
+ id
+ title
+ category {
+ name
+ }
+ labels(first: 100) {
+ nodes {
+ name
+ }
+ }
+ url
+ }
+ }
+ }`,
+ { owner, repo, num: discussionNumber }
+ );
+
+ if (!repository || !repository.discussion) {
+ throw new Error(`Discussion #${discussionNumber} not found in ${owner}/${repo}`);
+ }
+
+ return repository.discussion;
+}
+
+/**
+ * Add comment to a GitHub Discussion using GraphQL
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} discussionId - Discussion node ID
+ * @param {string} message - Comment body
+ * @returns {Promise<{id: string, url: string}>} Comment details
+ */
+async function addDiscussionComment(github, discussionId, message) {
+ const result = await github.graphql(
+ `
+ mutation($dId: ID!, $body: String!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body }) {
+ comment {
+ id
+ url
+ }
+ }
+ }`,
+ { dId: discussionId, body: message }
+ );
+
+ return result.addDiscussionComment.comment;
+}
+
+/**
+ * Close a GitHub Discussion using GraphQL
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} discussionId - Discussion node ID
+ * @param {string|undefined} reason - Optional close reason (RESOLVED, DUPLICATE, OUTDATED, or ANSWERED)
+ * @returns {Promise<{id: string, url: string}>} Discussion details
+ */
+async function closeDiscussion(github, discussionId, reason) {
+ const mutation = reason
+ ? `
+ mutation($dId: ID!, $reason: DiscussionCloseReason!) {
+ closeDiscussion(input: { discussionId: $dId, reason: $reason }) {
+ discussion {
+ id
+ url
+ }
+ }
+ }`
+ : `
+ mutation($dId: ID!) {
+ closeDiscussion(input: { discussionId: $dId }) {
+ discussion {
+ id
+ url
+ }
+ }
+ }`;
+
+ const variables = reason ? { dId: discussionId, reason } : { dId: discussionId };
+ const result = await github.graphql(mutation, variables);
+
+ return result.closeDiscussion.discussion;
+}
+
+async function main() {
+ // Check if we're in staged mode
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+
+ const result = loadAgentOutput();
+ if (!result.success) {
+ return;
+ }
+
+ // Find all close-discussion items
+ const closeDiscussionItems = result.items.filter(/** @param {any} item */ item => item.type === "close_discussion");
+ if (closeDiscussionItems.length === 0) {
+ core.info("No close-discussion items found in agent output");
+ return;
+ }
+
+ core.info(`Found ${closeDiscussionItems.length} close-discussion item(s)`);
+
+ // Get configuration from environment
+ const requiredLabels = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_LABELS ? process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_LABELS.split(",").map(l => l.trim()) : [];
+ const requiredTitlePrefix = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_TITLE_PREFIX || "";
+ const requiredCategory = process.env.GH_AW_CLOSE_DISCUSSION_REQUIRED_CATEGORY || "";
+ const target = process.env.GH_AW_CLOSE_DISCUSSION_TARGET || "triggering";
+
+ core.info(`Configuration: requiredLabels=${requiredLabels.join(",")}, requiredTitlePrefix=${requiredTitlePrefix}, requiredCategory=${requiredCategory}, target=${target}`);
+
+ // Check if we're in a discussion context
+ const isDiscussionContext = context.eventName === "discussion" || context.eventName === "discussion_comment";
+
+ // If in staged mode, emit step summary instead of closing discussions
+ if (isStaged) {
+    let summaryContent = "## 🎭 Staged Mode: Close Discussions Preview\n\n";
+ summaryContent += "The following discussions would be closed if staged mode was disabled:\n\n";
+
+ for (let i = 0; i < closeDiscussionItems.length; i++) {
+ const item = closeDiscussionItems[i];
+ summaryContent += `### Discussion ${i + 1}\n`;
+
+ const discussionNumber = item.discussion_number;
+ if (discussionNumber) {
+ const repoUrl = getRepositoryUrl();
+ const discussionUrl = `${repoUrl}/discussions/${discussionNumber}`;
+ summaryContent += `**Target Discussion:** [#${discussionNumber}](${discussionUrl})\n\n`;
+ } else {
+ summaryContent += `**Target:** Current discussion\n\n`;
+ }
+
+ if (item.reason) {
+ summaryContent += `**Reason:** ${item.reason}\n\n`;
+ }
+
+ summaryContent += `**Comment:**\n${item.body || "No content provided"}\n\n`;
+
+ if (requiredLabels.length > 0) {
+ summaryContent += `**Required Labels:** ${requiredLabels.join(", ")}\n\n`;
+ }
+ if (requiredTitlePrefix) {
+ summaryContent += `**Required Title Prefix:** ${requiredTitlePrefix}\n\n`;
+ }
+ if (requiredCategory) {
+ summaryContent += `**Required Category:** ${requiredCategory}\n\n`;
+ }
+
+ summaryContent += "---\n\n";
+ }
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+    core.info("📝 Discussion close preview written to step summary");
+ return;
+ }
+
+ // Validate context based on target configuration
+ if (target === "triggering" && !isDiscussionContext) {
+ core.info('Target is "triggering" but not running in discussion context, skipping discussion close');
+ return;
+ }
+
+ // Extract triggering context for footer generation
+ const triggeringDiscussionNumber = context.payload?.discussion?.number;
+
+ const closedDiscussions = [];
+
+ // Process each close-discussion item
+ for (let i = 0; i < closeDiscussionItems.length; i++) {
+ const item = closeDiscussionItems[i];
+ core.info(`Processing close-discussion item ${i + 1}/${closeDiscussionItems.length}: bodyLength=${item.body.length}`);
+
+ // Determine the discussion number
+ let discussionNumber;
+
+ if (target === "*") {
+ // For target "*", we need an explicit number from the item
+ const targetNumber = item.discussion_number;
+ if (targetNumber) {
+ discussionNumber = parseInt(targetNumber, 10);
+ if (isNaN(discussionNumber) || discussionNumber <= 0) {
+ core.info(`Invalid discussion number specified: ${targetNumber}`);
+ continue;
+ }
+ } else {
+ core.info(`Target is "*" but no discussion_number specified in close-discussion item`);
+ continue;
+ }
+ } else if (target && target !== "triggering") {
+ // Explicit number specified in target configuration
+ discussionNumber = parseInt(target, 10);
+ if (isNaN(discussionNumber) || discussionNumber <= 0) {
+ core.info(`Invalid discussion number in target configuration: ${target}`);
+ continue;
+ }
+ } else {
+ // Default behavior: use triggering discussion
+ if (isDiscussionContext) {
+ discussionNumber = context.payload.discussion?.number;
+ if (!discussionNumber) {
+ core.info("Discussion context detected but no discussion found in payload");
+ continue;
+ }
+ } else {
+ core.info("Not in discussion context and no explicit target specified");
+ continue;
+ }
+ }
+
+ try {
+ // Fetch discussion details to check filters
+ const discussion = await getDiscussionDetails(github, context.repo.owner, context.repo.repo, discussionNumber);
+
+ // Apply label filter
+ if (requiredLabels.length > 0) {
+ const discussionLabels = discussion.labels.nodes.map(l => l.name);
+ const hasRequiredLabel = requiredLabels.some(required => discussionLabels.includes(required));
+ if (!hasRequiredLabel) {
+ core.info(`Discussion #${discussionNumber} does not have required labels: ${requiredLabels.join(", ")}`);
+ continue;
+ }
+ }
+
+ // Apply title prefix filter
+ if (requiredTitlePrefix && !discussion.title.startsWith(requiredTitlePrefix)) {
+ core.info(`Discussion #${discussionNumber} does not have required title prefix: ${requiredTitlePrefix}`);
+ continue;
+ }
+
+ // Apply category filter
+ if (requiredCategory && discussion.category.name !== requiredCategory) {
+ core.info(`Discussion #${discussionNumber} is not in required category: ${requiredCategory}`);
+ continue;
+ }
+
+ // Extract body from the JSON item
+ let body = item.body.trim();
+
+ // Add AI disclaimer with workflow name and run url
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || "";
+ const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || "";
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ // Add fingerprint comment if present
+ body += getTrackerID("markdown");
+
+ body += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, undefined, undefined, triggeringDiscussionNumber);
+
+ core.info(`Adding comment to discussion #${discussionNumber}`);
+ core.info(`Comment content length: ${body.length}`);
+
+ // Add comment first
+ const comment = await addDiscussionComment(github, discussion.id, body);
+ core.info("Added discussion comment: " + comment.url);
+
+ // Then close the discussion
+ core.info(`Closing discussion #${discussionNumber} with reason: ${item.reason || "none"}`);
+ const closedDiscussion = await closeDiscussion(github, discussion.id, item.reason);
+ core.info("Closed discussion: " + closedDiscussion.url);
+
+ closedDiscussions.push({
+ number: discussionNumber,
+ url: discussion.url,
+ comment_url: comment.url,
+ });
+
+ // Set output for the last closed discussion (for backward compatibility)
+ if (i === closeDiscussionItems.length - 1) {
+ core.setOutput("discussion_number", discussionNumber);
+ core.setOutput("discussion_url", discussion.url);
+ core.setOutput("comment_url", comment.url);
+ }
+ } catch (error) {
+ core.error(`✗ Failed to close discussion #${discussionNumber}: ${error instanceof Error ? error.message : String(error)}`);
+ throw error;
+ }
+ }
+
+ // Write summary for all closed discussions
+ if (closedDiscussions.length > 0) {
+ let summaryContent = "\n\n## Closed Discussions\n";
+ for (const discussion of closedDiscussions) {
+ summaryContent += `- Discussion #${discussion.number}: [View Discussion](${discussion.url})\n`;
+ summaryContent += ` - Comment: [View Comment](${discussion.comment_url})\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully closed ${closedDiscussions.length} discussion(s)`);
+ return closedDiscussions;
+}
+await main();
diff --git a/actions/setup/js/close_entity_helpers.cjs b/actions/setup/js/close_entity_helpers.cjs
new file mode 100644
index 0000000000..08797458c5
--- /dev/null
+++ b/actions/setup/js/close_entity_helpers.cjs
@@ -0,0 +1,395 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateFooter } = require("./generate_footer.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+const { getRepositoryUrl } = require("./get_repository_url.cjs");
+
+/**
+ * @typedef {'issue' | 'pull_request'} EntityType
+ */
+
+/**
+ * @typedef {Object} EntityConfig
+ * @property {EntityType} entityType - The type of entity (issue or pull_request)
+ * @property {string} itemType - The agent output item type (e.g., "close_issue")
+ * @property {string} itemTypeDisplay - Human-readable item type for log messages (e.g., "close-issue")
+ * @property {string} numberField - The field name for the entity number in agent output (e.g., "issue_number")
+ * @property {string} envVarPrefix - Environment variable prefix (e.g., "GH_AW_CLOSE_ISSUE")
+ * @property {string[]} contextEvents - GitHub event names for this entity context
+ * @property {string} contextPayloadField - The field name in context.payload (e.g., "issue")
+ * @property {string} urlPath - URL path segment (e.g., "issues" or "pull")
+ * @property {string} displayName - Human-readable display name (e.g., "issue" or "pull request")
+ * @property {string} displayNamePlural - Human-readable display name plural (e.g., "issues" or "pull requests")
+ * @property {string} displayNameCapitalized - Capitalized display name (e.g., "Issue" or "Pull Request")
+ * @property {string} displayNameCapitalizedPlural - Capitalized display name plural (e.g., "Issues" or "Pull Requests")
+ */
+
+/**
+ * @typedef {Object} EntityCallbacks
+ * @property {(github: any, owner: string, repo: string, entityNumber: number) => Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} getDetails
+ * @property {(github: any, owner: string, repo: string, entityNumber: number, message: string) => Promise<{id: number, html_url: string}>} addComment
+ * @property {(github: any, owner: string, repo: string, entityNumber: number) => Promise<{number: number, html_url: string, title: string}>} closeEntity
+ */
+
+/**
+ * Build the run URL for the current workflow
+ * @returns {string} The workflow run URL
+ */
+function buildRunUrl() {
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ return context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+}
+
+/**
+ * Build comment body with tracker ID and footer
+ * @param {string} body - The original comment body
+ * @param {number|undefined} triggeringIssueNumber - Issue number that triggered this workflow
+ * @param {number|undefined} triggeringPRNumber - PR number that triggered this workflow
+ * @returns {string} The complete comment body with tracker ID and footer
+ */
+function buildCommentBody(body, triggeringIssueNumber, triggeringPRNumber) {
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || "";
+ const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || "";
+ const runUrl = buildRunUrl();
+
+ let commentBody = body.trim();
+ commentBody += getTrackerID("markdown");
+ commentBody += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, undefined);
+
+ return commentBody;
+}
+
+/**
+ * Check if labels match the required labels filter
+ * @param {Array<{name: string}>} entityLabels - Labels on the entity
+ * @param {string[]} requiredLabels - Required labels (any match)
+ * @returns {boolean} True if entity has at least one required label
+ */
+function checkLabelFilter(entityLabels, requiredLabels) {
+ if (requiredLabels.length === 0) {
+ return true;
+ }
+ const labelNames = entityLabels.map(l => l.name);
+ return requiredLabels.some(required => labelNames.includes(required));
+}
+
+/**
+ * Check if title matches the required prefix filter
+ * @param {string} title - Entity title
+ * @param {string} requiredTitlePrefix - Required title prefix
+ * @returns {boolean} True if title starts with required prefix
+ */
+function checkTitlePrefixFilter(title, requiredTitlePrefix) {
+ if (!requiredTitlePrefix) {
+ return true;
+ }
+ return title.startsWith(requiredTitlePrefix);
+}
+
+/**
+ * Generate staged preview content for a close entity operation
+ * @param {EntityConfig} config - Entity configuration
+ * @param {any[]} items - Items to preview
+ * @param {string[]} requiredLabels - Required labels filter
+ * @param {string} requiredTitlePrefix - Required title prefix filter
+ * @returns {Promise}
+ */
+async function generateCloseEntityStagedPreview(config, items, requiredLabels, requiredTitlePrefix) {
+ let summaryContent = `## 🎭 Staged Mode: Close ${config.displayNameCapitalizedPlural} Preview\n\n`;
+ summaryContent += `The following ${config.displayNamePlural} would be closed if staged mode was disabled:\n\n`;
+
+ for (let i = 0; i < items.length; i++) {
+ const item = items[i];
+ summaryContent += `### ${config.displayNameCapitalized} ${i + 1}\n`;
+
+ const entityNumber = item[config.numberField];
+ if (entityNumber) {
+ const repoUrl = getRepositoryUrl();
+ const entityUrl = `${repoUrl}/${config.urlPath}/${entityNumber}`;
+ summaryContent += `**Target ${config.displayNameCapitalized}:** [#${entityNumber}](${entityUrl})\n\n`;
+ } else {
+ summaryContent += `**Target:** Current ${config.displayName}\n\n`;
+ }
+
+ summaryContent += `**Comment:**\n${item.body || "No content provided"}\n\n`;
+
+ if (requiredLabels.length > 0) {
+ summaryContent += `**Required Labels:** ${requiredLabels.join(", ")}\n\n`;
+ }
+ if (requiredTitlePrefix) {
+ summaryContent += `**Required Title Prefix:** ${requiredTitlePrefix}\n\n`;
+ }
+
+ summaryContent += "---\n\n";
+ }
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+ core.info(`📝 ${config.displayNameCapitalized} close preview written to step summary`);
+}
+
+/**
+ * Parse configuration from environment variables
+ * @param {string} envVarPrefix - Environment variable prefix
+ * @returns {{requiredLabels: string[], requiredTitlePrefix: string, target: string}}
+ */
+function parseEntityConfig(envVarPrefix) {
+ const labelsEnvVar = `${envVarPrefix}_REQUIRED_LABELS`;
+ const titlePrefixEnvVar = `${envVarPrefix}_REQUIRED_TITLE_PREFIX`;
+ const targetEnvVar = `${envVarPrefix}_TARGET`;
+
+ const requiredLabels = process.env[labelsEnvVar] ? process.env[labelsEnvVar].split(",").map(l => l.trim()) : [];
+ const requiredTitlePrefix = process.env[titlePrefixEnvVar] || "";
+ const target = process.env[targetEnvVar] || "triggering";
+
+ return { requiredLabels, requiredTitlePrefix, target };
+}
+
+/**
+ * Resolve the entity number based on target configuration and context
+ * @param {EntityConfig} config - Entity configuration
+ * @param {string} target - Target configuration ("triggering", "*", or explicit number)
+ * @param {any} item - The agent output item
+ * @param {boolean} isEntityContext - Whether we're in the correct entity context
+ * @returns {{success: true, number: number} | {success: false, message: string}}
+ */
+function resolveEntityNumber(config, target, item, isEntityContext) {
+ if (target === "*") {
+ const targetNumber = item[config.numberField];
+ if (targetNumber) {
+ const parsed = parseInt(targetNumber, 10);
+ if (isNaN(parsed) || parsed <= 0) {
+ return {
+ success: false,
+ message: `Invalid ${config.displayName} number specified: ${targetNumber}`,
+ };
+ }
+ return { success: true, number: parsed };
+ }
+ return {
+ success: false,
+ message: `Target is "*" but no ${config.numberField} specified in ${config.itemTypeDisplay} item`,
+ };
+ }
+
+ if (target !== "triggering") {
+ const parsed = parseInt(target, 10);
+ if (isNaN(parsed) || parsed <= 0) {
+ return {
+ success: false,
+ message: `Invalid ${config.displayName} number in target configuration: ${target}`,
+ };
+ }
+ return { success: true, number: parsed };
+ }
+
+ // Default behavior: use triggering entity
+ if (isEntityContext) {
+ const number = context.payload[config.contextPayloadField]?.number;
+ if (!number) {
+ return {
+ success: false,
+ message: `${config.displayNameCapitalized} context detected but no ${config.displayName} found in payload`,
+ };
+ }
+ return { success: true, number };
+ }
+
+ return {
+ success: false,
+ message: `Not in ${config.displayName} context and no explicit target specified`,
+ };
+}
+
+/**
+ * Escape special markdown characters in a title
+ * @param {string} title - The title to escape
+ * @returns {string} Escaped title
+ */
+function escapeMarkdownTitle(title) {
+ return title.replace(/[[\]()]/g, "\\$&");
+}
+
+/**
+ * Process close entity items from agent output
+ * @param {EntityConfig} config - Entity configuration
+ * @param {EntityCallbacks} callbacks - Entity-specific API callbacks
+ * @returns {Promise<Array<{entity: any, comment: any}>|undefined>}
+ */
+async function processCloseEntityItems(config, callbacks) {
+ // Check if we're in staged mode
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+
+ const result = loadAgentOutput();
+ if (!result.success) {
+ return;
+ }
+
+ // Find all items of this type
+ const items = result.items.filter(/** @param {any} item */ item => item.type === config.itemType);
+ if (items.length === 0) {
+ core.info(`No ${config.itemTypeDisplay} items found in agent output`);
+ return;
+ }
+
+ core.info(`Found ${items.length} ${config.itemTypeDisplay} item(s)`);
+
+ // Get configuration from environment
+ const { requiredLabels, requiredTitlePrefix, target } = parseEntityConfig(config.envVarPrefix);
+
+ core.info(`Configuration: requiredLabels=${requiredLabels.join(",")}, requiredTitlePrefix=${requiredTitlePrefix}, target=${target}`);
+
+ // Check if we're in the correct entity context
+ const isEntityContext = config.contextEvents.some(event => context.eventName === event);
+
+ // If in staged mode, emit step summary instead of closing entities
+ if (isStaged) {
+ await generateCloseEntityStagedPreview(config, items, requiredLabels, requiredTitlePrefix);
+ return;
+ }
+
+ // Validate context based on target configuration
+ if (target === "triggering" && !isEntityContext) {
+ core.info(`Target is "triggering" but not running in ${config.displayName} context, skipping ${config.displayName} close`);
+ return;
+ }
+
+ // Extract triggering context for footer generation
+ const triggeringIssueNumber = context.payload?.issue?.number;
+ const triggeringPRNumber = context.payload?.pull_request?.number;
+
+ const closedEntities = [];
+
+ // Process each item
+ for (let i = 0; i < items.length; i++) {
+ const item = items[i];
+ core.info(`Processing ${config.itemTypeDisplay} item ${i + 1}/${items.length}: bodyLength=${item.body.length}`);
+
+ // Resolve entity number
+ const resolved = resolveEntityNumber(config, target, item, isEntityContext);
+ if (!resolved.success) {
+ core.info(resolved.message);
+ continue;
+ }
+ const entityNumber = resolved.number;
+
+ try {
+ // Fetch entity details to check filters
+ const entity = await callbacks.getDetails(github, context.repo.owner, context.repo.repo, entityNumber);
+
+ // Apply label filter
+ if (!checkLabelFilter(entity.labels, requiredLabels)) {
+ core.info(`${config.displayNameCapitalized} #${entityNumber} does not have required labels: ${requiredLabels.join(", ")}`);
+ continue;
+ }
+
+ // Apply title prefix filter
+ if (!checkTitlePrefixFilter(entity.title, requiredTitlePrefix)) {
+ core.info(`${config.displayNameCapitalized} #${entityNumber} does not have required title prefix: ${requiredTitlePrefix}`);
+ continue;
+ }
+
+ // Check if already closed
+ if (entity.state === "closed") {
+ core.info(`${config.displayNameCapitalized} #${entityNumber} is already closed, skipping`);
+ continue;
+ }
+
+ // Build comment body
+ const commentBody = buildCommentBody(item.body, triggeringIssueNumber, triggeringPRNumber);
+
+ // Add comment before closing
+ const comment = await callbacks.addComment(github, context.repo.owner, context.repo.repo, entityNumber, commentBody);
+ core.info(`✓ Added comment to ${config.displayName} #${entityNumber}: ${comment.html_url}`);
+
+ // Close the entity
+ const closedEntity = await callbacks.closeEntity(github, context.repo.owner, context.repo.repo, entityNumber);
+ core.info(`✓ Closed ${config.displayName} #${entityNumber}: ${closedEntity.html_url}`);
+
+ closedEntities.push({
+ entity: closedEntity,
+ comment,
+ });
+
+ // Set outputs for the last closed entity (for backward compatibility)
+ if (i === items.length - 1) {
+ const numberOutputName = config.entityType === "issue" ? "issue_number" : "pull_request_number";
+ const urlOutputName = config.entityType === "issue" ? "issue_url" : "pull_request_url";
+ core.setOutput(numberOutputName, closedEntity.number);
+ core.setOutput(urlOutputName, closedEntity.html_url);
+ core.setOutput("comment_url", comment.html_url);
+ }
+ } catch (error) {
+ core.error(`✗ Failed to close ${config.displayName} #${entityNumber}: ${error instanceof Error ? error.message : String(error)}`);
+ throw error;
+ }
+ }
+
+ // Write summary for all closed entities
+ if (closedEntities.length > 0) {
+ let summaryContent = `\n\n## Closed ${config.displayNameCapitalizedPlural}\n`;
+ for (const { entity, comment } of closedEntities) {
+ const escapedTitle = escapeMarkdownTitle(entity.title);
+ summaryContent += `- ${config.displayNameCapitalized} #${entity.number}: [${escapedTitle}](${entity.html_url}) ([comment](${comment.html_url}))\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully closed ${closedEntities.length} ${config.displayName}(s)`);
+ return closedEntities;
+}
+
+/**
+ * Configuration for closing issues
+ * @type {EntityConfig}
+ */
+const ISSUE_CONFIG = {
+ entityType: "issue",
+ itemType: "close_issue",
+ itemTypeDisplay: "close-issue",
+ numberField: "issue_number",
+ envVarPrefix: "GH_AW_CLOSE_ISSUE",
+ contextEvents: ["issues", "issue_comment"],
+ contextPayloadField: "issue",
+ urlPath: "issues",
+ displayName: "issue",
+ displayNamePlural: "issues",
+ displayNameCapitalized: "Issue",
+ displayNameCapitalizedPlural: "Issues",
+};
+
+/**
+ * Configuration for closing pull requests
+ * @type {EntityConfig}
+ */
+const PULL_REQUEST_CONFIG = {
+ entityType: "pull_request",
+ itemType: "close_pull_request",
+ itemTypeDisplay: "close-pull-request",
+ numberField: "pull_request_number",
+ envVarPrefix: "GH_AW_CLOSE_PR",
+ contextEvents: ["pull_request", "pull_request_review_comment"],
+ contextPayloadField: "pull_request",
+ urlPath: "pull",
+ displayName: "pull request",
+ displayNamePlural: "pull requests",
+ displayNameCapitalized: "Pull Request",
+ displayNameCapitalizedPlural: "Pull Requests",
+};
+
+module.exports = {
+ processCloseEntityItems,
+ generateCloseEntityStagedPreview,
+ checkLabelFilter,
+ checkTitlePrefixFilter,
+ parseEntityConfig,
+ resolveEntityNumber,
+ buildCommentBody,
+ escapeMarkdownTitle,
+ ISSUE_CONFIG,
+ PULL_REQUEST_CONFIG,
+};
diff --git a/actions/setup/js/close_expired_discussions.cjs b/actions/setup/js/close_expired_discussions.cjs
new file mode 100644
index 0000000000..8cf285b39d
--- /dev/null
+++ b/actions/setup/js/close_expired_discussions.cjs
@@ -0,0 +1,282 @@
+// @ts-check
+//
+
+/**
+ * Maximum number of discussions to update per run
+ */
+const MAX_UPDATES_PER_RUN = 100;
+
+/**
+ * Delay between GraphQL API calls in milliseconds to avoid rate limiting
+ */
+const GRAPHQL_DELAY_MS = 500;
+
+/**
+ * Delay execution for a specified number of milliseconds
+ * @param {number} ms - Milliseconds to delay
+ * @returns {Promise}
+ */
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+/**
+ * Search for open discussions with expiration markers
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @returns {Promise<Array<{id: string, number: number, title: string, url: string, body: string, createdAt: string}>>} Matching discussions
+ */
+async function searchDiscussionsWithExpiration(github, owner, repo) {
+ const discussions = [];
+ let hasNextPage = true;
+ let cursor = null;
+
+ while (hasNextPage) {
+ const query = `
+ query($owner: String!, $repo: String!, $cursor: String) {
+ repository(owner: $owner, name: $repo) {
+ discussions(first: 100, after: $cursor, states: [OPEN]) {
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ nodes {
+ id
+ number
+ title
+ url
+ body
+ createdAt
+ }
+ }
+ }
+ }
+ `;
+
+ const result = await github.graphql(query, {
+ owner: owner,
+ repo: repo,
+ cursor: cursor,
+ });
+
+ if (!result || !result.repository || !result.repository.discussions) {
+ break;
+ }
+
+ const nodes = result.repository.discussions.nodes || [];
+
+ // Filter for discussions with agentic workflow markers and expiration comments
+ for (const discussion of nodes) {
+ // Check if created by an agentic workflow (body contains "> AI generated by" at start of line)
+ const agenticPattern = /^> AI generated by/m;
+ const isAgenticWorkflow = discussion.body && agenticPattern.test(discussion.body);
+
+ if (!isAgenticWorkflow) {
+ continue;
+ }
+
+ // Check if has expiration marker
+ const expirationPattern = /<!--\s*gh-aw-expires:\s*([^>]+?)\s*-->/; // NOTE(review): marker regex reconstructed (original stripped) — confirm exact marker token
+ const match = discussion.body ? discussion.body.match(expirationPattern) : null;
+
+ if (match) {
+ discussions.push(discussion);
+ }
+ }
+
+ hasNextPage = result.repository.discussions.pageInfo.hasNextPage;
+ cursor = result.repository.discussions.pageInfo.endCursor;
+ }
+
+ return discussions;
+}
+
+/**
+ * Extract expiration date from discussion body
+ * @param {string} body - Discussion body
+ * @returns {Date|null} Expiration date or null if not found/invalid
+ */
+function extractExpirationDate(body) {
+ const expirationPattern = /<!--\s*gh-aw-expires:\s*([^>]+?)\s*-->/; // NOTE(review): marker regex reconstructed (original stripped) — confirm exact marker token
+ const match = body.match(expirationPattern);
+
+ if (!match) {
+ return null;
+ }
+
+ const expirationISO = match[1].trim();
+ const expirationDate = new Date(expirationISO);
+
+ // Validate the date
+ if (isNaN(expirationDate.getTime())) {
+ return null;
+ }
+
+ return expirationDate;
+}
+
+/**
+ * Validate discussion creation date
+ * @param {string} createdAt - ISO 8601 creation date
+ * @returns {boolean} True if valid
+ */
+function validateCreationDate(createdAt) {
+ const creationDate = new Date(createdAt);
+ return !isNaN(creationDate.getTime());
+}
+
+/**
+ * Add comment to a GitHub Discussion using GraphQL
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} discussionId - Discussion node ID
+ * @param {string} message - Comment body
+ * @returns {Promise<{id: string, url: string}>} Comment details
+ */
+async function addDiscussionComment(github, discussionId, message) {
+ const result = await github.graphql(
+ `
+ mutation($dId: ID!, $body: String!) {
+ addDiscussionComment(input: { discussionId: $dId, body: $body }) {
+ comment {
+ id
+ url
+ }
+ }
+ }`,
+ { dId: discussionId, body: message }
+ );
+
+ return result.addDiscussionComment.comment;
+}
+
+/**
+ * Close a GitHub Discussion as OUTDATED using GraphQL
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} discussionId - Discussion node ID
+ * @returns {Promise<{id: string, url: string}>} Discussion details
+ */
+async function closeDiscussionAsOutdated(github, discussionId) {
+ const result = await github.graphql(
+ `
+ mutation($dId: ID!) {
+ closeDiscussion(input: { discussionId: $dId, reason: OUTDATED }) {
+ discussion {
+ id
+ url
+ }
+ }
+ }`,
+ { dId: discussionId }
+ );
+
+ return result.closeDiscussion.discussion;
+}
+
+async function main() {
+ const owner = context.repo.owner;
+ const repo = context.repo.repo;
+
+ core.info(`Searching for expired discussions in ${owner}/${repo}`);
+
+ // Search for discussions with expiration markers
+ const discussionsWithExpiration = await searchDiscussionsWithExpiration(github, owner, repo);
+
+ if (discussionsWithExpiration.length === 0) {
+ core.info("No discussions with expiration markers found");
+ return;
+ }
+
+ core.info(`Found ${discussionsWithExpiration.length} discussion(s) with expiration markers`);
+
+ // Check which discussions are expired
+ const now = new Date();
+ const expiredDiscussions = [];
+
+ for (const discussion of discussionsWithExpiration) {
+ // Validate creation date
+ if (!validateCreationDate(discussion.createdAt)) {
+ core.warning(`Discussion #${discussion.number} has invalid creation date, skipping`);
+ continue;
+ }
+
+ // Extract and validate expiration date
+ const expirationDate = extractExpirationDate(discussion.body);
+ if (!expirationDate) {
+ core.warning(`Discussion #${discussion.number} has invalid expiration date, skipping`);
+ continue;
+ }
+
+ // Check if expired
+ if (now >= expirationDate) {
+ expiredDiscussions.push({
+ ...discussion,
+ expirationDate: expirationDate,
+ });
+ }
+ }
+
+ if (expiredDiscussions.length === 0) {
+ core.info("No expired discussions found");
+ return;
+ }
+
+ core.info(`Found ${expiredDiscussions.length} expired discussion(s)`);
+
+ // Limit to MAX_UPDATES_PER_RUN
+ const discussionsToClose = expiredDiscussions.slice(0, MAX_UPDATES_PER_RUN);
+
+ if (expiredDiscussions.length > MAX_UPDATES_PER_RUN) {
+ core.warning(`Found ${expiredDiscussions.length} expired discussions, but only closing the first ${MAX_UPDATES_PER_RUN}`);
+ }
+
+ let closedCount = 0;
+ const closedDiscussions = [];
+
+ for (let i = 0; i < discussionsToClose.length; i++) {
+ const discussion = discussionsToClose[i];
+
+ try {
+ const closingMessage = `This discussion was automatically closed because it expired on ${discussion.expirationDate.toISOString()}.`;
+
+ // Add comment first
+ core.info(`Adding closing comment to discussion #${discussion.number}`);
+ await addDiscussionComment(github, discussion.id, closingMessage);
+
+ // Then close the discussion as outdated
+ core.info(`Closing discussion #${discussion.number} as outdated`);
+ await closeDiscussionAsOutdated(github, discussion.id);
+
+ closedDiscussions.push({
+ number: discussion.number,
+ url: discussion.url,
+ title: discussion.title,
+ });
+
+ closedCount++;
+ core.info(`✓ Closed discussion #${discussion.number}: ${discussion.url}`);
+ } catch (error) {
+ core.error(`✗ Failed to close discussion #${discussion.number}: ${error instanceof Error ? error.message : String(error)}`);
+ // Continue with other discussions even if one fails
+ }
+
+ // Add delay between GraphQL operations to avoid rate limiting (except for the last item)
+ if (i < discussionsToClose.length - 1) {
+ await delay(GRAPHQL_DELAY_MS);
+ }
+ }
+
+ // Write summary
+ if (closedCount > 0) {
+ let summaryContent = `## Closed Expired Discussions\n\n`;
+ summaryContent += `Closed **${closedCount}** expired discussion(s):\n\n`;
+ for (const closed of closedDiscussions) {
+ summaryContent += `- Discussion #${closed.number}: [${closed.title}](${closed.url})\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully closed ${closedCount} expired discussion(s)`);
+}
+
+await main();
diff --git a/actions/setup/js/close_expired_issues.cjs b/actions/setup/js/close_expired_issues.cjs
new file mode 100644
index 0000000000..fc5473d893
--- /dev/null
+++ b/actions/setup/js/close_expired_issues.cjs
@@ -0,0 +1,275 @@
+// @ts-check
+//
+
+/**
+ * Maximum number of issues to update per run
+ */
+const MAX_UPDATES_PER_RUN = 100;
+
+/**
+ * Delay between GraphQL API calls in milliseconds to avoid rate limiting
+ */
+const GRAPHQL_DELAY_MS = 500;
+
+/**
+ * Delay execution for a specified number of milliseconds
+ * @param {number} ms - Milliseconds to delay
+ * @returns {Promise}
+ */
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+/**
+ * Search for open issues with expiration markers
+ * @param {any} github - GitHub GraphQL instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @returns {Promise<Array<{id: string, number: number, title: string, url: string, body: string, createdAt: string}>>} Matching issues
+ */
+async function searchIssuesWithExpiration(github, owner, repo) {
+ const issues = [];
+ let hasNextPage = true;
+ let cursor = null;
+
+ while (hasNextPage) {
+ const query = `
+ query($owner: String!, $repo: String!, $cursor: String) {
+ repository(owner: $owner, name: $repo) {
+ issues(first: 100, after: $cursor, states: [OPEN]) {
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ nodes {
+ id
+ number
+ title
+ url
+ body
+ createdAt
+ }
+ }
+ }
+ }
+ `;
+
+ const result = await github.graphql(query, {
+ owner: owner,
+ repo: repo,
+ cursor: cursor,
+ });
+
+ if (!result || !result.repository || !result.repository.issues) {
+ break;
+ }
+
+ const nodes = result.repository.issues.nodes || [];
+
+ // Filter for issues with agentic workflow markers and expiration comments
+ for (const issue of nodes) {
+ // Check if created by an agentic workflow (body contains "> AI generated by" at start of line)
+ const agenticPattern = /^> AI generated by/m;
+ const isAgenticWorkflow = issue.body && agenticPattern.test(issue.body);
+
+ if (!isAgenticWorkflow) {
+ continue;
+ }
+
+ // Check if has expiration marker
+ const expirationPattern = /<!--\s*gh-aw-expires:\s*([^>]+?)\s*-->/; // NOTE(review): marker regex reconstructed (original stripped) — confirm exact marker token
+ const match = issue.body ? issue.body.match(expirationPattern) : null;
+
+ if (match) {
+ issues.push(issue);
+ }
+ }
+
+ hasNextPage = result.repository.issues.pageInfo.hasNextPage;
+ cursor = result.repository.issues.pageInfo.endCursor;
+ }
+
+ return issues;
+}
+
+/**
+ * Extract expiration date from issue body
+ * @param {string} body - Issue body
+ * @returns {Date|null} Expiration date or null if not found/invalid
+ */
+function extractExpirationDate(body) {
+ const expirationPattern = /<!--\s*gh-aw-expires:\s*([^>]+?)\s*-->/; // NOTE(review): marker regex reconstructed (original stripped) — confirm exact marker token
+ const match = body.match(expirationPattern);
+
+ if (!match) {
+ return null;
+ }
+
+ const expirationISO = match[1].trim();
+ const expirationDate = new Date(expirationISO);
+
+ // Validate the date
+ if (isNaN(expirationDate.getTime())) {
+ return null;
+ }
+
+ return expirationDate;
+}
+
+/**
+ * Validate issue creation date
+ * @param {string} createdAt - ISO 8601 creation date
+ * @returns {boolean} True if valid
+ */
+function validateCreationDate(createdAt) {
+ const creationDate = new Date(createdAt);
+ return !isNaN(creationDate.getTime());
+}
+
+/**
+ * Add comment to a GitHub Issue using REST API
+ * @param {any} github - GitHub REST instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} issueNumber - Issue number
+ * @param {string} message - Comment body
+ * @returns {Promise} Comment details
+ */
+async function addIssueComment(github, owner, repo, issueNumber, message) {
+ const result = await github.rest.issues.createComment({
+ owner: owner,
+ repo: repo,
+ issue_number: issueNumber,
+ body: message,
+ });
+
+ return result.data;
+}
+
+/**
+ * Close a GitHub Issue using REST API
+ * @param {any} github - GitHub REST instance
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} issueNumber - Issue number
+ * @returns {Promise} Issue details
+ */
+async function closeIssue(github, owner, repo, issueNumber) {
+ const result = await github.rest.issues.update({
+ owner: owner,
+ repo: repo,
+ issue_number: issueNumber,
+ state: "closed",
+ state_reason: "not_planned",
+ });
+
+ return result.data;
+}
+
+async function main() {
+ const owner = context.repo.owner;
+ const repo = context.repo.repo;
+
+ core.info(`Searching for expired issues in ${owner}/${repo}`);
+
+ // Search for issues with expiration markers
+ const issuesWithExpiration = await searchIssuesWithExpiration(github, owner, repo);
+
+ if (issuesWithExpiration.length === 0) {
+ core.info("No issues with expiration markers found");
+ return;
+ }
+
+ core.info(`Found ${issuesWithExpiration.length} issue(s) with expiration markers`);
+
+ // Check which issues are expired
+ const now = new Date();
+ const expiredIssues = [];
+
+ for (const issue of issuesWithExpiration) {
+ // Validate creation date
+ if (!validateCreationDate(issue.createdAt)) {
+ core.warning(`Issue #${issue.number} has invalid creation date, skipping`);
+ continue;
+ }
+
+ // Extract and validate expiration date
+ const expirationDate = extractExpirationDate(issue.body);
+ if (!expirationDate) {
+ core.warning(`Issue #${issue.number} has invalid expiration date, skipping`);
+ continue;
+ }
+
+ // Check if expired
+ if (now >= expirationDate) {
+ expiredIssues.push({
+ ...issue,
+ expirationDate: expirationDate,
+ });
+ }
+ }
+
+ if (expiredIssues.length === 0) {
+ core.info("No expired issues found");
+ return;
+ }
+
+ core.info(`Found ${expiredIssues.length} expired issue(s)`);
+
+ // Limit to MAX_UPDATES_PER_RUN
+ const issuesToClose = expiredIssues.slice(0, MAX_UPDATES_PER_RUN);
+
+ if (expiredIssues.length > MAX_UPDATES_PER_RUN) {
+ core.warning(`Found ${expiredIssues.length} expired issues, but only closing the first ${MAX_UPDATES_PER_RUN}`);
+ }
+
+ let closedCount = 0;
+ const closedIssues = [];
+
+ for (let i = 0; i < issuesToClose.length; i++) {
+ const issue = issuesToClose[i];
+
+ try {
+ const closingMessage = `This issue was automatically closed because it expired on ${issue.expirationDate.toISOString()}.`;
+
+ // Add comment first
+ core.info(`Adding closing comment to issue #${issue.number}`);
+ await addIssueComment(github, owner, repo, issue.number, closingMessage);
+
+ // Then close the issue as not planned
+ core.info(`Closing issue #${issue.number} as not planned`);
+ await closeIssue(github, owner, repo, issue.number);
+
+ closedIssues.push({
+ number: issue.number,
+ url: issue.url,
+ title: issue.title,
+ });
+
+ closedCount++;
+ core.info(`✓ Closed issue #${issue.number}: ${issue.url}`);
+ } catch (error) {
+ core.error(`✗ Failed to close issue #${issue.number}: ${error instanceof Error ? error.message : String(error)}`);
+ // Continue with other issues even if one fails
+ }
+
+ // Add delay between GraphQL operations to avoid rate limiting (except for the last item)
+ if (i < issuesToClose.length - 1) {
+ await delay(GRAPHQL_DELAY_MS);
+ }
+ }
+
+ // Write summary
+ if (closedCount > 0) {
+ let summaryContent = `## Closed Expired Issues\n\n`;
+ summaryContent += `Closed **${closedCount}** expired issue(s):\n\n`;
+ for (const closed of closedIssues) {
+ summaryContent += `- Issue #${closed.number}: [${closed.title}](${closed.url})\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully closed ${closedCount} expired issue(s)`);
+}
+
+await main();
diff --git a/actions/setup/js/close_issue.cjs b/actions/setup/js/close_issue.cjs
new file mode 100644
index 0000000000..0d60fbfd75
--- /dev/null
+++ b/actions/setup/js/close_issue.cjs
@@ -0,0 +1,75 @@
+// @ts-check
+///
+
+const { processCloseEntityItems, ISSUE_CONFIG } = require("./close_entity_helpers.cjs");
+
/**
 * Fetch the details of a single issue via the REST API.
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} issueNumber - Issue number
 * @returns {Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} Issue details
 * @throws {Error} When the API response carries no issue payload
 */
async function getIssueDetails(github, owner, repo, issueNumber) {
  const response = await github.rest.issues.get({ owner, repo, issue_number: issueNumber });
  const issue = response.data;

  if (!issue) {
    throw new Error(`Issue #${issueNumber} not found in ${owner}/${repo}`);
  }

  return issue;
}
+
/**
 * Post a comment on an issue via the REST API.
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} issueNumber - Issue number
 * @param {string} message - Comment body
 * @returns {Promise<{id: number, html_url: string}>} The created comment
 */
async function addIssueComment(github, owner, repo, issueNumber, message) {
  const response = await github.rest.issues.createComment({
    owner,
    repo,
    issue_number: issueNumber,
    body: message,
  });
  return response.data;
}
+
/**
 * Close an issue via the REST API (plain close; no state_reason is set).
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} issueNumber - Issue number
 * @returns {Promise<{number: number, html_url: string, title: string}>} The updated issue
 */
async function closeIssue(github, owner, repo, issueNumber) {
  const response = await github.rest.issues.update({
    owner,
    repo,
    issue_number: issueNumber,
    state: "closed",
  });
  return response.data;
}
+
// Entry point: plug the issue-specific REST operations into the shared
// close-entity pipeline (processCloseEntityItems reads the agent output,
// filters items, and drives getDetails/addComment/closeEntity per item).
async function main() {
  return processCloseEntityItems(ISSUE_CONFIG, {
    getDetails: getIssueDetails,
    addComment: addIssueComment,
    closeEntity: closeIssue,
  });
}

await main();
diff --git a/actions/setup/js/close_older_discussions.cjs b/actions/setup/js/close_older_discussions.cjs
new file mode 100644
index 0000000000..b5aeda6a73
--- /dev/null
+++ b/actions/setup/js/close_older_discussions.cjs
@@ -0,0 +1,265 @@
+// @ts-check
+///
+
+const { getCloseOlderDiscussionMessage } = require("./messages_close_discussion.cjs");
+
+/**
+ * Maximum number of older discussions to close
+ */
+const MAX_CLOSE_COUNT = 10;
+
+/**
+ * Delay between GraphQL API calls in milliseconds to avoid rate limiting
+ */
+const GRAPHQL_DELAY_MS = 500;
+
/**
 * Pause execution for the given duration.
 * @param {number} ms - Milliseconds to delay
 * @returns {Promise} Resolves after `ms` milliseconds
 */
function delay(ms) {
  return new Promise(resolve => {
    setTimeout(resolve, ms);
  });
}
+
/**
 * Find open discussions matching a title prefix and/or label set, excluding
 * the newly created discussion.
 * @param {any} github - GitHub GraphQL instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {string} titlePrefix - Title prefix to match (empty string to skip prefix matching)
 * @param {string[]} labels - Labels to match (empty array to skip label matching)
 * @param {string|undefined} categoryId - Optional category ID to filter by
 * @param {number} excludeNumber - Discussion number to exclude (the newly created one)
 * @returns {Promise<Array<{id: string, number: number, title: string, url: string}>>} Matching discussions
 */
async function searchOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, excludeNumber) {
  // Assemble the GitHub search expression: open discussions in this repo,
  // optionally narrowed by title text and by each label. Multiple label
  // filters combine with AND semantics in GitHub search.
  let searchQuery = `repo:${owner}/${repo} is:open`;

  if (titlePrefix) {
    // Quotes are escaped so a prefix cannot break out of the quoted term.
    searchQuery += ` in:title "${titlePrefix.replace(/"/g, '\\"')}"`;
  }

  if (labels && labels.length > 0) {
    for (const label of labels) {
      searchQuery += ` label:"${label.replace(/"/g, '\\"')}"`;
    }
  }

  const result = await github.graphql(
    `
    query($searchTerms: String!, $first: Int!) {
      search(query: $searchTerms, type: DISCUSSION, first: $first) {
        nodes {
          ... on Discussion {
            id
            number
            title
            url
            category {
              id
            }
            labels(first: 100) {
              nodes {
                name
              }
            }
            closed
          }
        }
      }
    }`,
    { searchTerms: searchQuery, first: 50 }
  );

  const nodes = result?.search?.nodes;
  if (!nodes) {
    return [];
  }

  // Re-validate every candidate client-side, because the search API is fuzzy:
  // drop the newly created discussion and anything already closed, enforce the
  // exact title prefix, require ALL labels (AND logic — intentional), and
  // match the category when one was requested.
  const matches = [];
  for (const node of nodes) {
    if (!node || node.number === excludeNumber || node.closed) {
      continue;
    }
    if (titlePrefix && node.title && !node.title.startsWith(titlePrefix)) {
      continue;
    }
    if (labels && labels.length > 0) {
      const names = node.labels?.nodes?.map((/** @type {{name: string}} */ l) => l.name) || [];
      if (!labels.every(label => names.includes(label))) {
        continue;
      }
    }
    if (categoryId && (!node.category || node.category.id !== categoryId)) {
      continue;
    }
    matches.push({ id: node.id, number: node.number, title: node.title, url: node.url });
  }
  return matches;
}
+
/**
 * Post a comment on a discussion via the GraphQL API.
 * @param {any} github - GitHub GraphQL instance
 * @param {string} discussionId - Discussion node ID
 * @param {string} message - Comment body
 * @returns {Promise<{id: string, url: string}>} The created comment
 */
async function addDiscussionComment(github, discussionId, message) {
  const mutation = `
    mutation($dId: ID!, $body: String!) {
      addDiscussionComment(input: { discussionId: $dId, body: $body }) {
        comment {
          id
          url
        }
      }
    }`;
  const result = await github.graphql(mutation, { dId: discussionId, body: message });
  return result.addDiscussionComment.comment;
}
+
/**
 * Close a discussion with reason OUTDATED via the GraphQL API.
 * @param {any} github - GitHub GraphQL instance
 * @param {string} discussionId - Discussion node ID
 * @returns {Promise<{id: string, url: string}>} The closed discussion
 */
async function closeDiscussionAsOutdated(github, discussionId) {
  const mutation = `
    mutation($dId: ID!) {
      closeDiscussion(input: { discussionId: $dId, reason: OUTDATED }) {
        discussion {
          id
          url
        }
      }
    }`;
  const result = await github.graphql(mutation, { dId: discussionId });
  return result.closeDiscussion.discussion;
}
+
/**
 * Comment on and close (as outdated) older discussions superseded by a newly
 * created one, up to MAX_CLOSE_COUNT, pacing GraphQL calls to avoid rate limits.
 * Failures on individual discussions are logged and skipped.
 * @param {any} github - GitHub GraphQL instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {string} titlePrefix - Title prefix to match (empty string to skip)
 * @param {string[]} labels - Labels to match (empty array to skip)
 * @param {string|undefined} categoryId - Optional category ID to filter by
 * @param {{number: number, url: string}} newDiscussion - The newly created discussion
 * @param {string} workflowName - Name of the workflow
 * @param {string} runUrl - URL of the workflow run
 * @returns {Promise<Array<{number: number, url: string}>>} List of closed discussions
 */
async function closeOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, newDiscussion, workflowName, runUrl) {
  // Describe the active search criteria for the log line.
  const criteria = [];
  if (titlePrefix) criteria.push(`title prefix: "${titlePrefix}"`);
  if (labels && labels.length > 0) criteria.push(`labels: [${labels.join(", ")}]`);
  core.info(`Searching for older discussions with ${criteria.join(" and ")}`);

  const candidates = await searchOlderDiscussions(github, owner, repo, titlePrefix, labels, categoryId, newDiscussion.number);

  if (candidates.length === 0) {
    core.info("No older discussions found to close");
    return [];
  }

  core.info(`Found ${candidates.length} older discussion(s) to close`);

  // Cap the amount of work per run.
  const toClose = candidates.slice(0, MAX_CLOSE_COUNT);
  if (candidates.length > MAX_CLOSE_COUNT) {
    core.warning(`Found ${candidates.length} older discussions, but only closing the first ${MAX_CLOSE_COUNT}`);
  }

  const closed = [];

  for (const [index, discussion] of toClose.entries()) {
    try {
      // Regenerated inside the loop: the message helper is an external call
      // whose per-invocation behavior we do not assume here.
      const closingMessage = getCloseOlderDiscussionMessage({
        newDiscussionUrl: newDiscussion.url,
        newDiscussionNumber: newDiscussion.number,
        workflowName,
        runUrl,
      });

      // Comment first, then close, so the explanation is visible on the thread.
      core.info(`Adding closing comment to discussion #${discussion.number}`);
      await addDiscussionComment(github, discussion.id, closingMessage);

      core.info(`Closing discussion #${discussion.number} as outdated`);
      await closeDiscussionAsOutdated(github, discussion.id);

      closed.push({ number: discussion.number, url: discussion.url });
      core.info(`ā Closed discussion #${discussion.number}: ${discussion.url}`);
    } catch (error) {
      // Keep going: one failure must not block the remaining discussions.
      core.error(`ā Failed to close discussion #${discussion.number}: ${error instanceof Error ? error.message : String(error)}`);
    }

    // Pace GraphQL operations; no delay needed after the final item.
    if (index < toClose.length - 1) {
      await delay(GRAPHQL_DELAY_MS);
    }
  }

  return closed;
}
+
+module.exports = {
+ closeOlderDiscussions,
+ searchOlderDiscussions,
+ addDiscussionComment,
+ closeDiscussionAsOutdated,
+ MAX_CLOSE_COUNT,
+ GRAPHQL_DELAY_MS,
+};
diff --git a/actions/setup/js/close_pull_request.cjs b/actions/setup/js/close_pull_request.cjs
new file mode 100644
index 0000000000..3fc9fb2ac5
--- /dev/null
+++ b/actions/setup/js/close_pull_request.cjs
@@ -0,0 +1,75 @@
+// @ts-check
+///
+
+const { processCloseEntityItems, PULL_REQUEST_CONFIG } = require("./close_entity_helpers.cjs");
+
/**
 * Fetch the details of a single pull request via the REST API.
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} prNumber - Pull request number
 * @returns {Promise<{number: number, title: string, labels: Array<{name: string}>, html_url: string, state: string}>} Pull request details
 * @throws {Error} When the API response carries no pull request payload
 */
async function getPullRequestDetails(github, owner, repo, prNumber) {
  const response = await github.rest.pulls.get({ owner, repo, pull_number: prNumber });
  const pr = response.data;

  if (!pr) {
    throw new Error(`Pull request #${prNumber} not found in ${owner}/${repo}`);
  }

  return pr;
}
+
/**
 * Post a comment on a pull request via the REST API.
 * Note: PR conversation comments use the issues endpoint, so the PR number is
 * passed as issue_number.
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} prNumber - Pull request number
 * @param {string} message - Comment body
 * @returns {Promise<{id: number, html_url: string}>} The created comment
 */
async function addPullRequestComment(github, owner, repo, prNumber, message) {
  const response = await github.rest.issues.createComment({
    owner,
    repo,
    issue_number: prNumber,
    body: message,
  });
  return response.data;
}
+
/**
 * Close a pull request via the REST API (without merging).
 * @param {any} github - GitHub REST API instance
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} prNumber - Pull request number
 * @returns {Promise<{number: number, html_url: string, title: string}>} The updated pull request
 */
async function closePullRequest(github, owner, repo, prNumber) {
  const response = await github.rest.pulls.update({
    owner,
    repo,
    pull_number: prNumber,
    state: "closed",
  });
  return response.data;
}
+
// Entry point: plug the pull-request-specific REST operations into the shared
// close-entity pipeline (processCloseEntityItems reads the agent output,
// filters items, and drives getDetails/addComment/closeEntity per item).
async function main() {
  return processCloseEntityItems(PULL_REQUEST_CONFIG, {
    getDetails: getPullRequestDetails,
    addComment: addPullRequestComment,
    closeEntity: closePullRequest,
  });
}

await main();
diff --git a/actions/setup/js/collect_ndjson_output.cjs b/actions/setup/js/collect_ndjson_output.cjs
new file mode 100644
index 0000000000..bfd0d86558
--- /dev/null
+++ b/actions/setup/js/collect_ndjson_output.cjs
@@ -0,0 +1,358 @@
+// @ts-check
+///
+
/**
 * Collect the agent's NDJSON safe-output stream, validate every line against
 * the configured safe-output types, and publish the validated result as step
 * outputs (`output`, `raw_output`, `output_types`, `has_patch`).
 *
 * Runs under actions/github-script: relies on the ambient `core`, `context`,
 * and `github` globals.
 *
 * Fix: minimum-required-count violations are now computed BEFORE the warning
 * report, so "too few items" errors are surfaced in the workflow log instead
 * of only being embedded silently in the output JSON.
 */
async function main() {
  const fs = require("fs");
  const { sanitizeContent } = require("./sanitize_content.cjs");
  const { validateItem, getMaxAllowedForType, getMinRequiredForType, hasValidationConfig, MAX_BODY_LENGTH: maxBodyLength, resetValidationConfigCache } = require("./safe_output_type_validator.cjs");
  const { resolveAllowedMentionsFromPayload } = require("./resolve_mentions_from_payload.cjs");

  // Load validation config from file and set it in environment for the validator to read
  const validationConfigPath = process.env.GH_AW_VALIDATION_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/validation.json";
  let validationConfig = null;
  try {
    if (fs.existsSync(validationConfigPath)) {
      const validationConfigContent = fs.readFileSync(validationConfigPath, "utf8");
      process.env.GH_AW_VALIDATION_CONFIG = validationConfigContent;
      validationConfig = JSON.parse(validationConfigContent);
      resetValidationConfigCache(); // Reset cache so it reloads from new env var
      core.info(`Loaded validation config from ${validationConfigPath}`);
    }
  } catch (error) {
    core.warning(`Failed to read validation config from ${validationConfigPath}: ${error instanceof Error ? error.message : String(error)}`);
  }

  // Extract mentions configuration from validation config
  const mentionsConfig = validationConfig?.mentions || null;

  // Resolve allowed mentions for the output collector
  // This determines which @mentions are allowed in the agent output
  const allowedMentions = await resolveAllowedMentionsFromPayload(context, github, core, mentionsConfig);

  /**
   * Best-effort repair of almost-JSON produced by the agent: escapes raw
   * control characters, converts single quotes, quotes bare keys, balances
   * braces/brackets, and strips trailing commas.
   * @param {string} jsonStr - Candidate JSON text
   * @returns {string} Text that may now parse as strict JSON
   */
  function repairJson(jsonStr) {
    let repaired = jsonStr.trim();
    const _ctrl = { 8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r" };
    repaired = repaired.replace(/[\u0000-\u001F]/g, ch => {
      const c = ch.charCodeAt(0);
      return _ctrl[c] || "\\u" + c.toString(16).padStart(4, "0");
    });
    repaired = repaired.replace(/'/g, '"');
    repaired = repaired.replace(/([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, '$1"$2":');
    repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => {
      if (content.includes("\n") || content.includes("\r") || content.includes("\t")) {
        const escaped = content.replace(/\\/g, "\\\\").replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t");
        return `"${escaped}"`;
      }
      return match;
    });
    repaired = repaired.replace(/"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}`);
    repaired = repaired.replace(/(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, "$1]");
    const openBraces = (repaired.match(/\{/g) || []).length;
    const closeBraces = (repaired.match(/\}/g) || []).length;
    if (openBraces > closeBraces) {
      repaired += "}".repeat(openBraces - closeBraces);
    } else if (closeBraces > openBraces) {
      repaired = "{".repeat(closeBraces - openBraces) + repaired;
    }
    const openBrackets = (repaired.match(/\[/g) || []).length;
    const closeBrackets = (repaired.match(/\]/g) || []).length;
    if (openBrackets > closeBrackets) {
      repaired += "]".repeat(openBrackets - closeBrackets);
    } else if (closeBrackets > openBrackets) {
      repaired = "[".repeat(closeBrackets - openBrackets) + repaired;
    }
    repaired = repaired.replace(/,(\s*[}\]])/g, "$1");
    return repaired;
  }

  /**
   * Validate one field value against a safe-job input schema, sanitizing
   * string-like values with the allowed-mentions list.
   * @param {any} value - Raw field value from the item
   * @param {string} fieldName - Field name (used in error messages)
   * @param {any} inputSchema - Schema object: { type, required, default, options }
   * @param {number} lineNum - 1-based NDJSON line number (used in error messages)
   * @returns {{isValid: boolean, error?: string, normalizedValue?: any}}
   */
  function validateFieldWithInputSchema(value, fieldName, inputSchema, lineNum) {
    if (inputSchema.required && (value === undefined || value === null)) {
      return {
        isValid: false,
        error: `Line ${lineNum}: ${fieldName} is required`,
      };
    }
    if (value === undefined || value === null) {
      return {
        isValid: true,
        normalizedValue: inputSchema.default || undefined,
      };
    }
    const inputType = inputSchema.type || "string";
    let normalizedValue = value;
    switch (inputType) {
      case "string":
        if (typeof value !== "string") {
          return {
            isValid: false,
            error: `Line ${lineNum}: ${fieldName} must be a string`,
          };
        }
        normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions });
        break;
      case "boolean":
        if (typeof value !== "boolean") {
          return {
            isValid: false,
            error: `Line ${lineNum}: ${fieldName} must be a boolean`,
          };
        }
        break;
      case "number":
        if (typeof value !== "number") {
          return {
            isValid: false,
            error: `Line ${lineNum}: ${fieldName} must be a number`,
          };
        }
        break;
      case "choice":
        if (typeof value !== "string") {
          return {
            isValid: false,
            error: `Line ${lineNum}: ${fieldName} must be a string for choice type`,
          };
        }
        if (inputSchema.options && !inputSchema.options.includes(value)) {
          return {
            isValid: false,
            error: `Line ${lineNum}: ${fieldName} must be one of: ${inputSchema.options.join(", ")}`,
          };
        }
        normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions });
        break;
      default:
        // Unknown schema types: pass through, but still sanitize strings.
        if (typeof value === "string") {
          normalizedValue = sanitizeContent(value, { allowedAliases: allowedMentions });
        }
        break;
    }
    return {
      isValid: true,
      normalizedValue,
    };
  }

  /**
   * Validate an item against a safe-job config's `inputs` schemas, collecting
   * all field errors rather than stopping at the first.
   * @param {any} item - Parsed NDJSON item
   * @param {any} jobConfig - Safe-job config (may carry an `inputs` map)
   * @param {number} lineNum - 1-based NDJSON line number
   * @returns {{isValid: boolean, errors: string[], normalizedItem: any}}
   */
  function validateItemWithSafeJobConfig(item, jobConfig, lineNum) {
    const errors = [];
    const normalizedItem = { ...item };
    if (!jobConfig.inputs) {
      return {
        isValid: true,
        errors: [],
        normalizedItem: item,
      };
    }
    for (const [fieldName, inputSchema] of Object.entries(jobConfig.inputs)) {
      const fieldValue = item[fieldName];
      const validation = validateFieldWithInputSchema(fieldValue, fieldName, inputSchema, lineNum);
      if (!validation.isValid && validation.error) {
        errors.push(validation.error);
      } else if (validation.normalizedValue !== undefined) {
        normalizedItem[fieldName] = validation.normalizedValue;
      }
    }
    return {
      isValid: errors.length === 0,
      errors,
      normalizedItem,
    };
  }

  /**
   * Parse JSON strictly, retrying once through `repairJson` on failure.
   * @param {string} jsonStr - Candidate JSON text
   * @returns {any} Parsed value
   * @throws {Error} When both the strict and the repaired parse fail
   */
  function parseJsonWithRepair(jsonStr) {
    try {
      return JSON.parse(jsonStr);
    } catch (originalError) {
      try {
        const repairedJson = repairJson(jsonStr);
        return JSON.parse(repairedJson);
      } catch (repairError) {
        core.info(`invalid input json: ${jsonStr}`);
        const originalMsg = originalError instanceof Error ? originalError.message : String(originalError);
        const repairMsg = repairError instanceof Error ? repairError.message : String(repairError);
        throw new Error(`JSON parsing failed. Original: ${originalMsg}. After attempted repair: ${repairMsg}`);
      }
    }
  }
  const outputFile = process.env.GH_AW_SAFE_OUTPUTS;
  // Read config from file instead of environment variable
  const configPath = process.env.GH_AW_SAFE_OUTPUTS_CONFIG_PATH || "/tmp/gh-aw/safeoutputs/config.json";
  let safeOutputsConfig;
  core.info(`[INGESTION] Reading config from: ${configPath}`);
  try {
    if (fs.existsSync(configPath)) {
      const configFileContent = fs.readFileSync(configPath, "utf8");
      core.info(`[INGESTION] Raw config content: ${configFileContent}`);
      safeOutputsConfig = JSON.parse(configFileContent);
      core.info(`[INGESTION] Parsed config keys: ${JSON.stringify(Object.keys(safeOutputsConfig))}`);
    } else {
      core.info(`[INGESTION] Config file does not exist at: ${configPath}`);
    }
  } catch (error) {
    core.warning(`Failed to read config file from ${configPath}: ${error instanceof Error ? error.message : String(error)}`);
  }

  core.info(`[INGESTION] Output file path: ${outputFile}`);
  if (!outputFile) {
    core.info("GH_AW_SAFE_OUTPUTS not set, no output to collect");
    core.setOutput("output", "");
    return;
  }
  if (!fs.existsSync(outputFile)) {
    core.info(`Output file does not exist: ${outputFile}`);
    core.setOutput("output", "");
    return;
  }
  const outputContent = fs.readFileSync(outputFile, "utf8");
  if (outputContent.trim() === "") {
    core.info("Output file is empty");
  }
  core.info(`Raw output content length: ${outputContent.length}`);
  core.info(`[INGESTION] First 500 chars of output: ${outputContent.substring(0, 500)}`);
  let expectedOutputTypes = {};
  if (safeOutputsConfig) {
    try {
      // safeOutputsConfig is already a parsed object from the file
      // Normalize all config keys to use underscores instead of dashes
      core.info(`[INGESTION] Normalizing config keys (dash -> underscore)`);
      expectedOutputTypes = Object.fromEntries(Object.entries(safeOutputsConfig).map(([key, value]) => [key.replace(/-/g, "_"), value]));
      core.info(`[INGESTION] Expected output types after normalization: ${JSON.stringify(Object.keys(expectedOutputTypes))}`);
      core.info(`[INGESTION] Expected output types full config: ${JSON.stringify(expectedOutputTypes)}`);
    } catch (error) {
      const errorMsg = error instanceof Error ? error.message : String(error);
      core.info(`Warning: Could not parse safe-outputs config: ${errorMsg}`);
    }
  }
  // Parse JSONL (JSON Lines) format: each line is a separate JSON object
  // CRITICAL: This expects one JSON object per line. If JSON is formatted with
  // indentation/pretty-printing, parsing will fail.
  const lines = outputContent.trim().split("\n");
  const parsedItems = [];
  const errors = [];
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i].trim();
    if (line === "") continue;
    core.info(`[INGESTION] Processing line ${i + 1}: ${line.substring(0, 200)}...`);
    try {
      const item = parseJsonWithRepair(line);
      if (item === undefined) {
        errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`);
        continue;
      }
      if (!item.type) {
        errors.push(`Line ${i + 1}: Missing required 'type' field`);
        continue;
      }
      // Normalize type to use underscores (convert any dashes to underscores for resilience)
      const originalType = item.type;
      const itemType = item.type.replace(/-/g, "_");
      core.info(`[INGESTION] Line ${i + 1}: Original type='${originalType}', Normalized type='${itemType}'`);
      // Update item.type to normalized value
      item.type = itemType;
      if (!expectedOutputTypes[itemType]) {
        core.warning(`[INGESTION] Line ${i + 1}: Type '${itemType}' not found in expected types: ${JSON.stringify(Object.keys(expectedOutputTypes))}`);
        errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}`);
        continue;
      }
      const typeCount = parsedItems.filter(existing => existing.type === itemType).length;
      const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes);
      if (typeCount >= maxAllowed) {
        errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`);
        continue;
      }
      core.info(`Line ${i + 1}: type '${itemType}'`);

      // Use the validation engine to validate the item
      if (hasValidationConfig(itemType)) {
        const validationResult = validateItem(item, itemType, i + 1, { allowedAliases: allowedMentions });
        if (!validationResult.isValid) {
          if (validationResult.error) {
            errors.push(validationResult.error);
          }
          continue;
        }
        // Update item with normalized values
        Object.assign(item, validationResult.normalizedItem);
      } else {
        // Fall back to validateItemWithSafeJobConfig for unknown types
        const jobOutputType = expectedOutputTypes[itemType];
        if (!jobOutputType) {
          errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`);
          continue;
        }
        const safeJobConfig = jobOutputType;
        if (safeJobConfig && safeJobConfig.inputs) {
          const validation = validateItemWithSafeJobConfig(item, safeJobConfig, i + 1);
          if (!validation.isValid) {
            errors.push(...validation.errors);
            continue;
          }
          Object.assign(item, validation.normalizedItem);
        }
      }

      core.info(`Line ${i + 1}: Valid ${itemType} item`);
      parsedItems.push(item);
    } catch (error) {
      const errorMsg = error instanceof Error ? error.message : String(error);
      errors.push(`Line ${i + 1}: Invalid JSON - ${errorMsg}`);
    }
  }
  // Enforce minimum-required counts BEFORE reporting, so "too few items"
  // problems appear in the workflow log alongside the other validation errors
  // (previously they were appended after the warnings had been printed and
  // were therefore never logged).
  for (const itemType of Object.keys(expectedOutputTypes)) {
    const minRequired = getMinRequiredForType(itemType, expectedOutputTypes);
    if (minRequired > 0) {
      const actualCount = parsedItems.filter(item => item.type === itemType).length;
      if (actualCount < minRequired) {
        errors.push(`Too few items of type '${itemType}'. Minimum required: ${minRequired}, found: ${actualCount}.`);
      }
    }
  }
  if (errors.length > 0) {
    core.warning("Validation errors found:");
    errors.forEach(error => core.warning(` - ${error}`));
  }
  core.info(`Successfully parsed ${parsedItems.length} valid output items`);
  const validatedOutput = {
    items: parsedItems,
    errors: errors,
  };
  const agentOutputFile = "/tmp/gh-aw/agent_output.json";
  const validatedOutputJson = JSON.stringify(validatedOutput);
  try {
    fs.mkdirSync("/tmp/gh-aw", { recursive: true });
    fs.writeFileSync(agentOutputFile, validatedOutputJson, "utf8");
    core.info(`Stored validated output to: ${agentOutputFile}`);
    core.exportVariable("GH_AW_AGENT_OUTPUT", agentOutputFile);
  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : String(error);
    core.error(`Failed to write agent output file: ${errorMsg}`);
  }
  core.setOutput("output", JSON.stringify(validatedOutput));
  core.setOutput("raw_output", outputContent);
  const outputTypes = Array.from(new Set(parsedItems.map(item => item.type)));
  core.info(`output_types: ${outputTypes.join(", ")}`);
  core.setOutput("output_types", outputTypes.join(","));

  // Check if patch file exists for detection job conditional
  const patchPath = "/tmp/gh-aw/aw.patch";
  const hasPatch = fs.existsSync(patchPath);
  core.info(`Patch file ${hasPatch ? "exists" : "does not exist"} at: ${patchPath}`);

  // Check if allow-empty is enabled for create_pull_request (reuse already loaded config)
  let allowEmptyPR = false;
  if (safeOutputsConfig) {
    // Check if create-pull-request has allow-empty enabled
    if (safeOutputsConfig["create-pull-request"]?.["allow-empty"] === true || safeOutputsConfig["create_pull_request"]?.["allow_empty"] === true) {
      allowEmptyPR = true;
      core.info(`allow-empty is enabled for create-pull-request`);
    }
  }

  // If allow-empty is enabled for create_pull_request and there's no patch, that's OK
  // Set has_patch to true so the create_pull_request job will run
  if (allowEmptyPR && !hasPatch && outputTypes.includes("create_pull_request")) {
    core.info(`allow-empty is enabled and no patch exists - will create empty PR`);
    core.setOutput("has_patch", "true");
  } else {
    core.setOutput("has_patch", hasPatch ? "true" : "false");
  }
}
await main();
diff --git a/actions/setup/js/compute_text.cjs b/actions/setup/js/compute_text.cjs
new file mode 100644
index 0000000000..84ae22f413
--- /dev/null
+++ b/actions/setup/js/compute_text.cjs
@@ -0,0 +1,173 @@
+// @ts-check
+///
+
// Shared sanitization helpers (see sanitize_incoming_text.cjs):
// - sanitizeIncomingText: escapes @mentions and redacts disallowed URLs in
//   incoming event text before it is exposed as a step output.
// - writeRedactedDomainsLog: persists any redacted URL domains to a log file
//   and returns its path (or a falsy value when nothing was redacted).
+const { sanitizeIncomingText, writeRedactedDomainsLog } = require("./sanitize_incoming_text.cjs");
+
/**
 * Compute the "current text" for the triggering event and expose it as the
 * `text` step output. Only actors with admin or maintain permission receive
 * any text; all other actors get an empty string. The text is sanitized via
 * sanitizeIncomingText before being output.
 *
 * Runs under actions/github-script: relies on the ambient `core`, `context`,
 * and `github` globals.
 *
 * Improvement: switch cases with byte-identical bodies (pull_request /
 * pull_request_target, and the three comment events) are consolidated via
 * shared case labels; behavior is unchanged.
 */
async function main() {
  let text = "";

  const actor = context.actor;
  const { owner, repo } = context.repo;

  // Gate on repository permission: only admin/maintain actors may feed event
  // text into the workflow, so untrusted text never reaches the agent.
  const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({
    owner: owner,
    repo: repo,
    username: actor,
  });

  const permission = repoPermission.data.permission;
  core.info(`Repository permission level: ${permission}`);

  if (permission !== "admin" && permission !== "maintain") {
    core.setOutput("text", "");
    return;
  }

  // Determine current body text based on event context.
  // Events with identical extraction logic share a case label.
  switch (context.eventName) {
    case "issues":
      // For issues: title + body
      if (context.payload.issue) {
        const title = context.payload.issue.title || "";
        const body = context.payload.issue.body || "";
        text = `${title}\n\n${body}`;
      }
      break;

    case "pull_request":
    case "pull_request_target":
      // For pull requests (including pull_request_target events): title + body
      if (context.payload.pull_request) {
        const title = context.payload.pull_request.title || "";
        const body = context.payload.pull_request.body || "";
        text = `${title}\n\n${body}`;
      }
      break;

    case "issue_comment":
    case "pull_request_review_comment":
    case "discussion_comment":
      // For comment events: comment body
      if (context.payload.comment) {
        text = context.payload.comment.body || "";
      }
      break;

    case "pull_request_review":
      // For PR reviews: review body
      if (context.payload.review) {
        text = context.payload.review.body || "";
      }
      break;

    case "discussion":
      // For discussions: title + body
      if (context.payload.discussion) {
        const title = context.payload.discussion.title || "";
        const body = context.payload.discussion.body || "";
        text = `${title}\n\n${body}`;
      }
      break;

    case "release":
      // For releases: name (falling back to the tag name) + body
      if (context.payload.release) {
        const name = context.payload.release.name || context.payload.release.tag_name || "";
        const body = context.payload.release.body || "";
        text = `${name}\n\n${body}`;
      }
      break;

    case "workflow_dispatch":
      // For workflow dispatch: a release may be referenced via inputs
      if (context.payload.inputs) {
        const releaseUrl = context.payload.inputs.release_url;
        const releaseId = context.payload.inputs.release_id;

        // If release_url is provided, extract owner/repo/tag from the URL
        if (releaseUrl) {
          const urlMatch = releaseUrl.match(/github\.com\/([^\/]+)\/([^\/]+)\/releases\/tag\/([^\/]+)/);
          if (urlMatch) {
            const [, urlOwner, urlRepo, tag] = urlMatch;
            try {
              const { data: release } = await github.rest.repos.getReleaseByTag({
                owner: urlOwner,
                repo: urlRepo,
                tag: tag,
              });
              const name = release.name || release.tag_name || "";
              const body = release.body || "";
              text = `${name}\n\n${body}`;
            } catch (error) {
              core.warning(`Failed to fetch release from URL: ${error instanceof Error ? error.message : String(error)}`);
            }
          }
        } else if (releaseId) {
          // Otherwise, if release_id is provided, fetch the release by ID
          try {
            const { data: release } = await github.rest.repos.getRelease({
              owner: owner,
              repo: repo,
              release_id: parseInt(releaseId, 10),
            });
            const name = release.name || release.tag_name || "";
            const body = release.body || "";
            text = `${name}\n\n${body}`;
          } catch (error) {
            core.warning(`Failed to fetch release by ID: ${error instanceof Error ? error.message : String(error)}`);
          }
        }
      }
      break;

    default:
      // Unknown events contribute no text
      text = "";
      break;
  }

  // Sanitize the text before output
  // All mentions are escaped (wrapped in backticks) to prevent unintended notifications
  // Mention filtering will be applied by the agent output collector
  const sanitizedText = sanitizeIncomingText(text);

  // Display sanitized text in logs
  core.info(`text: ${sanitizedText}`);

  // Set the sanitized text as output
  core.setOutput("text", sanitizedText);

  // Write redacted URL domains to log file if any were collected
  const logPath = writeRedactedDomainsLog();
  if (logPath) {
    core.info(`Redacted URL domains written to: ${logPath}`);
  }
}

await main();
diff --git a/actions/setup/js/create_agent_task.cjs b/actions/setup/js/create_agent_task.cjs
new file mode 100644
index 0000000000..2ee0a659e1
--- /dev/null
+++ b/actions/setup/js/create_agent_task.cjs
@@ -0,0 +1,179 @@
+// @ts-check
+///
+
+const fs = require("fs");
+const path = require("path");
+
+async function main() {
+ // Initialize outputs to empty strings to ensure they're always set
+ core.setOutput("task_number", "");
+ core.setOutput("task_url", "");
+
+ const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true";
+ const agentOutputFile = process.env.GITHUB_AW_AGENT_OUTPUT;
+ if (!agentOutputFile) {
+ core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found");
+ return;
+ }
+
+ // Read agent output from file
+ let outputContent;
+ try {
+ outputContent = fs.readFileSync(agentOutputFile, "utf8");
+ } catch (error) {
+ core.setFailed(`Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`);
+ return;
+ }
+
+ if (outputContent.trim() === "") {
+ core.info("Agent output content is empty");
+ return;
+ }
+ core.info(`Agent output content length: ${outputContent.length}`);
+
+ let validatedOutput;
+ try {
+ validatedOutput = JSON.parse(outputContent);
+ } catch (error) {
+ core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`);
+ return;
+ }
+
+ if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) {
+ core.info("No valid items found in agent output");
+ return;
+ }
+
+ const createAgentTaskItems = validatedOutput.items.filter(item => item.type === "create_agent_task");
+ if (createAgentTaskItems.length === 0) {
+ core.info("No create-agent-task items found in agent output");
+ return;
+ }
+
+ core.info(`Found ${createAgentTaskItems.length} create-agent-task item(s)`);
+
+ if (isStaged) {
+ let summaryContent = "## š Staged Mode: Create Agent Tasks Preview\n\n";
+ summaryContent += "The following agent tasks would be created if staged mode was disabled:\n\n";
+
+ for (const [index, item] of createAgentTaskItems.entries()) {
+ summaryContent += `### Task ${index + 1}\n\n`;
+ summaryContent += `**Description:**\n${item.body || "No description provided"}\n\n`;
+
+ const baseBranch = process.env.GITHUB_AW_AGENT_TASK_BASE || "main";
+ summaryContent += `**Base Branch:** ${baseBranch}\n\n`;
+
+ const targetRepo = process.env.GITHUB_AW_TARGET_REPO || process.env.GITHUB_REPOSITORY || "unknown";
+ summaryContent += `**Target Repository:** ${targetRepo}\n\n`;
+
+ summaryContent += "---\n\n";
+ }
+
+ core.info(summaryContent);
+ core.summary.addRaw(summaryContent);
+ await core.summary.write();
+ return;
+ }
+
+ // Get base branch from environment or use current branch
+ const baseBranch = process.env.GITHUB_AW_AGENT_TASK_BASE || process.env.GITHUB_REF_NAME || "main";
+ const targetRepo = process.env.GITHUB_AW_TARGET_REPO;
+
+ // Process all agent task items
+ const createdTasks = [];
+ let summaryContent = "## ā
Agent Tasks Created\n\n";
+
+ for (const [index, taskItem] of createAgentTaskItems.entries()) {
+ const taskDescription = taskItem.body;
+
+ if (!taskDescription || taskDescription.trim() === "") {
+ core.warning(`Task ${index + 1}: Agent task description is empty, skipping`);
+ continue;
+ }
+
+ try {
+ // Write task description to a temporary file
+ const tmpDir = "/tmp/gh-aw";
+ if (!fs.existsSync(tmpDir)) {
+ fs.mkdirSync(tmpDir, { recursive: true });
+ }
+
+ const taskFile = path.join(tmpDir, `agent-task-description-${index + 1}.md`);
+ fs.writeFileSync(taskFile, taskDescription, "utf8");
+ core.info(`Task ${index + 1}: Task description written to ${taskFile}`);
+
+ // Build gh agent-task create command
+ const ghArgs = ["agent-task", "create", "--from-file", taskFile, "--base", baseBranch];
+
+ if (targetRepo) {
+ ghArgs.push("--repo", targetRepo);
+ }
+
+ core.info(`Task ${index + 1}: Creating agent task with command: gh ${ghArgs.join(" ")}`);
+
+ // Execute gh agent-task create command
+ let taskOutput;
+ try {
+ taskOutput = await exec.getExecOutput("gh", ghArgs, {
+ silent: false,
+ ignoreReturnCode: false,
+ });
+ } catch (execError) {
+ const errorMessage = execError instanceof Error ? execError.message : String(execError);
+
+ // Check for authentication/permission errors
+ if (errorMessage.includes("authentication") || errorMessage.includes("permission") || errorMessage.includes("forbidden") || errorMessage.includes("401") || errorMessage.includes("403")) {
+ core.error(`Task ${index + 1}: Failed to create agent task due to authentication/permission error.`);
+ core.error(`The default GITHUB_TOKEN does not have permission to create agent tasks.`);
+ core.error(`You must configure a Personal Access Token (PAT) as COPILOT_GITHUB_TOKEN or GH_AW_GITHUB_TOKEN.`);
+ core.error(`See documentation: https://githubnext.github.io/gh-aw/reference/safe-outputs/#agent-task-creation-create-agent-task`);
+ } else {
+ core.error(`Task ${index + 1}: Failed to create agent task: ${errorMessage}`);
+ }
+ continue;
+ }
+
+ // Parse the output to extract task number and URL
+ // Expected output format from gh agent-task create is typically:
+ // https://github.com/owner/repo/issues/123
+ const output = taskOutput.stdout.trim();
+ core.info(`Task ${index + 1}: Agent task created: ${output}`);
+
+ // Extract task number from URL
+ const urlMatch = output.match(/github\.com\/[^/]+\/[^/]+\/issues\/(\d+)/);
+ if (urlMatch) {
+ const taskNumber = urlMatch[1];
+ createdTasks.push({ number: taskNumber, url: output });
+
+ summaryContent += `### Task ${index + 1}\n\n`;
+ summaryContent += `**Task:** [#${taskNumber}](${output})\n\n`;
+ summaryContent += `**Base Branch:** ${baseBranch}\n\n`;
+
+ core.info(`ā
Successfully created agent task #${taskNumber}`);
+ } else {
+ core.warning(`Task ${index + 1}: Could not parse task number from output: ${output}`);
+ createdTasks.push({ number: "", url: output });
+ }
+ } catch (error) {
+ core.error(`Task ${index + 1}: Error creating agent task: ${error instanceof Error ? error.message : String(error)}`);
+ }
+ }
+
+ // Set outputs for the first created task (for backward compatibility)
+ if (createdTasks.length > 0) {
+ core.setOutput("task_number", createdTasks[0].number);
+ core.setOutput("task_url", createdTasks[0].url);
+ } else {
+ core.setFailed("No agent tasks were created");
+ return;
+ }
+
+ // Write summary
+ core.info(summaryContent);
+ core.summary.addRaw(summaryContent);
+ await core.summary.write();
+}
+
+main().catch(error => {
+ core.setFailed(error instanceof Error ? error.message : String(error));
+});
diff --git a/actions/setup/js/create_code_scanning_alert.cjs b/actions/setup/js/create_code_scanning_alert.cjs
new file mode 100644
index 0000000000..24def2d02a
--- /dev/null
+++ b/actions/setup/js/create_code_scanning_alert.cjs
@@ -0,0 +1,244 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+
/**
 * Safe-output handler: converts `create_code_scanning_alert` items from the
 * agent output into a SARIF 2.1.0 report file for upload to GitHub Code
 * Scanning.
 *
 * Environment:
 * - GH_AW_SAFE_OUTPUTS_STAGED: "true" writes a preview to the step summary
 *   instead of generating the SARIF file.
 * - GH_AW_SECURITY_REPORT_MAX: optional cap on accepted findings
 *   (0 or unset means unlimited).
 * - GH_AW_SECURITY_REPORT_DRIVER: SARIF tool driver name.
 * - GH_AW_WORKFLOW_FILENAME: prefix for generated SARIF rule IDs.
 *
 * Step outputs: `sarif_file`, `findings_count`, `artifact_uploaded`,
 * `codeql_uploaded`.
 *
 * @returns {Promise<{sarifFile: string, findingsCount: number, findings: Array<object>}|undefined>}
 *   Report metadata, or undefined when no SARIF file was produced.
 */
async function main() {
  const result = loadAgentOutput();
  if (!result.success) {
    return;
  }

  // Find all create-code-scanning-alert items
  const securityItems = result.items.filter(/** @param {any} item */ item => item.type === "create_code_scanning_alert");
  if (securityItems.length === 0) {
    core.info("No create-code-scanning-alert items found in agent output");
    return;
  }

  core.info(`Found ${securityItems.length} create-code-scanning-alert item(s)`);

  // If in staged mode, emit step summary instead of creating code scanning alerts
  if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") {
    let summaryContent = "## š Staged Mode: Create Code Scanning Alerts Preview\n\n";
    summaryContent += "The following code scanning alerts would be created if staged mode was disabled:\n\n";

    for (let i = 0; i < securityItems.length; i++) {
      const item = securityItems[i];
      summaryContent += `### Security Finding ${i + 1}\n`;
      summaryContent += `**File:** ${item.file || "No file provided"}\n\n`;
      summaryContent += `**Line:** ${item.line || "No line provided"}\n\n`;
      summaryContent += `**Severity:** ${item.severity || "No severity provided"}\n\n`;
      summaryContent += `**Message:**\n${item.message || "No message provided"}\n\n`;
      summaryContent += "---\n\n";
    }

    // Write to step summary
    await core.summary.addRaw(summaryContent).write();
    core.info("š Code scanning alert creation preview written to step summary");
    return;
  }

  // Get the max configuration from environment variable.
  // parseInt is given an explicit radix so e.g. "010" parses as 10.
  const maxFindings = process.env.GH_AW_SECURITY_REPORT_MAX ? parseInt(process.env.GH_AW_SECURITY_REPORT_MAX, 10) : 0; // 0 means unlimited
  core.info(`Max findings configuration: ${maxFindings === 0 ? "unlimited" : maxFindings}`);

  // Get the driver configuration from environment variable
  const driverName = process.env.GH_AW_SECURITY_REPORT_DRIVER || "GitHub Agentic Workflows Security Scanner";
  core.info(`Driver name: ${driverName}`);

  // Get the workflow filename for rule ID prefix
  const workflowFilename = process.env.GH_AW_WORKFLOW_FILENAME || "workflow";
  core.info(`Workflow filename for rule ID prefix: ${workflowFilename}`);

  const validFindings = [];

  // Process each security item and validate the findings; invalid items are
  // logged and skipped rather than failing the whole step.
  for (let i = 0; i < securityItems.length; i++) {
    const securityItem = securityItems[i];
    core.info(
      `Processing create-code-scanning-alert item ${i + 1}/${securityItems.length}: file=${securityItem.file}, line=${securityItem.line}, severity=${securityItem.severity}, messageLength=${securityItem.message ? securityItem.message.length : "undefined"}, ruleIdSuffix=${securityItem.ruleIdSuffix || "not specified"}`
    );

    // Validate required fields
    if (!securityItem.file) {
      core.info('Missing required field "file" in code scanning alert item');
      continue;
    }

    if (!securityItem.line || (typeof securityItem.line !== "number" && typeof securityItem.line !== "string")) {
      core.info('Missing or invalid required field "line" in code scanning alert item');
      continue;
    }

    if (!securityItem.severity || typeof securityItem.severity !== "string") {
      core.info('Missing or invalid required field "severity" in code scanning alert item');
      continue;
    }

    if (!securityItem.message || typeof securityItem.message !== "string") {
      core.info('Missing or invalid required field "message" in code scanning alert item');
      continue;
    }

    // Parse line number (must be a positive integer)
    const line = parseInt(securityItem.line, 10);
    if (isNaN(line) || line <= 0) {
      core.info(`Invalid line number: ${securityItem.line}`);
      continue;
    }

    // Parse optional column number
    let column = 1; // Default to column 1
    if (securityItem.column !== undefined) {
      if (typeof securityItem.column !== "number" && typeof securityItem.column !== "string") {
        core.info('Invalid field "column" in code scanning alert item (must be number or string)');
        continue;
      }
      const parsedColumn = parseInt(securityItem.column, 10);
      if (isNaN(parsedColumn) || parsedColumn <= 0) {
        core.info(`Invalid column number: ${securityItem.column}`);
        continue;
      }
      column = parsedColumn;
    }

    // Parse optional rule ID suffix
    let ruleIdSuffix = null;
    if (securityItem.ruleIdSuffix !== undefined) {
      if (typeof securityItem.ruleIdSuffix !== "string") {
        core.info('Invalid field "ruleIdSuffix" in code scanning alert item (must be string)');
        continue;
      }
      // Validate that the suffix doesn't contain invalid characters
      const trimmedSuffix = securityItem.ruleIdSuffix.trim();
      if (trimmedSuffix.length === 0) {
        core.info('Invalid field "ruleIdSuffix" in code scanning alert item (cannot be empty)');
        continue;
      }
      // Check for characters that would be problematic in rule IDs
      if (!/^[a-zA-Z0-9_-]+$/.test(trimmedSuffix)) {
        core.info(`Invalid ruleIdSuffix "${trimmedSuffix}" (must contain only alphanumeric characters, hyphens, and underscores)`);
        continue;
      }
      ruleIdSuffix = trimmedSuffix;
    }

    // Validate severity level and map to SARIF level
    // ("info" and "note" both map to the SARIF "note" level).
    /** @type {Record<string, string>} */
    const severityMap = {
      error: "error",
      warning: "warning",
      info: "note",
      note: "note",
    };

    const normalizedSeverity = securityItem.severity.toLowerCase();
    if (!severityMap[normalizedSeverity]) {
      core.info(`Invalid severity level: ${securityItem.severity} (must be error, warning, info, or note)`);
      continue;
    }

    const sarifLevel = severityMap[normalizedSeverity];

    // Create a valid finding object
    validFindings.push({
      file: securityItem.file.trim(),
      line: line,
      column: column,
      severity: normalizedSeverity,
      sarifLevel: sarifLevel,
      message: securityItem.message.trim(),
      ruleIdSuffix: ruleIdSuffix,
    });

    // Check if we've reached the max limit
    if (maxFindings > 0 && validFindings.length >= maxFindings) {
      core.info(`Reached maximum findings limit: ${maxFindings}`);
      break;
    }
  }

  if (validFindings.length === 0) {
    core.info("No valid security findings to report");
    return;
  }

  core.info(`Processing ${validFindings.length} valid security finding(s)`);

  // Generate SARIF file (one run, one result per finding; rule IDs are
  // prefixed with the workflow filename for traceability).
  const sarifContent = {
    $schema: "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
    version: "2.1.0",
    runs: [
      {
        tool: {
          driver: {
            name: driverName,
            version: "1.0.0",
            informationUri: "https://github.com/githubnext/gh-aw",
          },
        },
        results: validFindings.map((finding, index) => ({
          ruleId: finding.ruleIdSuffix ? `${workflowFilename}-${finding.ruleIdSuffix}` : `${workflowFilename}-security-finding-${index + 1}`,
          message: { text: finding.message },
          level: finding.sarifLevel,
          locations: [
            {
              physicalLocation: {
                artifactLocation: { uri: finding.file },
                region: {
                  startLine: finding.line,
                  startColumn: finding.column,
                },
              },
            },
          ],
        })),
      },
    ],
  };

  // Write SARIF file to filesystem
  const fs = require("fs");
  const path = require("path");
  const sarifFileName = "code-scanning-alert.sarif";
  const sarifFilePath = path.join(process.cwd(), sarifFileName);

  try {
    fs.writeFileSync(sarifFilePath, JSON.stringify(sarifContent, null, 2));
    core.info(`ā Created SARIF file: ${sarifFilePath}`);
    core.info(`SARIF file size: ${fs.statSync(sarifFilePath).size} bytes`);

    // Set outputs for the GitHub Action
    core.setOutput("sarif_file", sarifFilePath);
    core.setOutput("findings_count", validFindings.length);
    core.setOutput("artifact_uploaded", "pending");
    core.setOutput("codeql_uploaded", "pending");

    // Write summary with findings
    let summaryContent = "\n\n## Code Scanning Alert\n";
    summaryContent += `Found **${validFindings.length}** security finding(s):\n\n`;

    for (const finding of validFindings) {
      const emoji = finding.severity === "error" ? "š“" : finding.severity === "warning" ? "š”" : "šµ";
      summaryContent += `${emoji} **${finding.severity.toUpperCase()}** in \`${finding.file}:${finding.line}\`: ${finding.message}\n`;
    }

    summaryContent += `\nš SARIF file created: \`${sarifFileName}\`\n`;
    summaryContent += `š Findings will be uploaded to GitHub Code Scanning\n`;

    await core.summary.addRaw(summaryContent).write();
  } catch (error) {
    core.error(`ā Failed to create SARIF file: ${error instanceof Error ? error.message : String(error)}`);
    throw error;
  }

  core.info(`Successfully created code scanning alert with ${validFindings.length} finding(s)`);
  // Return value is informational; the top-level `await main()` discards it.
  return {
    sarifFile: sarifFilePath,
    findingsCount: validFindings.length,
    findings: validFindings,
  };
}
await main();
diff --git a/actions/setup/js/create_discussion.cjs b/actions/setup/js/create_discussion.cjs
new file mode 100644
index 0000000000..9a9c0f5af8
--- /dev/null
+++ b/actions/setup/js/create_discussion.cjs
@@ -0,0 +1,345 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+const { closeOlderDiscussions } = require("./close_older_discussions.cjs");
+const { replaceTemporaryIdReferences, loadTemporaryIdMap } = require("./temporary_id.cjs");
+const { parseAllowedRepos, getDefaultTargetRepo, validateRepo, parseRepoSlug } = require("./repo_helpers.cjs");
+const { addExpirationComment } = require("./expiration_helpers.cjs");
+const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs");
+
/**
 * Look up a repository's GraphQL node ID together with its first 20
 * discussion categories.
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @returns {Promise<{repositoryId: string, discussionCategories: Array<{id: string, name: string, slug: string, description: string}>}|null>}
 *   Repository info, or null when the repository could not be resolved.
 */
async function fetchRepoDiscussionInfo(owner, repo) {
  const repositoryQuery = `
    query($owner: String!, $repo: String!) {
      repository(owner: $owner, name: $repo) {
        id
        discussionCategories(first: 20) {
          nodes {
            id
            name
            slug
            description
          }
        }
      }
    }
  `;
  const response = await github.graphql(repositoryQuery, { owner, repo });
  if (!response?.repository) {
    return null;
  }
  const { repository } = response;
  return {
    repositoryId: repository.id,
    discussionCategories: repository.discussionCategories.nodes || [],
  };
}
+
/**
 * Resolve which discussion category a new discussion should be created in.
 * The item-level category (if any) takes precedence over the workflow
 * config; the requested value is matched against category id, then name,
 * then slug. When nothing matches, the repository's first category is used
 * as a fallback.
 * @param {string} categoryConfig - Category ID, name, or slug from config
 * @param {string} itemCategory - Category from agent output item (optional)
 * @param {Array<{id: string, name: string, slug: string}>} categories - Available categories
 * @returns {{id: string, matchType: string, name: string, requestedCategory?: string}|undefined} Resolved category info
 */
function resolveCategoryId(categoryConfig, itemCategory, categories) {
  // The per-item category wins over the workflow-level configuration.
  const requested = itemCategory || categoryConfig;

  if (requested) {
    // Match by id first, then name, then slug — first hit wins.
    const matchers = [
      ["id", /** @param {{id: string}} cat */ cat => cat.id === requested],
      ["name", /** @param {{name: string}} cat */ cat => cat.name === requested],
      ["slug", /** @param {{slug: string}} cat */ cat => cat.slug === requested],
    ];
    for (const [matchType, predicate] of matchers) {
      const match = categories.find(predicate);
      if (match) {
        return { id: match.id, matchType, name: match.name };
      }
    }
  }

  // Nothing requested or nothing matched: default to the first category,
  // recording what was asked for so the caller can warn about it.
  if (categories.length > 0) {
    const [fallback] = categories;
    return {
      id: fallback.id,
      matchType: "fallback",
      name: fallback.name,
      requestedCategory: requested,
    };
  }

  // No categories exist at all.
  return undefined;
}
+
/**
 * Safe-output handler: creates GitHub discussions from `create_discussion`
 * items in the agent output. For each item it resolves the target repository
 * (validated against the allowed-repos list), resolves the discussion
 * category, rewrites temporary-ID references, appends tracker/expiration
 * footers, creates the discussion via GraphQL, and optionally closes older
 * matching discussions as outdated.
 *
 * Environment (all optional): GH_AW_SAFE_OUTPUTS_STAGED,
 * GH_AW_CLOSE_OLDER_DISCUSSIONS, GH_AW_DISCUSSION_TITLE_PREFIX,
 * GH_AW_DISCUSSION_CATEGORY, GH_AW_DISCUSSION_LABELS, GH_AW_WORKFLOW_NAME,
 * GH_AW_DISCUSSION_EXPIRES, GITHUB_SERVER_URL.
 *
 * Step outputs: `discussion_number` / `discussion_url` (initialized to "";
 * set from the LAST successfully created discussion).
 */
async function main() {
  // Initialize outputs to empty strings to ensure they're always set
  core.setOutput("discussion_number", "");
  core.setOutput("discussion_url", "");

  // Load the temporary ID map from create_issue job
  const temporaryIdMap = loadTemporaryIdMap();
  if (temporaryIdMap.size > 0) {
    core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`);
  }

  const result = loadAgentOutput();
  if (!result.success) {
    return;
  }

  const createDiscussionItems = result.items.filter(item => item.type === "create_discussion");
  if (createDiscussionItems.length === 0) {
    core.warning("No create-discussion items found in agent output");
    return;
  }
  core.info(`Found ${createDiscussionItems.length} create-discussion item(s)`);

  // Parse allowed repos and default target
  const allowedRepos = parseAllowedRepos();
  const defaultTargetRepo = getDefaultTargetRepo();
  core.info(`Default target repo: ${defaultTargetRepo}`);
  if (allowedRepos.size > 0) {
    core.info(`Allowed repos: ${Array.from(allowedRepos).join(", ")}`);
  }

  // Staged mode: write a preview to the step summary and exit without
  // creating anything.
  if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") {
    let summaryContent = "## š Staged Mode: Create Discussions Preview\n\n";
    summaryContent += "The following discussions would be created if staged mode was disabled:\n\n";
    for (let i = 0; i < createDiscussionItems.length; i++) {
      const item = createDiscussionItems[i];
      summaryContent += `### Discussion ${i + 1}\n`;
      summaryContent += `**Title:** ${item.title || "No title provided"}\n\n`;
      if (item.repo) {
        summaryContent += `**Repository:** ${item.repo}\n\n`;
      }
      if (item.body) {
        summaryContent += `**Body:**\n${item.body}\n\n`;
      }
      if (item.category) {
        summaryContent += `**Category:** ${item.category}\n\n`;
      }
      summaryContent += "---\n\n";
    }
    await core.summary.addRaw(summaryContent).write();
    core.info("š Discussion creation preview written to step summary");
    return;
  }

  // Cache for repository info to avoid redundant API calls when several
  // items target the same repository.
  // NOTE(review): original annotation was garbled; reconstructed from
  // fetchRepoDiscussionInfo's return type — confirm.
  /** @type {Map<string, {repositoryId: string, discussionCategories: Array<{id: string, name: string, slug: string, description: string}>}>} */
  const repoInfoCache = new Map();

  // Get configuration for close-older-discussions
  const closeOlderEnabled = process.env.GH_AW_CLOSE_OLDER_DISCUSSIONS === "true";
  const titlePrefix = process.env.GH_AW_DISCUSSION_TITLE_PREFIX || "";
  const configCategory = process.env.GH_AW_DISCUSSION_CATEGORY || "";
  const labelsEnvVar = process.env.GH_AW_DISCUSSION_LABELS || "";
  // Labels arrive as a comma-separated list; empty entries are dropped.
  const labels = labelsEnvVar
    ? labelsEnvVar
        .split(",")
        .map(l => l.trim())
        .filter(l => l.length > 0)
    : [];
  const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
  const runId = context.runId;
  const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
  // Prefer the payload's repository URL; fall back to building it from context.
  const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;

  const createdDiscussions = [];
  const closedDiscussionsSummary = [];

  for (let i = 0; i < createDiscussionItems.length; i++) {
    const createDiscussionItem = createDiscussionItems[i];

    // Determine target repository for this discussion
    const itemRepo = createDiscussionItem.repo ? String(createDiscussionItem.repo).trim() : defaultTargetRepo;

    // Validate the repository is allowed
    const repoValidation = validateRepo(itemRepo, defaultTargetRepo, allowedRepos);
    if (!repoValidation.valid) {
      core.warning(`Skipping discussion: ${repoValidation.error}`);
      continue;
    }

    // Parse the repository slug
    const repoParts = parseRepoSlug(itemRepo);
    if (!repoParts) {
      core.warning(`Skipping discussion: Invalid repository format '${itemRepo}'. Expected 'owner/repo'.`);
      continue;
    }

    // Get repository info (cached)
    let repoInfo = repoInfoCache.get(itemRepo);
    if (!repoInfo) {
      try {
        const fetchedInfo = await fetchRepoDiscussionInfo(repoParts.owner, repoParts.repo);
        if (!fetchedInfo) {
          core.warning(`Skipping discussion: Failed to fetch repository information for '${itemRepo}'`);
          continue;
        }
        repoInfo = fetchedInfo;
        repoInfoCache.set(itemRepo, repoInfo);
        core.info(`Fetched discussion categories for ${itemRepo}: ${JSON.stringify(repoInfo.discussionCategories.map(cat => ({ name: cat.name, id: cat.id })))}`);
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error);
        // "Not Found"-style errors usually mean discussions are disabled for
        // the repo — skip the item; anything else is rethrown to fail the step.
        if (errorMessage.includes("Not Found") || errorMessage.includes("not found") || errorMessage.includes("Could not resolve to a Repository")) {
          core.warning(`Skipping discussion: Discussions are not enabled for repository '${itemRepo}'`);
          continue;
        }
        core.error(`Failed to get discussion categories for ${itemRepo}: ${errorMessage}`);
        throw error;
      }
    }

    // Resolve category ID for this discussion
    const categoryInfo = resolveCategoryId(configCategory, createDiscussionItem.category, repoInfo.discussionCategories);
    if (!categoryInfo) {
      core.warning(`Skipping discussion in ${itemRepo}: No discussion category available`);
      continue;
    }

    // Log how the category was resolved
    if (categoryInfo.matchType === "name") {
      core.info(`Using category by name: ${categoryInfo.name} (${categoryInfo.id})`);
    } else if (categoryInfo.matchType === "slug") {
      core.info(`Using category by slug: ${categoryInfo.name} (${categoryInfo.id})`);
    } else if (categoryInfo.matchType === "fallback") {
      if (categoryInfo.requestedCategory) {
        const availableCategoryNames = repoInfo.discussionCategories.map(cat => cat.name).join(", ");
        core.warning(`Category "${categoryInfo.requestedCategory}" not found by ID, name, or slug. Available categories: ${availableCategoryNames}`);
        core.info(`Falling back to default category: ${categoryInfo.name} (${categoryInfo.id})`);
      } else {
        core.info(`Using default first category: ${categoryInfo.name} (${categoryInfo.id})`);
      }
    }

    const categoryId = categoryInfo.id;

    core.info(`Processing create-discussion item ${i + 1}/${createDiscussionItems.length}: title=${createDiscussionItem.title}, bodyLength=${createDiscussionItem.body?.length || 0}, repo=${itemRepo}`);

    // Replace temporary ID references in title
    let title = createDiscussionItem.title ? replaceTemporaryIdReferences(createDiscussionItem.title.trim(), temporaryIdMap, itemRepo) : "";
    // Replace temporary ID references in body (with defensive null check)
    const bodyText = createDiscussionItem.body || "";
    let processedBody = replaceTemporaryIdReferences(bodyText, temporaryIdMap, itemRepo);

    // Remove duplicate title from description if it starts with a header matching the title
    processedBody = removeDuplicateTitleFromDescription(title, processedBody);

    let bodyLines = processedBody.split("\n");
    if (!title) {
      // Fallback when no explicit title was provided.
      // NOTE(review): this uses the ENTIRE (ID-resolved) body text as the
      // title — confirm that is intended rather than, say, the first line.
      title = replaceTemporaryIdReferences(bodyText, temporaryIdMap, itemRepo) || "Agent Output";
    }
    if (titlePrefix && !title.startsWith(titlePrefix)) {
      title = titlePrefix + title;
    }

    // Add tracker-id comment if present
    const trackerIDComment = getTrackerID("markdown");
    if (trackerIDComment) {
      bodyLines.push(trackerIDComment);
    }

    // Add expiration comment if expires is set
    addExpirationComment(bodyLines, "GH_AW_DISCUSSION_EXPIRES", "Discussion");

    // Append the "AI generated by" attribution footer.
    bodyLines.push(``, ``, `> AI generated by [${workflowName}](${runUrl})`, "");
    const body = bodyLines.join("\n").trim();
    core.info(`Creating discussion in ${itemRepo} with title: ${title}`);
    core.info(`Category ID: ${categoryId}`);
    core.info(`Body length: ${body.length}`);
    try {
      const createDiscussionMutation = `
        mutation($repositoryId: ID!, $categoryId: ID!, $title: String!, $body: String!) {
          createDiscussion(input: {
            repositoryId: $repositoryId,
            categoryId: $categoryId,
            title: $title,
            body: $body
          }) {
            discussion {
              id
              number
              title
              url
            }
          }
        }
      `;
      const mutationResult = await github.graphql(createDiscussionMutation, {
        repositoryId: repoInfo.repositoryId,
        categoryId: categoryId,
        title: title,
        body: body,
      });
      const discussion = mutationResult.createDiscussion.discussion;
      if (!discussion) {
        core.error(`Failed to create discussion in ${itemRepo}: No discussion data returned`);
        continue;
      }
      core.info(`Created discussion ${itemRepo}#${discussion.number}: ${discussion.url}`);
      createdDiscussions.push({ ...discussion, _repo: itemRepo });
      // Step outputs reflect the most recently created discussion
      // (last item wins).
      if (i === createDiscussionItems.length - 1) {
        core.setOutput("discussion_number", discussion.number);
        core.setOutput("discussion_url", discussion.url);
      }

      // Close older discussions if enabled and title prefix or labels are set
      // Note: close-older-discussions only works within the same repository
      const hasMatchingCriteria = titlePrefix || labels.length > 0;
      if (closeOlderEnabled && hasMatchingCriteria) {
        core.info("close-older-discussions is enabled, searching for older discussions to close...");
        try {
          const closedDiscussions = await closeOlderDiscussions(github, repoParts.owner, repoParts.repo, titlePrefix, labels, categoryId, { number: discussion.number, url: discussion.url }, workflowName, runUrl);

          if (closedDiscussions.length > 0) {
            closedDiscussionsSummary.push(...closedDiscussions);
            core.info(`Closed ${closedDiscussions.length} older discussion(s) as outdated`);
          }
        } catch (closeError) {
          // Log error but don't fail the workflow - closing older discussions is a nice-to-have
          core.warning(`Failed to close older discussions: ${closeError instanceof Error ? closeError.message : String(closeError)}`);
        }
      } else if (closeOlderEnabled && !hasMatchingCriteria) {
        core.warning("close-older-discussions is enabled but no title-prefix or labels are set - skipping close older discussions");
      }
    } catch (error) {
      // Creation failures are fatal: rethrow so the step fails visibly.
      core.error(`ā Failed to create discussion "${title}" in ${itemRepo}: ${error instanceof Error ? error.message : String(error)}`);
      throw error;
    }
  }
  // Write a step summary listing created (and closed) discussions.
  if (createdDiscussions.length > 0) {
    let summaryContent = "\n\n## GitHub Discussions\n";
    for (const discussion of createdDiscussions) {
      const repoLabel = discussion._repo !== defaultTargetRepo ? ` (${discussion._repo})` : "";
      summaryContent += `- Discussion #${discussion.number}${repoLabel}: [${discussion.title}](${discussion.url})\n`;
    }

    // Add closed discussions to summary
    if (closedDiscussionsSummary.length > 0) {
      summaryContent += "\n### Closed Older Discussions\n";
      for (const closed of closedDiscussionsSummary) {
        summaryContent += `- Discussion #${closed.number}: [View](${closed.url}) (marked as outdated)\n`;
      }
    }

    await core.summary.addRaw(summaryContent).write();
  }
  core.info(`Successfully created ${createdDiscussions.length} discussion(s)`);
}
await main();
diff --git a/actions/setup/js/create_issue.cjs b/actions/setup/js/create_issue.cjs
new file mode 100644
index 0000000000..3852972602
--- /dev/null
+++ b/actions/setup/js/create_issue.cjs
@@ -0,0 +1,352 @@
+// @ts-check
+///
+
+const { sanitizeLabelContent } = require("./sanitize_label_content.cjs");
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateStagedPreview } = require("./staged_preview.cjs");
+const { generateFooter } = require("./generate_footer.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+const { generateTemporaryId, isTemporaryId, normalizeTemporaryId, replaceTemporaryIdReferences, serializeTemporaryIdMap } = require("./temporary_id.cjs");
+const { parseAllowedRepos, getDefaultTargetRepo, validateRepo, parseRepoSlug } = require("./repo_helpers.cjs");
+const { addExpirationComment } = require("./expiration_helpers.cjs");
+const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs");
+
+async function main() {
+ // Initialize outputs to empty strings to ensure they're always set
+ core.setOutput("issue_number", "");
+ core.setOutput("issue_url", "");
+ core.setOutput("temporary_id_map", "{}");
+ core.setOutput("issues_to_assign_copilot", "");
+
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+
+ const result = loadAgentOutput();
+ if (!result.success) {
+ return;
+ }
+
+ const createIssueItems = result.items.filter(item => item.type === "create_issue");
+ if (createIssueItems.length === 0) {
+ core.info("No create-issue items found in agent output");
+ return;
+ }
+ core.info(`Found ${createIssueItems.length} create-issue item(s)`);
+
+ // Parse allowed repos and default target
+ const allowedRepos = parseAllowedRepos();
+ const defaultTargetRepo = getDefaultTargetRepo();
+ core.info(`Default target repo: ${defaultTargetRepo}`);
+ if (allowedRepos.size > 0) {
+ core.info(`Allowed repos: ${Array.from(allowedRepos).join(", ")}`);
+ }
+
+ if (isStaged) {
+ await generateStagedPreview({
+ title: "Create Issues",
+ description: "The following issues would be created if staged mode was disabled:",
+ items: createIssueItems,
+ renderItem: (item, index) => {
+ let content = `#### Issue ${index + 1}\n`;
+ content += `**Title:** ${item.title || "No title provided"}\n\n`;
+ if (item.temporary_id) {
+ content += `**Temporary ID:** ${item.temporary_id}\n\n`;
+ }
+ if (item.repo) {
+ content += `**Repository:** ${item.repo}\n\n`;
+ }
+ if (item.body) {
+ content += `**Body:**\n${item.body}\n\n`;
+ }
+ if (item.labels && item.labels.length > 0) {
+ content += `**Labels:** ${item.labels.join(", ")}\n\n`;
+ }
+ if (item.parent) {
+ content += `**Parent:** ${item.parent}\n\n`;
+ }
+ return content;
+ },
+ });
+ return;
+ }
+ const parentIssueNumber = context.payload?.issue?.number;
+
+ // Map to track temporary_id -> {repo, number} relationships
+ /** @type {Map<string, {repo: string, number: number}>} */
+ const temporaryIdMap = new Map();
+
+ // Extract triggering context for footer generation
+ const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined;
+ const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined);
+ const triggeringDiscussionNumber = context.payload?.discussion?.number;
+
+ const labelsEnv = process.env.GH_AW_ISSUE_LABELS;
+ let envLabels = labelsEnv
+ ? labelsEnv
+ .split(",")
+ .map(label => label.trim())
+ .filter(label => label)
+ : [];
+ const createdIssues = [];
+ for (let i = 0; i < createIssueItems.length; i++) {
+ const createIssueItem = createIssueItems[i];
+
+ // Determine target repository for this issue
+ const itemRepo = createIssueItem.repo ? String(createIssueItem.repo).trim() : defaultTargetRepo;
+
+ // Validate the repository is allowed
+ const repoValidation = validateRepo(itemRepo, defaultTargetRepo, allowedRepos);
+ if (!repoValidation.valid) {
+ core.warning(`Skipping issue: ${repoValidation.error}`);
+ continue;
+ }
+
+ // Parse the repository slug
+ const repoParts = parseRepoSlug(itemRepo);
+ if (!repoParts) {
+ core.warning(`Skipping issue: Invalid repository format '${itemRepo}'. Expected 'owner/repo'.`);
+ continue;
+ }
+
+ // Get or generate the temporary ID for this issue
+ const temporaryId = createIssueItem.temporary_id || generateTemporaryId();
+ core.info(`Processing create-issue item ${i + 1}/${createIssueItems.length}: title=${createIssueItem.title}, bodyLength=${createIssueItem.body.length}, temporaryId=${temporaryId}, repo=${itemRepo}`);
+
+ // Debug logging for parent field
+ core.info(`Debug: createIssueItem.parent = ${JSON.stringify(createIssueItem.parent)}`);
+ core.info(`Debug: parentIssueNumber from context = ${JSON.stringify(parentIssueNumber)}`);
+
+ // Resolve parent: check if it's a temporary ID reference
+ let effectiveParentIssueNumber;
+ let effectiveParentRepo = itemRepo; // Default to same repo
+ if (createIssueItem.parent !== undefined) {
+ if (isTemporaryId(createIssueItem.parent)) {
+ // It's a temporary ID, look it up in the map
+ const resolvedParent = temporaryIdMap.get(normalizeTemporaryId(createIssueItem.parent));
+ if (resolvedParent !== undefined) {
+ effectiveParentIssueNumber = resolvedParent.number;
+ effectiveParentRepo = resolvedParent.repo;
+ core.info(`Resolved parent temporary ID '${createIssueItem.parent}' to ${effectiveParentRepo}#${effectiveParentIssueNumber}`);
+ } else {
+ core.warning(`Parent temporary ID '${createIssueItem.parent}' not found in map. Ensure parent issue is created before sub-issues.`);
+ effectiveParentIssueNumber = undefined;
+ }
+ } else {
+ // It's a real issue number
+ effectiveParentIssueNumber = parseInt(String(createIssueItem.parent), 10);
+ if (isNaN(effectiveParentIssueNumber)) {
+ core.warning(`Invalid parent value: ${createIssueItem.parent}`);
+ effectiveParentIssueNumber = undefined;
+ }
+ }
+ } else {
+ // Only use context parent if we're in the same repo as context
+ const contextRepo = `${context.repo.owner}/${context.repo.repo}`;
+ if (itemRepo === contextRepo) {
+ effectiveParentIssueNumber = parentIssueNumber;
+ }
+ }
+ core.info(`Debug: effectiveParentIssueNumber = ${JSON.stringify(effectiveParentIssueNumber)}, effectiveParentRepo = ${effectiveParentRepo}`);
+
+ if (effectiveParentIssueNumber && createIssueItem.parent !== undefined) {
+ core.info(`Using explicit parent issue number from item: ${effectiveParentRepo}#${effectiveParentIssueNumber}`);
+ }
+ let labels = [...envLabels];
+ if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) {
+ labels = [...labels, ...createIssueItem.labels];
+ }
+ labels = labels
+ .filter(label => !!label)
+ .map(label => String(label).trim())
+ .filter(label => label)
+ .map(label => sanitizeLabelContent(label))
+ .filter(label => label)
+ .map(label => (label.length > 64 ? label.substring(0, 64) : label))
+ .filter((label, index, arr) => arr.indexOf(label) === index);
+ let title = createIssueItem.title ? createIssueItem.title.trim() : "";
+
+ // Replace temporary ID references in the body using already-created issues
+ let processedBody = replaceTemporaryIdReferences(createIssueItem.body, temporaryIdMap, itemRepo);
+
+ // Remove duplicate title from description if it starts with a header matching the title
+ processedBody = removeDuplicateTitleFromDescription(title, processedBody);
+
+ let bodyLines = processedBody.split("\n");
+
+ if (!title) {
+ title = createIssueItem.body || "Agent Output";
+ }
+ const titlePrefix = process.env.GH_AW_ISSUE_TITLE_PREFIX;
+ if (titlePrefix && !title.startsWith(titlePrefix)) {
+ title = titlePrefix + title;
+ }
+ if (effectiveParentIssueNumber) {
+ core.info("Detected issue context, parent issue " + effectiveParentRepo + "#" + effectiveParentIssueNumber);
+ // Use full repo reference if cross-repo, short reference if same repo
+ if (effectiveParentRepo === itemRepo) {
+ bodyLines.push(`Related to #${effectiveParentIssueNumber}`);
+ } else {
+ bodyLines.push(`Related to ${effectiveParentRepo}#${effectiveParentIssueNumber}`);
+ }
+ }
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || "";
+ const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || "";
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ // Add tracker-id comment if present
+ const trackerIDComment = getTrackerID("markdown");
+ if (trackerIDComment) {
+ bodyLines.push(trackerIDComment);
+ }
+
+ // Add expiration comment if expires is set
+ addExpirationComment(bodyLines, "GH_AW_ISSUE_EXPIRES", "Issue");
+
+ bodyLines.push(``, ``, generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber).trimEnd(), "");
+ const body = bodyLines.join("\n").trim();
+ core.info(`Creating issue in ${itemRepo} with title: ${title}`);
+ core.info(`Labels: ${labels}`);
+ core.info(`Body length: ${body.length}`);
+ try {
+ const { data: issue } = await github.rest.issues.create({
+ owner: repoParts.owner,
+ repo: repoParts.repo,
+ title: title,
+ body: body,
+ labels: labels,
+ });
+ core.info(`Created issue ${itemRepo}#${issue.number}: ${issue.html_url}`);
+ createdIssues.push({ ...issue, _repo: itemRepo });
+
+ // Store the mapping of temporary_id -> {repo, number}
+ temporaryIdMap.set(normalizeTemporaryId(temporaryId), { repo: itemRepo, number: issue.number });
+ core.info(`Stored temporary ID mapping: ${temporaryId} -> ${itemRepo}#${issue.number}`);
+
+ // Debug logging for sub-issue linking
+ core.info(`Debug: About to check if sub-issue linking is needed. effectiveParentIssueNumber = ${effectiveParentIssueNumber}`);
+
+ // Sub-issue linking only works within the same repository
+ if (effectiveParentIssueNumber && effectiveParentRepo === itemRepo) {
+ core.info(`Attempting to link issue #${issue.number} as sub-issue of #${effectiveParentIssueNumber}`);
+ try {
+ // First, get the node IDs for both parent and child issues
+ core.info(`Fetching node ID for parent issue #${effectiveParentIssueNumber}...`);
+ const getIssueNodeIdQuery = `
+ query($owner: String!, $repo: String!, $issueNumber: Int!) {
+ repository(owner: $owner, name: $repo) {
+ issue(number: $issueNumber) {
+ id
+ }
+ }
+ }
+ `;
+
+ // Get parent issue node ID
+ const parentResult = await github.graphql(getIssueNodeIdQuery, {
+ owner: repoParts.owner,
+ repo: repoParts.repo,
+ issueNumber: effectiveParentIssueNumber,
+ });
+ const parentNodeId = parentResult.repository.issue.id;
+ core.info(`Parent issue node ID: ${parentNodeId}`);
+
+ // Get child issue node ID
+ core.info(`Fetching node ID for child issue #${issue.number}...`);
+ const childResult = await github.graphql(getIssueNodeIdQuery, {
+ owner: repoParts.owner,
+ repo: repoParts.repo,
+ issueNumber: issue.number,
+ });
+ const childNodeId = childResult.repository.issue.id;
+ core.info(`Child issue node ID: ${childNodeId}`);
+
+ // Link the child issue as a sub-issue of the parent
+ core.info(`Executing addSubIssue mutation...`);
+ const addSubIssueMutation = `
+ mutation($issueId: ID!, $subIssueId: ID!) {
+ addSubIssue(input: {
+ issueId: $issueId,
+ subIssueId: $subIssueId
+ }) {
+ subIssue {
+ id
+ number
+ }
+ }
+ }
+ `;
+
+ await github.graphql(addSubIssueMutation, {
+ issueId: parentNodeId,
+ subIssueId: childNodeId,
+ });
+
+ core.info("ā Successfully linked issue #" + issue.number + " as sub-issue of #" + effectiveParentIssueNumber);
+ } catch (error) {
+ core.info(`Warning: Could not link sub-issue to parent: ${error instanceof Error ? error.message : String(error)}`);
+ core.info(`Error details: ${error instanceof Error ? error.stack : String(error)}`);
+ // Fallback: add a comment if sub-issue linking fails
+ try {
+ core.info(`Attempting fallback: adding comment to parent issue #${effectiveParentIssueNumber}...`);
+ await github.rest.issues.createComment({
+ owner: repoParts.owner,
+ repo: repoParts.repo,
+ issue_number: effectiveParentIssueNumber,
+ body: `Created related issue: #${issue.number}`,
+ });
+ core.info("ā Added comment to parent issue #" + effectiveParentIssueNumber + " (sub-issue linking not available)");
+ } catch (commentError) {
+ core.info(`Warning: Could not add comment to parent issue: ${commentError instanceof Error ? commentError.message : String(commentError)}`);
+ }
+ }
+ } else if (effectiveParentIssueNumber && effectiveParentRepo !== itemRepo) {
+ core.info(`Skipping sub-issue linking: parent is in different repository (${effectiveParentRepo})`);
+ } else {
+ core.info(`Debug: No parent issue number set, skipping sub-issue linking`);
+ }
+ if (i === createIssueItems.length - 1) {
+ core.setOutput("issue_number", issue.number);
+ core.setOutput("issue_url", issue.html_url);
+ }
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ if (errorMessage.includes("Issues has been disabled in this repository")) {
+ core.info(`✗ Cannot create issue "${title}" in ${itemRepo}: Issues are disabled for this repository`);
+ core.info("Consider enabling issues in repository settings if you want to create issues automatically");
+ continue;
+ }
+ core.error(`✗ Failed to create issue "${title}" in ${itemRepo}: ${errorMessage}`);
+ throw error;
+ }
+ }
+ if (createdIssues.length > 0) {
+ let summaryContent = "\n\n## GitHub Issues\n";
+ for (const issue of createdIssues) {
+ const repoLabel = issue._repo !== defaultTargetRepo ? ` (${issue._repo})` : "";
+ summaryContent += `- Issue #${issue.number}${repoLabel}: [${issue.title}](${issue.html_url})\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ // Output the temporary ID map as JSON for use by downstream jobs
+ const tempIdMapOutput = serializeTemporaryIdMap(temporaryIdMap);
+ core.setOutput("temporary_id_map", tempIdMapOutput);
+ core.info(`Temporary ID map: ${tempIdMapOutput}`);
+
+ // Output issues that need copilot assignment for assign_to_agent job
+ // This is used when create-issue has assignees: [copilot]
+ const assignCopilot = process.env.GH_AW_ASSIGN_COPILOT === "true";
+ if (assignCopilot && createdIssues.length > 0) {
+ // Format: repo:number for each issue (for cross-repo support)
+ const issuesToAssign = createdIssues.map(issue => `${issue._repo}:${issue.number}`).join(",");
+ core.setOutput("issues_to_assign_copilot", issuesToAssign);
+ core.info(`Issues to assign copilot: ${issuesToAssign}`);
+ }
+
+ core.info(`Successfully created ${createdIssues.length} issue(s)`);
+}
+(async () => {
+ await main();
+})();
diff --git a/actions/setup/js/create_pr_review_comment.cjs b/actions/setup/js/create_pr_review_comment.cjs
new file mode 100644
index 0000000000..6dc42781ed
--- /dev/null
+++ b/actions/setup/js/create_pr_review_comment.cjs
@@ -0,0 +1,258 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateStagedPreview } = require("./staged_preview.cjs");
+const { generateFooter } = require("./generate_footer.cjs");
+const { getRepositoryUrl } = require("./get_repository_url.cjs");
+
+async function main() {
+ // Check if we're in staged mode
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+
+ const result = loadAgentOutput();
+ if (!result.success) {
+ return;
+ }
+
+ // Find all create-pr-review-comment items
+ const reviewCommentItems = result.items.filter(/** @param {any} item */ item => item.type === "create_pull_request_review_comment");
+ if (reviewCommentItems.length === 0) {
+ core.info("No create-pull-request-review-comment items found in agent output");
+ return;
+ }
+
+ core.info(`Found ${reviewCommentItems.length} create-pull-request-review-comment item(s)`);
+
+ // If in staged mode, emit step summary instead of creating review comments
+ if (isStaged) {
+ await generateStagedPreview({
+ title: "Create PR Review Comments",
+ description: "The following review comments would be created if staged mode was disabled:",
+ items: reviewCommentItems,
+ renderItem: (item, index) => {
+ let content = `#### Review Comment ${index + 1}\n`;
+ if (item.pull_request_number) {
+ const repoUrl = getRepositoryUrl();
+ const pullUrl = `${repoUrl}/pull/${item.pull_request_number}`;
+ content += `**Target PR:** [#${item.pull_request_number}](${pullUrl})\n\n`;
+ } else {
+ content += `**Target:** Current PR\n\n`;
+ }
+ content += `**File:** ${item.path || "No path provided"}\n\n`;
+ content += `**Line:** ${item.line || "No line provided"}\n\n`;
+ if (item.start_line) {
+ content += `**Start Line:** ${item.start_line}\n\n`;
+ }
+ content += `**Side:** ${item.side || "RIGHT"}\n\n`;
+ content += `**Body:**\n${item.body || "No content provided"}\n\n`;
+ return content;
+ },
+ });
+ return;
+ }
+
+ // Get the side configuration from environment variable
+ const defaultSide = process.env.GH_AW_PR_REVIEW_COMMENT_SIDE || "RIGHT";
+ core.info(`Default comment side configuration: ${defaultSide}`);
+
+ // Get the target configuration from environment variable
+ const commentTarget = process.env.GH_AW_PR_REVIEW_COMMENT_TARGET || "triggering";
+ core.info(`PR review comment target configuration: ${commentTarget}`);
+
+ // Check if we're in a pull request context, or an issue comment context on a PR
+ const isPRContext =
+ context.eventName === "pull_request" ||
+ context.eventName === "pull_request_review" ||
+ context.eventName === "pull_request_review_comment" ||
+ (context.eventName === "issue_comment" && context.payload.issue && context.payload.issue.pull_request);
+
+ // Validate context based on target configuration
+ if (commentTarget === "triggering" && !isPRContext) {
+ core.info('Target is "triggering" but not running in pull request context, skipping review comment creation');
+ return;
+ }
+
+ // Extract triggering context for footer generation
+ const triggeringIssueNumber = context.payload?.issue?.number && !context.payload?.issue?.pull_request ? context.payload.issue.number : undefined;
+ const triggeringPRNumber = context.payload?.pull_request?.number || (context.payload?.issue?.pull_request ? context.payload.issue.number : undefined);
+ const triggeringDiscussionNumber = context.payload?.discussion?.number;
+
+ const createdComments = [];
+
+ // Process each review comment item
+ for (let i = 0; i < reviewCommentItems.length; i++) {
+ const commentItem = reviewCommentItems[i];
+ core.info(
+ `Processing create-pull-request-review-comment item ${i + 1}/${reviewCommentItems.length}: bodyLength=${commentItem.body ? commentItem.body.length : "undefined"}, path=${commentItem.path}, line=${commentItem.line}, startLine=${commentItem.start_line}`
+ );
+
+ // Validate required fields
+ if (!commentItem.path) {
+ core.info('Missing required field "path" in review comment item');
+ continue;
+ }
+
+ if (!commentItem.line || (typeof commentItem.line !== "number" && typeof commentItem.line !== "string")) {
+ core.info('Missing or invalid required field "line" in review comment item');
+ continue;
+ }
+
+ if (!commentItem.body || typeof commentItem.body !== "string") {
+ core.info('Missing or invalid required field "body" in review comment item');
+ continue;
+ }
+
+ // Determine the PR number for this review comment
+ let pullRequestNumber;
+ let pullRequest;
+
+ if (commentTarget === "*") {
+ // For target "*", we need an explicit PR number from the comment item
+ if (commentItem.pull_request_number) {
+ pullRequestNumber = parseInt(commentItem.pull_request_number, 10);
+ if (isNaN(pullRequestNumber) || pullRequestNumber <= 0) {
+ core.info(`Invalid pull request number specified: ${commentItem.pull_request_number}`);
+ continue;
+ }
+ } else {
+ core.info('Target is "*" but no pull_request_number specified in comment item');
+ continue;
+ }
+ } else if (commentTarget && commentTarget !== "triggering") {
+ // Explicit PR number specified in target
+ pullRequestNumber = parseInt(commentTarget, 10);
+ if (isNaN(pullRequestNumber) || pullRequestNumber <= 0) {
+ core.info(`Invalid pull request number in target configuration: ${commentTarget}`);
+ continue;
+ }
+ } else {
+ // Default behavior: use triggering PR
+ if (context.payload.pull_request) {
+ pullRequestNumber = context.payload.pull_request.number;
+ pullRequest = context.payload.pull_request;
+ } else if (context.payload.issue && context.payload.issue.pull_request) {
+ pullRequestNumber = context.payload.issue.number;
+ } else {
+ core.info("Pull request context detected but no pull request found in payload");
+ continue;
+ }
+ }
+
+ if (!pullRequestNumber) {
+ core.info("Could not determine pull request number");
+ continue;
+ }
+
+ // If we don't have the full PR details yet, fetch them
+ if (!pullRequest || !pullRequest.head || !pullRequest.head.sha) {
+ try {
+ const { data: fullPR } = await github.rest.pulls.get({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ pull_number: pullRequestNumber,
+ });
+ pullRequest = fullPR;
+ core.info(`Fetched full pull request details for PR #${pullRequestNumber}`);
+ } catch (error) {
+ core.info(`Failed to fetch pull request details for PR #${pullRequestNumber}: ${error instanceof Error ? error.message : String(error)}`);
+ continue;
+ }
+ }
+
+ // Check if we have the commit SHA needed for creating review comments
+ if (!pullRequest || !pullRequest.head || !pullRequest.head.sha) {
+ core.info(`Pull request head commit SHA not found for PR #${pullRequestNumber} - cannot create review comment`);
+ continue;
+ }
+
+ core.info(`Creating review comment on PR #${pullRequestNumber}`);
+
+ // Parse line numbers
+ const line = parseInt(commentItem.line, 10);
+ if (isNaN(line) || line <= 0) {
+ core.info(`Invalid line number: ${commentItem.line}`);
+ continue;
+ }
+
+ let startLine = undefined;
+ if (commentItem.start_line) {
+ startLine = parseInt(commentItem.start_line, 10);
+ if (isNaN(startLine) || startLine <= 0 || startLine > line) {
+ core.info(`Invalid start_line number: ${commentItem.start_line} (must be <= line: ${line})`);
+ continue;
+ }
+ }
+
+ // Determine side (LEFT or RIGHT)
+ const side = commentItem.side || defaultSide;
+ if (side !== "LEFT" && side !== "RIGHT") {
+ core.info(`Invalid side value: ${side} (must be LEFT or RIGHT)`);
+ continue;
+ }
+
+ // Extract body from the JSON item
+ let body = commentItem.body.trim();
+
+ // Add AI disclaimer with workflow name and run url
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const workflowSource = process.env.GH_AW_WORKFLOW_SOURCE || "";
+ const workflowSourceURL = process.env.GH_AW_WORKFLOW_SOURCE_URL || "";
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+ body += generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber);
+
+ core.info(`Creating review comment on PR #${pullRequestNumber} at ${commentItem.path}:${line}${startLine ? ` (lines ${startLine}-${line})` : ""} [${side}]`);
+ core.info(`Comment content length: ${body.length}`);
+
+ try {
+ // Prepare the request parameters
+ /** @type {any} */
+ const requestParams = {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ pull_number: pullRequestNumber,
+ body: body,
+ path: commentItem.path,
+ commit_id: pullRequest && pullRequest.head ? pullRequest.head.sha : "", // Required for creating review comments
+ line: line,
+ side: side,
+ };
+
+ // Add start_line for multi-line comments
+ if (startLine !== undefined) {
+ requestParams.start_line = startLine;
+ requestParams.start_side = side; // start_side should match side for consistency
+ }
+
+ // Create the review comment using GitHub API
+ const { data: comment } = await github.rest.pulls.createReviewComment(requestParams);
+
+ core.info("Created review comment #" + comment.id + ": " + comment.html_url);
+ createdComments.push(comment);
+
+ // Set output for the last created comment (for backward compatibility)
+ if (i === reviewCommentItems.length - 1) {
+ core.setOutput("review_comment_id", comment.id);
+ core.setOutput("review_comment_url", comment.html_url);
+ }
+ } catch (error) {
+ core.error(`✗ Failed to create review comment: ${error instanceof Error ? error.message : String(error)}`);
+ throw error;
+ }
+ }
+
+ // Write summary for all created comments
+ if (createdComments.length > 0) {
+ let summaryContent = "\n\n## GitHub PR Review Comments\n";
+ for (const comment of createdComments) {
+ summaryContent += `- Review Comment #${comment.id}: [View Comment](${comment.html_url})\n`;
+ }
+ await core.summary.addRaw(summaryContent).write();
+ }
+
+ core.info(`Successfully created ${createdComments.length} review comment(s)`);
+ return createdComments;
+}
+await main();
diff --git a/actions/setup/js/create_pull_request.cjs b/actions/setup/js/create_pull_request.cjs
new file mode 100644
index 0000000000..5f9fc75da3
--- /dev/null
+++ b/actions/setup/js/create_pull_request.cjs
@@ -0,0 +1,683 @@
+// @ts-check
+///
+
+/** @type {typeof import("fs")} */
+const fs = require("fs");
+/** @type {typeof import("crypto")} */
+const crypto = require("crypto");
+const { updateActivationComment } = require("./update_activation_comment.cjs");
+const { getTrackerID } = require("./get_tracker_id.cjs");
+const { addExpirationComment } = require("./expiration_helpers.cjs");
+const { removeDuplicateTitleFromDescription } = require("./remove_duplicate_title.cjs");
+
+/**
+ * Generate a patch preview with max 500 lines and 2000 chars for issue body
+ * @param {string} patchContent - The full patch content
+ * @returns {string} Formatted patch preview
+ */
+function generatePatchPreview(patchContent) {
+ if (!patchContent || !patchContent.trim()) {
+ return "";
+ }
+
+ const lines = patchContent.split("\n");
+ const maxLines = 500;
+ const maxChars = 2000;
+
+ // Apply line limit first
+ let preview = lines.length <= maxLines ? patchContent : lines.slice(0, maxLines).join("\n");
+ const lineTruncated = lines.length > maxLines;
+
+ // Apply character limit
+ const charTruncated = preview.length > maxChars;
+ if (charTruncated) {
+ preview = preview.slice(0, maxChars);
+ }
+
+ const truncated = lineTruncated || charTruncated;
+ const summary = truncated ? `Show patch preview (${Math.min(maxLines, lines.length)} of ${lines.length} lines)` : `Show patch (${lines.length} lines)`;
+
+ return `\n\n<details>\n<summary>${summary}</summary>\n\n\`\`\`diff\n${preview}${truncated ? "\n... (truncated)" : ""}\n\`\`\`\n\n</details>`;
+}
+
+async function main() {
+ // Initialize outputs to empty strings to ensure they're always set
+ core.setOutput("pull_request_number", "");
+ core.setOutput("pull_request_url", "");
+ core.setOutput("issue_number", "");
+ core.setOutput("issue_url", "");
+ core.setOutput("branch_name", "");
+ core.setOutput("fallback_used", "");
+
+ // Check if we're in staged mode
+ const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";
+
+ // Environment validation - fail early if required variables are missing
+ const workflowId = process.env.GH_AW_WORKFLOW_ID;
+ if (!workflowId) {
+ throw new Error("GH_AW_WORKFLOW_ID environment variable is required");
+ }
+
+ const baseBranch = process.env.GH_AW_BASE_BRANCH;
+ if (!baseBranch) {
+ throw new Error("GH_AW_BASE_BRANCH environment variable is required");
+ }
+
+ const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT || "";
+
+ // Read agent output from file
+ let outputContent = "";
+ if (agentOutputFile.trim() !== "") {
+ try {
+ outputContent = fs.readFileSync(agentOutputFile, "utf8");
+ } catch (error) {
+ core.setFailed(`Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`);
+ return;
+ }
+ }
+
+ if (outputContent.trim() === "") {
+ core.info("Agent output content is empty");
+ }
+
+ const ifNoChanges = process.env.GH_AW_PR_IF_NO_CHANGES || "warn";
+ const allowEmpty = (process.env.GH_AW_PR_ALLOW_EMPTY || "false").toLowerCase() === "true";
+
+ // Check if patch file exists and has valid content
+ if (!fs.existsSync("/tmp/gh-aw/aw.patch")) {
+ // If allow-empty is enabled, we can proceed without a patch file
+ if (allowEmpty) {
+ core.info("No patch file found, but allow-empty is enabled - will create empty PR");
+ } else {
+ const message = "No patch file found - cannot create pull request without changes";
+
+ // If in staged mode, still show preview
+ if (isStaged) {
+ let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n";
+ summaryContent += "The following pull request would be created if staged mode was disabled:\n\n";
+ summaryContent += `**Status:** ⚠️ No patch file found\n\n`;
+ summaryContent += `**Message:** ${message}\n\n`;
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+ core.info("📝 Pull request creation preview written to step summary (no patch file)");
+ return;
+ }
+
+ switch (ifNoChanges) {
+ case "error":
+ throw new Error(message);
+ case "ignore":
+ // Silent success - no console output
+ return;
+ case "warn":
+ default:
+ core.warning(message);
+ return;
+ }
+ }
+ }
+
+ let patchContent = "";
+ let isEmpty = true;
+
+ if (fs.existsSync("/tmp/gh-aw/aw.patch")) {
+ patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8");
+ isEmpty = !patchContent || !patchContent.trim();
+ }
+
+ // Check for actual error conditions (but allow empty patches as valid noop)
+ if (patchContent.includes("Failed to generate patch")) {
+ // If allow-empty is enabled, ignore patch errors and proceed
+ if (allowEmpty) {
+ core.info("Patch file contains error, but allow-empty is enabled - will create empty PR");
+ patchContent = "";
+ isEmpty = true;
+ } else {
+ const message = "Patch file contains error message - cannot create pull request without changes";
+
+ // If in staged mode, still show preview
+ if (isStaged) {
+ let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n";
+ summaryContent += "The following pull request would be created if staged mode was disabled:\n\n";
+ summaryContent += `**Status:** ⚠️ Patch file contains error\n\n`;
+ summaryContent += `**Message:** ${message}\n\n`;
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+ core.info("📝 Pull request creation preview written to step summary (patch error)");
+ return;
+ }
+
+ switch (ifNoChanges) {
+ case "error":
+ throw new Error(message);
+ case "ignore":
+ // Silent success - no console output
+ return;
+ case "warn":
+ default:
+ core.warning(message);
+ return;
+ }
+ }
+ }
+
+ // Validate patch size (unless empty)
+ if (!isEmpty) {
+ // Get maximum patch size from environment (default: 1MB = 1024 KB)
+ const maxSizeKb = parseInt(process.env.GH_AW_MAX_PATCH_SIZE || "1024", 10);
+ const patchSizeBytes = Buffer.byteLength(patchContent, "utf8");
+ const patchSizeKb = Math.ceil(patchSizeBytes / 1024);
+
+ core.info(`Patch size: ${patchSizeKb} KB (maximum allowed: ${maxSizeKb} KB)`);
+
+ if (patchSizeKb > maxSizeKb) {
+ const message = `Patch size (${patchSizeKb} KB) exceeds maximum allowed size (${maxSizeKb} KB)`;
+
+ // If in staged mode, still show preview with error
+ if (isStaged) {
+ let summaryContent = "## 🎭 Staged Mode: Create Pull Request Preview\n\n";
+ summaryContent += "The following pull request would be created if staged mode was disabled:\n\n";
+ summaryContent += `**Status:** ✗ Patch size exceeded\n\n`;
+ summaryContent += `**Message:** ${message}\n\n`;
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+ core.info("📝 Pull request creation preview written to step summary (patch size error)");
+ return;
+ }
+
+ throw new Error(message);
+ }
+
+ core.info("Patch size validation passed");
+ }
+
+ if (isEmpty && !isStaged && !allowEmpty) {
+ const message = "Patch file is empty - no changes to apply (noop operation)";
+
+ switch (ifNoChanges) {
+ case "error":
+ throw new Error("No changes to push - failing as configured by if-no-changes: error");
+ case "ignore":
+ // Silent success - no console output
+ return;
+ case "warn":
+ default:
+ core.warning(message);
+ return;
+ }
+ }
+
+ core.info(`Agent output content length: ${outputContent.length}`);
+ if (!isEmpty) {
+ core.info("Patch content validation passed");
+ } else if (allowEmpty) {
+ core.info("Patch file is empty - processing empty PR creation (allow-empty is enabled)");
+ } else {
+ core.info("Patch file is empty - processing noop operation");
+ }
+
+ // Parse the validated output JSON
+ let validatedOutput;
+ try {
+ validatedOutput = JSON.parse(outputContent);
+ } catch (error) {
+ core.setFailed(`Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`);
+ return;
+ }
+
+ if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) {
+ core.warning("No valid items found in agent output");
+ return;
+ }
+
+ // Find the create-pull-request item
+ const pullRequestItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === "create_pull_request");
+ if (!pullRequestItem) {
+ core.warning("No create-pull-request item found in agent output");
+ return;
+ }
+
+ core.info(`Found create-pull-request item: title="${pullRequestItem.title}", bodyLength=${pullRequestItem.body.length}`);
+
+ // If in staged mode, emit step summary instead of creating PR
+ if (isStaged) {
+ let summaryContent = "## š Staged Mode: Create Pull Request Preview\n\n";
+ summaryContent += "The following pull request would be created if staged mode was disabled:\n\n";
+
+ summaryContent += `**Title:** ${pullRequestItem.title || "No title provided"}\n\n`;
+ summaryContent += `**Branch:** ${pullRequestItem.branch || "auto-generated"}\n\n`;
+ summaryContent += `**Base:** ${baseBranch}\n\n`;
+
+ if (pullRequestItem.body) {
+ summaryContent += `**Body:**\n${pullRequestItem.body}\n\n`;
+ }
+
+ if (fs.existsSync("/tmp/gh-aw/aw.patch")) {
+ const patchStats = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8");
+ if (patchStats.trim()) {
+ summaryContent += `**Changes:** Patch file exists with ${patchStats.split("\n").length} lines\n\n`;
+ summaryContent += `Show patch preview
\n\n\`\`\`diff\n${patchStats.slice(0, 2000)}${patchStats.length > 2000 ? "\n... (truncated)" : ""}\n\`\`\`\n\n \n\n`;
+ } else {
+ summaryContent += `**Changes:** No changes (empty patch)\n\n`;
+ }
+ }
+
+ // Write to step summary
+ await core.summary.addRaw(summaryContent).write();
+ core.info("š Pull request creation preview written to step summary");
+ return;
+ }
+
+ // Extract title, body, and branch from the JSON item
+ let title = pullRequestItem.title.trim();
+ let processedBody = pullRequestItem.body;
+
+ // Remove duplicate title from description if it starts with a header matching the title
+ processedBody = removeDuplicateTitleFromDescription(title, processedBody);
+
+ let bodyLines = processedBody.split("\n");
+ let branchName = pullRequestItem.branch ? pullRequestItem.branch.trim() : null;
+
+ // If no title was found, use a default
+ if (!title) {
+ title = "Agent Output";
+ }
+
+ // Apply title prefix if provided via environment variable
+ const titlePrefix = process.env.GH_AW_PR_TITLE_PREFIX;
+ if (titlePrefix && !title.startsWith(titlePrefix)) {
+ title = titlePrefix + title;
+ }
+
+ // Add AI disclaimer with workflow name and run url
+ const workflowName = process.env.GH_AW_WORKFLOW_NAME || "Workflow";
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ // Add fingerprint comment if present
+ const trackerIDComment = getTrackerID("markdown");
+ if (trackerIDComment) {
+ bodyLines.push(trackerIDComment);
+ }
+
+ // Add expiration comment if expires is set (only for same-repo PRs)
+ addExpirationComment(bodyLines, "GH_AW_PR_EXPIRES", "Pull Request");
+
+ bodyLines.push(``, ``, `> AI generated by [${workflowName}](${runUrl})`, "");
+
+ // Prepare the body content
+ const body = bodyLines.join("\n").trim();
+
+ // Parse labels from environment variable (comma-separated string)
+ const labelsEnv = process.env.GH_AW_PR_LABELS;
+ const labels = labelsEnv
+ ? labelsEnv
+ .split(",")
+ .map(/** @param {string} label */ label => label.trim())
+ .filter(/** @param {string} label */ label => label)
+ : [];
+
+ // Parse draft setting from environment variable (defaults to true)
+ const draftEnv = process.env.GH_AW_PR_DRAFT;
+ const draft = draftEnv ? draftEnv.toLowerCase() === "true" : true;
+
+ core.info(`Creating pull request with title: ${title}`);
+ core.info(`Labels: ${JSON.stringify(labels)}`);
+ core.info(`Draft: ${draft}`);
+ core.info(`Body length: ${body.length}`);
+
+ const randomHex = crypto.randomBytes(8).toString("hex");
+ // Use branch name from JSONL if provided, otherwise generate unique branch name
+ if (!branchName) {
+ core.info("No branch name provided in JSONL, generating unique branch name");
+ // Generate unique branch name using cryptographic random hex
+ branchName = `${workflowId}-${randomHex}`;
+ } else {
+ branchName = `${branchName}-${randomHex}`;
+ core.info(`Using branch name from JSONL with added salt: ${branchName}`);
+ }
+
+ core.info(`Generated branch name: ${branchName}`);
+ core.info(`Base branch: ${baseBranch}`);
+
+ // Create a new branch using git CLI, ensuring it's based on the correct base branch
+
+ // First, fetch the base branch specifically (since we use shallow checkout)
+ core.info(`Fetching base branch: ${baseBranch}`);
+
+ // Fetch without creating/updating local branch to avoid conflicts with current branch
+ // This works even when we're already on the base branch
+ await exec.exec(`git fetch origin ${baseBranch}`);
+
+ // Checkout the base branch (using origin/${baseBranch} if local doesn't exist)
+ try {
+ await exec.exec(`git checkout ${baseBranch}`);
+ } catch (checkoutError) {
+ // If local branch doesn't exist, create it from origin
+ core.info(`Local branch ${baseBranch} doesn't exist, creating from origin/${baseBranch}`);
+ await exec.exec(`git checkout -b ${baseBranch} origin/${baseBranch}`);
+ }
+
+ // Handle branch creation/checkout
+ core.info(`Branch should not exist locally, creating new branch from base: ${branchName}`);
+ await exec.exec(`git checkout -b ${branchName}`);
+ core.info(`Created new branch from base: ${branchName}`);
+
+ // Apply the patch using git CLI (skip if empty)
+ if (!isEmpty) {
+ core.info("Applying patch...");
+
+ // Log first 500 lines of patch for debugging
+ const patchLines = patchContent.split("\n");
+ const previewLineCount = Math.min(500, patchLines.length);
+ core.info(`Patch preview (first ${previewLineCount} of ${patchLines.length} lines):`);
+ for (let i = 0; i < previewLineCount; i++) {
+ core.info(patchLines[i]);
+ }
+
+ // Patches are created with git format-patch, so use git am to apply them
+ try {
+ await exec.exec("git am /tmp/gh-aw/aw.patch");
+ core.info("Patch applied successfully");
+ } catch (patchError) {
+ core.error(`Failed to apply patch: ${patchError instanceof Error ? patchError.message : String(patchError)}`);
+
+ // Investigate why the patch failed by logging git status and the failed patch
+ try {
+ core.info("Investigating patch failure...");
+
+ // Log git status to see the current state
+ const statusResult = await exec.getExecOutput("git", ["status"]);
+ core.info("Git status output:");
+ core.info(statusResult.stdout);
+
+ // Log the failed patch diff
+ const patchResult = await exec.getExecOutput("git", ["am", "--show-current-patch=diff"]);
+ core.info("Failed patch content:");
+ core.info(patchResult.stdout);
+ } catch (investigateError) {
+ core.warning(`Failed to investigate patch failure: ${investigateError instanceof Error ? investigateError.message : String(investigateError)}`);
+ }
+
+ core.setFailed("Failed to apply patch");
+ return;
+ }
+
+ // Push the applied commits to the branch (with fallback to issue creation on failure)
+ try {
+ // Check if remote branch already exists (optional precheck)
+ let remoteBranchExists = false;
+ try {
+ const { stdout } = await exec.getExecOutput(`git ls-remote --heads origin ${branchName}`);
+ if (stdout.trim()) {
+ remoteBranchExists = true;
+ }
+ } catch (checkError) {
+ core.info(`Remote branch check failed (non-fatal): ${checkError instanceof Error ? checkError.message : String(checkError)}`);
+ }
+
+ if (remoteBranchExists) {
+ core.warning(`Remote branch ${branchName} already exists - appending random suffix`);
+ const extraHex = crypto.randomBytes(4).toString("hex");
+ const oldBranch = branchName;
+ branchName = `${branchName}-${extraHex}`;
+ // Rename local branch
+ await exec.exec(`git branch -m ${oldBranch} ${branchName}`);
+ core.info(`Renamed branch to ${branchName}`);
+ }
+
+ await exec.exec(`git push origin ${branchName}`);
+ core.info("Changes pushed to branch");
+ } catch (pushError) {
+ // Push failed - create fallback issue instead of PR
+ core.error(`Git push failed: ${pushError instanceof Error ? pushError.message : String(pushError)}`);
+ core.warning("Git push operation failed - creating fallback issue instead of pull request");
+
+ const runId = context.runId;
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
+
+ // Read patch content for preview
+ let patchPreview = "";
+ if (fs.existsSync("/tmp/gh-aw/aw.patch")) {
+ const patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8");
+ patchPreview = generatePatchPreview(patchContent);
+ }
+
+ const fallbackBody = `${body}
+
+---
+
+> [!NOTE]
+> This was originally intended as a pull request, but the git push operation failed.
+>
+> **Workflow Run:** [View run details and download patch artifact](${runUrl})
+>
+> The patch file is available as an artifact (\`aw.patch\`) in the workflow run linked above.
+
+To apply the patch locally:
+
+\`\`\`sh
+# Download the artifact from the workflow run ${runUrl}
+# (Use GitHub MCP tools if gh CLI is not available)
+gh run download ${runId} -n aw.patch
+
+# Apply the patch
+git am aw.patch
+\`\`\`
+${patchPreview}`;
+
+ try {
+ const { data: issue } = await github.rest.issues.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ title: title,
+ body: fallbackBody,
+ labels: labels,
+ });
+
+ core.info(`Created fallback issue #${issue.number}: ${issue.html_url}`);
+
+ // Update the activation comment with issue link (if a comment was created)
+ await updateActivationComment(github, context, core, issue.html_url, issue.number, "issue");
+
+ // Set outputs for push failure fallback
+ core.setOutput("issue_number", issue.number);
+ core.setOutput("issue_url", issue.html_url);
+ core.setOutput("branch_name", branchName);
+ core.setOutput("fallback_used", "true");
+ core.setOutput("push_failed", "true");
+
+ // Write summary to GitHub Actions summary
+ await core.summary
+ .addRaw(
+ `
+
+## Push Failure Fallback
+- **Push Error:** ${pushError instanceof Error ? pushError.message : String(pushError)}
+- **Fallback Issue:** [#${issue.number}](${issue.html_url})
+- **Patch Artifact:** Available in workflow run artifacts
+- **Note:** Push failed, created issue as fallback
+`
+ )
+ .write();
+
+ return;
+ } catch (issueError) {
+ core.setFailed(
+ `Failed to push and failed to create fallback issue. Push error: ${pushError instanceof Error ? pushError.message : String(pushError)}. Issue error: ${issueError instanceof Error ? issueError.message : String(issueError)}`
+ );
+ return;
+ }
+ }
+ } else {
+ core.info("Skipping patch application (empty patch)");
+
+ // For empty patches with allow-empty, we still need to push the branch
+ if (allowEmpty) {
+ core.info("allow-empty is enabled - will create branch and push with empty commit");
+ // Push the branch with an empty commit to allow PR creation
+ try {
+ // Create an empty commit to ensure there's a commit difference
+ await exec.exec(`git commit --allow-empty -m "Initialize"`);
+ core.info("Created empty commit");
+
+ // Check if remote branch already exists (optional precheck)
+ let remoteBranchExists = false;
+ try {
+ const { stdout } = await exec.getExecOutput(`git ls-remote --heads origin ${branchName}`);
+ if (stdout.trim()) {
+ remoteBranchExists = true;
+ }
+ } catch (checkError) {
+ core.info(`Remote branch check failed (non-fatal): ${checkError instanceof Error ? checkError.message : String(checkError)}`);
+ }
+
+ if (remoteBranchExists) {
+ core.warning(`Remote branch ${branchName} already exists - appending random suffix`);
+ const extraHex = crypto.randomBytes(4).toString("hex");
+ const oldBranch = branchName;
+ branchName = `${branchName}-${extraHex}`;
+ // Rename local branch
+ await exec.exec(`git branch -m ${oldBranch} ${branchName}`);
+ core.info(`Renamed branch to ${branchName}`);
+ }
+
+ await exec.exec(`git push origin ${branchName}`);
+ core.info("Empty branch pushed successfully");
+ } catch (pushError) {
+ core.setFailed(`Failed to push empty branch: ${pushError instanceof Error ? pushError.message : String(pushError)}`);
+ return;
+ }
+ } else {
+ // For empty patches without allow-empty, handle if-no-changes configuration
+ const message = "No changes to apply - noop operation completed successfully";
+
+ switch (ifNoChanges) {
+ case "error":
+ throw new Error("No changes to apply - failing as configured by if-no-changes: error");
+ case "ignore":
+ // Silent success - no console output
+ return;
+ case "warn":
+ default:
+ core.warning(message);
+ return;
+ }
+ }
+ }
+
+ // Try to create the pull request, with fallback to issue creation
+ try {
+ const { data: pullRequest } = await github.rest.pulls.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ title: title,
+ body: body,
+ head: branchName,
+ base: baseBranch,
+ draft: draft,
+ });
+
+ core.info(`Created pull request #${pullRequest.number}: ${pullRequest.html_url}`);
+
+ // Add labels if specified
+ if (labels.length > 0) {
+ await github.rest.issues.addLabels({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: pullRequest.number,
+ labels: labels,
+ });
+ core.info(`Added labels to pull request: ${JSON.stringify(labels)}`);
+ }
+
+ // Set output for other jobs to use
+ core.setOutput("pull_request_number", pullRequest.number);
+ core.setOutput("pull_request_url", pullRequest.html_url);
+ core.setOutput("branch_name", branchName);
+
+ // Update the activation comment with PR link (if a comment was created)
+ await updateActivationComment(github, context, core, pullRequest.html_url, pullRequest.number);
+
+ // Write summary to GitHub Actions summary
+ await core.summary
+ .addRaw(
+ `
+
+## Pull Request
+- **Pull Request**: [#${pullRequest.number}](${pullRequest.html_url})
+- **Branch**: \`${branchName}\`
+- **Base Branch**: \`${baseBranch}\`
+`
+ )
+ .write();
+ } catch (prError) {
+ core.warning(`Failed to create pull request: ${prError instanceof Error ? prError.message : String(prError)}`);
+ core.info("Falling back to creating an issue instead");
+
+ // Create issue as fallback with enhanced body content
+ const githubServer = process.env.GITHUB_SERVER_URL || "https://github.com";
+ const branchUrl = context.payload.repository ? `${context.payload.repository.html_url}/tree/${branchName}` : `${githubServer}/${context.repo.owner}/${context.repo.repo}/tree/${branchName}`;
+
+ // Read patch content for preview
+ let patchPreview = "";
+ if (fs.existsSync("/tmp/gh-aw/aw.patch")) {
+ const patchContent = fs.readFileSync("/tmp/gh-aw/aw.patch", "utf8");
+ patchPreview = generatePatchPreview(patchContent);
+ }
+
+ const fallbackBody = `${body}
+
+---
+
+**Note:** This was originally intended as a pull request, but PR creation failed. The changes have been pushed to the branch [\`${branchName}\`](${branchUrl}).
+
+**Original error:** ${prError instanceof Error ? prError.message : String(prError)}
+
+You can manually create a pull request from the branch if needed.${patchPreview}`;
+
+ try {
+ const { data: issue } = await github.rest.issues.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ title: title,
+ body: fallbackBody,
+ labels: labels,
+ });
+
+ core.info(`Created fallback issue #${issue.number}: ${issue.html_url}`);
+
+ // Update the activation comment with issue link (if a comment was created)
+ await updateActivationComment(github, context, core, issue.html_url, issue.number, "issue");
+
+ // Set output for other jobs to use (issue instead of PR)
+ core.setOutput("issue_number", issue.number);
+ core.setOutput("issue_url", issue.html_url);
+ core.setOutput("branch_name", branchName);
+ core.setOutput("fallback_used", "true");
+
+ // Write summary to GitHub Actions summary
+ await core.summary
+ .addRaw(
+ `
+
+## Fallback Issue Created
+- **Issue**: [#${issue.number}](${issue.html_url})
+- **Branch**: [\`${branchName}\`](${branchUrl})
+- **Base Branch**: \`${baseBranch}\`
+- **Note**: Pull request creation failed, created issue as fallback
+`
+ )
+ .write();
+ } catch (issueError) {
+ core.setFailed(`Failed to create both pull request and fallback issue. PR error: ${prError instanceof Error ? prError.message : String(prError)}. Issue error: ${issueError instanceof Error ? issueError.message : String(issueError)}`);
+ return;
+ }
+ }
+}
+await main();
diff --git a/actions/setup/js/estimate_tokens.cjs b/actions/setup/js/estimate_tokens.cjs
new file mode 100644
index 0000000000..a5fc23d08b
--- /dev/null
+++ b/actions/setup/js/estimate_tokens.cjs
@@ -0,0 +1,16 @@
+// @ts-check
+///
+
/**
 * Rough token-count estimate for a piece of text.
 * Uses the common heuristic of ~4 characters per token.
 * @param {string} text - The text to estimate tokens for
 * @returns {number} Approximate token count (0 for empty or nullish input)
 */
function estimateTokens(text) {
  return text ? Math.ceil(text.length / 4) : 0;
}
+
+module.exports = {
+ estimateTokens,
+};
diff --git a/actions/setup/js/expiration_helpers.cjs b/actions/setup/js/expiration_helpers.cjs
new file mode 100644
index 0000000000..fa405ee761
--- /dev/null
+++ b/actions/setup/js/expiration_helpers.cjs
@@ -0,0 +1,27 @@
+// @ts-check
+///
+
/**
 * Add an expiration marker comment to body lines if an expiration is configured.
 *
 * Reads the number of days from the named environment variable; when it is a
 * positive integer, computes the expiration timestamp and appends a hidden
 * marker to the body so downstream cleanup jobs can locate expired items.
 *
 * @param {string[]} bodyLines - Array of body lines to append to
 * @param {string} envVarName - Name of the environment variable containing expires days (e.g., "GH_AW_DISCUSSION_EXPIRES")
 * @param {string} entityType - Type of entity for logging (e.g., "Discussion", "Issue", "Pull Request")
 * @returns {void}
 */
function addExpirationComment(bodyLines, envVarName, entityType) {
  const expiresEnv = process.env[envVarName];
  if (expiresEnv) {
    const expiresDays = parseInt(expiresEnv, 10);
    // Only act on a valid positive day count; anything else is silently ignored.
    if (!Number.isNaN(expiresDays) && expiresDays > 0) {
      const expirationDate = new Date();
      expirationDate.setDate(expirationDate.getDate() + expiresDays);
      const expirationISO = expirationDate.toISOString();
      // BUG FIX: the original pushed an empty template literal, so no marker was
      // ever emitted even though the log line claimed one was.
      // NOTE(review): the exact marker text was lost (stripped HTML comment);
      // reconstructed below — confirm the format against the expiration-scan job.
      bodyLines.push(`<!-- gh-aw-expires: ${expirationISO} -->`);
      core.info(`${entityType} will expire on ${expirationISO} (${expiresDays} days)`);
    }
  }
}
+
+module.exports = {
+ addExpirationComment,
+};
diff --git a/actions/setup/js/generate_compact_schema.cjs b/actions/setup/js/generate_compact_schema.cjs
new file mode 100644
index 0000000000..f990c75e2c
--- /dev/null
+++ b/actions/setup/js/generate_compact_schema.cjs
@@ -0,0 +1,43 @@
+// @ts-check
+///
+
/**
 * Generates a compact schema description from JSON content
 * @param {string} content - The JSON content to analyze
 * @returns {string} Compact schema description for jq/agent
 */
function generateCompactSchema(content) {
  let parsed;
  try {
    parsed = JSON.parse(content);
  } catch {
    // Not valid JSON — fall back to a generic description
    return "text content";
  }

  if (Array.isArray(parsed)) {
    if (parsed.length === 0) {
      return "[]";
    }
    // Describe the array by the shape of its first element
    const head = parsed[0];
    if (head !== null && typeof head === "object") {
      return `[{${Object.keys(head).join(", ")}}] (${parsed.length} items)`;
    }
    return `[${typeof head}] (${parsed.length} items)`;
  }

  if (parsed !== null && typeof parsed === "object") {
    // Describe an object by its top-level keys, truncated to stay compact
    const keys = Object.keys(parsed);
    if (keys.length > 10) {
      return `{${keys.slice(0, 10).join(", ")}, ...} (${keys.length} keys)`;
    }
    return `{${keys.join(", ")}}`;
  }

  // Primitive JSON value (string/number/boolean) or null (typeof null === "object")
  return `${typeof parsed}`;
}
+
+module.exports = {
+ generateCompactSchema,
+};
diff --git a/actions/setup/js/generate_footer.cjs b/actions/setup/js/generate_footer.cjs
new file mode 100644
index 0000000000..bd0426e6ed
--- /dev/null
+++ b/actions/setup/js/generate_footer.cjs
@@ -0,0 +1,94 @@
+// @ts-check
+///
+
/**
 * Generates an XML comment marker with agentic workflow metadata for traceability.
 * This marker enables searching and tracing back items generated by an agentic workflow.
 *
 * Note: This function is duplicated in messages_footer.cjs. While normally we would
 * consolidate to a shared module, importing messages_footer.cjs here would cause the
 * bundler to inline messages_core.cjs which contains 'GH_AW_SAFE_OUTPUT_MESSAGES:' in
 * a warning message, breaking tests that check for env var declarations.
 *
 * @param {string} workflowName - Name of the workflow
 * @param {string} runUrl - URL of the workflow run
 * @returns {string} XML comment marker with workflow metadata
 */
function generateXMLMarker(workflowName, runUrl) {
  // Read engine metadata from environment variables
  const engineId = process.env.GH_AW_ENGINE_ID || "";
  const engineVersion = process.env.GH_AW_ENGINE_VERSION || "";
  const engineModel = process.env.GH_AW_ENGINE_MODEL || "";
  const trackerId = process.env.GH_AW_TRACKER_ID || "";

  // Build the key-value pairs for the marker
  const parts = [];

  // Always include agentic-workflow name
  parts.push(`agentic-workflow: ${workflowName}`);

  // Add tracker-id if available (for searchability and tracing)
  if (trackerId) {
    parts.push(`tracker-id: ${trackerId}`);
  }

  // Add engine ID if available
  if (engineId) {
    parts.push(`engine: ${engineId}`);
  }

  // Add version if available
  if (engineVersion) {
    parts.push(`version: ${engineVersion}`);
  }

  // Add model if available
  if (engineModel) {
    parts.push(`model: ${engineModel}`);
  }

  // Always include run URL
  parts.push(`run: ${runUrl}`);

  // BUG FIX: the original returned an empty template literal, discarding the
  // `parts` built above and the documented XML comment marker entirely.
  // NOTE(review): exact separator lost in extraction (stripped HTML comment);
  // reconstructed with ", " — confirm against messages_footer.cjs duplicate.
  return `<!-- ${parts.join(", ")} -->`;
}
+
/**
 * Generate footer with AI attribution and workflow installation instructions
 * @param {string} workflowName - Name of the workflow
 * @param {string} runUrl - URL of the workflow run
 * @param {string} workflowSource - Source of the workflow (owner/repo/path@ref)
 * @param {string} workflowSourceURL - GitHub URL for the workflow source
 * @param {number|undefined} triggeringIssueNumber - Issue number that triggered this workflow
 * @param {number|undefined} triggeringPRNumber - Pull request number that triggered this workflow
 * @param {number|undefined} triggeringDiscussionNumber - Discussion number that triggered this workflow
 * @returns {string} Footer text
 */
function generateFooter(workflowName, runUrl, workflowSource, workflowSourceURL, triggeringIssueNumber, triggeringPRNumber, triggeringDiscussionNumber) {
  // Optional " for #N" suffix referencing the item that triggered the run
  let reference = "";
  if (triggeringIssueNumber) {
    reference = ` for #${triggeringIssueNumber}`;
  } else if (triggeringPRNumber) {
    reference = ` for #${triggeringPRNumber}`;
  } else if (triggeringDiscussionNumber) {
    reference = ` for discussion #${triggeringDiscussionNumber}`;
  }

  // Installation hint, only when both source and its URL are known
  const installHint =
    workflowSource && workflowSourceURL
      ? `\n>\n> To add this workflow in your repository, run \`gh aw add ${workflowSource}\`. See [usage guide](https://githubnext.github.io/gh-aw/tools/cli/).`
      : "";

  // Attribution line + optional hint + trailing XML marker for traceability
  return `\n\n> AI generated by [${workflowName}](${runUrl})${reference}${installHint}\n\n${generateXMLMarker(workflowName, runUrl)}\n`;
}
+
+module.exports = {
+ generateFooter,
+ generateXMLMarker,
+};
diff --git a/actions/setup/js/generate_git_patch.cjs b/actions/setup/js/generate_git_patch.cjs
new file mode 100644
index 0000000000..af9654f28f
--- /dev/null
+++ b/actions/setup/js/generate_git_patch.cjs
@@ -0,0 +1,141 @@
+// @ts-check
+///
+
+const fs = require("fs");
+const path = require("path");
+const { execSync } = require("child_process");
+
+const { getBaseBranch } = require("./get_base_branch.cjs");
+
/**
 * Generates a git patch file for the current changes.
 *
 * Tries two strategies in order:
 *  1. If `branchName` is given and exists locally, diff it against
 *     `origin/<branchName>` (or, failing that, its merge-base with the
 *     default branch) via `git format-patch`.
 *  2. Otherwise, diff GITHUB_SHA..HEAD — i.e. any commits made on the
 *     current checkout since the workflow started.
 *
 * The patch is written to /tmp/gh-aw/aw.patch in `git format-patch` format
 * (so it can later be applied with `git am`).
 *
 * NOTE(review): branchName is interpolated into shell commands unquoted;
 * assumed to come from trusted workflow configuration — confirm callers
 * never pass untrusted input.
 *
 * @param {string} branchName - The branch name to generate patch for
 * @returns {Object} Object with patch info or error:
 *   `{success, patchPath, patchSize?, patchLines?, error?}`
 */
function generateGitPatch(branchName) {
  const patchPath = "/tmp/gh-aw/aw.patch";
  const cwd = process.env.GITHUB_WORKSPACE || process.cwd();
  const defaultBranch = process.env.DEFAULT_BRANCH || getBaseBranch();
  const githubSha = process.env.GITHUB_SHA;

  // Ensure /tmp/gh-aw directory exists
  const patchDir = path.dirname(patchPath);
  if (!fs.existsSync(patchDir)) {
    fs.mkdirSync(patchDir, { recursive: true });
  }

  let patchGenerated = false;
  let errorMessage = null;

  try {
    // Strategy 1: If we have a branch name, check if that branch exists and get its diff
    if (branchName) {
      // Check if the branch exists locally (show-ref exits non-zero if not)
      try {
        execSync(`git show-ref --verify --quiet refs/heads/${branchName}`, { cwd, encoding: "utf8" });

        // Determine base ref for patch generation
        let baseRef;
        try {
          // Check if origin/branchName exists
          execSync(`git show-ref --verify --quiet refs/remotes/origin/${branchName}`, { cwd, encoding: "utf8" });
          baseRef = `origin/${branchName}`;
        } catch {
          // Use merge-base with default branch (fetch first: checkouts are shallow)
          execSync(`git fetch origin ${defaultBranch}`, { cwd, encoding: "utf8" });
          baseRef = execSync(`git merge-base origin/${defaultBranch} ${branchName}`, { cwd, encoding: "utf8" }).trim();
        }

        // Count commits to be included
        const commitCount = parseInt(execSync(`git rev-list --count ${baseRef}..${branchName}`, { cwd, encoding: "utf8" }).trim(), 10);

        if (commitCount > 0) {
          // Generate patch from the determined base to the branch
          const patchContent = execSync(`git format-patch ${baseRef}..${branchName} --stdout`, {
            cwd,
            encoding: "utf8",
          });

          if (patchContent && patchContent.trim()) {
            fs.writeFileSync(patchPath, patchContent, "utf8");
            patchGenerated = true;
          }
        }
      } catch (branchError) {
        // Branch does not exist locally — fall through to Strategy 2 below
      }
    }

    // Strategy 2: Check if commits were made to current HEAD since checkout
    if (!patchGenerated) {
      const currentHead = execSync("git rev-parse HEAD", { cwd, encoding: "utf8" }).trim();

      if (!githubSha) {
        errorMessage = "GITHUB_SHA environment variable is not set";
      } else if (currentHead === githubSha) {
        // No commits have been made since checkout
      } else {
        // Check if GITHUB_SHA is an ancestor of current HEAD
        // (--is-ancestor exits non-zero when it is not, landing in the catch)
        try {
          execSync(`git merge-base --is-ancestor ${githubSha} HEAD`, { cwd, encoding: "utf8" });

          // Count commits between GITHUB_SHA and HEAD
          const commitCount = parseInt(execSync(`git rev-list --count ${githubSha}..HEAD`, { cwd, encoding: "utf8" }).trim(), 10);

          if (commitCount > 0) {
            // Generate patch from GITHUB_SHA to HEAD
            const patchContent = execSync(`git format-patch ${githubSha}..HEAD --stdout`, {
              cwd,
              encoding: "utf8",
            });

            if (patchContent && patchContent.trim()) {
              fs.writeFileSync(patchPath, patchContent, "utf8");
              patchGenerated = true;
            }
          }
        } catch {
          // GITHUB_SHA is not an ancestor of HEAD - repository state has diverged
        }
      }
    }
  } catch (error) {
    errorMessage = `Failed to generate patch: ${error instanceof Error ? error.message : String(error)}`;
  }

  // Check if patch was generated and has content
  if (patchGenerated && fs.existsSync(patchPath)) {
    const patchContent = fs.readFileSync(patchPath, "utf8");
    const patchSize = Buffer.byteLength(patchContent, "utf8");
    const patchLines = patchContent.split("\n").length;

    if (!patchContent.trim()) {
      // Empty patch — report failure so callers can treat it as a no-op
      return {
        success: false,
        error: "No changes to commit - patch is empty",
        patchPath: patchPath,
        patchSize: 0,
        patchLines: 0,
      };
    }

    return {
      success: true,
      patchPath: patchPath,
      patchSize: patchSize,
      patchLines: patchLines,
    };
  }

  // No patch generated — surface the most specific error we recorded
  return {
    success: false,
    error: errorMessage || "No changes to commit - no commits found",
    patchPath: patchPath,
  };
}
+
+module.exports = {
+ generateGitPatch,
+};
diff --git a/actions/setup/js/generate_safe_inputs_config.cjs b/actions/setup/js/generate_safe_inputs_config.cjs
new file mode 100644
index 0000000000..8e09bae044
--- /dev/null
+++ b/actions/setup/js/generate_safe_inputs_config.cjs
@@ -0,0 +1,34 @@
+// @ts-check
+///
+
/**
 * Generates configuration for the Safe Inputs MCP HTTP server
 * @param {object} params - Parameters for config generation
 * @param {typeof import("@actions/core")} params.core - GitHub Actions core library
 * @param {typeof import("crypto")} params.crypto - Node.js crypto library
 * @returns {{apiKey: string, port: number}} Generated configuration
 */
function generateSafeInputsConfig({ core, crypto }) {
  // 45 random bytes -> 60 base64 chars (360 bits of entropy); stripping the
  // '/', '+' and '=' characters still leaves well over 40 URL-safe characters.
  const apiKey = crypto.randomBytes(45).toString("base64").replace(/[/+=]/g, "");

  // Fixed default port for the HTTP server
  const port = 3000;

  // Expose via distinctly-named outputs to avoid clashes with other steps
  core.setOutput("safe_inputs_api_key", apiKey);
  core.setOutput("safe_inputs_port", port.toString());
  core.info(`Safe Inputs MCP server will run on port ${port}`);

  return { apiKey, port };
}
+
+if (typeof module !== "undefined" && module.exports) {
+ module.exports = {
+ generateSafeInputsConfig,
+ };
+}
diff --git a/actions/setup/js/get_base_branch.cjs b/actions/setup/js/get_base_branch.cjs
new file mode 100644
index 0000000000..ded46f56b5
--- /dev/null
+++ b/actions/setup/js/get_base_branch.cjs
@@ -0,0 +1,14 @@
+// @ts-check
+///
+
/**
 * Get the base branch name from environment variable
 * @returns {string} The base branch name (defaults to "main")
 */
function getBaseBranch() {
  const configured = process.env.GH_AW_BASE_BRANCH;
  // Unset or empty both fall back to "main" (|| semantics, intentionally kept)
  return configured || "main";
}
+
+module.exports = {
+ getBaseBranch,
+};
diff --git a/actions/setup/js/get_current_branch.cjs b/actions/setup/js/get_current_branch.cjs
new file mode 100644
index 0000000000..b0a5e0f2a2
--- /dev/null
+++ b/actions/setup/js/get_current_branch.cjs
@@ -0,0 +1,44 @@
+// @ts-check
+///
+
+const { execSync } = require("child_process");
+
/**
 * Get the current git branch name.
 *
 * Resolution order:
 *  1. `git rev-parse --abbrev-ref HEAD` — reflects branch switches made during
 *     the workflow run, which environment variables do not. A literal "HEAD"
 *     result means the checkout is detached (the default state produced by
 *     actions/checkout), so it is not a usable branch name and we fall through.
 *  2. GITHUB_HEAD_REF — source branch for pull_request events.
 *  3. GITHUB_REF_NAME — branch/tag name, set for all events.
 *
 * @returns {string} The current branch name
 * @throws {Error} When git yields no branch and no GitHub environment variables are available
 */
function getCurrentBranch() {
  const cwd = process.env.GITHUB_WORKSPACE || process.cwd();
  try {
    const branch = execSync("git rev-parse --abbrev-ref HEAD", {
      encoding: "utf8",
      cwd: cwd,
    }).trim();
    // BUG FIX: on a detached HEAD, rev-parse prints the literal string "HEAD";
    // the original returned it as if it were a branch name. Treat it (and an
    // empty result) as "unknown" and fall through to the environment variables.
    if (branch && branch !== "HEAD") {
      return branch;
    }
  } catch (error) {
    // Ignore error and try fallback
  }

  const ghHeadRef = process.env.GITHUB_HEAD_REF;
  if (ghHeadRef) {
    return ghHeadRef;
  }

  const ghRefName = process.env.GITHUB_REF_NAME;
  if (ghRefName) {
    return ghRefName;
  }

  throw new Error("Failed to determine current branch: git command failed and no GitHub environment variables available");
}
+
+module.exports = {
+ getCurrentBranch,
+};
diff --git a/actions/setup/js/get_repository_url.cjs b/actions/setup/js/get_repository_url.cjs
new file mode 100644
index 0000000000..d9f5556b47
--- /dev/null
+++ b/actions/setup/js/get_repository_url.cjs
@@ -0,0 +1,29 @@
+// @ts-check
+///
+
/**
 * Resolve the repository URL to use when building links.
 *
 * In trial mode (GH_AW_TARGET_REPO_SLUG set) issue/PR links must point at
 * the target repository rather than the repository the workflow executes in.
 * @returns {string} Repository URL
 */
function getRepositoryUrl() {
  const serverUrl = process.env.GITHUB_SERVER_URL || "https://github.com";

  // Trial mode: point issue/PR links at the target repository.
  const trialSlug = process.env.GH_AW_TARGET_REPO_SLUG;
  if (trialSlug) {
    return `${serverUrl}/${trialSlug}`;
  }

  // Default behavior: use the repository from the event payload when present.
  const payloadUrl = context.payload.repository?.html_url;
  if (payloadUrl) {
    return payloadUrl;
  }

  // Final fallback (e.g. action runs without a repository in the payload):
  // build the URL from the owner/name pair in the Actions context.
  return `${serverUrl}/${context.repo.owner}/${context.repo.repo}`;
}
+
// Expose the helper for require() by the other setup scripts.
module.exports = {
  getRepositoryUrl,
};
diff --git a/actions/setup/js/get_tracker_id.cjs b/actions/setup/js/get_tracker_id.cjs
new file mode 100644
index 0000000000..418f0b3287
--- /dev/null
+++ b/actions/setup/js/get_tracker_id.cjs
@@ -0,0 +1,20 @@
+// @ts-check
+///
+
/**
 * Get tracker-id from environment variable, log it, and optionally format it
 * @param {string} [format] - Output format: "markdown" for HTML comment, "text" for plain text, or undefined for raw value
 * @returns {string} Tracker ID in requested format or empty string
 */
function getTrackerID(format) {
  const trackerID = process.env.GH_AW_TRACKER_ID || "";
  if (trackerID) {
    core.info(`Tracker ID: ${trackerID}`);
    // "markdown" wraps the ID in an HTML comment so it can be embedded
    // invisibly in issue/PR bodies; any other format returns the raw value.
    // (The comment content was lost to encoding garbling in the previous
    // revision, which returned only "\n\n" and contradicted the docstring.)
    return format === "markdown" ? `\n\n<!-- tracker-id: ${trackerID} -->\n\n` : trackerID;
  }
  return "";
}
+
// Expose the helper for require() by the other setup scripts.
module.exports = {
  getTrackerID,
};
diff --git a/actions/setup/js/hide_comment.cjs b/actions/setup/js/hide_comment.cjs
new file mode 100644
index 0000000000..59fc2a9c55
--- /dev/null
+++ b/actions/setup/js/hide_comment.cjs
@@ -0,0 +1,122 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+
/**
 * Minimize (hide) a comment via the GraphQL `minimizeComment` mutation.
 * @param {any} github - GitHub GraphQL instance
 * @param {string} nodeId - Comment node ID (e.g., 'IC_kwDOABCD123456')
 * @param {string} reason - Reason for hiding (default: spam)
 * @returns {Promise<{id: string, isMinimized: boolean}>} Hidden comment details
 */
async function hideComment(github, nodeId, reason = "spam") {
  const mutation = /* GraphQL */ `
    mutation ($nodeId: ID!, $classifier: ReportedContentClassifiers!) {
      minimizeComment(input: { subjectId: $nodeId, classifier: $classifier }) {
        minimizedComment {
          isMinimized
        }
      }
    }
  `;

  const response = await github.graphql(mutation, { nodeId, classifier: reason });
  const { isMinimized } = response.minimizeComment.minimizedComment;

  return { id: nodeId, isMinimized };
}
+
/**
 * Entry point: process hide_comment items from the agent output.
 * Validates each item against the optional allowed-reasons list, then hides
 * the referenced comment via GraphQL; in staged mode it only writes a
 * preview to the step summary.
 */
async function main() {
  // Check if we're in staged mode
  const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true";

  // Parse allowed reasons from environment variable
  let allowedReasons = null;
  if (process.env.GH_AW_HIDE_COMMENT_ALLOWED_REASONS) {
    try {
      allowedReasons = JSON.parse(process.env.GH_AW_HIDE_COMMENT_ALLOWED_REASONS);
      core.info(`Allowed reasons for hiding: [${allowedReasons.join(", ")}]`);
    } catch (error) {
      core.warning(`Failed to parse GH_AW_HIDE_COMMENT_ALLOWED_REASONS: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  const result = loadAgentOutput();
  if (!result.success) {
    return;
  }

  // Find all hide-comment items
  const hideCommentItems = result.items.filter(/** @param {any} item */ item => item.type === "hide_comment");
  if (hideCommentItems.length === 0) {
    core.info("No hide-comment items found in agent output");
    return;
  }

  core.info(`Found ${hideCommentItems.length} hide-comment item(s)`);

  // If in staged mode, emit step summary instead of hiding comments
  if (isStaged) {
    // Fixed mojibake in the heading emoji (was a garbled multi-byte sequence).
    let summaryContent = "## 🎭 Staged Mode: Hide Comments Preview\n\n";
    summaryContent += "The following comments would be hidden if staged mode was disabled:\n\n";

    for (let i = 0; i < hideCommentItems.length; i++) {
      const item = hideCommentItems[i];
      const reason = item.reason || "spam";
      summaryContent += `### Comment ${i + 1}\n`;
      summaryContent += `**Node ID**: ${item.comment_id}\n`;
      summaryContent += `**Action**: Would be hidden as ${reason}\n`;
      summaryContent += "\n";
    }

    // write() returns a Promise; await it so the summary is flushed before
    // the job step ends (matches the awaited usage elsewhere in this repo).
    await core.summary.addRaw(summaryContent).write();
    return;
  }

  // Process each hide-comment item
  for (const item of hideCommentItems) {
    try {
      const commentId = item.comment_id;
      if (!commentId || typeof commentId !== "string") {
        throw new Error("comment_id is required and must be a string (GraphQL node ID)");
      }

      const reason = item.reason || "spam";

      // Normalize reason to uppercase for GitHub API
      const normalizedReason = reason.toUpperCase();

      // Validate reason against allowed reasons if specified (case-insensitive)
      if (allowedReasons && allowedReasons.length > 0) {
        const normalizedAllowedReasons = allowedReasons.map(r => r.toUpperCase());
        if (!normalizedAllowedReasons.includes(normalizedReason)) {
          core.warning(`Reason "${reason}" is not in allowed-reasons list [${allowedReasons.join(", ")}]. Skipping comment ${commentId}.`);
          continue;
        }
      }

      core.info(`Hiding comment: ${commentId} (reason: ${normalizedReason})`);

      const hideResult = await hideComment(github, commentId, normalizedReason);

      if (hideResult.isMinimized) {
        core.info(`Successfully hidden comment: ${commentId}`);
        core.setOutput("comment_id", commentId);
        core.setOutput("is_hidden", "true");
      } else {
        throw new Error(`Failed to hide comment: ${commentId}`);
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.error(`Failed to hide comment: ${errorMessage}`);
      core.setFailed(`Failed to hide comment: ${errorMessage}`);
      return;
    }
  }
}

// Call the main function (top-level await is valid here: the file is inlined
// into actions/github-script, which wraps it in an async context).
await main();
diff --git a/actions/setup/js/interpolate_prompt.cjs b/actions/setup/js/interpolate_prompt.cjs
new file mode 100644
index 0000000000..23def55462
--- /dev/null
+++ b/actions/setup/js/interpolate_prompt.cjs
@@ -0,0 +1,126 @@
+// @ts-check
+///
+
+// interpolate_prompt.cjs
+// Interpolates GitHub Actions expressions and renders template conditionals in the prompt file.
+// This combines variable interpolation and template filtering into a single step.
+
+const fs = require("fs");
+const { isTruthy } = require("./is_truthy.cjs");
+const { processRuntimeImports } = require("./runtime_import.cjs");
+
/**
 * Interpolates variables in the prompt content
 * @param {string} content - The prompt content with ${GH_AW_EXPR_*} placeholders
 * @param {Record<string, string>} variables - Map of variable names to their values
 * @returns {string} - The interpolated content
 */
function interpolateVariables(content, variables) {
  let result = content;

  // Replace each ${VAR_NAME} with its corresponding value
  for (const [varName, value] of Object.entries(variables)) {
    // Escape regex metacharacters so the variable name is matched literally.
    const escapedName = varName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    const pattern = new RegExp(`\\$\\{${escapedName}\\}`, "g");
    // Use a replacement function so "$" sequences in the value (e.g. "$&",
    // "$$") are inserted verbatim instead of being parsed as special
    // replacement patterns by String.prototype.replace.
    result = result.replace(pattern, () => value);
  }

  return result;
}
+
/**
 * Renders a Markdown template by processing {{#if}} conditional blocks.
 * When a conditional block is removed (falsy condition) and the template tags
 * were on their own lines, the empty lines are cleaned up to avoid
 * leaving excessive blank lines in the output.
 *
 * Truthiness of the captured condition text is delegated to isTruthy
 * (required from ./is_truthy.cjs at the top of this file).
 *
 * NOTE(review): the body is matched non-greedily ([\s\S]*?), so nested
 * {{#if}} blocks would pair an inner opener with the first closer —
 * presumably nesting is unsupported by design; confirm with callers.
 * @param {string} markdown - The markdown content to process
 * @returns {string} - The processed markdown content
 */
function renderMarkdownTemplate(markdown) {
  // First pass: Handle blocks where tags are on their own lines
  // Captures: (leading newline)(opening tag line)(condition)(body)(closing tag line)(trailing newline)
  let result = markdown.replace(/(\n?)([ \t]*{{#if\s+([^}]*)}}[ \t]*\n)([\s\S]*?)([ \t]*{{\/if}}[ \t]*)(\n?)/g, (match, leadNL, openLine, cond, body, closeLine, trailNL) => {
    if (isTruthy(cond)) {
      // Keep body with leading newline if there was one before the opening tag
      return leadNL + body;
    } else {
      // Remove entire block completely - the line containing the template is removed
      return "";
    }
  });

  // Second pass: Handle inline conditionals (tags not on their own lines)
  result = result.replace(/{{#if\s+([^}]*)}}([\s\S]*?){{\/if}}/g, (_, cond, body) => (isTruthy(cond) ? body : ""));

  // Clean up excessive blank lines (more than one blank line = 2 newlines)
  result = result.replace(/\n{3,}/g, "\n\n");

  return result;
}
+
/**
 * Entry point: interpolates GH_AW_EXPR_* expression variables and renders
 * template conditionals in the prompt file referenced by GH_AW_PROMPT,
 * writing the transformed prompt back over the original file.
 */
async function main() {
  try {
    const promptPath = process.env.GH_AW_PROMPT;
    if (!promptPath) {
      core.setFailed("GH_AW_PROMPT environment variable is not set");
      return;
    }

    // Runtime imports are resolved relative to the workspace directory.
    const workspaceDir = process.env.GITHUB_WORKSPACE;
    if (!workspaceDir) {
      core.setFailed("GITHUB_WORKSPACE environment variable is not set");
      return;
    }

    let text = fs.readFileSync(promptPath, "utf8");

    // Step 1: expand {{#runtime-import}} macros, if any are present.
    if (/{{#runtime-import\??[ \t]+[^\}]+}}/.test(text)) {
      core.info("Processing runtime import macros");
      text = processRuntimeImports(text, workspaceDir);
      core.info("Runtime imports processed successfully");
    } else {
      core.info("No runtime import macros found, skipping runtime import processing");
    }

    // Step 2: collect GH_AW_EXPR_* environment variables and substitute them.
    const variables = Object.fromEntries(
      Object.entries(process.env)
        .filter(([key]) => key.startsWith("GH_AW_EXPR_"))
        .map(([key, value]) => [key, value || ""])
    );

    const varCount = Object.keys(variables).length;
    if (varCount > 0) {
      core.info(`Found ${varCount} expression variable(s) to interpolate`);
      text = interpolateVariables(text, variables);
      core.info(`Successfully interpolated ${varCount} variable(s) in prompt`);
    } else {
      core.info("No expression variables found, skipping interpolation");
    }

    // Step 3: render {{#if}} conditional template blocks.
    if (/{{#if\s+[^}]+}}/.test(text)) {
      core.info("Processing conditional template blocks");
      text = renderMarkdownTemplate(text);
      core.info("Template rendered successfully");
    } else {
      core.info("No conditional blocks found in prompt, skipping template rendering");
    }

    // Persist the transformed prompt in place.
    fs.writeFileSync(promptPath, text, "utf8");
  } catch (error) {
    core.setFailed(error instanceof Error ? error.message : String(error));
  }
}

// Execute main function
main();
diff --git a/actions/setup/js/is_truthy.cjs b/actions/setup/js/is_truthy.cjs
new file mode 100644
index 0000000000..84207526da
--- /dev/null
+++ b/actions/setup/js/is_truthy.cjs
@@ -0,0 +1,12 @@
+// @ts-check
/**
 * Determines if a value is truthy according to template logic
 * @param {string} expr - The expression to evaluate
 * @returns {boolean} - Whether the expression is truthy
 */
function isTruthy(expr) {
  // Normalized tokens that count as falsy; everything else is truthy.
  const falsyTokens = new Set(["", "false", "0", "null", "undefined"]);
  return !falsyTokens.has(expr.trim().toLowerCase());
}
+
+module.exports = { isTruthy };
diff --git a/actions/setup/js/link_sub_issue.cjs b/actions/setup/js/link_sub_issue.cjs
new file mode 100644
index 0000000000..5ca6fd42ed
--- /dev/null
+++ b/actions/setup/js/link_sub_issue.cjs
@@ -0,0 +1,361 @@
+// @ts-check
+///
+
+const { loadAgentOutput } = require("./load_agent_output.cjs");
+const { generateStagedPreview } = require("./staged_preview.cjs");
+const { loadTemporaryIdMap, resolveIssueNumber } = require("./temporary_id.cjs");
+
/**
 * Entry point: process link_sub_issue safe-output items emitted by the agent.
 * Links each referenced sub-issue to its parent issue via the GraphQL
 * addSubIssue mutation, honoring staged-mode previews, temporary-ID
 * resolution, label/title-prefix filters, and a configurable max count.
 * Individual failures are collected and reported; the job is not failed.
 */
async function main() {
  const result = loadAgentOutput();
  if (!result.success) {
    return;
  }

  const linkItems = result.items.filter(item => item.type === "link_sub_issue");
  if (linkItems.length === 0) {
    core.info("No link_sub_issue items found in agent output");
    return;
  }

  core.info(`Found ${linkItems.length} link_sub_issue item(s)`);

  // Load the temporary ID map from create_issue job
  const temporaryIdMap = loadTemporaryIdMap();
  if (temporaryIdMap.size > 0) {
    core.info(`Loaded temporary ID map with ${temporaryIdMap.size} entries`);
  }

  // Check if we're in staged mode
  if (process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true") {
    await generateStagedPreview({
      title: "Link Sub-Issue",
      description: "The following sub-issue links would be created if staged mode was disabled:",
      items: linkItems,
      renderItem: item => {
        // Resolve temporary IDs for display
        const parentResolved = resolveIssueNumber(item.parent_issue_number, temporaryIdMap);
        const subResolved = resolveIssueNumber(item.sub_issue_number, temporaryIdMap);

        let parentDisplay = parentResolved.resolved ? `${parentResolved.resolved.repo}#${parentResolved.resolved.number}` : `${item.parent_issue_number} (unresolved)`;
        let subDisplay = subResolved.resolved ? `${subResolved.resolved.repo}#${subResolved.resolved.number}` : `${item.sub_issue_number} (unresolved)`;

        if (parentResolved.wasTemporaryId && parentResolved.resolved) {
          parentDisplay += ` (from ${item.parent_issue_number})`;
        }
        if (subResolved.wasTemporaryId && subResolved.resolved) {
          subDisplay += ` (from ${item.sub_issue_number})`;
        }

        let content = `**Parent Issue:** ${parentDisplay}\n`;
        content += `**Sub-Issue:** ${subDisplay}\n\n`;
        return content;
      },
    });
    return;
  }

  // Get filter configurations
  const parentRequiredLabelsEnv = process.env.GH_AW_LINK_SUB_ISSUE_PARENT_REQUIRED_LABELS?.trim();
  const parentRequiredLabels = parentRequiredLabelsEnv
    ? parentRequiredLabelsEnv
        .split(",")
        .map(l => l.trim())
        .filter(l => l)
    : [];

  const parentTitlePrefix = process.env.GH_AW_LINK_SUB_ISSUE_PARENT_TITLE_PREFIX?.trim() || "";

  const subRequiredLabelsEnv = process.env.GH_AW_LINK_SUB_ISSUE_SUB_REQUIRED_LABELS?.trim();
  const subRequiredLabels = subRequiredLabelsEnv
    ? subRequiredLabelsEnv
        .split(",")
        .map(l => l.trim())
        .filter(l => l)
    : [];

  const subTitlePrefix = process.env.GH_AW_LINK_SUB_ISSUE_SUB_TITLE_PREFIX?.trim() || "";

  if (parentRequiredLabels.length > 0) {
    core.info(`Parent required labels: ${JSON.stringify(parentRequiredLabels)}`);
  }
  if (parentTitlePrefix) {
    core.info(`Parent title prefix: ${parentTitlePrefix}`);
  }
  if (subRequiredLabels.length > 0) {
    core.info(`Sub-issue required labels: ${JSON.stringify(subRequiredLabels)}`);
  }
  if (subTitlePrefix) {
    core.info(`Sub-issue title prefix: ${subTitlePrefix}`);
  }

  // Get max count configuration
  const maxCountEnv = process.env.GH_AW_LINK_SUB_ISSUE_MAX_COUNT;
  const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 5;
  if (isNaN(maxCount) || maxCount < 1) {
    core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`);
    return;
  }
  core.info(`Max count: ${maxCount}`);

  // Limit items to max count
  const itemsToProcess = linkItems.slice(0, maxCount);
  if (linkItems.length > maxCount) {
    core.warning(`Found ${linkItems.length} link_sub_issue items, but max is ${maxCount}. Processing first ${maxCount}.`);
  }

  // Process each link request
  const results = [];
  for (const item of itemsToProcess) {
    // Resolve issue numbers, supporting temporary IDs from create_issue job
    const parentResolved = resolveIssueNumber(item.parent_issue_number, temporaryIdMap);
    const subResolved = resolveIssueNumber(item.sub_issue_number, temporaryIdMap);

    // Check for resolution errors
    if (parentResolved.errorMessage) {
      core.warning(`Failed to resolve parent issue: ${parentResolved.errorMessage}`);
      results.push({
        parent_issue_number: item.parent_issue_number,
        sub_issue_number: item.sub_issue_number,
        success: false,
        error: parentResolved.errorMessage,
      });
      continue;
    }

    if (subResolved.errorMessage) {
      core.warning(`Failed to resolve sub-issue: ${subResolved.errorMessage}`);
      results.push({
        parent_issue_number: item.parent_issue_number,
        sub_issue_number: item.sub_issue_number,
        success: false,
        error: subResolved.errorMessage,
      });
      continue;
    }

    const parentIssueNumber = parentResolved.resolved.number;
    const subIssueNumber = subResolved.resolved.number;

    if (parentResolved.wasTemporaryId) {
      core.info(`Resolved parent temporary ID '${item.parent_issue_number}' to ${parentResolved.resolved.repo}#${parentIssueNumber}`);
    }
    if (subResolved.wasTemporaryId) {
      core.info(`Resolved sub-issue temporary ID '${item.sub_issue_number}' to ${subResolved.resolved.repo}#${subIssueNumber}`);
    }

    // Fetch parent issue to validate filters
    let parentIssue;
    try {
      const parentResponse = await github.rest.issues.get({
        owner: context.repo.owner,
        repo: context.repo.repo,
        issue_number: parentIssueNumber,
      });
      parentIssue = parentResponse.data;
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.warning(`Failed to fetch parent issue #${parentIssueNumber}: ${errorMessage}`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: false,
        error: `Failed to fetch parent issue: ${errorMessage}`,
      });
      continue;
    }

    // Validate parent issue filters
    if (parentRequiredLabels.length > 0) {
      const parentLabels = parentIssue.labels.map(l => (typeof l === "string" ? l : l.name || ""));
      const missingLabels = parentRequiredLabels.filter(required => !parentLabels.includes(required));
      if (missingLabels.length > 0) {
        core.warning(`Parent issue #${parentIssueNumber} is missing required labels: ${missingLabels.join(", ")}. Skipping.`);
        results.push({
          parent_issue_number: parentIssueNumber,
          sub_issue_number: subIssueNumber,
          success: false,
          error: `Parent issue missing required labels: ${missingLabels.join(", ")}`,
        });
        continue;
      }
    }

    if (parentTitlePrefix && !parentIssue.title.startsWith(parentTitlePrefix)) {
      core.warning(`Parent issue #${parentIssueNumber} title does not start with "${parentTitlePrefix}". Skipping.`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: false,
        error: `Parent issue title does not start with "${parentTitlePrefix}"`,
      });
      continue;
    }

    // Fetch sub-issue to validate filters
    let subIssue;
    try {
      const subResponse = await github.rest.issues.get({
        owner: context.repo.owner,
        repo: context.repo.repo,
        issue_number: subIssueNumber,
      });
      subIssue = subResponse.data;
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.error(`Failed to fetch sub-issue #${subIssueNumber}: ${errorMessage}`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: false,
        error: `Failed to fetch sub-issue: ${errorMessage}`,
      });
      continue;
    }

    // Check if the sub-issue already has a parent using GraphQL
    try {
      const parentCheckQuery = `
        query($owner: String!, $repo: String!, $number: Int!) {
          repository(owner: $owner, name: $repo) {
            issue(number: $number) {
              parent {
                number
                title
              }
            }
          }
        }
      `;
      const parentCheckResult = await github.graphql(parentCheckQuery, {
        owner: context.repo.owner,
        repo: context.repo.repo,
        number: subIssueNumber,
      });

      const existingParent = parentCheckResult?.repository?.issue?.parent;
      if (existingParent) {
        core.warning(`Sub-issue #${subIssueNumber} is already a sub-issue of #${existingParent.number} ("${existingParent.title}"). Skipping.`);
        results.push({
          parent_issue_number: parentIssueNumber,
          sub_issue_number: subIssueNumber,
          success: false,
          error: `Sub-issue is already a sub-issue of #${existingParent.number}`,
        });
        continue;
      }
    } catch (error) {
      // If the GraphQL query fails (e.g., parent field not available), log warning but continue
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.warning(`Could not check if sub-issue #${subIssueNumber} has a parent: ${errorMessage}. Proceeding with link attempt.`);
    }

    // Validate sub-issue filters
    if (subRequiredLabels.length > 0) {
      const subLabels = subIssue.labels.map(l => (typeof l === "string" ? l : l.name || ""));
      const missingLabels = subRequiredLabels.filter(required => !subLabels.includes(required));
      if (missingLabels.length > 0) {
        core.warning(`Sub-issue #${subIssueNumber} is missing required labels: ${missingLabels.join(", ")}. Skipping.`);
        results.push({
          parent_issue_number: parentIssueNumber,
          sub_issue_number: subIssueNumber,
          success: false,
          error: `Sub-issue missing required labels: ${missingLabels.join(", ")}`,
        });
        continue;
      }
    }

    if (subTitlePrefix && !subIssue.title.startsWith(subTitlePrefix)) {
      core.warning(`Sub-issue #${subIssueNumber} title does not start with "${subTitlePrefix}". Skipping.`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: false,
        error: `Sub-issue title does not start with "${subTitlePrefix}"`,
      });
      continue;
    }

    // Link the sub-issue using GraphQL mutation
    try {
      // Get the parent issue's node ID for GraphQL
      const parentNodeId = parentIssue.node_id;
      const subNodeId = subIssue.node_id;

      // Use GraphQL mutation to add sub-issue
      await github.graphql(
        `
        mutation AddSubIssue($parentId: ID!, $subIssueId: ID!) {
          addSubIssue(input: { issueId: $parentId, subIssueId: $subIssueId }) {
            issue {
              id
              number
            }
            subIssue {
              id
              number
            }
          }
        }
        `,
        {
          parentId: parentNodeId,
          subIssueId: subNodeId,
        }
      );

      core.info(`Successfully linked issue #${subIssueNumber} as sub-issue of #${parentIssueNumber}`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: true,
      });
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      core.warning(`Failed to link issue #${subIssueNumber} as sub-issue of #${parentIssueNumber}: ${errorMessage}`);
      results.push({
        parent_issue_number: parentIssueNumber,
        sub_issue_number: subIssueNumber,
        success: false,
        error: errorMessage,
      });
    }
  }

  // Generate step summary
  // (Mojibake fixed below: the previous revision contained garbled
  // multi-byte sequences where ✅ / → / ⚠️ should appear.)
  const successCount = results.filter(r => r.success).length;
  const failureCount = results.filter(r => !r.success).length;

  let summaryContent = "## Link Sub-Issue\n\n";

  if (successCount > 0) {
    summaryContent += `✅ Successfully linked ${successCount} sub-issue(s):\n\n`;
    for (const result of results.filter(r => r.success)) {
      summaryContent += `- Issue #${result.sub_issue_number} → Parent #${result.parent_issue_number}\n`;
    }
    summaryContent += "\n";
  }

  if (failureCount > 0) {
    summaryContent += `⚠️ Failed to link ${failureCount} sub-issue(s):\n\n`;
    for (const result of results.filter(r => !r.success)) {
      summaryContent += `- Issue #${result.sub_issue_number} → Parent #${result.parent_issue_number}: ${result.error}\n`;
    }
  }

  await core.summary.addRaw(summaryContent).write();

  // Set outputs
  const linkedIssues = results
    .filter(r => r.success)
    .map(r => `${r.parent_issue_number}:${r.sub_issue_number}`)
    .join("\n");
  core.setOutput("linked_issues", linkedIssues);

  // Warn if any linking failed (do not fail the job)
  if (failureCount > 0) {
    core.warning(`Failed to link ${failureCount} sub-issue(s). See step summary for details.`);
  }
}

await main();
diff --git a/actions/setup/js/load_agent_output.cjs b/actions/setup/js/load_agent_output.cjs
new file mode 100644
index 0000000000..caaa944e6c
--- /dev/null
+++ b/actions/setup/js/load_agent_output.cjs
@@ -0,0 +1,90 @@
+// @ts-check
+///
+
+const fs = require("fs");
+
/**
 * Maximum content length to log for debugging purposes
 * @type {number}
 */
const MAX_LOG_CONTENT_LENGTH = 10000;

/**
 * Truncate content for logging if it exceeds the maximum length
 * @param {string} content - Content to potentially truncate
 * @returns {string} Truncated content with indicator if truncated
 */
function truncateForLogging(content) {
  // Short content passes through untouched.
  if (content.length <= MAX_LOG_CONTENT_LENGTH) {
    return content;
  }
  // Keep the head of the content and append a truncation marker noting the
  // original total length.
  const head = content.slice(0, MAX_LOG_CONTENT_LENGTH);
  return `${head}\n... (truncated, total length: ${content.length})`;
}
+
/**
 * Load and parse agent output from the GH_AW_AGENT_OUTPUT file
 *
 * This utility handles the common pattern of:
 * 1. Reading the GH_AW_AGENT_OUTPUT environment variable
 * 2. Loading the file content
 * 3. Validating the JSON structure
 * 4. Returning parsed items array
 *
 * @returns {{
 *   success: true,
 *   items: any[]
 * } | {
 *   success: false,
 *   items?: undefined,
 *   error?: string
 * }} Result object with success flag and items array (if successful) or error message
 */
function loadAgentOutput() {
  const outputPath = process.env.GH_AW_AGENT_OUTPUT;

  // Nothing to do when no agent output file was configured.
  if (!outputPath) {
    core.info("No GH_AW_AGENT_OUTPUT environment variable found");
    return { success: false };
  }

  // Load the raw file contents; a read failure is reported as an error.
  let raw;
  try {
    raw = fs.readFileSync(outputPath, "utf8");
  } catch (error) {
    const errorMessage = `Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`;
    core.error(errorMessage);
    return { success: false, error: errorMessage };
  }

  // An empty (or whitespace-only) file is treated as "no output", not an error.
  if (raw.trim() === "") {
    core.info("Agent output content is empty");
    return { success: false };
  }

  core.info(`Agent output content length: ${raw.length}`);

  // Parse the validated output JSON; log a truncated copy on failure.
  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch (error) {
    const errorMessage = `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`;
    core.error(errorMessage);
    core.info(`Failed to parse content:\n${truncateForLogging(raw)}`);
    return { success: false, error: errorMessage };
  }

  // The payload must carry an `items` array to be usable downstream.
  if (!parsed.items || !Array.isArray(parsed.items)) {
    core.info("No valid items found in agent output");
    core.info(`Parsed content: ${truncateForLogging(JSON.stringify(parsed))}`);
    return { success: false };
  }

  return { success: true, items: parsed.items };
}
+
+module.exports = { loadAgentOutput, truncateForLogging, MAX_LOG_CONTENT_LENGTH };
diff --git a/actions/setup/js/lock-issue.cjs b/actions/setup/js/lock-issue.cjs
new file mode 100644
index 0000000000..780bb3321b
--- /dev/null
+++ b/actions/setup/js/lock-issue.cjs
@@ -0,0 +1,69 @@
+// @ts-check
+///
+
+/**
+ * Lock a GitHub issue without providing a reason
+ * This script is used in the activation job when lock-for-agent is enabled
+ * to prevent concurrent modifications during agent workflow execution
+ */
+
/**
 * Entry point: lock the triggering issue for the duration of the agent
 * workflow. Skips pull requests (the issues lock API does not apply) and
 * already-locked issues; emits a "locked" output so a later step knows
 * whether it must unlock.
 */
async function main() {
  // Log actor and event information for debugging
  core.info(`Lock-issue debug: actor=${context.actor}, eventName=${context.eventName}`);

  // Get issue number from context
  const issueNumber = context.issue.number;

  if (!issueNumber) {
    core.setFailed("Issue number not found in context");
    return;
  }

  const owner = context.repo.owner;
  const repo = context.repo.repo;

  core.info(`Lock-issue debug: owner=${owner}, repo=${repo}, issueNumber=${issueNumber}`);

  try {
    // Check if issue is already locked
    core.info(`Checking if issue #${issueNumber} is already locked`);
    const { data: issue } = await github.rest.issues.get({
      owner,
      repo,
      issue_number: issueNumber,
    });

    // Skip locking if this is a pull request (PRs cannot be locked via issues API)
    // (Mojibake fixed: log prefixes below previously contained garbled
    // multi-byte sequences instead of ℹ️ / ✅.)
    if (issue.pull_request) {
      core.info(`ℹ️ Issue #${issueNumber} is a pull request, skipping lock operation`);
      core.setOutput("locked", "false");
      return;
    }

    if (issue.locked) {
      core.info(`ℹ️ Issue #${issueNumber} is already locked, skipping lock operation`);
      core.setOutput("locked", "false");
      return;
    }

    core.info(`Locking issue #${issueNumber} for agent workflow execution`);

    // Lock the issue without providing a lock_reason parameter
    await github.rest.issues.lock({
      owner,
      repo,
      issue_number: issueNumber,
    });

    core.info(`✅ Successfully locked issue #${issueNumber}`);
    // Set output to indicate the issue was locked and needs to be unlocked
    core.setOutput("locked", "true");
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    core.error(`Failed to lock issue: ${errorMessage}`);
    core.setFailed(`Failed to lock issue #${issueNumber}: ${errorMessage}`);
    core.setOutput("locked", "false");
  }
}

await main();
diff --git a/actions/setup/js/log_parser_bootstrap.cjs b/actions/setup/js/log_parser_bootstrap.cjs
new file mode 100644
index 0000000000..81de526050
--- /dev/null
+++ b/actions/setup/js/log_parser_bootstrap.cjs
@@ -0,0 +1,139 @@
+// @ts-check
+///
+
+const { generatePlainTextSummary, generateCopilotCliStyleSummary } = require("./log_parser_shared.cjs");
+
/**
 * Bootstrap helper for log parser entry points.
 * Handles common logic for environment variable lookup, file existence checks,
 * content reading (file or directory), and summary emission.
 *
 * Reads the log location from GH_AW_AGENT_OUTPUT; missing configuration or a
 * missing path is treated as "nothing to parse" rather than an error.
 *
 * @param {Object} options - Configuration options
 * @param {function(string): string|{markdown: string, mcpFailures?: string[], maxTurnsHit?: boolean, logEntries?: Array}} options.parseLog - Parser function that takes log content and returns markdown or result object
 * @param {string} options.parserName - Name of the parser (e.g., "Codex", "Claude", "Copilot")
 * @param {boolean} [options.supportsDirectories=false] - Whether the parser supports reading from directories
 * @returns {void}
 */
function runLogParser(options) {
  const fs = require("fs");
  const path = require("path");
  const { parseLog, parserName, supportsDirectories = false } = options;

  try {
    const logPath = process.env.GH_AW_AGENT_OUTPUT;
    if (!logPath) {
      core.info("No agent log file specified");
      return;
    }

    if (!fs.existsSync(logPath)) {
      core.info(`Log path not found: ${logPath}`);
      return;
    }

    let content = "";

    // Check if logPath is a directory or a file
    const stat = fs.statSync(logPath);
    if (stat.isDirectory()) {
      if (!supportsDirectories) {
        core.info(`Log path is a directory but ${parserName} parser does not support directories: ${logPath}`);
        return;
      }

      // Read all log files from the directory and concatenate them
      const files = fs.readdirSync(logPath);
      const logFiles = files.filter(file => file.endsWith(".log") || file.endsWith(".txt"));

      if (logFiles.length === 0) {
        core.info(`No log files found in directory: ${logPath}`);
        return;
      }

      // Sort log files by name to ensure consistent ordering
      logFiles.sort();

      // Concatenate all log files
      for (const file of logFiles) {
        const filePath = path.join(logPath, file);
        const fileContent = fs.readFileSync(filePath, "utf8");

        // Add a newline before this file if the previous content doesn't end with one
        if (content.length > 0 && !content.endsWith("\n")) {
          content += "\n";
        }

        content += fileContent;
      }
    } else {
      // Read the single log file
      content = fs.readFileSync(logPath, "utf8");
    }

    const result = parseLog(content);

    // Handle result that may be a simple string or an object with metadata
    let markdown = "";
    let mcpFailures = [];
    let maxTurnsHit = false;
    let logEntries = null;

    if (typeof result === "string") {
      markdown = result;
    } else if (result && typeof result === "object") {
      markdown = result.markdown || "";
      mcpFailures = result.mcpFailures || [];
      maxTurnsHit = result.maxTurnsHit || false;
      logEntries = result.logEntries || null;
    }

    if (markdown) {
      // Generate lightweight plain text summary for core.info and Copilot CLI style for step summary
      if (logEntries && Array.isArray(logEntries) && logEntries.length > 0) {
        // Extract model from init entry if available
        const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init");
        const model = initEntry?.model || null;

        const plainTextSummary = generatePlainTextSummary(logEntries, {
          model,
          parserName,
        });
        core.info(plainTextSummary);

        // Generate Copilot CLI style markdown for step summary
        const copilotCliStyleMarkdown = generateCopilotCliStyleSummary(logEntries, {
          model,
          parserName,
        });
        // NOTE(review): summary.write() returns a Promise that is not awaited
        // here (runLogParser is synchronous) — presumably acceptable because
        // the runtime flushes before process exit; confirm.
        core.summary.addRaw(copilotCliStyleMarkdown).write();
      } else {
        // Fallback: just log success message for parsers without log entries
        core.info(`${parserName} log parsed successfully`);
        // Write original markdown to step summary if available
        // NOTE(review): same un-awaited write() as above.
        core.summary.addRaw(markdown).write();
      }
    } else {
      core.error(`Failed to parse ${parserName} log`);
    }

    // Handle MCP server failures if present
    if (mcpFailures && mcpFailures.length > 0) {
      const failedServers = mcpFailures.join(", ");
      core.setFailed(`MCP server(s) failed to launch: ${failedServers}`);
    }

    // Handle max-turns limit if hit
    if (maxTurnsHit) {
      core.setFailed(`Agent execution stopped: max-turns limit reached. The agent did not complete its task successfully.`);
    }
  } catch (error) {
    core.setFailed(error instanceof Error ? error : String(error));
  }
}
+
// Export for testing and usage. The typeof guard lets this file also run in
// contexts where no CommonJS `module` object exists (e.g. when inlined).
if (typeof module !== "undefined" && module.exports) {
  module.exports = {
    runLogParser,
  };
}
diff --git a/actions/setup/js/log_parser_shared.cjs b/actions/setup/js/log_parser_shared.cjs
new file mode 100644
index 0000000000..4bbe37d365
--- /dev/null
+++ b/actions/setup/js/log_parser_shared.cjs
@@ -0,0 +1,1400 @@
+// @ts-check
+///
+
+/**
+ * Shared utility functions for log parsers
+ * Used by parse_claude_log.cjs, parse_copilot_log.cjs, and parse_codex_log.cjs
+ */
+
+/**
+ * Maximum length for tool output content in characters.
+ * Tool output/response sections are truncated to this length to keep step summaries readable.
+ * Reduced from 500 to 256 for more compact output.
+ */
+const MAX_TOOL_OUTPUT_LENGTH = 256;
+
+/**
+ * Maximum step summary size in bytes (1000KB).
+ * GitHub Actions step summaries have a limit of 1024KB. We use 1000KB to leave buffer space.
+ * We stop rendering additional content when approaching this limit to prevent workflow failures.
+ */
+const MAX_STEP_SUMMARY_SIZE = 1000 * 1024;
+
+/**
+ * Maximum length for bash command display in plain text summaries.
+ * Commands are truncated to this length for compact display.
+ */
+const MAX_BASH_COMMAND_DISPLAY_LENGTH = 40;
+
+/**
+ * Warning message shown when step summary size limit is reached.
+ * This message is added directly to markdown (not tracked) to ensure it's always visible.
+ * The message is small (~70 bytes) and won't cause practical issues with the 8MB limit.
+ */
+const SIZE_LIMIT_WARNING = "\n\n⚠️ *Step summary size limit reached. Additional content truncated.*\n\n";
+
+/**
+ * Tracks the size of content being added to a step summary.
+ * Used to prevent exceeding GitHub Actions step summary size limits.
+ */
+class StepSummaryTracker {
+ /**
+ * Creates a new step summary size tracker.
+ * @param {number} [maxSize=MAX_STEP_SUMMARY_SIZE] - Maximum allowed size in bytes
+ */
+ constructor(maxSize = MAX_STEP_SUMMARY_SIZE) {
+ /** @type {number} */
+ this.currentSize = 0;
+ /** @type {number} */
+ this.maxSize = maxSize;
+ /** @type {boolean} */
+ this.limitReached = false;
+ }
+
+ /**
+ * Adds content to the tracker and returns whether the limit has been reached.
+ * @param {string} content - Content to add
+ * @returns {boolean} True if the content was added, false if the limit was reached
+ */
+ add(content) {
+ if (this.limitReached) {
+ return false;
+ }
+
+ const contentSize = Buffer.byteLength(content, "utf8");
+ if (this.currentSize + contentSize > this.maxSize) {
+ this.limitReached = true;
+ return false;
+ }
+
+ this.currentSize += contentSize;
+ return true;
+ }
+
+ /**
+ * Checks if the limit has been reached.
+ * @returns {boolean} True if the limit has been reached
+ */
+ isLimitReached() {
+ return this.limitReached;
+ }
+
+ /**
+ * Gets the current accumulated size.
+ * @returns {number} Current size in bytes
+ */
+ getSize() {
+ return this.currentSize;
+ }
+
+ /**
+ * Resets the tracker.
+ */
+ reset() {
+ this.currentSize = 0;
+ this.limitReached = false;
+ }
+}
+
+/**
+ * Formats duration in milliseconds to human-readable string
+ * @param {number} ms - Duration in milliseconds
+ * @returns {string} Formatted duration string (e.g., "1s", "1m 30s")
+ */
+function formatDuration(ms) {
+ if (!ms || ms <= 0) return "";
+
+ const seconds = Math.round(ms / 1000);
+ if (seconds < 60) {
+ return `${seconds}s`;
+ }
+
+ const minutes = Math.floor(seconds / 60);
+ const remainingSeconds = seconds % 60;
+ if (remainingSeconds === 0) {
+ return `${minutes}m`;
+ }
+ return `${minutes}m ${remainingSeconds}s`;
+}
+
+/**
+ * Formats a bash command by normalizing whitespace and escaping
+ * @param {string} command - The raw bash command string
+ * @returns {string} Formatted and escaped command string
+ */
+function formatBashCommand(command) {
+ if (!command) return "";
+
+ // Convert multi-line commands to single line by replacing newlines with spaces
+ // and collapsing multiple spaces
+ let formatted = command
+ .replace(/\n/g, " ") // Replace newlines with spaces
+ .replace(/\r/g, " ") // Replace carriage returns with spaces
+ .replace(/\t/g, " ") // Replace tabs with spaces
+ .replace(/\s+/g, " ") // Collapse multiple spaces into one
+ .trim(); // Remove leading/trailing whitespace
+
+ // Escape backticks to prevent markdown issues
+ formatted = formatted.replace(/`/g, "\\`");
+
+ // Truncate if too long (keep reasonable length for summary)
+ const maxLength = 300;
+ if (formatted.length > maxLength) {
+ formatted = formatted.substring(0, maxLength) + "...";
+ }
+
+ return formatted;
+}
+
+/**
+ * Truncates a string to a maximum length with ellipsis
+ * @param {string} str - The string to truncate
+ * @param {number} maxLength - Maximum allowed length
+ * @returns {string} Truncated string with ellipsis if needed
+ */
+function truncateString(str, maxLength) {
+ if (!str) return "";
+ if (str.length <= maxLength) return str;
+ return str.substring(0, maxLength) + "...";
+}
+
+/**
+ * Calculates approximate token count from text using 4 chars per token estimate
+ * @param {string} text - The text to estimate tokens for
+ * @returns {number} Approximate token count
+ */
+function estimateTokens(text) {
+ if (!text) return 0;
+ return Math.ceil(text.length / 4);
+}
+
+/**
+ * Formats MCP tool name from internal format to display format
+ * @param {string} toolName - The raw tool name (e.g., mcp__github__search_issues)
+ * @returns {string} Formatted tool name (e.g., github::search_issues)
+ */
+function formatMcpName(toolName) {
+ // Convert mcp__github__search_issues to github::search_issues
+ if (toolName.startsWith("mcp__")) {
+ const parts = toolName.split("__");
+ if (parts.length >= 3) {
+ const provider = parts[1]; // github, etc.
+ const method = parts.slice(2).join("_"); // search_issues, etc.
+ return `${provider}::${method}`;
+ }
+ }
+ return toolName;
+}
+
+/**
+ * Checks if a tool name looks like a custom agent (kebab-case with multiple words)
+ * Custom agents have names like: add-safe-output-type, cli-consistency-checker, etc.
+ * @param {string} toolName - The tool name to check
+ * @returns {boolean} True if the tool name appears to be a custom agent
+ */
+function isLikelyCustomAgent(toolName) {
+ // Custom agents are kebab-case with at least one hyphen and multiple word segments
+ // They should not start with common prefixes like 'mcp__', 'safe', etc.
+ if (!toolName || typeof toolName !== "string") {
+ return false;
+ }
+
+ // Must contain at least one hyphen
+ if (!toolName.includes("-")) {
+ return false;
+ }
+
+ // Should not contain double underscores (MCP tools)
+ if (toolName.includes("__")) {
+ return false;
+ }
+
+ // Should not start with safe (safeoutputs, safeinputs handled separately)
+ if (toolName.toLowerCase().startsWith("safe")) {
+ return false;
+ }
+
+ // Should be all lowercase with hyphens (kebab-case)
+ // Allow letters, numbers, and hyphens only
+ if (!/^[a-z0-9]+(-[a-z0-9]+)+$/.test(toolName)) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Generates markdown summary from conversation log entries
+ * This is the core shared logic between Claude and Copilot log parsers
+ *
+ * When a summaryTracker is provided, the function tracks the accumulated size
+ * and stops rendering additional content when approaching the step summary limit.
+ *
+ * @param {Array} logEntries - Array of log entries with type, message, etc.
+ * @param {Object} options - Configuration options
+ * @param {Function} options.formatToolCallback - Callback function to format tool use (content, toolResult) => string
+ * @param {Function} options.formatInitCallback - Callback function to format initialization (initEntry) => string or {markdown: string, mcpFailures: string[]}
+ * @param {StepSummaryTracker} [options.summaryTracker] - Optional tracker for step summary size limits
+ * @returns {{markdown: string, commandSummary: Array, sizeLimitReached: boolean}} Generated markdown, command summary, and size limit status
+ */
+function generateConversationMarkdown(logEntries, options) {
+ const { formatToolCallback, formatInitCallback, summaryTracker } = options;
+
+ const toolUsePairs = new Map(); // Map tool_use_id to tool_result
+
+ // First pass: collect tool results by tool_use_id
+ for (const entry of logEntries) {
+ if (entry.type === "user" && entry.message?.content) {
+ for (const content of entry.message.content) {
+ if (content.type === "tool_result" && content.tool_use_id) {
+ toolUsePairs.set(content.tool_use_id, content);
+ }
+ }
+ }
+ }
+
+ let markdown = "";
+ let sizeLimitReached = false;
+
+ /**
+ * Helper to add content with size tracking
+ * @param {string} content - Content to add
+ * @returns {boolean} True if content was added, false if limit reached
+ */
+ function addContent(content) {
+ if (summaryTracker && !summaryTracker.add(content)) {
+ sizeLimitReached = true;
+ return false;
+ }
+ markdown += content;
+ return true;
+ }
+
+ // Check for initialization data first
+ const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init");
+
+ if (initEntry && formatInitCallback) {
+ if (!addContent("## 🚀 Initialization\n\n")) {
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+ const initResult = formatInitCallback(initEntry);
+ // Handle both string and object returns (for backward compatibility)
+ if (typeof initResult === "string") {
+ if (!addContent(initResult)) {
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+ } else if (initResult && initResult.markdown) {
+ if (!addContent(initResult.markdown)) {
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+ }
+ if (!addContent("\n")) {
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+ }
+
+ if (!addContent("\n## 🤖 Reasoning\n\n")) {
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+
+ // Second pass: process assistant messages in sequence
+ for (const entry of logEntries) {
+ if (sizeLimitReached) break;
+
+ if (entry.type === "assistant" && entry.message?.content) {
+ for (const content of entry.message.content) {
+ if (sizeLimitReached) break;
+
+ if (content.type === "text" && content.text) {
+ // Add reasoning text directly
+ const text = content.text.trim();
+ if (text && text.length > 0) {
+ if (!addContent(text + "\n\n")) {
+ break;
+ }
+ }
+ } else if (content.type === "tool_use") {
+ // Process tool use with its result
+ const toolResult = toolUsePairs.get(content.id);
+ const toolMarkdown = formatToolCallback(content, toolResult);
+ if (toolMarkdown) {
+ if (!addContent(toolMarkdown)) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Add size limit notice if limit was reached
+ if (sizeLimitReached) {
+ markdown += SIZE_LIMIT_WARNING;
+ return { markdown, commandSummary: [], sizeLimitReached };
+ }
+
+ if (!addContent("## 🤖 Commands and Tools\n\n")) {
+ markdown += SIZE_LIMIT_WARNING;
+ return { markdown, commandSummary: [], sizeLimitReached: true };
+ }
+
+ const commandSummary = []; // For the succinct summary
+
+ // Collect all tool uses for summary
+ for (const entry of logEntries) {
+ if (entry.type === "assistant" && entry.message?.content) {
+ for (const content of entry.message.content) {
+ if (content.type === "tool_use") {
+ const toolName = content.name;
+ const input = content.input || {};
+
+ // Skip internal tools - only show external commands and API calls
+ if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) {
+ continue; // Skip internal file operations and searches
+ }
+
+ // Find the corresponding tool result to get status
+ const toolResult = toolUsePairs.get(content.id);
+ let statusIcon = "❓";
+ if (toolResult) {
+ statusIcon = toolResult.is_error === true ? "❌" : "✅";
+ }
+
+ // Add to command summary (only external tools)
+ if (toolName === "Bash") {
+ const formattedCommand = formatBashCommand(input.command || "");
+ commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``);
+ } else if (toolName.startsWith("mcp__")) {
+ const mcpName = formatMcpName(toolName);
+ commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``);
+ } else {
+ // Handle other external tools (if any)
+ commandSummary.push(`* ${statusIcon} ${toolName}`);
+ }
+ }
+ }
+ }
+ }
+
+ // Add command summary
+ if (commandSummary.length > 0) {
+ for (const cmd of commandSummary) {
+ if (!addContent(`${cmd}\n`)) {
+ markdown += SIZE_LIMIT_WARNING;
+ return { markdown, commandSummary, sizeLimitReached: true };
+ }
+ }
+ } else {
+ if (!addContent("No commands or tools used.\n")) {
+ markdown += SIZE_LIMIT_WARNING;
+ return { markdown, commandSummary, sizeLimitReached: true };
+ }
+ }
+
+ return { markdown, commandSummary, sizeLimitReached };
+}
+
+/**
+ * Generates information section markdown from the last log entry
+ * @param {any} lastEntry - The last log entry with metadata (num_turns, duration_ms, etc.)
+ * @param {Object} options - Configuration options
+ * @param {Function} [options.additionalInfoCallback] - Optional callback for additional info (lastEntry) => string
+ * @returns {string} Information section markdown
+ */
+function generateInformationSection(lastEntry, options = {}) {
+ const { additionalInfoCallback } = options;
+
+ let markdown = "\n## 📊 Information\n\n";
+
+ if (!lastEntry) {
+ return markdown;
+ }
+
+ if (lastEntry.num_turns) {
+ markdown += `**Turns:** ${lastEntry.num_turns}\n\n`;
+ }
+
+ if (lastEntry.duration_ms) {
+ const durationSec = Math.round(lastEntry.duration_ms / 1000);
+ const minutes = Math.floor(durationSec / 60);
+ const seconds = durationSec % 60;
+ markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`;
+ }
+
+ if (lastEntry.total_cost_usd) {
+ markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`;
+ }
+
+ // Call additional info callback if provided (for engine-specific info like premium requests)
+ if (additionalInfoCallback) {
+ const additionalInfo = additionalInfoCallback(lastEntry);
+ if (additionalInfo) {
+ markdown += additionalInfo;
+ }
+ }
+
+ if (lastEntry.usage) {
+ const usage = lastEntry.usage;
+ if (usage.input_tokens || usage.output_tokens) {
+ // Calculate total tokens (matching Go parser logic)
+ const inputTokens = usage.input_tokens || 0;
+ const outputTokens = usage.output_tokens || 0;
+ const cacheCreationTokens = usage.cache_creation_input_tokens || 0;
+ const cacheReadTokens = usage.cache_read_input_tokens || 0;
+ const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens;
+
+ markdown += `**Token Usage:**\n`;
+ if (totalTokens > 0) markdown += `- Total: ${totalTokens.toLocaleString()}\n`;
+ if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`;
+ if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`;
+ if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`;
+ if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`;
+ markdown += "\n";
+ }
+ }
+
+ if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) {
+ markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`;
+ }
+
+ return markdown;
+}
+
+/**
+ * Formats MCP parameters into a human-readable string
+ * @param {Record<string, any>} input - The input object containing parameters
+ * @returns {string} Formatted parameters string
+ */
+function formatMcpParameters(input) {
+ const keys = Object.keys(input);
+ if (keys.length === 0) return "";
+
+ const paramStrs = [];
+ for (const key of keys.slice(0, 4)) {
+ // Show up to 4 parameters
+ const value = String(input[key] || "");
+ paramStrs.push(`${key}: ${truncateString(value, 40)}`);
+ }
+
+ if (keys.length > 4) {
+ paramStrs.push("...");
+ }
+
+ return paramStrs.join(", ");
+}
+
+/**
+ * Formats initialization information from system init entry
+ * @param {any} initEntry - The system init entry containing tools, mcp_servers, etc.
+ * @param {Object} options - Configuration options
+ * @param {Function} [options.mcpFailureCallback] - Optional callback for tracking MCP failures (server) => void
+ * @param {Function} [options.modelInfoCallback] - Optional callback for rendering model info (initEntry) => string
+ * @param {boolean} [options.includeSlashCommands] - Whether to include slash commands section (default: false)
+ * @returns {{markdown: string, mcpFailures?: string[]}} Result with formatted markdown string and optional MCP failure list
+ */
+function formatInitializationSummary(initEntry, options = {}) {
+ const { mcpFailureCallback, modelInfoCallback, includeSlashCommands = false } = options;
+ let markdown = "";
+ const mcpFailures = [];
+
+ // Display model and session info
+ if (initEntry.model) {
+ markdown += `**Model:** ${initEntry.model}\n\n`;
+ }
+
+ // Call model info callback for engine-specific model information (e.g., Copilot premium info)
+ if (modelInfoCallback) {
+ const modelInfo = modelInfoCallback(initEntry);
+ if (modelInfo) {
+ markdown += modelInfo;
+ }
+ }
+
+ if (initEntry.session_id) {
+ markdown += `**Session ID:** ${initEntry.session_id}\n\n`;
+ }
+
+ if (initEntry.cwd) {
+ // Show a cleaner path by removing common prefixes
+ const cleanCwd = initEntry.cwd.replace(/^\/home\/runner\/work\/[^\/]+\/[^\/]+/, ".");
+ markdown += `**Working Directory:** ${cleanCwd}\n\n`;
+ }
+
+ // Display MCP servers status
+ if (initEntry.mcp_servers && Array.isArray(initEntry.mcp_servers)) {
+ markdown += "**MCP Servers:**\n";
+ for (const server of initEntry.mcp_servers) {
+ const statusIcon = server.status === "connected" ? "✅" : server.status === "failed" ? "❌" : "❓";
+ markdown += `- ${statusIcon} ${server.name} (${server.status})\n`;
+
+ // Track failed MCP servers - call callback if provided (for Claude's detailed error tracking)
+ if (server.status === "failed") {
+ mcpFailures.push(server.name);
+
+ // Call callback to allow engine-specific failure handling
+ if (mcpFailureCallback) {
+ const failureDetails = mcpFailureCallback(server);
+ if (failureDetails) {
+ markdown += failureDetails;
+ }
+ }
+ }
+ }
+ markdown += "\n";
+ }
+
+ // Display tools by category
+ if (initEntry.tools && Array.isArray(initEntry.tools)) {
+ markdown += "**Available Tools:**\n";
+
+ // Categorize tools with improved groupings
+ /** @type {{ [key: string]: string[] }} */
+ const categories = {
+ Core: [],
+ "File Operations": [],
+ Builtin: [],
+ "Safe Outputs": [],
+ "Safe Inputs": [],
+ "Git/GitHub": [],
+ Playwright: [],
+ Serena: [],
+ MCP: [],
+ "Custom Agents": [],
+ Other: [],
+ };
+
+ // Builtin tools that come with gh-aw / Copilot
+ const builtinTools = ["bash", "write_bash", "read_bash", "stop_bash", "list_bash", "grep", "glob", "view", "create", "edit", "store_memory", "code_review", "codeql_checker", "report_progress", "report_intent", "gh-advisory-database"];
+
+ // Internal tools that are specific to Copilot CLI
+ const internalTools = ["fetch_copilot_cli_documentation"];
+
+ for (const tool of initEntry.tools) {
+ const toolLower = tool.toLowerCase();
+
+ if (["Task", "Bash", "BashOutput", "KillBash", "ExitPlanMode"].includes(tool)) {
+ categories["Core"].push(tool);
+ } else if (["Read", "Edit", "MultiEdit", "Write", "LS", "Grep", "Glob", "NotebookEdit"].includes(tool)) {
+ categories["File Operations"].push(tool);
+ } else if (builtinTools.includes(toolLower) || internalTools.includes(toolLower)) {
+ categories["Builtin"].push(tool);
+ } else if (tool.startsWith("safeoutputs-") || tool.startsWith("safe_outputs-")) {
+ // Extract the tool name without the prefix for cleaner display
+ const toolName = tool.replace(/^safeoutputs-|^safe_outputs-/, "");
+ categories["Safe Outputs"].push(toolName);
+ } else if (tool.startsWith("safeinputs-") || tool.startsWith("safe_inputs-")) {
+ // Extract the tool name without the prefix for cleaner display
+ const toolName = tool.replace(/^safeinputs-|^safe_inputs-/, "");
+ categories["Safe Inputs"].push(toolName);
+ } else if (tool.startsWith("mcp__github__")) {
+ categories["Git/GitHub"].push(formatMcpName(tool));
+ } else if (tool.startsWith("mcp__playwright__")) {
+ categories["Playwright"].push(formatMcpName(tool));
+ } else if (tool.startsWith("mcp__serena__")) {
+ categories["Serena"].push(formatMcpName(tool));
+ } else if (tool.startsWith("mcp__") || ["ListMcpResourcesTool", "ReadMcpResourceTool"].includes(tool)) {
+ categories["MCP"].push(tool.startsWith("mcp__") ? formatMcpName(tool) : tool);
+ } else if (isLikelyCustomAgent(tool)) {
+ // Custom agents typically have hyphenated names (kebab-case)
+ categories["Custom Agents"].push(tool);
+ } else {
+ categories["Other"].push(tool);
+ }
+ }
+
+ // Display categories with tools
+ for (const [category, tools] of Object.entries(categories)) {
+ if (tools.length > 0) {
+ markdown += `- **${category}:** ${tools.length} tools\n`;
+ // Show all tools for complete visibility
+ markdown += ` - ${tools.join(", ")}\n`;
+ }
+ }
+ markdown += "\n";
+ }
+
+ // Display slash commands if available (Claude-specific)
+ if (includeSlashCommands && initEntry.slash_commands && Array.isArray(initEntry.slash_commands)) {
+ const commandCount = initEntry.slash_commands.length;
+ markdown += `**Slash Commands:** ${commandCount} available\n`;
+ if (commandCount <= 10) {
+ markdown += `- ${initEntry.slash_commands.join(", ")}\n`;
+ } else {
+ markdown += `- ${initEntry.slash_commands.slice(0, 5).join(", ")}, and ${commandCount - 5} more\n`;
+ }
+ markdown += "\n";
+ }
+
+ // Return format compatible with both engines
+ // Claude expects { markdown, mcpFailures }, Copilot expects just markdown
+ if (mcpFailures.length > 0) {
+ return { markdown, mcpFailures };
+ }
+ return { markdown };
+}
+
+/**
+ * Formats a tool use entry with its result into markdown
+ * @param {any} toolUse - The tool use object containing name, input, etc.
+ * @param {any} toolResult - The corresponding tool result object
+ * @param {Object} options - Configuration options
+ * @param {boolean} [options.includeDetailedParameters] - Whether to include detailed parameter section (default: false)
+ * @returns {string} Formatted markdown string
+ */
+function formatToolUse(toolUse, toolResult, options = {}) {
+ const { includeDetailedParameters = false } = options;
+ const toolName = toolUse.name;
+ const input = toolUse.input || {};
+
+ // Skip TodoWrite except the very last one (we'll handle this separately)
+ if (toolName === "TodoWrite") {
+ return ""; // Skip for now, would need global context to find the last one
+ }
+
+ // Helper function to determine status icon
+ function getStatusIcon() {
+ if (toolResult) {
+ return toolResult.is_error === true ? "❌" : "✅";
+ }
+ return "❓"; // Unknown by default
+ }
+
+ const statusIcon = getStatusIcon();
+ let summary = "";
+ let details = "";
+
+ // Get tool output from result
+ if (toolResult && toolResult.content) {
+ if (typeof toolResult.content === "string") {
+ details = toolResult.content;
+ } else if (Array.isArray(toolResult.content)) {
+ details = toolResult.content.map(c => (typeof c === "string" ? c : c.text || "")).join("\n");
+ }
+ }
+
+ // Calculate token estimate from input + output
+ const inputText = JSON.stringify(input);
+ const outputText = details;
+ const totalTokens = estimateTokens(inputText) + estimateTokens(outputText);
+
+ // Format metadata (duration and tokens)
+ let metadata = "";
+ if (toolResult && toolResult.duration_ms) {
+ metadata += `${formatDuration(toolResult.duration_ms)} `;
+ }
+ if (totalTokens > 0) {
+ metadata += `~${totalTokens}t`;
+ }
+ metadata = metadata.trim();
+
+ // Build the summary based on tool type
+ switch (toolName) {
+ case "Bash":
+ const command = input.command || "";
+ const description = input.description || "";
+
+ // Format the command to be single line
+ const formattedCommand = formatBashCommand(command);
+
+ if (description) {
+ summary = `${description}: ${formattedCommand}`;
+ } else {
+ summary = `${formattedCommand}`;
+ }
+ break;
+
+ case "Read":
+ const filePath = input.file_path || input.path || "";
+ const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ""); // Remove /home/runner/work/repo/repo/ prefix
+ summary = `Read ${relativePath}`;
+ break;
+
+ case "Write":
+ case "Edit":
+ case "MultiEdit":
+ const writeFilePath = input.file_path || input.path || "";
+ const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, "");
+ summary = `Write ${writeRelativePath}`;
+ break;
+
+ case "Grep":
+ case "Glob":
+ const query = input.query || input.pattern || "";
+ summary = `Search for ${truncateString(query, 80)}`;
+ break;
+
+ case "LS":
+ const lsPath = input.path || "";
+ const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, "");
+ summary = `LS: ${lsRelativePath || lsPath}`;
+ break;
+
+ default:
+ // Handle MCP calls and other tools
+ if (toolName.startsWith("mcp__")) {
+ const mcpName = formatMcpName(toolName);
+ const params = formatMcpParameters(input);
+ summary = `${mcpName}(${params})`;
+ } else {
+ // Generic tool formatting - show the tool name and main parameters
+ const keys = Object.keys(input);
+ if (keys.length > 0) {
+ // Try to find the most important parameter
+ const mainParam = keys.find(k => ["query", "command", "path", "file_path", "content"].includes(k)) || keys[0];
+ const value = String(input[mainParam] || "");
+
+ if (value) {
+ summary = `${toolName}: ${truncateString(value, 100)}`;
+ } else {
+ summary = toolName;
+ }
+ } else {
+ summary = toolName;
+ }
+ }
+ }
+
+ // Build sections for formatToolCallAsDetails
+ /** @type {Array<{label: string, content: string, language?: string}>} */
+ const sections = [];
+
+ // For Copilot: include detailed parameters section
+ if (includeDetailedParameters) {
+ const inputKeys = Object.keys(input);
+ if (inputKeys.length > 0) {
+ sections.push({
+ label: "Parameters",
+ content: JSON.stringify(input, null, 2),
+ language: "json",
+ });
+ }
+ }
+
+ // Add response section if we have details
+ // Note: formatToolCallAsDetails will truncate content to MAX_TOOL_OUTPUT_LENGTH
+ if (details && details.trim()) {
+ sections.push({
+ label: includeDetailedParameters ? "Response" : "Output",
+ content: details,
+ });
+ }
+
+ // Use the shared formatToolCallAsDetails helper
+ return formatToolCallAsDetails({
+ summary,
+ statusIcon,
+ sections,
+ metadata: metadata || undefined,
+ });
+}
+
+/**
+ * Parses log content as JSON array or JSONL format
+ * Handles multiple formats: JSON array, JSONL, and mixed format with debug logs
+ * @param {string} logContent - The raw log content as a string
+ * @returns {Array|null} Array of parsed log entries, or null if parsing fails
+ */
+function parseLogEntries(logContent) {
+ let logEntries;
+
+ // First, try to parse as JSON array (old format)
+ try {
+ logEntries = JSON.parse(logContent);
+ if (!Array.isArray(logEntries) || logEntries.length === 0) {
+ throw new Error("Not a JSON array or empty array");
+ }
+ return logEntries;
+ } catch (jsonArrayError) {
+ // If that fails, try to parse as JSONL format (mixed format with debug logs)
+ logEntries = [];
+ const lines = logContent.split("\n");
+
+ for (const line of lines) {
+ const trimmedLine = line.trim();
+ if (trimmedLine === "") {
+ continue; // Skip empty lines
+ }
+
+ // Handle lines that start with [ (JSON array format)
+ if (trimmedLine.startsWith("[{")) {
+ try {
+ const arrayEntries = JSON.parse(trimmedLine);
+ if (Array.isArray(arrayEntries)) {
+ logEntries.push(...arrayEntries);
+ continue;
+ }
+ } catch (arrayParseError) {
+ // Skip invalid array lines
+ continue;
+ }
+ }
+
+ // Skip debug log lines that don't start with {
+ // (these are typically timestamped debug messages)
+ if (!trimmedLine.startsWith("{")) {
+ continue;
+ }
+
+ // Try to parse each line as JSON
+ try {
+ const jsonEntry = JSON.parse(trimmedLine);
+ logEntries.push(jsonEntry);
+ } catch (jsonLineError) {
+ // Skip invalid JSON lines (could be partial debug output)
+ continue;
+ }
+ }
+ }
+
+ // Return null if we couldn't parse anything
+ if (!Array.isArray(logEntries) || logEntries.length === 0) {
+ return null;
+ }
+
+ return logEntries;
+}
+
+/**
+ * Generic helper to format a tool call as an HTML details section.
+ * This is a reusable helper for all code engines (Claude, Copilot, Codex).
+ *
+ * Tool output/response content is automatically truncated to MAX_TOOL_OUTPUT_LENGTH (256 chars)
+ * to keep step summaries readable and prevent size limit issues.
+ *
+ * @param {Object} options - Configuration options
+ * @param {string} options.summary - The summary text to show in the collapsed state (e.g., "✅ github::list_issues")
+ * @param {string} [options.statusIcon] - Status icon (✅, ❌, or ❓). If not provided, should be included in summary.
+ * @param {Array<{label: string, content: string, language?: string}>} [options.sections] - Array of content sections to show in expanded state
+ * @param {string} [options.metadata] - Optional metadata to append to summary (e.g., "~100t", "5s")
+ * @param {number} [options.maxContentLength=MAX_TOOL_OUTPUT_LENGTH] - Maximum length for section content before truncation
+ * @returns {string} Formatted HTML details string or plain summary if no sections provided
+ *
+ * @example
+ * // Basic usage with sections
+ * formatToolCallAsDetails({
+ * summary: "✅ github::list_issues",
+ * metadata: "~100t",
+ * sections: [
+ * { label: "Parameters", content: '{"state":"open"}', language: "json" },
+ * { label: "Response", content: '{"items":[]}', language: "json" }
+ * ]
+ * });
+ *
+ * @example
+ * // Bash command usage
+ * formatToolCallAsDetails({
+ * summary: "✅ ls -la",
+ * sections: [
+ * { label: "Command", content: "ls -la", language: "bash" },
+ * { label: "Output", content: "file1.txt\nfile2.txt" }
+ * ]
+ * });
+ */
+function formatToolCallAsDetails(options) {
+ const { summary, statusIcon, sections, metadata, maxContentLength = MAX_TOOL_OUTPUT_LENGTH } = options;
+
+ // Build the full summary line
+ let fullSummary = summary;
+ if (statusIcon && !summary.startsWith(statusIcon)) {
+ fullSummary = `${statusIcon} ${summary}`;
+ }
+ if (metadata) {
+ fullSummary += ` ${metadata}`;
+ }
+
+ // If no sections or all sections are empty, just return the summary
+ const hasContent = sections && sections.some(s => s.content && s.content.trim());
+ if (!hasContent) {
+ return `${fullSummary}\n\n`;
+ }
+
+ // Build the details content
+ let detailsContent = "";
+ for (const section of sections) {
+ if (!section.content || !section.content.trim()) {
+ continue;
+ }
+
+ detailsContent += `**${section.label}:**\n\n`;
+
+ // Truncate content if it exceeds maxContentLength
+ let content = section.content;
+ if (content.length > maxContentLength) {
+ content = content.substring(0, maxContentLength) + "... (truncated)";
+ }
+
+ // Use 6 backticks to avoid conflicts with content that may contain 3 or 5 backticks
+ if (section.language) {
+ detailsContent += `\`\`\`\`\`\`${section.language}\n`;
+ } else {
+ detailsContent += "``````\n";
+ }
+ detailsContent += content;
+ detailsContent += "\n``````\n\n";
+ }
+
+ // Remove trailing newlines from details content
+ detailsContent = detailsContent.trimEnd();
+
+ return `<details>\n<summary>${fullSummary}</summary>\n\n${detailsContent}\n</details>\n\n`;
+}
+
/**
 * Generates a lightweight plain text summary optimized for raw text rendering.
 * This is designed for console output (core.info) instead of markdown step summaries.
 *
 * The output includes:
 * - A compact header with model info
 * - Agent conversation with response text and tool executions
 * - Basic execution statistics
 *
 * @param {Array} logEntries - Array of log entries with type, message, etc.
 * @param {Object} options - Configuration options
 * @param {string} [options.model] - Model name to include in the header
 * @param {string} [options.parserName] - Name of the parser (e.g., "Copilot", "Claude")
 * @returns {string} Plain text summary for console output
 */
function generatePlainTextSummary(logEntries, options = {}) {
  const { model, parserName = "Agent" } = options;
  const lines = [];

  // Internal file-operation tools hidden from the conversation view and the
  // statistics. Defined once as a Set (O(1) lookups) instead of repeating the
  // array literal for every entry.
  const internalTools = new Set(["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"]);

  // Header
  lines.push(`=== ${parserName} Execution Summary ===`);
  if (model) {
    lines.push(`Model: ${model}`);
  }
  lines.push("");

  // Map tool_use ids -> tool_result entries so each tool execution can be
  // annotated with its success/failure status and a preview of its output.
  const toolUsePairs = new Map();
  for (const entry of logEntries) {
    if (entry.type === "user" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (content.type === "tool_result" && content.tool_use_id) {
          toolUsePairs.set(content.tool_use_id, content);
        }
      }
    }
  }

  // Generate conversation flow with agent responses and tool executions
  lines.push("Conversation:");
  lines.push("");

  let conversationLineCount = 0;
  const MAX_CONVERSATION_LINES = 5000; // Hard cap so huge logs cannot flood console output
  let conversationTruncated = false;

  for (const entry of logEntries) {
    if (conversationLineCount >= MAX_CONVERSATION_LINES) {
      conversationTruncated = true;
      break;
    }

    if (entry.type === "assistant" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (conversationLineCount >= MAX_CONVERSATION_LINES) {
          conversationTruncated = true;
          break;
        }

        if (content.type === "text" && content.text) {
          // Display agent response text
          const text = content.text.trim();
          if (text && text.length > 0) {
            // Truncate long responses to keep output manageable
            const maxTextLength = 500;
            let displayText = text;
            if (displayText.length > maxTextLength) {
              displayText = displayText.substring(0, maxTextLength) + "...";
            }

            // Split into lines and add Agent prefix
            const textLines = displayText.split("\n");
            for (const line of textLines) {
              if (conversationLineCount >= MAX_CONVERSATION_LINES) {
                conversationTruncated = true;
                break;
              }
              lines.push(`Agent: ${line}`);
              conversationLineCount++;
            }
            lines.push(""); // Add blank line after agent response
            conversationLineCount++;
          }
        } else if (content.type === "tool_use") {
          // Display tool execution
          const toolName = content.name;
          const input = content.input || {};

          // Skip internal tools (file operations)
          if (internalTools.has(toolName)) {
            continue;
          }

          const toolResult = toolUsePairs.get(content.id);
          const isError = toolResult?.is_error === true;
          // FIX: both icons had been mojibake-corrupted to the same character,
          // making success and failure indistinguishable in the output.
          const statusIcon = isError ? "✗" : "✓";

          // Format tool execution in Copilot CLI style
          let displayName;
          let resultPreview = "";

          if (toolName === "Bash") {
            const cmd = formatBashCommand(input.command || "");
            displayName = `$ ${cmd}`;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content);
              const resultLines = resultText.split("\n").filter(l => l.trim());
              if (resultLines.length > 0) {
                const previewLine = resultLines[0].substring(0, 80);
                if (resultLines.length > 1) {
                  resultPreview = ` → ${resultLines.length} lines...`;
                } else if (previewLine) {
                  resultPreview = ` → ${previewLine}`;
                }
              }
            }
          } else if (toolName.startsWith("mcp__")) {
            // Format MCP tool names like github-list_pull_requests
            const formattedName = formatMcpName(toolName).replace("::", "-");
            displayName = formattedName;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : JSON.stringify(toolResult.content);
              const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText;
              resultPreview = ` → ${truncated}`;
            }
          } else {
            displayName = toolName;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content);
              const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText;
              resultPreview = ` → ${truncated}`;
            }
          }

          lines.push(`${statusIcon} ${displayName}`);
          conversationLineCount++;

          if (resultPreview) {
            lines.push(resultPreview);
            conversationLineCount++;
          }

          lines.push(""); // Add blank line after tool execution
          conversationLineCount++;
        }
      }
    }
  }

  if (conversationTruncated) {
    lines.push("... (conversation truncated)");
    lines.push("");
  }

  // Statistics — the final log entry carries the run-level metadata
  // (num_turns, duration_ms, usage, total_cost_usd).
  const lastEntry = logEntries[logEntries.length - 1];
  lines.push("Statistics:");
  if (lastEntry?.num_turns) {
    lines.push(` Turns: ${lastEntry.num_turns}`);
  }
  if (lastEntry?.duration_ms) {
    const duration = formatDuration(lastEntry.duration_ms);
    if (duration) {
      lines.push(` Duration: ${duration}`);
    }
  }

  // Count tools for statistics (same skip rules as the conversation view)
  let toolCounts = { total: 0, success: 0, error: 0 };
  for (const entry of logEntries) {
    if (entry.type === "assistant" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (content.type === "tool_use") {
          if (internalTools.has(content.name)) {
            continue;
          }
          toolCounts.total++;
          const toolResult = toolUsePairs.get(content.id);
          if (toolResult?.is_error === true) {
            toolCounts.error++;
          } else {
            toolCounts.success++;
          }
        }
      }
    }
  }

  if (toolCounts.total > 0) {
    lines.push(` Tools: ${toolCounts.success}/${toolCounts.total} succeeded`);
  }
  if (lastEntry?.usage) {
    const usage = lastEntry.usage;
    if (usage.input_tokens || usage.output_tokens) {
      // Calculate total tokens (matching Go parser logic)
      const inputTokens = usage.input_tokens || 0;
      const outputTokens = usage.output_tokens || 0;
      const cacheCreationTokens = usage.cache_creation_input_tokens || 0;
      const cacheReadTokens = usage.cache_read_input_tokens || 0;
      const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens;

      // FIX: use the defaulted locals here — the guard above is an OR, so one
      // of usage.input_tokens / usage.output_tokens may be undefined and
      // calling .toLocaleString() on it would throw.
      lines.push(` Tokens: ${totalTokens.toLocaleString()} total (${inputTokens.toLocaleString()} in / ${outputTokens.toLocaleString()} out)`);
    }
  }
  if (lastEntry?.total_cost_usd) {
    lines.push(` Cost: $${lastEntry.total_cost_usd.toFixed(4)}`);
  }

  return lines.join("\n");
}
+
/**
 * Generates a markdown-formatted Copilot CLI style summary for step summaries.
 * Similar to generatePlainTextSummary but outputs markdown with code blocks for proper rendering.
 *
 * The output includes:
 * - A "Conversation:" section showing agent responses and tool executions
 * - A "Statistics:" section with execution metrics
 *
 * @param {Array} logEntries - Array of log entries with type, message, etc.
 * @param {Object} options - Configuration options
 * @param {string} [options.model] - Model name (accepted for parity with
 *   generatePlainTextSummary; not currently rendered in this output)
 * @param {string} [options.parserName] - Name of the parser (accepted for
 *   parity; not currently rendered in this output)
 * @returns {string} Markdown-formatted summary for step summary rendering
 */
function generateCopilotCliStyleSummary(logEntries, options = {}) {
  // NOTE(review): options.model / options.parserName were destructured but
  // never used; they are intentionally left unread here.
  void options;
  const lines = [];

  // Internal file-operation tools hidden from the conversation view and the
  // statistics. One Set shared by both passes over the log.
  const internalTools = new Set(["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"]);

  // Map tool_use ids -> tool_result entries so each tool execution can be
  // annotated with its success/failure status and a preview of its output.
  const toolUsePairs = new Map();
  for (const entry of logEntries) {
    if (entry.type === "user" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (content.type === "tool_result" && content.tool_use_id) {
          toolUsePairs.set(content.tool_use_id, content);
        }
      }
    }
  }

  // The whole summary is wrapped in a fenced code block so GitHub renders it
  // as preformatted text in the step summary.
  lines.push("```");
  lines.push("Conversation:");
  lines.push("");

  let conversationLineCount = 0;
  const MAX_CONVERSATION_LINES = 5000; // Hard cap so huge logs cannot flood the step summary
  let conversationTruncated = false;

  for (const entry of logEntries) {
    if (conversationLineCount >= MAX_CONVERSATION_LINES) {
      conversationTruncated = true;
      break;
    }

    if (entry.type === "assistant" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (conversationLineCount >= MAX_CONVERSATION_LINES) {
          conversationTruncated = true;
          break;
        }

        if (content.type === "text" && content.text) {
          // Display agent response text
          const text = content.text.trim();
          if (text && text.length > 0) {
            // Truncate long responses to keep output manageable
            const maxTextLength = 500;
            let displayText = text;
            if (displayText.length > maxTextLength) {
              displayText = displayText.substring(0, maxTextLength) + "...";
            }

            // Split into lines and add Agent prefix
            const textLines = displayText.split("\n");
            for (const line of textLines) {
              if (conversationLineCount >= MAX_CONVERSATION_LINES) {
                conversationTruncated = true;
                break;
              }
              lines.push(`Agent: ${line}`);
              conversationLineCount++;
            }
            lines.push(""); // Add blank line after agent response
            conversationLineCount++;
          }
        } else if (content.type === "tool_use") {
          // Display tool execution
          const toolName = content.name;
          const input = content.input || {};

          // Skip internal tools (file operations)
          if (internalTools.has(toolName)) {
            continue;
          }

          const toolResult = toolUsePairs.get(content.id);
          const isError = toolResult?.is_error === true;
          // FIX: both icons had been mojibake-corrupted to the same character,
          // making success and failure indistinguishable in the output.
          const statusIcon = isError ? "✗" : "✓";

          // Format tool execution in Copilot CLI style
          let displayName;
          let resultPreview = "";

          if (toolName === "Bash") {
            const cmd = formatBashCommand(input.command || "");
            displayName = `$ ${cmd}`;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content);
              const resultLines = resultText.split("\n").filter(l => l.trim());
              if (resultLines.length > 0) {
                const previewLine = resultLines[0].substring(0, 80);
                if (resultLines.length > 1) {
                  resultPreview = ` → ${resultLines.length} lines...`;
                } else if (previewLine) {
                  resultPreview = ` → ${previewLine}`;
                }
              }
            }
          } else if (toolName.startsWith("mcp__")) {
            // Format MCP tool names like github-list_pull_requests
            const formattedName = formatMcpName(toolName).replace("::", "-");
            displayName = formattedName;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : JSON.stringify(toolResult.content);
              const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText;
              resultPreview = ` → ${truncated}`;
            }
          } else {
            displayName = toolName;

            // Show result preview if available
            if (toolResult && toolResult.content) {
              const resultText = typeof toolResult.content === "string" ? toolResult.content : String(toolResult.content);
              const truncated = resultText.length > 80 ? resultText.substring(0, 80) + "..." : resultText;
              resultPreview = ` → ${truncated}`;
            }
          }

          lines.push(`${statusIcon} ${displayName}`);
          conversationLineCount++;

          if (resultPreview) {
            lines.push(resultPreview);
            conversationLineCount++;
          }

          lines.push(""); // Add blank line after tool execution
          conversationLineCount++;
        }
      }
    }
  }

  if (conversationTruncated) {
    lines.push("... (conversation truncated)");
    lines.push("");
  }

  // Statistics — the final log entry carries the run-level metadata
  // (num_turns, duration_ms, usage, total_cost_usd).
  const lastEntry = logEntries[logEntries.length - 1];
  lines.push("Statistics:");
  if (lastEntry?.num_turns) {
    lines.push(` Turns: ${lastEntry.num_turns}`);
  }
  if (lastEntry?.duration_ms) {
    const duration = formatDuration(lastEntry.duration_ms);
    if (duration) {
      lines.push(` Duration: ${duration}`);
    }
  }

  // Count tools for statistics (same skip rules as the conversation view)
  let toolCounts = { total: 0, success: 0, error: 0 };
  for (const entry of logEntries) {
    if (entry.type === "assistant" && entry.message?.content) {
      for (const content of entry.message.content) {
        if (content.type === "tool_use") {
          if (internalTools.has(content.name)) {
            continue;
          }
          toolCounts.total++;
          const toolResult = toolUsePairs.get(content.id);
          if (toolResult?.is_error === true) {
            toolCounts.error++;
          } else {
            toolCounts.success++;
          }
        }
      }
    }
  }

  if (toolCounts.total > 0) {
    lines.push(` Tools: ${toolCounts.success}/${toolCounts.total} succeeded`);
  }
  if (lastEntry?.usage) {
    const usage = lastEntry.usage;
    if (usage.input_tokens || usage.output_tokens) {
      // Calculate total tokens (matching Go parser logic)
      const inputTokens = usage.input_tokens || 0;
      const outputTokens = usage.output_tokens || 0;
      const cacheCreationTokens = usage.cache_creation_input_tokens || 0;
      const cacheReadTokens = usage.cache_read_input_tokens || 0;
      const totalTokens = inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens;

      // FIX: use the defaulted locals here — the guard above is an OR, so one
      // of usage.input_tokens / usage.output_tokens may be undefined and
      // calling .toLocaleString() on it would throw.
      lines.push(` Tokens: ${totalTokens.toLocaleString()} total (${inputTokens.toLocaleString()} in / ${outputTokens.toLocaleString()} out)`);
    }
  }
  if (lastEntry?.total_cost_usd) {
    lines.push(` Cost: $${lastEntry.total_cost_usd.toFixed(4)}`);
  }

  lines.push("```");

  return lines.join("\n");
}
+
// Export functions and constants
// Public surface of this shared log-formatting module; the grouping comments
// below mirror the order of the definitions in this file. Consumers appear to
// be the per-engine log parsers (names suggest Copilot/Claude) — confirm
// against callers, which are not visible in this chunk.
module.exports = {
  // Constants
  MAX_TOOL_OUTPUT_LENGTH,
  MAX_STEP_SUMMARY_SIZE,
  // Classes
  StepSummaryTracker,
  // Functions
  formatDuration,
  formatBashCommand,
  truncateString,
  estimateTokens,
  formatMcpName,
  isLikelyCustomAgent,
  generateConversationMarkdown,
  generateInformationSection,
  formatMcpParameters,
  formatInitializationSummary,
  formatToolUse,
  parseLogEntries,
  formatToolCallAsDetails,
  generatePlainTextSummary,
  generateCopilotCliStyleSummary,
};
diff --git a/actions/setup/js/mcp_handler_python.cjs b/actions/setup/js/mcp_handler_python.cjs
new file mode 100644
index 0000000000..1807e86c56
--- /dev/null
+++ b/actions/setup/js/mcp_handler_python.cjs
@@ -0,0 +1,100 @@
+// @ts-check
+
+/**
+ * Python Script Handler for Safe-Inputs
+ *
+ * This module provides a handler for executing Python scripts in safe-inputs tools.
+ * It uses a Pythonic approach for passing inputs via JSON on stdin.
+ */
+
+const { execFile } = require("child_process");
+
/**
 * Create a Python script handler function that executes a .py file.
 * Inputs are passed as JSON via stdin for a more Pythonic approach:
 * - Inputs are passed as JSON object via stdin (similar to JavaScript tools)
 * - Python script reads and parses JSON from stdin into 'inputs' dictionary
 * - Outputs are read from stdout (JSON format expected)
 *
 * @param {Object} server - The MCP server instance for logging
 * @param {string} toolName - Name of the tool for logging purposes
 * @param {string} scriptPath - Path to the Python script to execute
 * @param {number} [timeoutSeconds=60] - Timeout in seconds for script execution
 * @returns {Function} Async handler function that executes the Python script
 */
function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) {
  return async args => {
    server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`);
    server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`);
    server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`);

    // Pass inputs as JSON via stdin (more Pythonic approach)
    const inputJson = JSON.stringify(args || {});
    server.debug(` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}`);

    return new Promise((resolve, reject) => {
      server.debug(` [${toolName}] Executing Python script...`);

      const child = execFile(
        "python3",
        [scriptPath],
        {
          env: process.env,
          timeout: timeoutSeconds * 1000, // Convert to milliseconds
          maxBuffer: 10 * 1024 * 1024, // 10MB buffer
        },
        (error, stdout, stderr) => {
          // Log stdout and stderr
          if (stdout) {
            server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`);
          }
          if (stderr) {
            server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`);
          }

          if (error) {
            server.debugError(` [${toolName}] Python script error: `, error);
            reject(error);
            return;
          }

          // Parse output from stdout: prefer JSON, fall back to raw text
          let result;
          try {
            if (stdout && stdout.trim()) {
              result = JSON.parse(stdout.trim());
            } else {
              result = { stdout: stdout || "", stderr: stderr || "" };
            }
          } catch {
            server.debug(` [${toolName}] Output is not JSON, returning as text`);
            result = { stdout: stdout || "", stderr: stderr || "" };
          }

          server.debug(` [${toolName}] Python handler completed successfully`);

          // Return MCP format
          resolve({
            content: [
              {
                type: "text",
                text: JSON.stringify(result),
              },
            ],
          });
        }
      );

      // Write input JSON to stdin.
      // FIX: if the child exits (or fails to spawn, e.g. python3 missing)
      // before consuming stdin, the write triggers an 'error' event (EPIPE)
      // on the stream; with no listener that event crashes the Node process.
      // Swallow stream errors here — the execFile callback above still
      // reports the underlying failure via reject().
      if (child.stdin) {
        child.stdin.on("error", err => {
          server.debug(` [${toolName}] stdin write error (ignored): ${err.message}`);
        });
        child.stdin.write(inputJson);
        child.stdin.end();
      }
    });
  };
}
+
// Public API: a single factory that wraps a Python script as an MCP tool handler.
module.exports = {
  createPythonHandler,
};
diff --git a/actions/setup/js/mcp_handler_shell.cjs b/actions/setup/js/mcp_handler_shell.cjs
new file mode 100644
index 0000000000..cda3f28276
--- /dev/null
+++ b/actions/setup/js/mcp_handler_shell.cjs
@@ -0,0 +1,146 @@
+// @ts-check
+
+/**
+ * Shell Script Handler for Safe-Inputs
+ *
+ * This module provides a handler for executing shell scripts in safe-inputs tools.
+ * It follows GitHub Actions conventions for passing inputs and reading outputs.
+ */
+
+const fs = require("fs");
+const path = require("path");
+const { execFile } = require("child_process");
+const os = require("os");
+
/**
 * Create a shell script handler function that executes a .sh file.
 * Uses GitHub Actions convention for passing inputs/outputs:
 * - Inputs are passed as environment variables prefixed with INPUT_ (uppercased, dashes replaced with underscores)
 * - Outputs are read from GITHUB_OUTPUT file (key=value format, one per line)
 * - Returns: { stdout, stderr, outputs }
 *
 * NOTE(review): only single-line `key=value` outputs are parsed; the GitHub
 * Actions multiline "heredoc" (`key<<EOF`) format is not supported here —
 * confirm whether any tool scripts rely on it.
 *
 * @param {Object} server - The MCP server instance for logging
 * @param {string} toolName - Name of the tool for logging purposes
 * @param {string} scriptPath - Path to the shell script to execute
 * @param {number} [timeoutSeconds=60] - Timeout in seconds for script execution
 * @returns {Function} Async handler function that executes the shell script
 */
function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) {
  return async args => {
    server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`);
    server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`);
    server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`);

    // Create environment variables from args (GitHub Actions convention: INPUT_NAME)
    const env = { ...process.env };
    for (const [key, value] of Object.entries(args || {})) {
      const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`;
      env[envKey] = String(value);
      server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`);
    }

    // Create a temporary file for outputs (GitHub Actions convention: GITHUB_OUTPUT)
    const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`);
    env.GITHUB_OUTPUT = outputFile;
    server.debug(` [${toolName}] Output file: ${outputFile}`);

    // Create the output file (empty) so the script can append to it
    fs.writeFileSync(outputFile, "");

    // Best-effort removal of the temporary output file; defined once so both
    // the error and success paths share the same cleanup logic.
    const cleanupOutputFile = () => {
      try {
        if (fs.existsSync(outputFile)) {
          fs.unlinkSync(outputFile);
        }
      } catch {
        // Ignore cleanup errors
      }
    };

    return new Promise((resolve, reject) => {
      server.debug(` [${toolName}] Executing shell script...`);

      execFile(
        scriptPath,
        [],
        {
          env,
          timeout: timeoutSeconds * 1000, // Convert to milliseconds
          maxBuffer: 10 * 1024 * 1024, // 10MB buffer
        },
        (error, stdout, stderr) => {
          // Log stdout and stderr
          if (stdout) {
            server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`);
          }
          if (stderr) {
            server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`);
          }

          if (error) {
            server.debugError(` [${toolName}] Shell script error: `, error);
            cleanupOutputFile();
            reject(error);
            return;
          }

          // Read outputs from the GITHUB_OUTPUT file
          // FIX: the type annotation had been corrupted to `{Record}` (the
          // generic parameters were stripped); restored to a usable type.
          /** @type {Record<string, string>} */
          const outputs = {};
          try {
            if (fs.existsSync(outputFile)) {
              const outputContent = fs.readFileSync(outputFile, "utf-8");
              server.debug(` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}`);

              // Parse outputs (key=value format, one per line). The value may
              // itself contain '=', so split on the first one only.
              const outputLines = outputContent.split("\n");
              for (const line of outputLines) {
                const trimmed = line.trim();
                if (trimmed && trimmed.includes("=")) {
                  const eqIndex = trimmed.indexOf("=");
                  const key = trimmed.substring(0, eqIndex);
                  const value = trimmed.substring(eqIndex + 1);
                  outputs[key] = value;
                  server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." : ""}`);
                }
              }
            }
          } catch (readError) {
            server.debugError(` [${toolName}] Error reading output file: `, readError);
          }

          cleanupOutputFile();

          // Build the result
          const result = {
            stdout: stdout || "",
            stderr: stderr || "",
            outputs,
          };

          server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`);

          // Return MCP format
          resolve({
            content: [
              {
                type: "text",
                text: JSON.stringify(result),
              },
            ],
          });
        }
      );
    });
  };
}
+
// Public API: a single factory that wraps a shell script as an MCP tool handler.
module.exports = {
  createShellHandler,
};
diff --git a/actions/setup/js/mcp_http_transport.cjs b/actions/setup/js/mcp_http_transport.cjs
new file mode 100644
index 0000000000..b37a581e4d
--- /dev/null
+++ b/actions/setup/js/mcp_http_transport.cjs
@@ -0,0 +1,295 @@
+// @ts-check
+/// <reference types="node" />
+
+/**
+ * MCP HTTP Transport Implementation
+ *
+ * This module provides the HTTP transport layer for the MCP (Model Context Protocol),
+ * removing the dependency on @modelcontextprotocol/sdk.
+ *
+ * Features:
+ * - HTTP request/response handling
+ * - Session management (stateful and stateless modes)
+ * - CORS support for development
+ * - JSON-RPC 2.0 compatible
+ *
+ * References:
+ * - MCP Specification: https://spec.modelcontextprotocol.io
+ * - JSON-RPC 2.0: https://www.jsonrpc.org/specification
+ */
+
+const http = require("http");
+const { randomUUID } = require("crypto");
+const { createServer, registerTool, handleRequest } = require("./mcp_server_core.cjs");
+
+/**
+ * Simple MCP Server wrapper that provides a class-like interface
+ * compatible with the HTTP transport, backed by mcp_server_core functions.
+ */
+class MCPServer {
+ /**
+ * @param {Object} serverInfo - Server metadata
+ * @param {string} serverInfo.name - Server name
+ * @param {string} serverInfo.version - Server version
+ * @param {Object} [options] - Server options
+ * @param {Object} [options.capabilities] - Server capabilities
+ */
  constructor(serverInfo, options = {}) {
    // The actual protocol logic lives in mcp_server_core.cjs; this class is a
    // thin stateful wrapper that the HTTP transport can hold on to.
    this._coreServer = createServer(serverInfo, options);
    this.serverInfo = serverInfo;
    this.capabilities = options.capabilities || { tools: {} };
    // Local registry of tool definitions, mirrored onto the core server via tool().
    this.tools = new Map();
    this.transport = null; // assigned in connect()
    this.initialized = false; // presumably set after the MCP initialize handshake — confirm (handshake code not in view)
  }
+
+ /**
+ * Register a tool with the server
+ * @param {string} name - Tool name
+ * @param {string} description - Tool description
+ * @param {Object} inputSchema - JSON Schema for tool input
+ * @param {Function} handler - Async function that handles tool calls
+ */
+ tool(name, description, inputSchema, handler) {
+ this.tools.set(name, {
+ name,
+ description,
+ inputSchema,
+ handler,
+ });
+ // Also register with the core server
+ registerTool(this._coreServer, {
+ name,
+ description,
+ inputSchema,
+ handler,
+ });
+ }
+
+ /**
+ * Connect to a transport
+ * @param {any} transport - Transport instance (must have setServer and start methods)
+ */
  async connect(transport) {
    this.transport = transport;
    // Wire the transport back to this server so incoming HTTP requests can be
    // dispatched to it, then start accepting connections.
    transport.setServer(this);
    await transport.start();
  }
+
+ /**
+ * Handle an incoming JSON-RPC request
+ * @param {Object} request - JSON-RPC request
+ * @returns {Promise