diff --git a/.github/workflows/example-engine-network-permissions.lock.yml b/.github/workflows/example-engine-network-permissions.lock.yml index 1a7f15a4dc..661c0d4cd7 100644 --- a/.github/workflows/example-engine-network-permissions.lock.yml +++ b/.github/workflows/example-engine-network-permissions.lock.yml @@ -320,24 +320,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -345,16 +345,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && 
content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -362,26 +362,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? 
"❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -398,13 +409,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -421,29 +438,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache 
Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -463,22 +487,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? 
"❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -486,31 +510,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const 
lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -519,8 +552,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -535,11 +571,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if (toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. 
return `${provider}::${method}`; } } @@ -547,44 +583,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + 
'...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs diff --git a/.github/workflows/issue-triage.lock.yml b/.github/workflows/issue-triage.lock.yml index 43ee52d497..c2980b1957 100644 --- a/.github/workflows/issue-triage.lock.yml +++ b/.github/workflows/issue-triage.lock.yml @@ -333,24 +333,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -358,16 +358,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by 
tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -375,26 +375,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? 
"❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -411,13 +422,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -434,29 +451,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache 
Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -476,22 +500,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? 
"❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -499,31 +523,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const 
lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -532,8 +565,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -548,11 +584,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if (toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. 
return `${provider}::${method}`; } } @@ -560,44 +596,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + 
'...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs diff --git a/.github/workflows/test-claude-add-issue-comment.lock.yml b/.github/workflows/test-claude-add-issue-comment.lock.yml index bb61ad0587..f36012ec78 100644 --- a/.github/workflows/test-claude-add-issue-comment.lock.yml +++ b/.github/workflows/test-claude-add-issue-comment.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. 
Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -308,23 +327,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -547,34 +566,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -582,16 +604,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -602,16 +628,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -620,10 +652,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -632,8 +667,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -642,8 +679,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -654,65 +693,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -721,25 +860,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -747,107 +896,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -860,7 +1149,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -869,10 +1158,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -914,24 +1203,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -939,16 +1228,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -956,26 +1245,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -992,13 +1292,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1015,29 +1321,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1057,22 +1370,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1080,31 +1393,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1113,8 +1435,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1129,11 +1454,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1141,44 +1466,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1213,30 +1544,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } console.log(`Found ${commentItems.length} add-issue-comment item(s)`); @@ -1244,18 +1580,27 @@ jobs: const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } const createdComments = []; // Process each comment item for (let i = 0; i < commentItems.length; i++) { 
const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, { bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; let commentEndpoint; @@ -1264,79 +1609,90 @@ jobs: if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = 
"issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } // Extract body from the JSON item let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API const { data: comment } = await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } diff --git a/.github/workflows/test-claude-add-issue-labels.lock.yml b/.github/workflows/test-claude-add-issue-labels.lock.yml index 1c32c25bef..c2d7f089a9 100644 --- a/.github/workflows/test-claude-add-issue-labels.lock.yml +++ b/.github/workflows/test-claude-add-issue-labels.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. 
Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -308,23 +327,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -547,34 +566,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -582,16 +604,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -602,16 +628,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -620,10 +652,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -632,8 +667,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -642,8 +679,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -654,65 +693,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -721,25 +860,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -747,107 +896,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -860,7 +1149,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -869,10 +1158,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -914,24 +1203,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -939,16 +1228,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -956,26 +1245,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + 
if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -992,13 +1292,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + 
lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1015,29 +1321,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - 
markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1057,22 +1370,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1080,31 +1393,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const 
writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1113,8 +1435,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1129,11 +1454,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1141,44 +1466,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1214,60 +1545,78 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the add-issue-label item - const labelsItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'add-issue-label'); + const labelsItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "add-issue-label" + ); if (!labelsItem) { - console.log('No add-issue-label item found in agent output'); + console.log("No add-issue-label item found in agent output"); return; } - console.log('Found add-issue-label item:', { labelsCount: labelsItem.labels.length }); + console.log("Found add-issue-label item:", { + labelsCount: labelsItem.labels.length, + }); // Read the allowed labels from environment variable (optional) const allowedLabelsEnv = process.env.GITHUB_AW_LABELS_ALLOWED; let allowedLabels = null; - if (allowedLabelsEnv && allowedLabelsEnv.trim() !== '') { - allowedLabels = allowedLabelsEnv.split(',').map(label => label.trim()).filter(label => label); + if (allowedLabelsEnv && allowedLabelsEnv.trim() !== "") { + allowedLabels = allowedLabelsEnv + .split(",") + .map(label => label.trim()) + .filter(label => label); if (allowedLabels.length === 0) { allowedLabels = null; // Treat empty list as no restrictions } } if (allowedLabels) { - console.log('Allowed labels:', allowedLabels); + console.log("Allowed labels:", allowedLabels); } else { - console.log('No label restrictions - any labels are allowed'); + console.log("No label restrictions - any labels are allowed"); } // Read the max limit from environment variable (default: 3) const maxCountEnv = process.env.GITHUB_AW_LABELS_MAX_COUNT; const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 3; if (isNaN(maxCount) || maxCount < 1) { - core.setFailed(`Invalid max value: ${maxCountEnv}. 
Must be a positive integer`); + core.setFailed( + `Invalid max value: ${maxCountEnv}. Must be a positive integer` + ); return; } - console.log('Max count:', maxCount); + console.log("Max count:", maxCount); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; if (!isIssueContext && !isPRContext) { - core.setFailed('Not running in issue or pull request context, skipping label addition'); + core.setFailed( + "Not running in issue or pull request context, skipping label addition" + ); return; } // Determine the issue/PR number @@ -1276,38 +1625,44 @@ jobs: if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - contextType = 'issue'; + contextType = "issue"; } else { - core.setFailed('Issue context detected but no issue found in payload'); + core.setFailed("Issue context detected but no issue found in payload"); return; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - contextType = 'pull request'; + contextType = "pull request"; } else { - core.setFailed('Pull request context detected but no pull request found in payload'); + core.setFailed( + "Pull request context detected but no pull request found in payload" + ); return; } } if (!issueNumber) { - core.setFailed('Could not determine issue or pull request number'); + core.setFailed("Could not determine issue or pull request number"); return; } // Extract labels from the JSON item const 
requestedLabels = labelsItem.labels || []; - console.log('Requested labels:', requestedLabels); + console.log("Requested labels:", requestedLabels); // Check for label removal attempts (labels starting with '-') for (const label of requestedLabels) { - if (label.startsWith('-')) { - core.setFailed(`Label removal is not permitted. Found line starting with '-': ${label}`); + if (label.startsWith("-")) { + core.setFailed( + `Label removal is not permitted. Found line starting with '-': ${label}` + ); return; } } // Validate that all requested labels are in the allowed list (if restrictions are set) let validLabels; if (allowedLabels) { - validLabels = requestedLabels.filter(/** @param {string} label */ label => allowedLabels.includes(label)); + validLabels = requestedLabels.filter( + /** @param {string} label */ label => allowedLabels.includes(label) + ); } else { // No restrictions, all requested labels are valid validLabels = requestedLabels; @@ -1316,40 +1671,55 @@ jobs: let uniqueLabels = [...new Set(validLabels)]; // Enforce max limit if (uniqueLabels.length > maxCount) { - console.log(`too many labels, keep ${maxCount}`) + console.log(`too many labels, keep ${maxCount}`); uniqueLabels = uniqueLabels.slice(0, maxCount); } if (uniqueLabels.length === 0) { - console.log('No labels to add'); - core.setOutput('labels_added', ''); - await core.summary.addRaw(` + console.log("No labels to add"); + core.setOutput("labels_added", ""); + await core.summary + .addRaw( + ` ## Label Addition No labels were added (no valid labels found in agent output). 
- `).write(); + ` + ) + .write(); return; } - console.log(`Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, uniqueLabels); + console.log( + `Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, + uniqueLabels + ); try { // Add labels using GitHub API await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - labels: uniqueLabels + labels: uniqueLabels, }); - console.log(`Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}`); + console.log( + `Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}` + ); // Set output for other jobs to use - core.setOutput('labels_added', uniqueLabels.join('\n')); + core.setOutput("labels_added", uniqueLabels.join("\n")); // Write summary - const labelsListMarkdown = uniqueLabels.map(label => `- \`${label}\``).join('\n'); - await core.summary.addRaw(` + const labelsListMarkdown = uniqueLabels + .map(label => `- \`${label}\``) + .join("\n"); + await core.summary + .addRaw( + ` ## Label Addition Successfully added ${uniqueLabels.length} label(s) to ${contextType} #${issueNumber}: ${labelsListMarkdown} - `).write(); + ` + ) + .write(); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to add labels:', errorMessage); + console.error("Failed to add labels:", errorMessage); core.setFailed(`Failed to add labels: ${errorMessage}`); } } diff --git a/.github/workflows/test-claude-command.lock.yml b/.github/workflows/test-claude-command.lock.yml index 9179cb1f28..74cf82a444 100644 --- a/.github/workflows/test-claude-command.lock.yml +++ b/.github/workflows/test-claude-command.lock.yml @@ -38,24 +38,28 @@ jobs: const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) try { - console.log(`Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}`); - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + console.log( + `Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}` + ); + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission === 'admin' || permission === 'maintain') { + if (permission === "admin" || permission === "maintain") { console.log(`User has ${permission} access to repository`); - core.setOutput('is_team_member', 'true'); + core.setOutput("is_team_member", "true"); return; } } catch (repoError) { - const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + const errorMessage = + repoError instanceof Error ? 
repoError.message : String(repoError); console.log(`Repository permission check failed: ${errorMessage}`); } - core.setOutput('is_team_member', 'false'); + core.setOutput("is_team_member", "false"); } await main(); - name: Validate team membership @@ -75,34 +79,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" // Step 1: Temporarily mark HTTPS URLs to protect them sanitized = sanitizeUrlProtocols(sanitized); @@ -112,16 +119,19 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length 
> maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -132,16 +142,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - s = s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? 
match : '(redacted)'; - }); + s = s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); return s; } /** @@ -152,10 +168,13 @@ jobs: function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns // This covers URLs like https://example.com, javascript:alert(), mailto:user@domain.com, etc. - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -164,8 +183,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -174,73 +195,77 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } async function main() { - let text = ''; + let text = ""; const actor = context.actor; const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel( + { + owner: owner, + repo: repo, + username: actor, + } + ); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission !== 'admin' && permission !== 'maintain') { - core.setOutput('text', ''); + if (permission !== "admin" && permission !== "maintain") { + core.setOutput("text", ""); return; } // Determine current body text based on event context switch (context.eventName) { - case 'issues': + case "issues": // For issues: title + body if (context.payload.issue) { - const title = context.payload.issue.title || ''; - const body = context.payload.issue.body || ''; + const title = context.payload.issue.title || ""; + const body = context.payload.issue.body || ""; text = `${title}\n\n${body}`; } break; - case 'pull_request': + case "pull_request": // For pull requests: title + body if (context.payload.pull_request) { - const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - case 'pull_request_target': + case "pull_request_target": // For pull request target events: title + body if (context.payload.pull_request) { - 
const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - case 'issue_comment': + case "issue_comment": // For issue comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - case 'pull_request_review_comment': + case "pull_request_review_comment": // For PR review comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - case 'pull_request_review': + case "pull_request_review": // For PR reviews: review body if (context.payload.review) { - text = context.payload.review.body || ''; + text = context.payload.review.body || ""; } break; default: // Default: empty text - text = ''; + text = ""; break; } // Sanitize the text before output @@ -248,7 +273,7 @@ jobs: // Display sanitized text in logs console.log(`text: ${sanitizedText}`); // Set the sanitized text as output - core.setOutput('text', sanitizedText); + core.setOutput("text", sanitizedText); } await main(); @@ -271,21 +296,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -297,20 +333,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -318,10 +354,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -329,10 +365,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -344,24 +380,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -370,19 +410,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -393,33 +433,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -546,23 +590,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -785,34 +829,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -820,16 +867,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -840,16 +891,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -858,10 +915,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -870,8 +930,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -880,8 +942,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -892,65 +956,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -959,25 +1123,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -985,107 +1159,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -1098,7 +1412,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -1107,10 +1421,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -1152,24 +1466,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -1177,16 +1491,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -1194,26 +1508,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -1230,13 +1555,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1253,29 +1584,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1295,22 +1633,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1318,31 +1656,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1351,8 +1698,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1367,11 +1717,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1379,44 +1729,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1451,30 +1807,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } console.log(`Found ${commentItems.length} add-issue-comment item(s)`); @@ -1482,18 +1843,27 @@ jobs: const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } const createdComments = []; // Process each comment item for (let i = 0; i < commentItems.length; i++) { 
const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, { bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; let commentEndpoint; @@ -1502,79 +1872,90 @@ jobs: if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = 
"issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } // Extract body from the JSON item let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API const { data: comment } = await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } diff --git a/.github/workflows/test-claude-create-issue.lock.yml b/.github/workflows/test-claude-create-issue.lock.yml index aa213e81ab..37193fd916 100644 --- a/.github/workflows/test-claude-create-issue.lock.yml +++ b/.github/workflows/test-claude-create-issue.lock.yml @@ -135,23 +135,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -376,34 +376,37 @@ jobs: * @returns {string} The sanitized content */ function 
sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -411,16 +414,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const 
maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -431,16 +438,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? 
match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -449,10 +462,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -461,8 +477,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -471,8 +489,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -483,65 +503,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function 
repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * 
@param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", 
Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -550,25 +670,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -576,106 +706,246 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); continue; } } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); @@ -689,7 +959,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -698,10 +968,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -743,24 +1013,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -768,16 +1038,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -785,26 +1055,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + 
if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -821,13 +1102,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + 
lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -844,29 +1131,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - 
markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -886,22 +1180,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -909,31 +1203,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const 
writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -942,8 +1245,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -958,11 +1264,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -970,44 +1276,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1042,30 +1354,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all create-issue items - const createIssueItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'create-issue'); + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); if (createIssueItems.length === 0) { - console.log('No create-issue items found in agent output'); + console.log("No create-issue items found in agent output"); return; } console.log(`Found ${createIssueItems.length} create-issue item(s)`); @@ -1073,23 +1390,31 @@ jobs: const parentIssueNumber = context.payload?.issue?.number; // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; const createdIssues = []; // Process each create-issue item for (let i = 0; i < createIssueItems.length; i++) { const createIssueItem = createIssueItems[i]; - console.log(`Processing create-issue item ${i + 1}/${createIssueItems.length}:`, { title: createIssueItem.title, bodyLength: createIssueItem.body.length }); + console.log( + `Processing create-issue item ${i + 1}/${createIssueItems.length}:`, + { title: createIssueItem.title, bodyLength: createIssueItem.body.length } + ); // Merge environment labels with item-specific labels let labels = [...envLabels]; if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { labels = [...labels, ...createIssueItem.labels].filter(Boolean); } // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ''; - let bodyLines = createIssueItem.body.split('\n'); + let title = createIssueItem.title ? 
createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); // If no title was found, use the body content as title (or a default) if (!title) { - title = createIssueItem.body || 'Agent Output'; + title = createIssueItem.body || "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; @@ -1097,22 +1422,27 @@ jobs: title = titlePrefix + title; } if (parentIssueNumber) { - console.log('Detected issue context, parent issue #' + parentIssueNumber); + console.log("Detected issue context, parent issue #" + parentIssueNumber); // Add reference to parent issue in the child issue body bodyLines.push(`Related to #${parentIssueNumber}`); } // Add AI disclaimer with run id, run htmlurl // Add AI disclaimer with workflow run information const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); - console.log('Creating issue with title:', title); - console.log('Labels:', labels); - console.log('Body length:', body.length); + const body = bodyLines.join("\n").trim(); + console.log("Creating issue with title:", title); + console.log("Labels:", labels); + console.log("Body length:", body.length); try { // Create the issue using GitHub API const { data: issue } = await github.rest.issues.create({ @@ -1120,9 +1450,9 @@ jobs: repo: context.repo.repo, title: title, body: body, - labels: labels + labels: labels, }); - console.log('Created issue #' + issue.number + ': ' + issue.html_url); + 
console.log("Created issue #" + issue.number + ": " + issue.html_url); createdIssues.push(issue); // If we have a parent issue, add a comment to it referencing the new child issue if (parentIssueNumber) { @@ -1131,26 +1461,32 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}` + body: `Created related issue: #${issue.number}`, }); - console.log('Added comment to parent issue #' + parentIssueNumber); + console.log("Added comment to parent issue #" + parentIssueNumber); } catch (error) { - console.log('Warning: Could not add comment to parent issue:', error instanceof Error ? error.message : String(error)); + console.log( + "Warning: Could not add comment to parent issue:", + error instanceof Error ? error.message : String(error) + ); } } // Set output for the last created issue (for backward compatibility) if (i === createIssueItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to create issue "${title}":`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create issue "${title}":`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created issues if (createdIssues.length > 0) { - let summaryContent = '\n\n## GitHub Issues\n'; + let summaryContent = "\n\n## GitHub Issues\n"; for (const issue of createdIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml b/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml new file mode 100644 index 0000000000..d672a8a8d3 --- /dev/null +++ b/.github/workflows/test-claude-create-pull-request-review-comment.lock.yml @@ -0,0 +1,1738 @@ +# This file was automatically generated by gh-aw. DO NOT EDIT. +# To update this file, edit the corresponding .md file and run: +# gh aw compile + +name: "Test Claude Create Pull Request Review Comment" +"on": + pull_request: + types: + - opened + - synchronize + - reopened + +permissions: {} + +concurrency: + group: "gh-aw-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" + cancel-in-progress: true + +run-name: "Test Claude Create Pull Request Review Comment" + +jobs: + task: + if: contains(github.event.pull_request.title, 'prr') + runs-on: ubuntu-latest + steps: + - name: Task job condition barrier + run: echo "Task job executed - conditions satisfied" + + add_reaction: + needs: task + if: github.event_name == 'issues' || github.event_name == 'pull_request' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_comment' || github.event_name == 'pull_request_review_comment' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + outputs: + reaction_id: ${{ steps.react.outputs.reaction-id }} + steps: + - name: Add eyes reaction to the triggering item + id: react + uses: actions/github-script@v7 + env: + GITHUB_AW_REACTION: eyes + with: + script: | + async function main() { + // Read inputs from environment variables + const 
reaction = process.env.GITHUB_AW_REACTION || "eyes"; + const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); + // Validate reaction type + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; + if (!validReactions.includes(reaction)) { + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); + return; + } + // Determine the API endpoint based on the event type + let reactionEndpoint; + let commentUpdateEndpoint; + let shouldEditComment = false; + const eventName = context.eventName; + const owner = context.repo.owner; + const repo = context.repo.repo; + try { + switch (eventName) { + case "issues": + const issueNumber = context.payload?.issue?.number; + if (!issueNumber) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; + // Don't edit issue bodies for now - this might be more complex + shouldEditComment = false; + break; + case "issue_comment": + const commentId = context.payload?.comment?.id; + if (!commentId) { + core.setFailed("Comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? 
true : false; + break; + case "pull_request": + const prNumber = context.payload?.pull_request?.number; + if (!prNumber) { + core.setFailed("Pull request number not found in event payload"); + return; + } + // PRs are "issues" for the reactions endpoint + reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; + // Don't edit PR bodies for now - this might be more complex + shouldEditComment = false; + break; + case "pull_request_review_comment": + const reviewCommentId = context.payload?.comment?.id; + if (!reviewCommentId) { + core.setFailed("Review comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? true : false; + break; + default: + core.setFailed(`Unsupported event type: ${eventName}`); + return; + } + console.log("Reaction API endpoint:", reactionEndpoint); + // Add reaction first + await addReaction(reactionEndpoint, reaction); + // Then edit comment if applicable and if it's a comment event + if (shouldEditComment && commentUpdateEndpoint) { + console.log("Comment update endpoint:", commentUpdateEndpoint); + await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); + } else { + if (!alias && commentUpdateEndpoint) { + console.log( + "Skipping comment edit - only available for alias workflows" + ); + } else { + console.log("Skipping comment edit for event type:", eventName); + } + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); + } + } + /** + * Add a reaction to a GitHub issue, PR, or comment + * @param {string} endpoint - The GitHub API endpoint to add the reaction to + * @param {string} reaction - The reaction type to add + */ + async function addReaction(endpoint, reaction) { + const response = await github.request("POST " + endpoint, { + content: reaction, + headers: { + Accept: "application/vnd.github+json", + }, + }); + const reactionId = response.data?.id; + if (reactionId) { + console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId.toString()); + } else { + console.log(`Successfully added reaction: ${reaction}`); + core.setOutput("reaction-id", ""); + } + } + /** + * Edit a comment to add a workflow run link + * @param {string} endpoint - The GitHub API endpoint to update the comment + * @param {string} runUrl - The URL of the workflow run + */ + async function editCommentWithWorkflowLink(endpoint, runUrl) { + try { + // First, get the current comment content + const getResponse = await github.request("GET " + endpoint, { + headers: { + Accept: "application/vnd.github+json", + }, + }); + const originalBody = getResponse.data.body || ""; + const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; + // Check if we've already added a workflow link to avoid duplicates + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment already contains a workflow run link, skipping edit" + ); + return; + } + const updatedBody = originalBody + workflowLinkText; + // Update the comment + const updateResponse = await github.request("PATCH " + endpoint, { + body: updatedBody, + headers: { + Accept: "application/vnd.github+json", + }, + }); + console.log(`Successfully updated comment 
with workflow link`); + console.log(`Comment ID: ${updateResponse.data.id}`); + } catch (error) { + // Don't fail the entire job if comment editing fails - just log it + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); + } + } + await main(); + + test-claude-create-pull-request-review-comment: + needs: task + runs-on: ubuntu-latest + permissions: read-all + outputs: + output: ${{ steps.collect_output.outputs.output }} + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Generate Network Permissions Hook + run: | + mkdir -p .claude/hooks + cat > .claude/hooks/network_permissions.py << 'EOF' + #!/usr/bin/env python3 + """ + Network permissions validator for Claude Code engine. + Generated by gh-aw from engine network permissions configuration. + """ + + import json + import sys + import urllib.parse + import re + + # Domain whitelist (populated during generation) + ALLOWED_DOMAINS = 
["crl3.digicert.com","crl4.digicert.com","ocsp.digicert.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","crl.geotrust.com","ocsp.geotrust.com","crl.thawte.com","ocsp.thawte.com","crl.verisign.com","ocsp.verisign.com","crl.globalsign.com","ocsp.globalsign.com","crls.ssl.com","ocsp.ssl.com","crl.identrust.com","ocsp.identrust.com","crl.sectigo.com","ocsp.sectigo.com","crl.usertrust.com","ocsp.usertrust.com","s.symcb.com","s.symcd.com","ghcr.io","registry.hub.docker.com","*.docker.io","*.docker.com","production.cloudflare.docker.com","dl.k8s.io","pkgs.k8s.io","quay.io","mcr.microsoft.com","gcr.io","auth.docker.io","nuget.org","dist.nuget.org","api.nuget.org","nuget.pkg.github.com","dotnet.microsoft.com","pkgs.dev.azure.com","builds.dotnet.microsoft.com","dotnetcli.blob.core.windows.net","nugetregistryv2prod.blob.core.windows.net","azuresearch-usnc.nuget.org","azuresearch-ussc.nuget.org","dc.services.visualstudio.com","dot.net","ci.dot.net","www.microsoft.com","oneocsp.microsoft.com","pub.dev","pub.dartlang.org","*.githubusercontent.com","raw.githubusercontent.com","objects.githubusercontent.com","lfs.github.com","github-cloud.githubusercontent.com","github-cloud.s3.amazonaws.com","codeload.github.com","go.dev","golang.org","proxy.golang.org","sum.golang.org","pkg.go.dev","goproxy.io","releases.hashicorp.com","apt.releases.hashicorp.com","yum.releases.hashicorp.com","registry.terraform.io","haskell.org","*.hackage.haskell.org","get-ghcup.haskell.org","downloads.haskell.org","www.java.com","jdk.java.net","api.adoptium.net","adoptium.net","repo.maven.apache.org","maven.apache.org","repo1.maven.org","maven.pkg.github.com","maven.oracle.com","repo.spring.io","gradle.org","services.gradle.org","plugins.gradle.org","plugins-artifacts.gradle.org","repo.grails.org","download.eclipse.org","download.oracle.com","jcenter.bintray.com","json-schema.org","json.schemastore.org","archive.ubuntu.com","security.ubuntu.com","ppa.launchpad.net","keyserver.ubuntu.com","azure.archive
.ubuntu.com","api.snapcraft.io","deb.debian.org","security.debian.org","keyring.debian.org","packages.debian.org","debian.map.fastlydns.net","apt.llvm.org","dl.fedoraproject.org","mirrors.fedoraproject.org","download.fedoraproject.org","mirror.centos.org","vault.centos.org","dl-cdn.alpinelinux.org","pkg.alpinelinux.org","mirror.archlinux.org","archlinux.org","download.opensuse.org","cdn.redhat.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","npmjs.org","npmjs.com","registry.npmjs.com","registry.npmjs.org","skimdb.npmjs.com","npm.pkg.github.com","api.npms.io","nodejs.org","yarnpkg.com","registry.yarnpkg.com","repo.yarnpkg.com","deb.nodesource.com","get.pnpm.io","bun.sh","deno.land","registry.bower.io","cpan.org","www.cpan.org","metacpan.org","cpan.metacpan.org","repo.packagist.org","packagist.org","getcomposer.org","playwright.download.prss.microsoft.com","cdn.playwright.dev","pypi.python.org","pypi.org","pip.pypa.io","*.pythonhosted.org","files.pythonhosted.org","bootstrap.pypa.io","conda.binstar.org","conda.anaconda.org","binstar.org","anaconda.org","repo.continuum.io","repo.anaconda.com","rubygems.org","api.rubygems.org","rubygems.pkg.github.com","bundler.rubygems.org","gems.rubyforge.org","gems.rubyonrails.org","index.rubygems.org","cache.ruby-lang.org","*.rvm.io","crates.io","index.crates.io","static.crates.io","sh.rustup.rs","static.rust-lang.org","download.swift.org","swift.org","cocoapods.org","cdn.cocoapods.org"] + + def extract_domain(url_or_query): + """Extract domain from URL or search query.""" + if not url_or_query: + return None + + if url_or_query.startswith(('http://', 'https://')): + return urllib.parse.urlparse(url_or_query).netloc.lower() + + # Check for domain patterns in search queries + match = re.search(r'site:([a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', url_or_query) + if match: + return match.group(1).lower() + + return None + + def is_domain_allowed(domain): + """Check if domain is allowed.""" + if not domain: + # If no 
domain detected, allow only if not under deny-all policy + return bool(ALLOWED_DOMAINS) # False if empty list (deny-all), True if has domains + + # Empty allowed domains means deny all + if not ALLOWED_DOMAINS: + return False + + for pattern in ALLOWED_DOMAINS: + regex = pattern.replace('.', r'\.').replace('*', '.*') + if re.match(f'^{regex}$', domain): + return True + return False + + # Main logic + try: + data = json.load(sys.stdin) + tool_name = data.get('tool_name', '') + tool_input = data.get('tool_input', {}) + + if tool_name not in ['WebFetch', 'WebSearch']: + sys.exit(0) # Allow other tools + + target = tool_input.get('url') or tool_input.get('query', '') + domain = extract_domain(target) + + # For WebSearch, apply domain restrictions consistently + # If no domain detected in search query, check if restrictions are in place + if tool_name == 'WebSearch' and not domain: + # Since this hook is only generated when network permissions are configured, + # empty ALLOWED_DOMAINS means deny-all policy + if not ALLOWED_DOMAINS: # Empty list means deny all + print(f"Network access blocked: deny-all policy in effect", file=sys.stderr) + print(f"No domains are allowed for WebSearch", file=sys.stderr) + sys.exit(2) # Block under deny-all policy + else: + print(f"Network access blocked for WebSearch: no specific domain detected", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block general searches when domain allowlist is configured + + if not is_domain_allowed(domain): + print(f"Network access blocked for domain: {domain}", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block with feedback to Claude + + sys.exit(0) # Allow + + except Exception as e: + print(f"Network validation error: {e}", file=sys.stderr) + sys.exit(2) # Block on errors + + EOF + chmod +x .claude/hooks/network_permissions.py + - name: Generate Claude Settings + run: | + cat > 
.claude/settings.json << 'EOF' + { + "hooks": { + "PreToolUse": [ + { + "matcher": "WebFetch|WebSearch", + "hooks": [ + { + "type": "command", + "command": ".claude/hooks/network_permissions.py" + } + ] + } + ] + } + } + EOF + - name: Setup agent output + id: setup_agent_output + uses: actions/github-script@v7 + with: + script: | + function main() { + const fs = require("fs"); + const crypto = require("crypto"); + // Generate a random filename for the output file + const randomId = crypto.randomBytes(8).toString("hex"); + const outputFile = `/tmp/aw_output_${randomId}.txt`; + // Ensure the /tmp directory exists and create empty output file + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); + // Verify the file was created and is writable + if (!fs.existsSync(outputFile)) { + throw new Error(`Failed to create output file: ${outputFile}`); + } + // Set the environment variable for subsequent steps + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); + // Also set as step output for reference + core.setOutput("output_file", outputFile); + } + main(); + - name: Setup MCPs + run: | + mkdir -p /tmp/mcp-config + cat > /tmp/mcp-config/mcp-servers.json << 'EOF' + { + "mcpServers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server:sha-09deac4" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}" + } + } + } + } + EOF + - name: Create prompt + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + mkdir -p /tmp/aw-prompts + cat > /tmp/aw-prompts/prompt.txt << 'EOF' + Analyze the pull request and create a few targeted review comments on the code changes. + + Create 2-3 review comments focusing on: + 1. Code quality and best practices + 2. Potential security issues or improvements + 3. 
Performance optimizations or concerns + + For each review comment, specify: + - The exact file path where the comment should be placed + - The specific line number in the diff + - A helpful comment body with actionable feedback + + If you find multi-line issues, use start_line to comment on ranges of lines. + + + --- + + ## + + **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. + + **Format**: Write one JSON object per line. Each object must have a `type` field specifying the action type. + + ### Available Output Types: + + **Example JSONL file content:** + ``` + # No safe outputs configured for this workflow + ``` + + **Important Notes:** + - Do NOT attempt to use MCP tools, `gh`, or the GitHub API for these actions + - Each JSON object must be on its own line + - Only include output types that are configured for this workflow + - The content of this file will be automatically processed and executed + + EOF + - name: Print prompt to step summary + run: | + echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````markdown' >> $GITHUB_STEP_SUMMARY + cat /tmp/aw-prompts/prompt.txt >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Generate agentic run info + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "claude", + engine_name: "Claude Code", + model: "", + version: "", + workflow_name: "Test Claude Create Pull Request Review Comment", + experimental: false, + supports_tools_whitelist: true, + supports_http_transport: true, + run_id: context.runId, + run_number: context.runNumber, + 
run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + created_at: new Date().toISOString() + }; + + // Write to /tmp directory to avoid inclusion in PR + const tmpPath = '/tmp/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + - name: Upload agentic run info + if: always() + uses: actions/upload-artifact@v4 + with: + name: aw_info.json + path: /tmp/aw_info.json + if-no-files-found: warn + - name: Execute Claude Code Action + id: agentic_execution + uses: anthropics/claude-code-base-action@v0.0.56 + with: + # Allowed tools (sorted): + # - ExitPlanMode + # - Glob + # - Grep + # - LS + # - NotebookRead + # - Read + # - Task + # - TodoWrite + # - Write + # - mcp__github__download_workflow_run_artifact + # - mcp__github__get_code_scanning_alert + # - mcp__github__get_commit + # - mcp__github__get_dependabot_alert + # - mcp__github__get_discussion + # - mcp__github__get_discussion_comments + # - mcp__github__get_file_contents + # - mcp__github__get_issue + # - mcp__github__get_issue_comments + # - mcp__github__get_job_logs + # - mcp__github__get_me + # - mcp__github__get_notification_details + # - mcp__github__get_pull_request + # - mcp__github__get_pull_request_comments + # - mcp__github__get_pull_request_diff + # - mcp__github__get_pull_request_files + # - mcp__github__get_pull_request_reviews + # - mcp__github__get_pull_request_status + # - mcp__github__get_secret_scanning_alert + # - mcp__github__get_tag + # - mcp__github__get_workflow_run + # - mcp__github__get_workflow_run_logs + # - mcp__github__get_workflow_run_usage + # - mcp__github__list_branches + # - mcp__github__list_code_scanning_alerts + # - mcp__github__list_commits + # - mcp__github__list_dependabot_alerts + # - 
mcp__github__list_discussion_categories + # - mcp__github__list_discussions + # - mcp__github__list_issues + # - mcp__github__list_notifications + # - mcp__github__list_pull_requests + # - mcp__github__list_secret_scanning_alerts + # - mcp__github__list_tags + # - mcp__github__list_workflow_jobs + # - mcp__github__list_workflow_run_artifacts + # - mcp__github__list_workflow_runs + # - mcp__github__list_workflows + # - mcp__github__search_code + # - mcp__github__search_issues + # - mcp__github__search_orgs + # - mcp__github__search_pull_requests + # - mcp__github__search_repositories + # - mcp__github__search_users + allowed_tools: "ExitPlanMode,Glob,Grep,LS,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__get_job_logs,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issues,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_secret_scanning_alerts,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__search_code,mcp__github__search_issues,mcp__gith
ub__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users" + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + claude_env: | + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + mcp_config: /tmp/mcp-config/mcp-servers.json + prompt_file: /tmp/aw-prompts/prompt.txt + settings: .claude/settings.json + timeout_minutes: 5 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + - name: Capture Agentic Action logs + if: always() + run: | + # Copy the detailed execution file from Agentic Action if available + if [ -n "${{ steps.agentic_execution.outputs.execution_file }}" ] && [ -f "${{ steps.agentic_execution.outputs.execution_file }}" ]; then + cp ${{ steps.agentic_execution.outputs.execution_file }} /tmp/test-claude-create-pull-request-review-comment.log + else + echo "No execution file output found from Agentic Action" >> /tmp/test-claude-create-pull-request-review-comment.log + fi + + # Ensure log file exists + touch /tmp/test-claude-create-pull-request-review-comment.log + - name: Check if workflow-complete.txt exists, if so upload it + id: check_file + run: | + if [ -f workflow-complete.txt ]; then + echo "File exists" + echo "upload=true" >> $GITHUB_OUTPUT + else + echo "File does not exist" + echo "upload=false" >> $GITHUB_OUTPUT + fi + - name: Upload workflow-complete.txt + if: steps.check_file.outputs.upload == 'true' + uses: actions/upload-artifact@v4 + with: + name: workflow-complete + path: workflow-complete.txt + - name: Collect agent output + id: collect_output + uses: actions/github-script@v7 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{}" + with: + script: | + async function main() { + const fs = require("fs"); + /** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ + function sanitizeContent(content) { + if 
(!content || typeof content !== "string") { + return ""; + } + // Read allowed domains from environment variable + const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = [ + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", + ]; + const allowedDomains = allowedDomainsEnv + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) + : defaultAllowedDomains; + let sanitized = content; + // Neutralize @mentions to prevent unintended notifications + sanitized = neutralizeMentions(sanitized); + // Remove control characters (except newlines and tabs) + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); + // XML character escaping + sanitized = sanitized + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); + // URI filtering - replace non-https protocols with "(redacted)" + sanitized = sanitizeUrlProtocols(sanitized); + // Domain filtering for HTTPS URIs + sanitized = sanitizeUrlDomains(sanitized); + // Limit total length to prevent DoS (0.5MB max) + const maxLength = 524288; + if (sanitized.length > maxLength) { + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; + } + // Limit number of lines to prevent log flooding (65k max) + const lines = sanitized.split("\n"); + const maxLines = 65000; + if (lines.length > maxLines) { + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; + } + // Remove ANSI escape sequences + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); + // Neutralize common bot trigger phrases + sanitized = neutralizeBotTriggers(sanitized); + // Trim excessive whitespace + return sanitized.trim(); + /** + * Remove unknown domains + * @param {string} s - The string to process + * @returns {string} The string with unknown domains redacted + */ + 
function sanitizeUrlDomains(s) { + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); + } + /** + * Remove unknown protocols except https + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ + function sanitizeUrlProtocols(s) { + // Match both protocol:// and protocol: patterns + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); + } + /** + * Neutralizes @mentions by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ + function neutralizeMentions(s) { + // Replace @name or @org/team outside code with `@name` + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); + } + /** + * Neutralizes bot trigger phrases by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized bot triggers + */ + function neutralizeBotTriggers(s) { + // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
+ return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); + } + } + /** + * Gets the maximum allowed count for a given output type + * @param {string} itemType - The output item type + * @param {Object} config - The safe-outputs configuration + * @returns {number} The maximum allowed count + */ + function getMaxAllowedForType(itemType, config) { + // Check if max is explicitly specified in config + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { + return config[itemType].max; + } + // Use default limits for plural-supported types + switch (itemType) { + case "create-issue": + return 1; // Only one issue allowed + case "add-issue-comment": + return 1; // Only one comment allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed + default: + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + 
if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } 
catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!outputFile) { + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); + return; + } + console.log("Raw output content length:", outputContent.length); + // Parse the safe-outputs configuration + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + expectedOutputTypes = JSON.parse(safeOutputsConfig); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); + } catch (error) { + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); + } + } + // Parse JSONL content + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; // Skip empty lines + try { + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Validate against expected output types + const itemType = item.type; + if (!expectedOutputTypes[itemType]) { + 
errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); + continue; + } + // Check for too many items of the same type + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.` + ); + continue; + } + // Basic validation based on type + switch (itemType) { + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.body = sanitizeContent(item.body); + break; + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize branch name if present + if (item.branch && typeof item.branch === "string") { + item.branch = sanitizeContent(item.branch); + } + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-label": + if (!item.labels || !Array.isArray(item.labels)) { + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); + continue; + } + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); + continue; + } + // Sanitize label strings + item.labels = item.labels.map(label => sanitizeContent(label)); + break; + case "update-issue": + // Check that at least one updateable field is provided + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; + if (!hasValidField) { + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); + continue; + } + // Validate status if provided + if (item.status !== undefined) { + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); + continue; + } + } + // Validate title if provided + if (item.title !== undefined) { + if (typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); + continue; + } + item.title = sanitizeContent(item.title); + } + // Validate body if provided + if (item.body !== undefined) { + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); + continue; + } + item.body = sanitizeContent(item.body); + } + // Validate issue_number if provided (for target "*") + if (item.issue_number !== undefined) { + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); + continue; + } + } + break; + case "push-to-branch": + // Validate message if provided 
(optional) + if (item.message !== undefined) { + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); + continue; + } + item.message = sanitizeContent(item.message); + } + // Validate pull_request_number if provided (for target "*") + if (item.pull_request_number !== undefined) { + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + 
); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; + default: + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + console.log(`Line ${i + 1}: Valid ${itemType} item`); + parsedItems.push(item); + } catch (error) { + errors.push(`Line ${i + 1}: Invalid JSON - ${error.message}`); + } + } + // Report validation results + if (errors.length > 0) { + console.log("Validation errors found:"); + errors.forEach(error => console.log(` - ${error}`)); + // For now, we'll continue with valid items but log the errors + // In the future, we might want to fail the workflow for invalid items + } + console.log(`Successfully parsed ${parsedItems.length} valid output items`); + // Set the parsed and validated items as output + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + } + // Call the main function + await main(); + - name: Print agent output to step summary + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + cat ${{ env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY + # Ensure there's a newline after the file content if it doesn't end with one + if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + fi + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Upload agentic output file + if: always() && steps.collect_output.outputs.output != '' + uses: actions/upload-artifact@v4 + with: + name: aw_output.txt + path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + if-no-files-found: warn + - name: Upload engine output files + uses: actions/upload-artifact@v4 + with: + name: agent_outputs + path: | 
+ output.txt + if-no-files-found: ignore + - name: Clean up engine output files + run: | + rm -f output.txt + - name: Parse agent logs for step summary + if: always() + uses: actions/github-script@v7 + env: + AGENT_LOG_FILE: /tmp/test-claude-create-pull-request-review-comment.log + with: + script: | + function main() { + const fs = require("fs"); + try { + // Get the log file path from environment + const logFile = process.env.AGENT_LOG_FILE; + if (!logFile) { + console.log("No agent log file specified"); + return; + } + if (!fs.existsSync(logFile)) { + console.log(`Log file not found: ${logFile}`); + return; + } + const logContent = fs.readFileSync(logFile, "utf8"); + const markdown = parseClaudeLog(logContent); + // Append to GitHub step summary + core.summary.addRaw(markdown).write(); + } catch (error) { + console.error("Error parsing Claude log:", error.message); + core.setFailed(error.message); + } + } + function parseClaudeLog(logContent) { + try { + const logEntries = JSON.parse(logContent); + if (!Array.isArray(logEntries)) { + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; + } + let markdown = "## 🤖 Commands and Tools\n\n"; + const toolUsePairs = new Map(); // Map tool_use_id to tool_result + const commandSummary = []; // For the succinct summary + // First pass: collect tool results by tool_use_id + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + // Collect all tool uses for summary + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + const input = content.input || {}; + // Skip internal tools - only show external commands and API calls 
+ if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { + continue; // Skip internal file operations and searches + } + // Find the corresponding tool result to get status + const toolResult = toolUsePairs.get(content.id); + let statusIcon = "❓"; + if (toolResult) { + statusIcon = toolResult.is_error === true ? "❌" : "✅"; + } + // Add to command summary (only external tools) + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } else if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); + } else { + // Handle other external tools (if any) + commandSummary.push(`* ${statusIcon} ${toolName}`); + } + } + } + } + } + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + markdown += `${cmd}\n`; + } + } else { + markdown += "No commands or tools used.\n"; + } + // Add Information section from the last entry with result metadata + markdown += "\n## 📊 Information\n\n"; + // Find the last entry with metadata + const lastEntry = logEntries[logEntries.length - 1]; + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { + if (lastEntry.num_turns) { + markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; + } + if (lastEntry.duration_ms) { + const durationSec = Math.round(lastEntry.duration_ms / 1000); + const minutes = Math.floor(durationSec / 60); + const seconds = durationSec % 60; + markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; + } + if (lastEntry.total_cost_usd) { + markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; + } + if (lastEntry.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + markdown += `**Token 
Usage:**\n`; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; + } + } + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { + markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; + } + } + markdown += "\n## 🤖 Reasoning\n\n"; + // Second pass: process assistant messages in sequence + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "text" && content.text) { + // Add reasoning text directly (no header) + const text = content.text.trim(); + if (text && text.length > 0) { + markdown += text + "\n\n"; + } + } else if (content.type === "tool_use") { + // Process tool use with its result + const toolResult = toolUsePairs.get(content.id); + const toolMarkdown = formatToolUse(content, toolResult); + if (toolMarkdown) { + markdown += toolMarkdown; + } + } + } + } + } + return markdown; + } catch (error) { + return `## Agent Log Summary\n\nError parsing Claude log: ${error.message}\n`; + } + } + function formatToolUse(toolUse, toolResult) { + const toolName = toolUse.name; + const input = toolUse.input || {}; + // Skip TodoWrite except the very last one (we'll handle this separately) + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one + } + // Helper function to determine status icon + function getStatusIcon() { + if (toolResult) { + return toolResult.is_error === true ? 
"❌" : "✅"; + } + return "❓"; // Unknown by default + } + let markdown = ""; + const statusIcon = getStatusIcon(); + switch (toolName) { + case "Bash": + const command = input.command || ""; + const description = input.description || ""; + // Format the command to be single line + const formattedCommand = formatBashCommand(command); + if (description) { + markdown += `${description}:\n\n`; + } + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; + break; + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix + markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; + break; + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; + break; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; + markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; + break; + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; + break; + default: + // Handle MCP calls and other tools + if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + const params = formatMcpParameters(input); + markdown += `${statusIcon} ${mcpName}(${params})\n\n`; + } else { + // Generic tool formatting - show the tool name and main parameters + const keys = Object.keys(input); + if (keys.length > 0) { + // Try to find the most important parameter + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || 
""); + if (value) { + markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } + } + return markdown; + } + function formatMcpName(toolName) { + // Convert mcp__github__search_issues to github::search_issues + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); + if (parts.length >= 3) { + const provider = parts[1]; // github, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. + return `${provider}::${method}`; + } + } + return toolName; + } + function formatMcpParameters(input) { + const keys = Object.keys(input); + if (keys.length === 0) return ""; + const paramStrs = []; + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); + paramStrs.push(`${key}: ${truncateString(value, 40)}`); + } + if (keys.length > 4) { + paramStrs.push("..."); + } + return paramStrs.join(", "); + } + function formatBashCommand(command) { + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) + const maxLength = 80; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + return formatted; + } + function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; + } + // Export 
for testing + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; + } + main(); + - name: Upload agent logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-claude-create-pull-request-review-comment.log + path: /tmp/test-claude-create-pull-request-review-comment.log + if-no-files-found: warn + + create_pr_review_comment: + needs: test-claude-create-pull-request-review-comment + if: github.event.pull_request.number + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + timeout-minutes: 10 + outputs: + review_comment_id: ${{ steps.create_pr_review_comment.outputs.review_comment_id }} + review_comment_url: ${{ steps.create_pr_review_comment.outputs.review_comment_url }} + steps: + - name: Create PR Review Comment + id: create_pr_review_comment + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-claude-create-pull-request-review-comment.outputs.output }} + GITHUB_AW_PR_REVIEW_COMMENT_SIDE: "RIGHT" + with: + script: | + async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + console.log("Agent output content length:", outputContent.length); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + // Find all create-pull-request-review-comment items + const reviewCommentItems = validatedOutput.items.filter( + /** @param {any} item */ item => + item.type === "create-pull-request-review-comment" + ); + if (reviewCommentItems.length === 0) { + console.log( + "No create-pull-request-review-comment items found in agent output" + ); + return; + } + console.log( + `Found ${reviewCommentItems.length} create-pull-request-review-comment item(s)` + ); + // Get the side configuration from environment variable + const defaultSide = process.env.GITHUB_AW_PR_REVIEW_COMMENT_SIDE || "RIGHT"; + console.log(`Default comment side configuration: ${defaultSide}`); + // Check if we're in a pull request context + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; + if (!isPRContext) { + console.log( + "Not running in pull request context, skipping review comment creation" + ); + return; + } + if (!context.payload.pull_request) { + console.log( + "Pull request context detected but no pull request found in payload" + ); + return; + } + const pullRequestNumber = context.payload.pull_request.number; + console.log(`Creating review comments on PR #${pullRequestNumber}`); + const createdComments = []; + // Process each review comment item + for (let i = 0; i < reviewCommentItems.length; i++) { + const commentItem = reviewCommentItems[i]; + console.log( + `Processing create-pull-request-review-comment item ${i + 1}/${reviewCommentItems.length}:`, + { + bodyLength: commentItem.body ? 
commentItem.body.length : "undefined", + path: commentItem.path, + line: commentItem.line, + startLine: commentItem.start_line, + } + ); + // Validate required fields + if (!commentItem.path) { + console.log('Missing required field "path" in review comment item'); + continue; + } + if ( + !commentItem.line || + (typeof commentItem.line !== "number" && + typeof commentItem.line !== "string") + ) { + console.log( + 'Missing or invalid required field "line" in review comment item' + ); + continue; + } + if (!commentItem.body || typeof commentItem.body !== "string") { + console.log( + 'Missing or invalid required field "body" in review comment item' + ); + continue; + } + // Parse line numbers + const line = parseInt(commentItem.line, 10); + if (isNaN(line) || line <= 0) { + console.log(`Invalid line number: ${commentItem.line}`); + continue; + } + let startLine = undefined; + if (commentItem.start_line) { + startLine = parseInt(commentItem.start_line, 10); + if (isNaN(startLine) || startLine <= 0 || startLine > line) { + console.log( + `Invalid start_line number: ${commentItem.start_line} (must be <= line: ${line})` + ); + continue; + } + } + // Determine side (LEFT or RIGHT) + const side = commentItem.side || defaultSide; + if (side !== "LEFT" && side !== "RIGHT") { + console.log(`Invalid side value: ${side} (must be LEFT or RIGHT)`); + continue; + } + // Extract body from the JSON item + let body = commentItem.body.trim(); + // Add AI disclaimer with run id, run htmlurl + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; + console.log( + `Creating review comment on PR #${pullRequestNumber} at ${commentItem.path}:${line}${startLine ? 
` (lines ${startLine}-${line})` : ""} [${side}]` + ); + console.log("Comment content length:", body.length); + try { + // Prepare the request parameters + const requestParams = { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullRequestNumber, + body: body, + path: commentItem.path, + line: line, + side: side, + }; + // Add start_line for multi-line comments + if (startLine !== undefined) { + requestParams.start_line = startLine; + requestParams.start_side = side; // start_side should match side for consistency + } + // Create the review comment using GitHub API + const { data: comment } = + await github.rest.pulls.createReviewComment(requestParams); + console.log( + "Created review comment #" + comment.id + ": " + comment.html_url + ); + createdComments.push(comment); + // Set output for the last created comment (for backward compatibility) + if (i === reviewCommentItems.length - 1) { + core.setOutput("review_comment_id", comment.id); + core.setOutput("review_comment_url", comment.html_url); + } + } catch (error) { + console.error( + `✗ Failed to create review comment:`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + } + // Write summary for all created comments + if (createdComments.length > 0) { + let summaryContent = "\n\n## GitHub PR Review Comments\n"; + for (const comment of createdComments) { + summaryContent += `- Review Comment #${comment.id}: [View Comment](${comment.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + console.log( + `Successfully created ${createdComments.length} review comment(s)` + ); + return createdComments; + } + await main(); + diff --git a/.github/workflows/test-claude-create-pull-request-review-comment.md b/.github/workflows/test-claude-create-pull-request-review-comment.md new file mode 100644 index 0000000000..cb56a9e17b --- /dev/null +++ b/.github/workflows/test-claude-create-pull-request-review-comment.md @@ -0,0 +1,29 @@ +--- +on: + pull_request: + types: [opened, synchronize, reopened] + reaction: eyes + +engine: + id: claude + +if: contains(github.event.pull_request.title, 'prr') + +safe-outputs: + create-pull-request-review-comment: + max: 3 +--- + +Analyze the pull request and create a few targeted review comments on the code changes. + +Create 2-3 review comments focusing on: +1. Code quality and best practices +2. Potential security issues or improvements +3. Performance optimizations or concerns + +For each review comment, specify: +- The exact file path where the comment should be placed +- The specific line number in the diff +- A helpful comment body with actionable feedback + +If you find multi-line issues, use start_line to comment on ranges of lines. 
diff --git a/.github/workflows/test-claude-create-pull-request.lock.yml b/.github/workflows/test-claude-create-pull-request.lock.yml index 4da9d023d9..a15f3d4b9e 100644 --- a/.github/workflows/test-claude-create-pull-request.lock.yml +++ b/.github/workflows/test-claude-create-pull-request.lock.yml @@ -135,23 +135,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -393,34 +393,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 
'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -428,16 +431,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); 
+ sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -448,16 +455,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -466,10 +479,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? 
match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -478,8 +494,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -488,8 +506,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -500,65 +520,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - 
case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix 
missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -567,25 +687,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -593,107 +723,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -706,7 +976,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -715,10 +985,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -760,24 +1030,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -785,16 +1055,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -802,26 +1072,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -838,13 +1119,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -861,29 +1148,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -903,22 +1197,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -926,31 +1220,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -959,8 +1262,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -975,11 +1281,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -987,44 +1293,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1197,52 +1509,70 @@ jobs: // Environment validation - fail early if required variables are missing const workflowId = process.env.GITHUB_AW_WORKFLOW_ID; if (!workflowId) { - throw new Error('GITHUB_AW_WORKFLOW_ID environment variable is required'); + throw new Error("GITHUB_AW_WORKFLOW_ID environment variable is required"); } const baseBranch = process.env.GITHUB_AW_BASE_BRANCH; if (!baseBranch) { - throw new Error('GITHUB_AW_BASE_BRANCH environment variable is required'); + throw new Error("GITHUB_AW_BASE_BRANCH environment variable is required"); } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); } // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - throw new Error('No patch file found - cannot create pull request without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + throw new Error( + "No patch file found - cannot create pull request without changes" + ); } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || 
patchContent.includes('Failed to generate patch')) { - throw new Error('Patch file is empty or contains error message - cannot create pull request without changes'); + const patchContent = fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + throw new Error( + "Patch file is empty or contains error message - cannot create pull request without changes" + ); } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); + console.log("Agent output content length:", outputContent.length); + console.log("Patch content validation passed"); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the create-pull-request item - const pullRequestItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'create-pull-request'); + const pullRequestItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "create-pull-request" + ); if (!pullRequestItem) { - console.log('No create-pull-request item found in agent output'); + console.log("No create-pull-request item found in agent output"); return; } - console.log('Found create-pull-request item:', { title: pullRequestItem.title, bodyLength: pullRequestItem.body.length }); + console.log("Found create-pull-request item:", { + title: pullRequestItem.title, + bodyLength: pullRequestItem.body.length, + }); // Extract title, body, and branch from the JSON item let title = pullRequestItem.title.trim(); - let bodyLines = pullRequestItem.body.split('\n'); - let branchName = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + let bodyLines = pullRequestItem.body.split("\n"); + let branchName = pullRequestItem.branch + ? pullRequestItem.branch.trim() + : null; // If no title was found, use a default if (!title) { - title = 'Agent Output'; + title = "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_PR_TITLE_PREFIX; @@ -1251,59 +1581,80 @@ jobs: } // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); + const body = bodyLines.join("\n").trim(); // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_PR_LABELS; - const labels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + const labels = labelsEnv + ? labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; // Parse draft setting from environment variable (defaults to true) const draftEnv = process.env.GITHUB_AW_PR_DRAFT; - const draft = draftEnv ? draftEnv.toLowerCase() === 'true' : true; - console.log('Creating pull request with title:', title); - console.log('Labels:', labels); - console.log('Draft:', draft); - console.log('Body length:', body.length); + const draft = draftEnv ? 
draftEnv.toLowerCase() === "true" : true; + console.log("Creating pull request with title:", title); + console.log("Labels:", labels); + console.log("Draft:", draft); + console.log("Body length:", body.length); // Use branch name from JSONL if provided, otherwise generate unique branch name if (!branchName) { - console.log('No branch name provided in JSONL, generating unique branch name'); + console.log( + "No branch name provided in JSONL, generating unique branch name" + ); // Generate unique branch name using cryptographic random hex - const randomHex = crypto.randomBytes(8).toString('hex'); + const randomHex = crypto.randomBytes(8).toString("hex"); branchName = `${workflowId}/${randomHex}`; } else { - console.log('Using branch name from JSONL:', branchName); + console.log("Using branch name from JSONL:", branchName); } - console.log('Generated branch name:', branchName); - console.log('Base branch:', baseBranch); + console.log("Generated branch name:", branchName); + console.log("Base branch:", baseBranch); // Create a new branch using git CLI // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Handle branch creation/checkout - const branchFromJsonl = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + const branchFromJsonl = pullRequestItem.branch + ? 
pullRequestItem.branch.trim() + : null; if (branchFromJsonl) { - console.log('Checking if branch from JSONL exists:', branchFromJsonl); - console.log('Branch does not exist locally, creating new branch:', branchFromJsonl); - execSync(`git checkout -b ${branchFromJsonl}`, { stdio: 'inherit' }); - console.log('Using existing/created branch:', branchFromJsonl); + console.log("Checking if branch from JSONL exists:", branchFromJsonl); + console.log( + "Branch does not exist locally, creating new branch:", + branchFromJsonl + ); + execSync(`git checkout -b ${branchFromJsonl}`, { stdio: "inherit" }); + console.log("Using existing/created branch:", branchFromJsonl); } else { // Create and checkout new branch with generated name - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); - console.log('Created and checked out new branch:', branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); + console.log("Created and checked out new branch:", branchName); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); // Apply the patch using git apply - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); - console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); - execSync(`git commit -m "Add agent output: ${title}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed'); + execSync("git add .", { stdio: "inherit" }); + execSync(`git commit -m "Add agent output: ${title}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed"); // Create the pull request const { data: pullRequest } = await github.rest.pulls.create({ owner: context.repo.owner, @@ -1312,31 
+1663,36 @@ jobs: body: body, head: branchName, base: baseBranch, - draft: draft + draft: draft, }); - console.log('Created pull request #' + pullRequest.number + ': ' + pullRequest.html_url); + console.log( + "Created pull request #" + pullRequest.number + ": " + pullRequest.html_url + ); // Add labels if specified if (labels.length > 0) { await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pullRequest.number, - labels: labels + labels: labels, }); - console.log('Added labels to pull request:', labels); + console.log("Added labels to pull request:", labels); } // Set output for other jobs to use - core.setOutput('pull_request_number', pullRequest.number); - core.setOutput('pull_request_url', pullRequest.html_url); - core.setOutput('branch_name', branchName); + core.setOutput("pull_request_number", pullRequest.number); + core.setOutput("pull_request_url", pullRequest.html_url); + core.setOutput("branch_name", branchName); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Pull Request - **Pull Request**: [#${pullRequest.number}](${pullRequest.html_url}) - **Branch**: \`${branchName}\` - **Base Branch**: \`${baseBranch}\` - `).write(); + ` + ) + .write(); } await main(); diff --git a/.github/workflows/test-claude-mcp.lock.yml b/.github/workflows/test-claude-mcp.lock.yml index c3d2ff398a..893359de69 100644 --- a/.github/workflows/test-claude-mcp.lock.yml +++ b/.github/workflows/test-claude-mcp.lock.yml @@ -31,21 +31,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -57,20 +68,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -78,10 +89,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -89,10 +100,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -104,24 +115,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -130,19 +145,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -153,33 +168,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -305,23 +324,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -569,34 +588,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -604,16 +626,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -624,16 +650,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -642,10 +674,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -654,8 +689,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -664,8 +701,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -676,65 +715,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -743,25 +882,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -769,107 +918,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -882,7 +1171,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -891,10 +1180,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -936,24 +1225,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -961,16 +1250,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -978,26 +1267,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + 
if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -1014,13 +1314,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + 
lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1037,29 +1343,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - 
markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1079,22 +1392,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1102,31 +1415,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const 
writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1135,8 +1457,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1151,11 +1476,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1163,44 +1488,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1233,30 +1564,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all create-issue items - const createIssueItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'create-issue'); + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); if (createIssueItems.length === 0) { - console.log('No create-issue items found in agent output'); + console.log("No create-issue items found in agent output"); return; } console.log(`Found ${createIssueItems.length} create-issue item(s)`); @@ -1264,23 +1600,31 @@ jobs: const parentIssueNumber = context.payload?.issue?.number; // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; const createdIssues = []; // Process each create-issue item for (let i = 0; i < createIssueItems.length; i++) { const createIssueItem = createIssueItems[i]; - console.log(`Processing create-issue item ${i + 1}/${createIssueItems.length}:`, { title: createIssueItem.title, bodyLength: createIssueItem.body.length }); + console.log( + `Processing create-issue item ${i + 1}/${createIssueItems.length}:`, + { title: createIssueItem.title, bodyLength: createIssueItem.body.length } + ); // Merge environment labels with item-specific labels let labels = [...envLabels]; if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { labels = [...labels, ...createIssueItem.labels].filter(Boolean); } // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ''; - let bodyLines = createIssueItem.body.split('\n'); + let title = createIssueItem.title ? 
createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); // If no title was found, use the body content as title (or a default) if (!title) { - title = createIssueItem.body || 'Agent Output'; + title = createIssueItem.body || "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; @@ -1288,22 +1632,27 @@ jobs: title = titlePrefix + title; } if (parentIssueNumber) { - console.log('Detected issue context, parent issue #' + parentIssueNumber); + console.log("Detected issue context, parent issue #" + parentIssueNumber); // Add reference to parent issue in the child issue body bodyLines.push(`Related to #${parentIssueNumber}`); } // Add AI disclaimer with run id, run htmlurl // Add AI disclaimer with workflow run information const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); - console.log('Creating issue with title:', title); - console.log('Labels:', labels); - console.log('Body length:', body.length); + const body = bodyLines.join("\n").trim(); + console.log("Creating issue with title:", title); + console.log("Labels:", labels); + console.log("Body length:", body.length); try { // Create the issue using GitHub API const { data: issue } = await github.rest.issues.create({ @@ -1311,9 +1660,9 @@ jobs: repo: context.repo.repo, title: title, body: body, - labels: labels + labels: labels, }); - console.log('Created issue #' + issue.number + ': ' + issue.html_url); + 
console.log("Created issue #" + issue.number + ": " + issue.html_url); createdIssues.push(issue); // If we have a parent issue, add a comment to it referencing the new child issue if (parentIssueNumber) { @@ -1322,26 +1671,32 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}` + body: `Created related issue: #${issue.number}`, }); - console.log('Added comment to parent issue #' + parentIssueNumber); + console.log("Added comment to parent issue #" + parentIssueNumber); } catch (error) { - console.log('Warning: Could not add comment to parent issue:', error instanceof Error ? error.message : String(error)); + console.log( + "Warning: Could not add comment to parent issue:", + error instanceof Error ? error.message : String(error) + ); } } // Set output for the last created issue (for backward compatibility) if (i === createIssueItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to create issue "${title}":`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create issue "${title}":`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created issues if (createdIssues.length > 0) { - let summaryContent = '\n\n## GitHub Issues\n'; + let summaryContent = "\n\n## GitHub Issues\n"; for (const issue of createdIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-claude-push-to-branch.lock.yml b/.github/workflows/test-claude-push-to-branch.lock.yml index ac9cb6e1a6..d402c6c69a 100644 --- a/.github/workflows/test-claude-push-to-branch.lock.yml +++ b/.github/workflows/test-claude-push-to-branch.lock.yml @@ -36,24 +36,28 @@ jobs: const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) try { - console.log(`Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}`); - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + console.log( + `Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}` + ); + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission === 'admin' || permission === 'maintain') { + if (permission === "admin" || permission === "maintain") { console.log(`User has ${permission} access to repository`); - core.setOutput('is_team_member', 'true'); + core.setOutput("is_team_member", "true"); return; } } catch (repoError) { - const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + const errorMessage = + repoError instanceof Error ? 
repoError.message : String(repoError); console.log(`Repository permission check failed: ${errorMessage}`); } - core.setOutput('is_team_member', 'false'); + core.setOutput("is_team_member", "false"); } await main(); - name: Validate team membership @@ -185,23 +189,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -476,34 +480,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 
'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -511,16 +518,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // 
Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -531,16 +542,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -549,10 +566,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -561,8 +581,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -571,8 +593,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -583,65 +607,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -650,25 +774,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -676,107 +810,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -789,7 +1063,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -798,10 +1072,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -843,24 +1117,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -868,16 +1142,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -885,26 +1159,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + 
if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -921,13 +1206,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + 
lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -944,29 +1235,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - 
markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -986,22 +1284,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1009,31 +1307,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const 
writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1042,8 +1349,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1058,11 +1368,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1070,44 +1380,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1276,118 +1592,143 @@ jobs: // Environment validation - fail early if required variables are missing const branchName = process.env.GITHUB_AW_PUSH_BRANCH; if (!branchName) { - core.setFailed('GITHUB_AW_PUSH_BRANCH environment variable is required'); + core.setFailed("GITHUB_AW_PUSH_BRANCH environment variable is required"); return; } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } const target = process.env.GITHUB_AW_PUSH_TARGET || "triggering"; // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - core.setFailed('No patch file found - cannot push without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + core.setFailed("No patch file found - cannot push without changes"); return; } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || patchContent.includes('Failed to generate patch')) { - core.setFailed('Patch file is empty or contains error message - cannot push without changes'); + const patchContent = 
fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + core.setFailed( + "Patch file is empty or contains error message - cannot push without changes" + ); return; } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); - console.log('Target branch:', branchName); - console.log('Target configuration:', target); + console.log("Agent output content length:", outputContent.length); + console.log("Patch content validation passed"); + console.log("Target branch:", branchName); + console.log("Target configuration:", target); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the push-to-branch item - const pushItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'push-to-branch'); + const pushItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "push-to-branch" + ); if (!pushItem) { - console.log('No push-to-branch item found in agent output'); + console.log("No push-to-branch item found in agent output"); return; } - console.log('Found push-to-branch item'); + console.log("Found push-to-branch item"); // Validate target configuration for pull request context if (target !== "*" && target !== "triggering") { // If target is a specific number, validate it's a valid pull request number const targetNumber = parseInt(target, 10); if (isNaN(targetNumber)) { - 
core.setFailed('Invalid target configuration: must be "triggering", "*", or a valid pull request number'); + core.setFailed( + 'Invalid target configuration: must be "triggering", "*", or a valid pull request number' + ); return; } } // Check if we're in a pull request context when required if (target === "triggering" && !context.payload.pull_request) { - core.setFailed('push-to-branch with target "triggering" requires pull request context'); + core.setFailed( + 'push-to-branch with target "triggering" requires pull request context' + ); return; } // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Switch to or create the target branch - console.log('Switching to branch:', branchName); + console.log("Switching to branch:", branchName); try { // Try to checkout existing branch first - execSync('git fetch origin', { stdio: 'inherit' }); - execSync(`git checkout ${branchName}`, { stdio: 'inherit' }); - console.log('Checked out existing branch:', branchName); + execSync("git fetch origin", { stdio: "inherit" }); + execSync(`git checkout ${branchName}`, { stdio: "inherit" }); + console.log("Checked out existing branch:", branchName); } catch (error) { // Branch doesn't exist, create it - console.log('Branch does not exist, creating new branch:', branchName); - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); + console.log("Branch does not exist, creating new branch:", branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); try { - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); 
- console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); } catch (error) { - console.error('Failed to apply patch:', error instanceof Error ? error.message : String(error)); - core.setFailed('Failed to apply patch'); + console.error( + "Failed to apply patch:", + error instanceof Error ? error.message : String(error) + ); + core.setFailed("Failed to apply patch"); return; } // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); + execSync("git add .", { stdio: "inherit" }); // Check if there are changes to commit try { - execSync('git diff --cached --exit-code', { stdio: 'ignore' }); - console.log('No changes to commit'); + execSync("git diff --cached --exit-code", { stdio: "ignore" }); + console.log("No changes to commit"); return; } catch (error) { // Exit code != 0 means there are changes to commit, which is what we want } - const commitMessage = pushItem.message || 'Apply agent changes'; - execSync(`git commit -m "${commitMessage}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed to branch:', branchName); + const commitMessage = pushItem.message || "Apply agent changes"; + execSync(`git commit -m "${commitMessage}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed to branch:", branchName); // Get commit SHA - const commitSha = execSync('git rev-parse HEAD', { encoding: 'utf8' }).trim(); - const pushUrl = context.payload.repository + const commitSha = execSync("git rev-parse HEAD", { encoding: "utf8" }).trim(); + const pushUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/tree/${branchName}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/tree/${branchName}`; // Set outputs - core.setOutput('branch_name', branchName); - core.setOutput('commit_sha', commitSha); - core.setOutput('push_url', pushUrl); + core.setOutput("branch_name", branchName); + core.setOutput("commit_sha", commitSha); + core.setOutput("push_url", pushUrl); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Push to Branch - **Branch**: \`${branchName}\` - **Commit**: [${commitSha.substring(0, 7)}](${pushUrl}) - **URL**: [${pushUrl}](${pushUrl}) - `).write(); + ` + ) + .write(); } await main(); diff --git a/.github/workflows/test-claude-update-issue.lock.yml b/.github/workflows/test-claude-update-issue.lock.yml index 092dcb4e0d..13ae26ed5d 100644 --- a/.github/workflows/test-claude-update-issue.lock.yml +++ b/.github/workflows/test-claude-update-issue.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -308,23 +327,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -550,34 +569,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&amp;') // Must be first to avoid double-escaping - .replace(/</g, '&lt;') - .replace(/>/g, '&gt;') - .replace(/"/g, '&quot;') - .replace(/'/g, '&#x27;'); + .replace(/&/g, "&amp;") // Must be first to avoid double-escaping + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#x27;"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -585,16 +607,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -605,16 +631,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -623,10 +655,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -635,8 +670,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -645,8 +682,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -657,65 +696,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -724,25 +863,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -750,107 +899,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -863,7 +1152,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -872,10 +1161,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -917,24 +1206,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -942,16 +1231,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -959,26 +1248,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -995,13 +1295,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1018,29 +1324,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1060,22 +1373,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1083,31 +1396,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1116,8 +1438,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1132,11 +1457,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1144,44 +1469,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1218,45 +1549,55 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all update-issue items - const updateItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'update-issue'); + const updateItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "update-issue" + ); if (updateItems.length === 0) { - console.log('No update-issue items found in agent output'); + console.log("No update-issue items found in agent output"); return; } console.log(`Found ${updateItems.length} update-issue item(s)`); // Get the configuration from environment variables const updateTarget = process.env.GITHUB_AW_UPDATE_TARGET || "triggering"; - const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === 'true'; - const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === 'true'; - const canUpdateBody = process.env.GITHUB_AW_UPDATE_BODY === 'true'; + const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === "true"; + const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === "true"; + const canUpdateBody = process.env.GITHUB_AW_UPDATE_BODY === "true"; console.log(`Update target configuration: ${updateTarget}`); - console.log(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}`); + console.log( + `Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}` + ); // Check if we're in an issue context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; // Validate context based on target configuration if (updateTarget === "triggering" && !isIssueContext) { - console.log('Target is "triggering" but not running in issue context, skipping issue 
update'); + console.log( + 'Target is "triggering" but not running in issue context, skipping issue update' + ); return; } const updatedIssues = []; @@ -1271,18 +1612,24 @@ jobs: if (updateItem.issue_number) { issueNumber = parseInt(updateItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${updateItem.issue_number}`); + console.log( + `Invalid issue number specified: ${updateItem.issue_number}` + ); continue; } } else { - console.log('Target is "*" but no issue_number specified in update item'); + console.log( + 'Target is "*" but no issue_number specified in update item' + ); continue; } } else if (updateTarget && updateTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(updateTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${updateTarget}`); + console.log( + `Invalid issue number in target configuration: ${updateTarget}` + ); continue; } } else { @@ -1291,16 +1638,16 @@ jobs: if (context.payload.issue) { issueNumber = context.payload.issue.number; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else { - console.log('Could not determine issue number'); + console.log("Could not determine issue number"); continue; } } if (!issueNumber) { - console.log('Could not determine issue number'); + console.log("Could not determine issue number"); continue; } console.log(`Updating issue #${issueNumber}`); @@ -1309,34 +1656,39 @@ jobs: let hasUpdates = false; if (canUpdateStatus && updateItem.status !== undefined) { // Validate status value - if (updateItem.status === 'open' || updateItem.status === 'closed') { + if (updateItem.status === "open" || updateItem.status === "closed") { updateData.state = updateItem.status; hasUpdates = true; console.log(`Will update status to: 
${updateItem.status}`); } else { - console.log(`Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'`); + console.log( + `Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'` + ); } } if (canUpdateTitle && updateItem.title !== undefined) { - if (typeof updateItem.title === 'string' && updateItem.title.trim().length > 0) { + if ( + typeof updateItem.title === "string" && + updateItem.title.trim().length > 0 + ) { updateData.title = updateItem.title.trim(); hasUpdates = true; console.log(`Will update title to: ${updateItem.title.trim()}`); } else { - console.log('Invalid title value: must be a non-empty string'); + console.log("Invalid title value: must be a non-empty string"); } } if (canUpdateBody && updateItem.body !== undefined) { - if (typeof updateItem.body === 'string') { + if (typeof updateItem.body === "string") { updateData.body = updateItem.body; hasUpdates = true; console.log(`Will update body (length: ${updateItem.body.length})`); } else { - console.log('Invalid body value: must be a string'); + console.log("Invalid body value: must be a string"); } } if (!hasUpdates) { - console.log('No valid updates to apply for this item'); + console.log("No valid updates to apply for this item"); continue; } try { @@ -1345,23 +1697,26 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - ...updateData + ...updateData, }); - console.log('Updated issue #' + issue.number + ': ' + issue.html_url); + console.log("Updated issue #" + issue.number + ": " + issue.html_url); updatedIssues.push(issue); // Set output for the last updated issue (for backward compatibility) if (i === updateItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to update issue #${issueNumber}:`, error instanceof Error ? 
error.message : String(error)); + console.error( + `✗ Failed to update issue #${issueNumber}:`, + error instanceof Error ? error.message : String(error) + ); throw error; } } // Write summary for all updated issues if (updatedIssues.length > 0) { - let summaryContent = '\n\n## Updated Issues\n'; + let summaryContent = "\n\n## Updated Issues\n"; for (const issue of updatedIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-codex-add-issue-comment.lock.yml b/.github/workflows/test-codex-add-issue-comment.lock.yml index 0158fe8968..9f7371bb16 100644 --- a/.github/workflows/test-codex-add-issue-comment.lock.yml +++ b/.github/workflows/test-codex-add-issue-comment.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -207,23 +226,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -332,13 +351,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-add-issue-comment.log @@ -378,34 +398,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -413,16 +436,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -433,16 +460,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -451,10 +484,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -463,8 +499,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -473,8 +511,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -485,65 +525,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -552,25 +692,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -578,107 +728,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -691,7 +981,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -700,10 +990,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -735,24 +1025,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -760,54 +1050,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || 
nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -821,10 +1120,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -846,46 +1145,57 @@ jobs: if (execCommands > 0) { markdown += `**Commands Executed:** 
${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + 
nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -894,20 +1204,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -916,7 +1229,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -924,36 +1241,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 
Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -989,30 +1306,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); 
return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } console.log(`Found ${commentItems.length} add-issue-comment item(s)`); @@ -1020,18 +1342,27 @@ jobs: const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const 
isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } const createdComments = []; // Process each comment item for (let i = 0; i < commentItems.length; i++) { const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, { bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; let commentEndpoint; @@ -1040,79 +1371,90 @@ jobs: if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } 
- commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = "issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } // Extract body from the JSON item let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API const { data: comment } = await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } diff --git a/.github/workflows/test-codex-add-issue-labels.lock.yml b/.github/workflows/test-codex-add-issue-labels.lock.yml index cbbd91370e..b010691964 100644 --- a/.github/workflows/test-codex-add-issue-labels.lock.yml +++ b/.github/workflows/test-codex-add-issue-labels.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. 
Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -207,23 +226,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -332,13 +351,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-add-issue-labels.log @@ -378,34 +398,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -413,16 +436,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -433,16 +460,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -451,10 +484,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -463,8 +499,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -473,8 +511,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -485,65 +525,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -552,25 +692,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -578,107 +728,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -691,7 +981,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -700,10 +990,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -735,24 +1025,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -760,54 +1050,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || 
nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -821,10 +1120,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -846,46 +1145,57 @@ jobs: if (execCommands > 0) { markdown += 
`**Commands Executed:** ${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error 
in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -894,20 +1204,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -916,7 +1229,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -924,36 +1241,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log 
content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -990,60 +1307,78 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment 
variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the add-issue-label item - const labelsItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'add-issue-label'); + const labelsItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "add-issue-label" + ); if (!labelsItem) { - console.log('No add-issue-label item found in agent output'); + console.log("No add-issue-label item found in agent output"); return; } - console.log('Found add-issue-label item:', { labelsCount: labelsItem.labels.length }); + console.log("Found add-issue-label item:", { + labelsCount: labelsItem.labels.length, + }); // Read the allowed labels from environment variable (optional) const allowedLabelsEnv = process.env.GITHUB_AW_LABELS_ALLOWED; let allowedLabels = null; - if (allowedLabelsEnv && allowedLabelsEnv.trim() !== '') { - allowedLabels = allowedLabelsEnv.split(',').map(label => label.trim()).filter(label => label); + if (allowedLabelsEnv && allowedLabelsEnv.trim() !== "") { + allowedLabels = allowedLabelsEnv + .split(",") + .map(label => label.trim()) + .filter(label => label); if 
(allowedLabels.length === 0) { allowedLabels = null; // Treat empty list as no restrictions } } if (allowedLabels) { - console.log('Allowed labels:', allowedLabels); + console.log("Allowed labels:", allowedLabels); } else { - console.log('No label restrictions - any labels are allowed'); + console.log("No label restrictions - any labels are allowed"); } // Read the max limit from environment variable (default: 3) const maxCountEnv = process.env.GITHUB_AW_LABELS_MAX_COUNT; const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 3; if (isNaN(maxCount) || maxCount < 1) { - core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`); + core.setFailed( + `Invalid max value: ${maxCountEnv}. Must be a positive integer` + ); return; } - console.log('Max count:', maxCount); + console.log("Max count:", maxCount); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; if (!isIssueContext && !isPRContext) { - core.setFailed('Not running in issue or pull request context, skipping label addition'); + core.setFailed( + "Not running in issue or pull request context, skipping label addition" + ); return; } // Determine the issue/PR number @@ -1052,38 +1387,44 @@ jobs: if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - contextType = 'issue'; + contextType = "issue"; } else { - core.setFailed('Issue context detected but no issue found in payload'); + core.setFailed("Issue context detected but no 
issue found in payload"); return; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - contextType = 'pull request'; + contextType = "pull request"; } else { - core.setFailed('Pull request context detected but no pull request found in payload'); + core.setFailed( + "Pull request context detected but no pull request found in payload" + ); return; } } if (!issueNumber) { - core.setFailed('Could not determine issue or pull request number'); + core.setFailed("Could not determine issue or pull request number"); return; } // Extract labels from the JSON item const requestedLabels = labelsItem.labels || []; - console.log('Requested labels:', requestedLabels); + console.log("Requested labels:", requestedLabels); // Check for label removal attempts (labels starting with '-') for (const label of requestedLabels) { - if (label.startsWith('-')) { - core.setFailed(`Label removal is not permitted. Found line starting with '-': ${label}`); + if (label.startsWith("-")) { + core.setFailed( + `Label removal is not permitted. 
Found line starting with '-': ${label}` + ); return; } } // Validate that all requested labels are in the allowed list (if restrictions are set) let validLabels; if (allowedLabels) { - validLabels = requestedLabels.filter(/** @param {string} label */ label => allowedLabels.includes(label)); + validLabels = requestedLabels.filter( + /** @param {string} label */ label => allowedLabels.includes(label) + ); } else { // No restrictions, all requested labels are valid validLabels = requestedLabels; @@ -1092,40 +1433,55 @@ jobs: let uniqueLabels = [...new Set(validLabels)]; // Enforce max limit if (uniqueLabels.length > maxCount) { - console.log(`too many labels, keep ${maxCount}`) + console.log(`too many labels, keep ${maxCount}`); uniqueLabels = uniqueLabels.slice(0, maxCount); } if (uniqueLabels.length === 0) { - console.log('No labels to add'); - core.setOutput('labels_added', ''); - await core.summary.addRaw(` + console.log("No labels to add"); + core.setOutput("labels_added", ""); + await core.summary + .addRaw( + ` ## Label Addition No labels were added (no valid labels found in agent output). 
- `).write(); + ` + ) + .write(); return; } - console.log(`Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, uniqueLabels); + console.log( + `Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, + uniqueLabels + ); try { // Add labels using GitHub API await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - labels: uniqueLabels + labels: uniqueLabels, }); - console.log(`Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}`); + console.log( + `Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}` + ); // Set output for other jobs to use - core.setOutput('labels_added', uniqueLabels.join('\n')); + core.setOutput("labels_added", uniqueLabels.join("\n")); // Write summary - const labelsListMarkdown = uniqueLabels.map(label => `- \`${label}\``).join('\n'); - await core.summary.addRaw(` + const labelsListMarkdown = uniqueLabels + .map(label => `- \`${label}\``) + .join("\n"); + await core.summary + .addRaw( + ` ## Label Addition Successfully added ${uniqueLabels.length} label(s) to ${contextType} #${issueNumber}: ${labelsListMarkdown} - `).write(); + ` + ) + .write(); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to add labels:', errorMessage); + console.error("Failed to add labels:", errorMessage); core.setFailed(`Failed to add labels: ${errorMessage}`); } } diff --git a/.github/workflows/test-codex-command.lock.yml b/.github/workflows/test-codex-command.lock.yml index cfa1d53de9..ff390dda8f 100644 --- a/.github/workflows/test-codex-command.lock.yml +++ b/.github/workflows/test-codex-command.lock.yml @@ -38,24 +38,28 @@ jobs: const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) try { - console.log(`Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}`); - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + console.log( + `Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}` + ); + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission === 'admin' || permission === 'maintain') { + if (permission === "admin" || permission === "maintain") { console.log(`User has ${permission} access to repository`); - core.setOutput('is_team_member', 'true'); + core.setOutput("is_team_member", "true"); return; } } catch (repoError) { - const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + const errorMessage = + repoError instanceof Error ? 
repoError.message : String(repoError); console.log(`Repository permission check failed: ${errorMessage}`); } - core.setOutput('is_team_member', 'false'); + core.setOutput("is_team_member", "false"); } await main(); - name: Validate team membership @@ -75,34 +79,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" // Step 1: Temporarily mark HTTPS URLs to protect them sanitized = sanitizeUrlProtocols(sanitized); @@ -112,16 +119,19 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length 
> maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -132,16 +142,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - s = s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? 
match : '(redacted)'; - }); + s = s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); return s; } /** @@ -152,10 +168,13 @@ jobs: function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns // This covers URLs like https://example.com, javascript:alert(), mailto:user@domain.com, etc. - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -164,8 +183,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -174,73 +195,77 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } async function main() { - let text = ''; + let text = ""; const actor = context.actor; const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel( + { + owner: owner, + repo: repo, + username: actor, + } + ); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission !== 'admin' && permission !== 'maintain') { - core.setOutput('text', ''); + if (permission !== "admin" && permission !== "maintain") { + core.setOutput("text", ""); return; } // Determine current body text based on event context switch (context.eventName) { - case 'issues': + case "issues": // For issues: title + body if (context.payload.issue) { - const title = context.payload.issue.title || ''; - const body = context.payload.issue.body || ''; + const title = context.payload.issue.title || ""; + const body = context.payload.issue.body || ""; text = `${title}\n\n${body}`; } break; - case 'pull_request': + case "pull_request": // For pull requests: title + body if (context.payload.pull_request) { - const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - case 'pull_request_target': + case "pull_request_target": // For pull request target events: title + body if (context.payload.pull_request) { - 
const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - case 'issue_comment': + case "issue_comment": // For issue comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - case 'pull_request_review_comment': + case "pull_request_review_comment": // For PR review comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - case 'pull_request_review': + case "pull_request_review": // For PR reviews: review body if (context.payload.review) { - text = context.payload.review.body || ''; + text = context.payload.review.body || ""; } break; default: // Default: empty text - text = ''; + text = ""; break; } // Sanitize the text before output @@ -248,7 +273,7 @@ jobs: // Display sanitized text in logs console.log(`text: ${sanitizedText}`); // Set the sanitized text as output - core.setOutput('text', sanitizedText); + core.setOutput("text", sanitizedText); } await main(); @@ -271,21 +296,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -297,20 +333,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -318,10 +354,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -329,10 +365,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -344,24 +380,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -370,19 +410,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -393,33 +433,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -546,23 +590,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -785,34 +829,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -820,16 +867,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -840,16 +891,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -858,10 +915,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -870,8 +930,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -880,8 +942,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -892,65 +956,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -959,25 +1123,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -985,107 +1159,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -1098,7 +1412,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -1107,10 +1421,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -1152,24 +1466,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -1177,16 +1491,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -1194,26 +1508,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -1230,13 +1555,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1253,29 +1584,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1295,22 +1633,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1318,31 +1656,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1351,8 +1698,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1367,11 +1717,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1379,44 +1729,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1451,30 +1807,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } console.log(`Found ${commentItems.length} add-issue-comment item(s)`); @@ -1482,18 +1843,27 @@ jobs: const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } const createdComments = []; // Process each comment item for (let i = 0; i < commentItems.length; i++) { 
const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, { bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; let commentEndpoint; @@ -1502,79 +1872,90 @@ jobs: if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = 
"issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } // Extract body from the JSON item let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API const { data: comment } = await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } diff --git a/.github/workflows/test-codex-create-issue.lock.yml b/.github/workflows/test-codex-create-issue.lock.yml index 73038ac2c3..8960572381 100644 --- a/.github/workflows/test-codex-create-issue.lock.yml +++ b/.github/workflows/test-codex-create-issue.lock.yml @@ -34,23 +34,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -161,13 +161,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail 
INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-create-issue.log @@ -207,34 +208,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -242,16 +246,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -262,16 +270,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -280,10 +294,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -292,8 +309,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -302,8 +321,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -314,65 +335,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -381,25 +502,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -407,107 +538,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -520,7 +791,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -529,10 +800,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -564,24 +835,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -589,54 +860,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || 
nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -650,10 +930,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -675,46 +955,57 @@ jobs: if (execCommands > 0) { markdown += 
`**Commands Executed:** ${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error 
in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -723,20 +1014,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -745,7 +1039,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -753,36 +1051,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log 
content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -818,30 +1116,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment 
variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all create-issue items - const createIssueItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'create-issue'); + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); if (createIssueItems.length === 0) { - console.log('No create-issue items found in agent output'); + console.log("No create-issue items found in agent output"); return; } console.log(`Found ${createIssueItems.length} create-issue item(s)`); @@ -849,23 +1152,31 @@ jobs: const parentIssueNumber = context.payload?.issue?.number; // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; const createdIssues = []; // Process each create-issue item for (let i = 0; i < createIssueItems.length; i++) { const createIssueItem = createIssueItems[i]; - console.log(`Processing create-issue item ${i + 1}/${createIssueItems.length}:`, { title: createIssueItem.title, bodyLength: createIssueItem.body.length }); + console.log( + `Processing create-issue item ${i + 1}/${createIssueItems.length}:`, + { title: createIssueItem.title, bodyLength: createIssueItem.body.length } + ); // Merge environment labels with item-specific labels let labels = [...envLabels]; if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { labels = [...labels, ...createIssueItem.labels].filter(Boolean); } // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ''; - let bodyLines = createIssueItem.body.split('\n'); + let title = createIssueItem.title ? 
createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); // If no title was found, use the body content as title (or a default) if (!title) { - title = createIssueItem.body || 'Agent Output'; + title = createIssueItem.body || "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; @@ -873,22 +1184,27 @@ jobs: title = titlePrefix + title; } if (parentIssueNumber) { - console.log('Detected issue context, parent issue #' + parentIssueNumber); + console.log("Detected issue context, parent issue #" + parentIssueNumber); // Add reference to parent issue in the child issue body bodyLines.push(`Related to #${parentIssueNumber}`); } // Add AI disclaimer with run id, run htmlurl // Add AI disclaimer with workflow run information const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); - console.log('Creating issue with title:', title); - console.log('Labels:', labels); - console.log('Body length:', body.length); + const body = bodyLines.join("\n").trim(); + console.log("Creating issue with title:", title); + console.log("Labels:", labels); + console.log("Body length:", body.length); try { // Create the issue using GitHub API const { data: issue } = await github.rest.issues.create({ @@ -896,9 +1212,9 @@ jobs: repo: context.repo.repo, title: title, body: body, - labels: labels + labels: labels, }); - console.log('Created issue #' + issue.number + ': ' + issue.html_url); + 
console.log("Created issue #" + issue.number + ": " + issue.html_url); createdIssues.push(issue); // If we have a parent issue, add a comment to it referencing the new child issue if (parentIssueNumber) { @@ -907,26 +1223,32 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}` + body: `Created related issue: #${issue.number}`, }); - console.log('Added comment to parent issue #' + parentIssueNumber); + console.log("Added comment to parent issue #" + parentIssueNumber); } catch (error) { - console.log('Warning: Could not add comment to parent issue:', error instanceof Error ? error.message : String(error)); + console.log( + "Warning: Could not add comment to parent issue:", + error instanceof Error ? error.message : String(error) + ); } } // Set output for the last created issue (for backward compatibility) if (i === createIssueItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to create issue "${title}":`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create issue "${title}":`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created issues if (createdIssues.length > 0) { - let summaryContent = '\n\n## GitHub Issues\n'; + let summaryContent = "\n\n## GitHub Issues\n"; for (const issue of createdIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml b/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml new file mode 100644 index 0000000000..4d3b735fec --- /dev/null +++ b/.github/workflows/test-codex-create-pull-request-review-comment.lock.yml @@ -0,0 +1,1500 @@ +# This file was automatically generated by gh-aw. DO NOT EDIT. +# To update this file, edit the corresponding .md file and run: +# gh aw compile + +name: "Test Codex Create Pull Request Review Comment" +"on": + pull_request: + types: + - opened + - synchronize + - reopened + +permissions: {} + +concurrency: + group: "gh-aw-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" + cancel-in-progress: true + +run-name: "Test Codex Create Pull Request Review Comment" + +jobs: + task: + if: contains(github.event.pull_request.title, 'prr') + runs-on: ubuntu-latest + steps: + - name: Task job condition barrier + run: echo "Task job executed - conditions satisfied" + + add_reaction: + needs: task + if: github.event_name == 'issues' || github.event_name == 'pull_request' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_comment' || github.event_name == 'pull_request_review_comment' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + outputs: + reaction_id: ${{ steps.react.outputs.reaction-id }} + steps: + - name: Add eyes reaction to the triggering item + id: react + uses: actions/github-script@v7 + env: + GITHUB_AW_REACTION: eyes + with: + script: | + async function main() { + // Read inputs from environment variables + const reaction = 
process.env.GITHUB_AW_REACTION || "eyes"; + const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); + // Validate reaction type + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; + if (!validReactions.includes(reaction)) { + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); + return; + } + // Determine the API endpoint based on the event type + let reactionEndpoint; + let commentUpdateEndpoint; + let shouldEditComment = false; + const eventName = context.eventName; + const owner = context.repo.owner; + const repo = context.repo.repo; + try { + switch (eventName) { + case "issues": + const issueNumber = context.payload?.issue?.number; + if (!issueNumber) { + core.setFailed("Issue number not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; + // Don't edit issue bodies for now - this might be more complex + shouldEditComment = false; + break; + case "issue_comment": + const commentId = context.payload?.comment?.id; + if (!commentId) { + core.setFailed("Comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? 
true : false; + break; + case "pull_request": + const prNumber = context.payload?.pull_request?.number; + if (!prNumber) { + core.setFailed("Pull request number not found in event payload"); + return; + } + // PRs are "issues" for the reactions endpoint + reactionEndpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; + // Don't edit PR bodies for now - this might be more complex + shouldEditComment = false; + break; + case "pull_request_review_comment": + const reviewCommentId = context.payload?.comment?.id; + if (!reviewCommentId) { + core.setFailed("Review comment ID not found in event payload"); + return; + } + reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; + commentUpdateEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}`; + // Only edit comments for alias workflows + shouldEditComment = alias ? true : false; + break; + default: + core.setFailed(`Unsupported event type: ${eventName}`); + return; + } + console.log("Reaction API endpoint:", reactionEndpoint); + // Add reaction first + await addReaction(reactionEndpoint, reaction); + // Then edit comment if applicable and if it's a comment event + if (shouldEditComment && commentUpdateEndpoint) { + console.log("Comment update endpoint:", commentUpdateEndpoint); + await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); + } else { + if (!alias && commentUpdateEndpoint) { + console.log( + "Skipping comment edit - only available for alias workflows" + ); + } else { + console.log("Skipping comment edit for event type:", eventName); + } + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); + } + } + /** + * Add a reaction to a GitHub issue, PR, or comment + * @param {string} endpoint - The GitHub API endpoint to add the reaction to + * @param {string} reaction - The reaction type to add + */ + async function addReaction(endpoint, reaction) { + const response = await github.request("POST " + endpoint, { + content: reaction, + headers: { + Accept: "application/vnd.github+json", + }, + }); + const reactionId = response.data?.id; + if (reactionId) { + console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); + core.setOutput("reaction-id", reactionId.toString()); + } else { + console.log(`Successfully added reaction: ${reaction}`); + core.setOutput("reaction-id", ""); + } + } + /** + * Edit a comment to add a workflow run link + * @param {string} endpoint - The GitHub API endpoint to update the comment + * @param {string} runUrl - The URL of the workflow run + */ + async function editCommentWithWorkflowLink(endpoint, runUrl) { + try { + // First, get the current comment content + const getResponse = await github.request("GET " + endpoint, { + headers: { + Accept: "application/vnd.github+json", + }, + }); + const originalBody = getResponse.data.body || ""; + const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; + // Check if we've already added a workflow link to avoid duplicates + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment already contains a workflow run link, skipping edit" + ); + return; + } + const updatedBody = originalBody + workflowLinkText; + // Update the comment + const updateResponse = await github.request("PATCH " + endpoint, { + body: updatedBody, + headers: { + Accept: "application/vnd.github+json", + }, + }); + console.log(`Successfully updated comment 
with workflow link`); + console.log(`Comment ID: ${updateResponse.data.id}`); + } catch (error) { + // Don't fail the entire job if comment editing fails - just log it + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); + } + } + await main(); + + test-codex-create-pull-request-review-comment: + needs: task + runs-on: ubuntu-latest + permissions: read-all + outputs: + output: ${{ steps.collect_output.outputs.output }} + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + - name: Install Codex + run: npm install -g @openai/codex + - name: Setup agent output + id: setup_agent_output + uses: actions/github-script@v7 + with: + script: | + function main() { + const fs = require("fs"); + const crypto = require("crypto"); + // Generate a random filename for the output file + const randomId = crypto.randomBytes(8).toString("hex"); + const outputFile = `/tmp/aw_output_${randomId}.txt`; + // Ensure the /tmp directory exists and create empty output file + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); + // Verify the file was created and is writable + if (!fs.existsSync(outputFile)) { + throw new Error(`Failed to create output file: ${outputFile}`); + } + // Set the environment variable for subsequent steps + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); + // Also set as step output for reference + core.setOutput("output_file", outputFile); + } + main(); + - name: Setup MCPs + run: | + mkdir -p /tmp/mcp-config + cat > /tmp/mcp-config/config.toml << EOF + [history] + persistence = "none" + + [mcp_servers.github] + command = "docker" + args = [ + "run", + "-i", + "--rm", + 
"-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server:sha-09deac4" + ] + env = { "GITHUB_PERSONAL_ACCESS_TOKEN" = "${{ secrets.GITHUB_TOKEN }}" } + EOF + - name: Create prompt + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + mkdir -p /tmp/aw-prompts + cat > /tmp/aw-prompts/prompt.txt << 'EOF' + Analyze the pull request and create a few targeted review comments on the code changes. + + Create 2-3 review comments focusing on: + 1. Code quality and best practices + 2. Potential security issues or improvements + 3. Performance optimizations or concerns + + For each review comment, specify: + - The exact file path where the comment should be placed + - The specific line number in the diff + - A helpful comment body with actionable feedback + + If you find multi-line issues, use start_line to comment on ranges of lines. + + + --- + + ## + + **IMPORTANT**: To do the actions mentioned in the header of this section, do NOT attempt to use MCP tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. Instead write JSON objects to the file "${{ env.GITHUB_AW_SAFE_OUTPUTS }}". Each line should contain a single JSON object (JSONL format). You can write them one by one as you do them. + + **Format**: Write one JSON object per line. Each object must have a `type` field specifying the action type. 
+ + ### Available Output Types: + + **Example JSONL file content:** + ``` + # No safe outputs configured for this workflow + ``` + + **Important Notes:** + - Do NOT attempt to use MCP tools, `gh`, or the GitHub API for these actions + - Each JSON object must be on its own line + - Only include output types that are configured for this workflow + - The content of this file will be automatically processed and executed + + EOF + - name: Print prompt to step summary + run: | + echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````markdown' >> $GITHUB_STEP_SUMMARY + cat /tmp/aw-prompts/prompt.txt >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Generate agentic run info + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "codex", + engine_name: "Codex", + model: "", + version: "", + workflow_name: "Test Codex Create Pull Request Review Comment", + experimental: true, + supports_tools_whitelist: true, + supports_http_transport: false, + run_id: context.runId, + run_number: context.runNumber, + run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + created_at: new Date().toISOString() + }; + + // Write to /tmp directory to avoid inclusion in PR + const tmpPath = '/tmp/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + - name: Upload agentic run info + if: always() + uses: actions/upload-artifact@v4 + with: + name: aw_info.json + path: /tmp/aw_info.json + if-no-files-found: warn + - name: Run Codex + run: | + set -o pipefail + INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) + export CODEX_HOME=/tmp/mcp-config + + # Create log directory outside git repo + mkdir -p 
/tmp/aw-logs + + # Run codex with log capture - pipefail ensures codex exit code is preserved + codex exec \ + -c model=o4-mini \ + --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-create-pull-request-review-comment.log + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + - name: Check if workflow-complete.txt exists, if so upload it + id: check_file + run: | + if [ -f workflow-complete.txt ]; then + echo "File exists" + echo "upload=true" >> $GITHUB_OUTPUT + else + echo "File does not exist" + echo "upload=false" >> $GITHUB_OUTPUT + fi + - name: Upload workflow-complete.txt + if: steps.check_file.outputs.upload == 'true' + uses: actions/upload-artifact@v4 + with: + name: workflow-complete + path: workflow-complete.txt + - name: Collect agent output + id: collect_output + uses: actions/github-script@v7 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{}" + with: + script: | + async function main() { + const fs = require("fs"); + /** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ + function sanitizeContent(content) { + if (!content || typeof content !== "string") { + return ""; + } + // Read allowed domains from environment variable + const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = [ + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", + ]; + const allowedDomains = allowedDomainsEnv + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) + : defaultAllowedDomains; + let sanitized = content; + // Neutralize @mentions to prevent unintended notifications + sanitized = neutralizeMentions(sanitized); + // Remove control characters (except newlines and tabs) + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); + // XML character escaping + sanitized = sanitized + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); + // URI filtering - replace non-https protocols with "(redacted)" + sanitized = sanitizeUrlProtocols(sanitized); + // Domain filtering for HTTPS URIs + sanitized = sanitizeUrlDomains(sanitized); + // Limit total length to prevent DoS (0.5MB max) + const maxLength = 524288; + if (sanitized.length > maxLength) { + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; + } + // Limit number of lines to prevent log flooding (65k max) + const lines = sanitized.split("\n"); + const maxLines = 65000; + if (lines.length > maxLines) { + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; + } + // Remove ANSI escape sequences + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); + // Neutralize common bot trigger phrases + sanitized = neutralizeBotTriggers(sanitized); + // Trim excessive whitespace + return sanitized.trim(); + /** + * Remove unknown domains + * @param {string} s - The string to process + * @returns {string} The string with unknown domains redacted + */ + function sanitizeUrlDomains(s) { + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain 
=> { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); + } + /** + * Remove unknown protocols except https + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ + function sanitizeUrlProtocols(s) { + // Match both protocol:// and protocol: patterns + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); + } + /** + * Neutralizes @mentions by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ + function neutralizeMentions(s) { + // Replace @name or @org/team outside code with `@name` + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); + } + /** + * Neutralizes bot trigger phrases by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized bot triggers + */ + function neutralizeBotTriggers(s) { + // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
+ return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); + } + } + /** + * Gets the maximum allowed count for a given output type + * @param {string} itemType - The output item type + * @param {Object} config - The safe-outputs configuration + * @returns {number} The maximum allowed count + */ + function getMaxAllowedForType(itemType, config) { + // Check if max is explicitly specified in config + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { + return config[itemType].max; + } + // Use default limits for plural-supported types + switch (itemType) { + case "create-issue": + return 1; // Only one issue allowed + case "add-issue-comment": + return 1; // Only one comment allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed + default: + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + 
if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } 
catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!outputFile) { + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); + return; + } + console.log("Raw output content length:", outputContent.length); + // Parse the safe-outputs configuration + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + expectedOutputTypes = JSON.parse(safeOutputsConfig); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); + } catch (error) { + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); + } + } + // Parse JSONL content + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; // Skip empty lines + try { + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Validate against expected output types + const itemType = item.type; + if (!expectedOutputTypes[itemType]) { + 
errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); + continue; + } + // Check for too many items of the same type + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.` + ); + continue; + } + // Basic validation based on type + switch (itemType) { + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.body = sanitizeContent(item.body); + break; + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize branch name if present + if (item.branch && typeof item.branch === "string") { + item.branch = sanitizeContent(item.branch); + } + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-label": + if (!item.labels || !Array.isArray(item.labels)) { + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); + continue; + } + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); + continue; + } + // Sanitize label strings + item.labels = item.labels.map(label => sanitizeContent(label)); + break; + case "update-issue": + // Check that at least one updateable field is provided + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; + if (!hasValidField) { + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); + continue; + } + // Validate status if provided + if (item.status !== undefined) { + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); + continue; + } + } + // Validate title if provided + if (item.title !== undefined) { + if (typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); + continue; + } + item.title = sanitizeContent(item.title); + } + // Validate body if provided + if (item.body !== undefined) { + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); + continue; + } + item.body = sanitizeContent(item.body); + } + // Validate issue_number if provided (for target "*") + if (item.issue_number !== undefined) { + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); + continue; + } + } + break; + case "push-to-branch": + // Validate message if provided 
(optional) + if (item.message !== undefined) { + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); + continue; + } + item.message = sanitizeContent(item.message); + } + // Validate pull_request_number if provided (for target "*") + if (item.pull_request_number !== undefined) { + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + 
); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; + default: + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + console.log(`Line ${i + 1}: Valid ${itemType} item`); + parsedItems.push(item); + } catch (error) { + errors.push(`Line ${i + 1}: Invalid JSON - ${error.message}`); + } + } + // Report validation results + if (errors.length > 0) { + console.log("Validation errors found:"); + errors.forEach(error => console.log(` - ${error}`)); + // For now, we'll continue with valid items but log the errors + // In the future, we might want to fail the workflow for invalid items + } + console.log(`Successfully parsed ${parsedItems.length} valid output items`); + // Set the parsed and validated items as output + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + } + // Call the main function + await main(); + - name: Print agent output to step summary + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + cat ${{ env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY + # Ensure there's a newline after the file content if it doesn't end with one + if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + fi + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Upload agentic output file + if: always() && steps.collect_output.outputs.output != '' + uses: actions/upload-artifact@v4 + with: + name: aw_output.txt + path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + if-no-files-found: warn + - name: Parse agent logs for step summary + if: always() + uses: actions/github-script@v7 + env: + 
AGENT_LOG_FILE: /tmp/test-codex-create-pull-request-review-comment.log + with: + script: | + function main() { + const fs = require("fs"); + try { + const logFile = process.env.AGENT_LOG_FILE; + if (!logFile) { + console.log("No agent log file specified"); + return; + } + if (!fs.existsSync(logFile)) { + console.log(`Log file not found: ${logFile}`); + return; + } + const content = fs.readFileSync(logFile, "utf8"); + const parsedLog = parseCodexLog(content); + if (parsedLog) { + core.summary.addRaw(parsedLog).write(); + console.log("Codex log parsed successfully"); + } else { + console.log("Failed to parse Codex log"); + } + } catch (error) { + core.setFailed(error.message); + } + } + function parseCodexLog(logContent) { + try { + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; + const commandSummary = []; + // First pass: collect commands for summary + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + // Detect tool usage and exec commands + if (line.includes("] tool ") && line.includes("(")) { + // Extract tool name + const toolMatch = line.match(/\] tool ([^(]+)\(/); + if (toolMatch) { + const toolName = toolMatch[1]; + // Look ahead to find the result status + let statusIcon = "❓"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("success in")) { + statusIcon = "✅"; + break; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; + break; + } + } + if (toolName.includes(".")) { + // Format as provider::method + const parts = toolName.split("."); + const provider = parts[0]; + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); + } else { + commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); + } + } + } else if (line.includes("] exec ")) { + // Extract 
exec command + const execMatch = line.match(/exec (.+?) in/); + if (execMatch) { + const formattedCommand = formatBashCommand(execMatch[1]); + // Look ahead to find the result status + let statusIcon = "❓"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; + break; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; + break; + } + } + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } + } + } + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + markdown += `${cmd}\n`; + } + } else { + markdown += "No commands or tools used.\n"; + } + // Add Information section + markdown += "\n## 📊 Information\n\n"; + // Extract metadata from Codex logs + let totalTokens = 0; + const tokenMatches = logContent.match(/tokens used: (\d+)/g); + if (tokenMatches) { + for (const match of tokenMatches) { + const tokens = parseInt(match.match(/(\d+)/)[1]); + totalTokens += tokens; + } + } + if (totalTokens > 0) { + markdown += `**Total Tokens Used:** ${totalTokens.toLocaleString()}\n\n`; + } + // Count tool calls and exec commands + const toolCalls = (logContent.match(/\] tool /g) || []).length; + const execCommands = (logContent.match(/\] exec /g) || []).length; + if (toolCalls > 0) { + markdown += `**Tool Calls:** ${toolCalls}\n\n`; + } + if (execCommands > 0) { + markdown += `**Commands Executed:** ${execCommands}\n\n`; + } + markdown += "\n## 🤖 Reasoning\n\n"; + // Second pass: process full conversation flow with interleaved reasoning, tools, and commands + let inThinkingSection = false; + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + // Skip metadata lines + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + 
line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { + continue; + } + // Process thinking sections + if (line.includes("] thinking")) { + inThinkingSection = true; + continue; + } + // Process tool calls + if (line.includes("] tool ") && line.includes("(")) { + inThinkingSection = false; + const toolMatch = line.match(/\] tool ([^(]+)\(/); + if (toolMatch) { + const toolName = toolMatch[1]; + // Look ahead to find the result status + let statusIcon = "❓"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("success in")) { + statusIcon = "✅"; + break; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; + break; + } + } + if (toolName.includes(".")) { + const parts = toolName.split("."); + const provider = parts[0]; + const method = parts.slice(1).join("_"); + markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; + } else { + markdown += `${statusIcon} ${toolName}(...)\n\n`; + } + } + continue; + } + // Process exec commands + if (line.includes("] exec ")) { + inThinkingSection = false; + const execMatch = line.match(/exec (.+?) 
in/); + if (execMatch) { + const formattedCommand = formatBashCommand(execMatch[1]); + // Look ahead to find the result status + let statusIcon = "❓"; // Unknown by default + for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { + const nextLine = lines[j]; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; + break; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; + break; + } + } + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; + } + continue; + } + // Process thinking content + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { + const trimmed = line.trim(); + // Add thinking content directly + markdown += `${trimmed}\n\n`; + } + } + return markdown; + } catch (error) { + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; + } + } + function formatBashCommand(command) { + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) + const maxLength = 80; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + return formatted; + } + function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; + } + // Export for testing + if (typeof module !== 
"undefined" && module.exports) { + module.exports = { parseCodexLog, formatBashCommand, truncateString }; + } + main(); + - name: Upload agent logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-codex-create-pull-request-review-comment.log + path: /tmp/test-codex-create-pull-request-review-comment.log + if-no-files-found: warn + + create_pr_review_comment: + needs: test-codex-create-pull-request-review-comment + if: github.event.pull_request.number + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + timeout-minutes: 10 + outputs: + review_comment_id: ${{ steps.create_pr_review_comment.outputs.review_comment_id }} + review_comment_url: ${{ steps.create_pr_review_comment.outputs.review_comment_url }} + steps: + - name: Create PR Review Comment + id: create_pr_review_comment + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-codex-create-pull-request-review-comment.outputs.output }} + GITHUB_AW_PR_REVIEW_COMMENT_SIDE: "RIGHT" + with: + script: | + async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + console.log("Agent output content length:", outputContent.length); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + // Find all create-pull-request-review-comment items + const reviewCommentItems = validatedOutput.items.filter( + /** @param {any} item */ item => + item.type === "create-pull-request-review-comment" + ); + if (reviewCommentItems.length === 0) { + console.log( + "No create-pull-request-review-comment items found in agent output" + ); + return; + } + console.log( + `Found ${reviewCommentItems.length} create-pull-request-review-comment item(s)` + ); + // Get the side configuration from environment variable + const defaultSide = process.env.GITHUB_AW_PR_REVIEW_COMMENT_SIDE || "RIGHT"; + console.log(`Default comment side configuration: ${defaultSide}`); + // Check if we're in a pull request context + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; + if (!isPRContext) { + console.log( + "Not running in pull request context, skipping review comment creation" + ); + return; + } + if (!context.payload.pull_request) { + console.log( + "Pull request context detected but no pull request found in payload" + ); + return; + } + const pullRequestNumber = context.payload.pull_request.number; + console.log(`Creating review comments on PR #${pullRequestNumber}`); + const createdComments = []; + // Process each review comment item + for (let i = 0; i < reviewCommentItems.length; i++) { + const commentItem = reviewCommentItems[i]; + console.log( + `Processing create-pull-request-review-comment item ${i + 1}/${reviewCommentItems.length}:`, + { + bodyLength: commentItem.body ? 
commentItem.body.length : "undefined", + path: commentItem.path, + line: commentItem.line, + startLine: commentItem.start_line, + } + ); + // Validate required fields + if (!commentItem.path) { + console.log('Missing required field "path" in review comment item'); + continue; + } + if ( + !commentItem.line || + (typeof commentItem.line !== "number" && + typeof commentItem.line !== "string") + ) { + console.log( + 'Missing or invalid required field "line" in review comment item' + ); + continue; + } + if (!commentItem.body || typeof commentItem.body !== "string") { + console.log( + 'Missing or invalid required field "body" in review comment item' + ); + continue; + } + // Parse line numbers + const line = parseInt(commentItem.line, 10); + if (isNaN(line) || line <= 0) { + console.log(`Invalid line number: ${commentItem.line}`); + continue; + } + let startLine = undefined; + if (commentItem.start_line) { + startLine = parseInt(commentItem.start_line, 10); + if (isNaN(startLine) || startLine <= 0 || startLine > line) { + console.log( + `Invalid start_line number: ${commentItem.start_line} (must be <= line: ${line})` + ); + continue; + } + } + // Determine side (LEFT or RIGHT) + const side = commentItem.side || defaultSide; + if (side !== "LEFT" && side !== "RIGHT") { + console.log(`Invalid side value: ${side} (must be LEFT or RIGHT)`); + continue; + } + // Extract body from the JSON item + let body = commentItem.body.trim(); + // Add AI disclaimer with run id, run htmlurl + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; + console.log( + `Creating review comment on PR #${pullRequestNumber} at ${commentItem.path}:${line}${startLine ? 
` (lines ${startLine}-${line})` : ""} [${side}]` + ); + console.log("Comment content length:", body.length); + try { + // Prepare the request parameters + const requestParams = { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullRequestNumber, + body: body, + path: commentItem.path, + line: line, + side: side, + }; + // Add start_line for multi-line comments + if (startLine !== undefined) { + requestParams.start_line = startLine; + requestParams.start_side = side; // start_side should match side for consistency + } + // Create the review comment using GitHub API + const { data: comment } = + await github.rest.pulls.createReviewComment(requestParams); + console.log( + "Created review comment #" + comment.id + ": " + comment.html_url + ); + createdComments.push(comment); + // Set output for the last created comment (for backward compatibility) + if (i === reviewCommentItems.length - 1) { + core.setOutput("review_comment_id", comment.id); + core.setOutput("review_comment_url", comment.html_url); + } + } catch (error) { + console.error( + `✗ Failed to create review comment:`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + } + // Write summary for all created comments + if (createdComments.length > 0) { + let summaryContent = "\n\n## GitHub PR Review Comments\n"; + for (const comment of createdComments) { + summaryContent += `- Review Comment #${comment.id}: [View Comment](${comment.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + console.log( + `Successfully created ${createdComments.length} review comment(s)` + ); + return createdComments; + } + await main(); + diff --git a/.github/workflows/test-codex-create-pull-request-review-comment.md b/.github/workflows/test-codex-create-pull-request-review-comment.md new file mode 100644 index 0000000000..4ce19f9219 --- /dev/null +++ b/.github/workflows/test-codex-create-pull-request-review-comment.md @@ -0,0 +1,29 @@ +--- +on: + pull_request: + types: [opened, synchronize, reopened] + reaction: eyes + +engine: + id: codex + +if: contains(github.event.pull_request.title, 'prr') + +safe-outputs: + create-pull-request-review-comment: + max: 3 +--- + +Analyze the pull request and create a few targeted review comments on the code changes. + +Create 2-3 review comments focusing on: +1. Code quality and best practices +2. Potential security issues or improvements +3. Performance optimizations or concerns + +For each review comment, specify: +- The exact file path where the comment should be placed +- The specific line number in the diff +- A helpful comment body with actionable feedback + +If you find multi-line issues, use start_line to comment on ranges of lines. 
diff --git a/.github/workflows/test-codex-create-pull-request.lock.yml b/.github/workflows/test-codex-create-pull-request.lock.yml index e03dd68e1f..699416921d 100644 --- a/.github/workflows/test-codex-create-pull-request.lock.yml +++ b/.github/workflows/test-codex-create-pull-request.lock.yml @@ -34,23 +34,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -168,13 +168,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee 
/tmp/test-codex-create-pull-request.log @@ -214,34 +215,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -249,16 +253,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines 
to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -269,16 +277,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? 
match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -287,10 +301,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -299,8 +316,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -309,8 +328,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -321,65 +342,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function 
repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * 
@param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", 
Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -388,25 +509,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -414,107 +545,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -527,7 +798,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -536,10 +807,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -571,24 +842,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -596,54 +867,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || 
nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -657,10 +937,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -682,46 +962,57 @@ jobs: if (execCommands > 0) { markdown += `**Commands Executed:** 
${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + 
nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -730,20 +1021,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -752,7 +1046,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -760,36 +1058,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 
Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -963,52 +1261,70 @@ jobs: // Environment validation - fail early if required variables are missing const workflowId = process.env.GITHUB_AW_WORKFLOW_ID; if (!workflowId) { - throw new Error('GITHUB_AW_WORKFLOW_ID environment variable is required'); + throw new Error("GITHUB_AW_WORKFLOW_ID environment variable is 
required"); } const baseBranch = process.env.GITHUB_AW_BASE_BRANCH; if (!baseBranch) { - throw new Error('GITHUB_AW_BASE_BRANCH environment variable is required'); + throw new Error("GITHUB_AW_BASE_BRANCH environment variable is required"); } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); } // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - throw new Error('No patch file found - cannot create pull request without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + throw new Error( + "No patch file found - cannot create pull request without changes" + ); } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || patchContent.includes('Failed to generate patch')) { - throw new Error('Patch file is empty or contains error message - cannot create pull request without changes'); + const patchContent = fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + throw new Error( + "Patch file is empty or contains error message - cannot create pull request without changes" + ); } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); + console.log("Agent output content length:", outputContent.length); + console.log("Patch content validation passed"); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the create-pull-request item - const pullRequestItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'create-pull-request'); + const pullRequestItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "create-pull-request" + ); if (!pullRequestItem) { - console.log('No create-pull-request item found in agent output'); + console.log("No create-pull-request item found in agent output"); return; } - console.log('Found create-pull-request item:', { title: pullRequestItem.title, bodyLength: pullRequestItem.body.length }); + console.log("Found create-pull-request item:", { + title: pullRequestItem.title, + bodyLength: pullRequestItem.body.length, + }); // Extract title, body, and branch from the JSON item let title = pullRequestItem.title.trim(); - let bodyLines = pullRequestItem.body.split('\n'); - let branchName = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + let bodyLines = pullRequestItem.body.split("\n"); + let branchName = pullRequestItem.branch + ? pullRequestItem.branch.trim() + : null; // If no title was found, use a default if (!title) { - title = 'Agent Output'; + title = "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_PR_TITLE_PREFIX; @@ -1017,59 +1333,80 @@ jobs: } // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); + const body = bodyLines.join("\n").trim(); // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_PR_LABELS; - const labels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + const labels = labelsEnv + ? labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; // Parse draft setting from environment variable (defaults to true) const draftEnv = process.env.GITHUB_AW_PR_DRAFT; - const draft = draftEnv ? draftEnv.toLowerCase() === 'true' : true; - console.log('Creating pull request with title:', title); - console.log('Labels:', labels); - console.log('Draft:', draft); - console.log('Body length:', body.length); + const draft = draftEnv ? 
draftEnv.toLowerCase() === "true" : true; + console.log("Creating pull request with title:", title); + console.log("Labels:", labels); + console.log("Draft:", draft); + console.log("Body length:", body.length); // Use branch name from JSONL if provided, otherwise generate unique branch name if (!branchName) { - console.log('No branch name provided in JSONL, generating unique branch name'); + console.log( + "No branch name provided in JSONL, generating unique branch name" + ); // Generate unique branch name using cryptographic random hex - const randomHex = crypto.randomBytes(8).toString('hex'); + const randomHex = crypto.randomBytes(8).toString("hex"); branchName = `${workflowId}/${randomHex}`; } else { - console.log('Using branch name from JSONL:', branchName); + console.log("Using branch name from JSONL:", branchName); } - console.log('Generated branch name:', branchName); - console.log('Base branch:', baseBranch); + console.log("Generated branch name:", branchName); + console.log("Base branch:", baseBranch); // Create a new branch using git CLI // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Handle branch creation/checkout - const branchFromJsonl = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + const branchFromJsonl = pullRequestItem.branch + ? 
pullRequestItem.branch.trim() + : null; if (branchFromJsonl) { - console.log('Checking if branch from JSONL exists:', branchFromJsonl); - console.log('Branch does not exist locally, creating new branch:', branchFromJsonl); - execSync(`git checkout -b ${branchFromJsonl}`, { stdio: 'inherit' }); - console.log('Using existing/created branch:', branchFromJsonl); + console.log("Checking if branch from JSONL exists:", branchFromJsonl); + console.log( + "Branch does not exist locally, creating new branch:", + branchFromJsonl + ); + execSync(`git checkout -b ${branchFromJsonl}`, { stdio: "inherit" }); + console.log("Using existing/created branch:", branchFromJsonl); } else { // Create and checkout new branch with generated name - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); - console.log('Created and checked out new branch:', branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); + console.log("Created and checked out new branch:", branchName); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); // Apply the patch using git apply - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); - console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); - execSync(`git commit -m "Add agent output: ${title}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed'); + execSync("git add .", { stdio: "inherit" }); + execSync(`git commit -m "Add agent output: ${title}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed"); // Create the pull request const { data: pullRequest } = await github.rest.pulls.create({ owner: context.repo.owner, @@ -1078,31 
+1415,36 @@ jobs: body: body, head: branchName, base: baseBranch, - draft: draft + draft: draft, }); - console.log('Created pull request #' + pullRequest.number + ': ' + pullRequest.html_url); + console.log( + "Created pull request #" + pullRequest.number + ": " + pullRequest.html_url + ); // Add labels if specified if (labels.length > 0) { await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pullRequest.number, - labels: labels + labels: labels, }); - console.log('Added labels to pull request:', labels); + console.log("Added labels to pull request:", labels); } // Set output for other jobs to use - core.setOutput('pull_request_number', pullRequest.number); - core.setOutput('pull_request_url', pullRequest.html_url); - core.setOutput('branch_name', branchName); + core.setOutput("pull_request_number", pullRequest.number); + core.setOutput("pull_request_url", pullRequest.html_url); + core.setOutput("branch_name", branchName); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Pull Request - **Pull Request**: [#${pullRequest.number}](${pullRequest.html_url}) - **Branch**: \`${branchName}\` - **Base Branch**: \`${baseBranch}\` - `).write(); + ` + ) + .write(); } await main(); diff --git a/.github/workflows/test-codex-mcp.lock.yml b/.github/workflows/test-codex-mcp.lock.yml index 4925e71674..e02a80426b 100644 --- a/.github/workflows/test-codex-mcp.lock.yml +++ b/.github/workflows/test-codex-mcp.lock.yml @@ -31,21 +31,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -57,20 +68,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -78,10 +89,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -89,10 +100,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -104,24 +115,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -130,19 +145,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -153,33 +168,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -204,23 +223,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -351,13 +370,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-mcp.log @@ -397,34 +417,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -432,16 +455,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -452,16 +479,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -470,10 +503,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -482,8 +518,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -492,8 +530,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -504,65 +544,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -571,25 +711,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -597,107 +747,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -710,7 +1000,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -719,10 +1009,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -754,24 +1044,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -779,54 +1069,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || 
nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -840,10 +1139,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -865,46 +1164,57 @@ jobs: if (execCommands > 0) { markdown += 
`**Commands Executed:** ${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error 
in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -913,20 +1223,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -935,7 +1248,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -943,36 +1260,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log 
content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -1006,30 +1323,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment 
variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all create-issue items - const createIssueItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'create-issue'); + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); if (createIssueItems.length === 0) { - console.log('No create-issue items found in agent output'); + console.log("No create-issue items found in agent output"); return; } console.log(`Found ${createIssueItems.length} create-issue item(s)`); @@ -1037,23 +1359,31 @@ jobs: const parentIssueNumber = context.payload?.issue?.number; // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; const createdIssues = []; // Process each create-issue item for (let i = 0; i < createIssueItems.length; i++) { const createIssueItem = createIssueItems[i]; - console.log(`Processing create-issue item ${i + 1}/${createIssueItems.length}:`, { title: createIssueItem.title, bodyLength: createIssueItem.body.length }); + console.log( + `Processing create-issue item ${i + 1}/${createIssueItems.length}:`, + { title: createIssueItem.title, bodyLength: createIssueItem.body.length } + ); // Merge environment labels with item-specific labels let labels = [...envLabels]; if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { labels = [...labels, ...createIssueItem.labels].filter(Boolean); } // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ''; - let bodyLines = createIssueItem.body.split('\n'); + let title = createIssueItem.title ? 
createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); // If no title was found, use the body content as title (or a default) if (!title) { - title = createIssueItem.body || 'Agent Output'; + title = createIssueItem.body || "Agent Output"; } // Apply title prefix if provided via environment variable const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; @@ -1061,22 +1391,27 @@ jobs: title = titlePrefix + title; } if (parentIssueNumber) { - console.log('Detected issue context, parent issue #' + parentIssueNumber); + console.log("Detected issue context, parent issue #" + parentIssueNumber); // Add reference to parent issue in the child issue body bodyLines.push(`Related to #${parentIssueNumber}`); } // Add AI disclaimer with run id, run htmlurl // Add AI disclaimer with workflow run information const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); - console.log('Creating issue with title:', title); - console.log('Labels:', labels); - console.log('Body length:', body.length); + const body = bodyLines.join("\n").trim(); + console.log("Creating issue with title:", title); + console.log("Labels:", labels); + console.log("Body length:", body.length); try { // Create the issue using GitHub API const { data: issue } = await github.rest.issues.create({ @@ -1084,9 +1419,9 @@ jobs: repo: context.repo.repo, title: title, body: body, - labels: labels + labels: labels, }); - console.log('Created issue #' + issue.number + ': ' + issue.html_url); + 
console.log("Created issue #" + issue.number + ": " + issue.html_url); createdIssues.push(issue); // If we have a parent issue, add a comment to it referencing the new child issue if (parentIssueNumber) { @@ -1095,26 +1430,32 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}` + body: `Created related issue: #${issue.number}`, }); - console.log('Added comment to parent issue #' + parentIssueNumber); + console.log("Added comment to parent issue #" + parentIssueNumber); } catch (error) { - console.log('Warning: Could not add comment to parent issue:', error instanceof Error ? error.message : String(error)); + console.log( + "Warning: Could not add comment to parent issue:", + error instanceof Error ? error.message : String(error) + ); } } // Set output for the last created issue (for backward compatibility) if (i === createIssueItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to create issue "${title}":`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create issue "${title}":`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created issues if (createdIssues.length > 0) { - let summaryContent = '\n\n## GitHub Issues\n'; + let summaryContent = "\n\n## GitHub Issues\n"; for (const issue of createdIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-codex-push-to-branch.lock.yml b/.github/workflows/test-codex-push-to-branch.lock.yml index 52ba352726..44a312d120 100644 --- a/.github/workflows/test-codex-push-to-branch.lock.yml +++ b/.github/workflows/test-codex-push-to-branch.lock.yml @@ -36,24 +36,28 @@ jobs: const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) try { - console.log(`Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}`); - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); + console.log( + `Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}` + ); + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - if (permission === 'admin' || permission === 'maintain') { + if (permission === "admin" || permission === "maintain") { console.log(`User has ${permission} access to repository`); - core.setOutput('is_team_member', 'true'); + core.setOutput("is_team_member", "true"); return; } } catch (repoError) { - const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + const errorMessage = + repoError instanceof Error ? 
repoError.message : String(repoError); console.log(`Repository permission check failed: ${errorMessage}`); } - core.setOutput('is_team_member', 'false'); + core.setOutput("is_team_member", "false"); } await main(); - name: Validate team membership @@ -84,23 +88,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -253,13 +257,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-push-to-branch.log @@ -299,34 +304,37 @@ jobs: * 
@returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -334,16 +342,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = 
sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -354,16 +366,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? 
match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -372,10 +390,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -384,8 +405,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -394,8 +417,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -406,65 +431,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function 
repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * 
@param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", 
Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -473,25 +598,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -499,107 +634,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -612,7 +887,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -621,10 +896,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -656,24 +931,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -681,54 +956,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || 
nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -742,10 +1026,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -767,46 +1051,57 @@ jobs: if (execCommands > 0) { markdown += 
`**Commands Executed:** ${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error 
in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -815,20 +1110,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -837,7 +1135,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -845,36 +1147,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log 
content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -1044,118 +1346,143 @@ jobs: // Environment validation - fail early if required variables are missing const branchName = process.env.GITHUB_AW_PUSH_BRANCH; if (!branchName) { - core.setFailed('GITHUB_AW_PUSH_BRANCH environment variable is required'); + core.setFailed("GITHUB_AW_PUSH_BRANCH 
environment variable is required"); return; } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } const target = process.env.GITHUB_AW_PUSH_TARGET || "triggering"; // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - core.setFailed('No patch file found - cannot push without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + core.setFailed("No patch file found - cannot push without changes"); return; } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || patchContent.includes('Failed to generate patch')) { - core.setFailed('Patch file is empty or contains error message - cannot push without changes'); + const patchContent = fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + core.setFailed( + "Patch file is empty or contains error message - cannot push without changes" + ); return; } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); - console.log('Target branch:', branchName); - console.log('Target configuration:', target); + console.log("Agent output content length:", outputContent.length); + console.log("Patch content validation passed"); + console.log("Target branch:", branchName); + console.log("Target configuration:", target); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the push-to-branch item - const pushItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'push-to-branch'); + const pushItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "push-to-branch" + ); if (!pushItem) { - console.log('No push-to-branch item found in agent output'); + console.log("No push-to-branch item found in agent output"); return; } - console.log('Found push-to-branch item'); + console.log("Found push-to-branch item"); // Validate target configuration for pull request context if (target !== "*" && target !== "triggering") { // If target is a specific number, validate it's a valid pull request number const targetNumber = parseInt(target, 10); if (isNaN(targetNumber)) { - core.setFailed('Invalid target configuration: must be "triggering", "*", or a valid pull request number'); + core.setFailed( + 'Invalid target configuration: must be "triggering", "*", or a valid pull request number' + ); return; } } // Check if we're in a pull request context when required if (target === "triggering" && !context.payload.pull_request) { - core.setFailed('push-to-branch with target "triggering" requires pull request context'); + core.setFailed( + 'push-to-branch with target "triggering" requires pull request context' + ); return; } // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Switch to or create the target branch - 
console.log('Switching to branch:', branchName); + console.log("Switching to branch:", branchName); try { // Try to checkout existing branch first - execSync('git fetch origin', { stdio: 'inherit' }); - execSync(`git checkout ${branchName}`, { stdio: 'inherit' }); - console.log('Checked out existing branch:', branchName); + execSync("git fetch origin", { stdio: "inherit" }); + execSync(`git checkout ${branchName}`, { stdio: "inherit" }); + console.log("Checked out existing branch:", branchName); } catch (error) { // Branch doesn't exist, create it - console.log('Branch does not exist, creating new branch:', branchName); - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); + console.log("Branch does not exist, creating new branch:", branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); try { - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); - console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); } catch (error) { - console.error('Failed to apply patch:', error instanceof Error ? error.message : String(error)); - core.setFailed('Failed to apply patch'); + console.error( + "Failed to apply patch:", + error instanceof Error ? 
error.message : String(error) + ); + core.setFailed("Failed to apply patch"); return; } // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); + execSync("git add .", { stdio: "inherit" }); // Check if there are changes to commit try { - execSync('git diff --cached --exit-code', { stdio: 'ignore' }); - console.log('No changes to commit'); + execSync("git diff --cached --exit-code", { stdio: "ignore" }); + console.log("No changes to commit"); return; } catch (error) { // Exit code != 0 means there are changes to commit, which is what we want } - const commitMessage = pushItem.message || 'Apply agent changes'; - execSync(`git commit -m "${commitMessage}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed to branch:', branchName); + const commitMessage = pushItem.message || "Apply agent changes"; + execSync(`git commit -m "${commitMessage}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed to branch:", branchName); // Get commit SHA - const commitSha = execSync('git rev-parse HEAD', { encoding: 'utf8' }).trim(); - const pushUrl = context.payload.repository + const commitSha = execSync("git rev-parse HEAD", { encoding: "utf8" }).trim(); + const pushUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/tree/${branchName}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/tree/${branchName}`; // Set outputs - core.setOutput('branch_name', branchName); - core.setOutput('commit_sha', commitSha); - core.setOutput('push_url', pushUrl); + core.setOutput("branch_name", branchName); + core.setOutput("commit_sha", commitSha); + core.setOutput("push_url", pushUrl); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Push to Branch - **Branch**: \`${branchName}\` - **Commit**: [${commitSha.substring(0, 7)}](${pushUrl}) - **URL**: [${pushUrl}](${pushUrl}) - `).write(); + ` + ) + .write(); } await main(); diff --git a/.github/workflows/test-codex-update-issue.lock.yml b/.github/workflows/test-codex-update-issue.lock.yml index ad4b0b64b4..9eef67a991 100644 --- a/.github/workflows/test-codex-update-issue.lock.yml +++ b/.github/workflows/test-codex-update-issue.lock.yml @@ -34,21 +34,32 @@ jobs: with: script: | async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION || "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } // Determine the API endpoint based on the event type @@ -60,20 +71,20 @@ jobs: const repo = context.repo.repo; try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; // Don't edit issue bodies for now - this might be more complex shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -81,10 +92,10 @@ jobs: // Only edit comments for alias workflows shouldEditComment = alias ? 
true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -92,10 +103,10 @@ jobs: // Don't edit PR bodies for now - this might be more complex shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -107,24 +118,28 @@ jobs: core.setFailed(`Unsupported event type: ${eventName}`); return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } /** @@ -133,19 +148,19 @@ jobs: * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } /** @@ -156,33 +171,37 @@ jobs: async function editCommentWithWorkflowLink(endpoint, runUrl) { try { // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment 
already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } await main(); @@ -207,23 +226,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic 
output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup MCPs @@ -335,13 +354,14 @@ jobs: if-no-files-found: warn - name: Run Codex run: | + set -o pipefail INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs - # Run codex with log capture + # Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=o4-mini \ --full-auto "$INSTRUCTION" 2>&1 | tee /tmp/test-codex-update-issue.log @@ -381,34 +401,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -416,16 +439,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -436,16 +463,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -454,10 +487,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -466,8 +502,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -476,8 +514,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -488,65 +528,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -555,25 +695,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -581,107 +731,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); continue; } } break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -694,7 +984,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -703,10 +993,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + 
core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -738,24 +1028,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const content = fs.readFileSync(logFile, 'utf8'); + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -763,54 +1053,63 @@ jobs: } function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; const commandSummary = []; // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || 
nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -824,10 +1123,10 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -849,46 +1148,57 @@ jobs: if (execCommands > 0) { markdown += `**Commands Executed:** 
${execCommands}\n\n`; } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; for (let i = 0; i < lines.length; i++) { const line = lines[i]; // Skip metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + 
nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - if (toolName.includes('.')) { - const parts = toolName.split('.'); + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -897,20 +1207,23 @@ jobs: continue; } // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } @@ -919,7 +1232,11 @@ jobs: continue; } // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; @@ -927,36 +1244,36 @@ jobs: } return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 
Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { + if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } main(); @@ -994,45 +1311,55 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); 
return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all update-issue items - const updateItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'update-issue'); + const updateItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "update-issue" + ); if (updateItems.length === 0) { - console.log('No update-issue items found in agent output'); + console.log("No update-issue items found in agent output"); return; } console.log(`Found ${updateItems.length} update-issue item(s)`); // Get the configuration from environment variables const updateTarget = process.env.GITHUB_AW_UPDATE_TARGET || "triggering"; - const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === 'true'; - const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === 'true'; - const canUpdateBody = process.env.GITHUB_AW_UPDATE_BODY === 'true'; + const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === "true"; + const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === "true"; + const canUpdateBody = process.env.GITHUB_AW_UPDATE_BODY === "true"; console.log(`Update target configuration: ${updateTarget}`); - 
console.log(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}`); + console.log( + `Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}` + ); // Check if we're in an issue context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; // Validate context based on target configuration if (updateTarget === "triggering" && !isIssueContext) { - console.log('Target is "triggering" but not running in issue context, skipping issue update'); + console.log( + 'Target is "triggering" but not running in issue context, skipping issue update' + ); return; } const updatedIssues = []; @@ -1047,18 +1374,24 @@ jobs: if (updateItem.issue_number) { issueNumber = parseInt(updateItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${updateItem.issue_number}`); + console.log( + `Invalid issue number specified: ${updateItem.issue_number}` + ); continue; } } else { - console.log('Target is "*" but no issue_number specified in update item'); + console.log( + 'Target is "*" but no issue_number specified in update item' + ); continue; } } else if (updateTarget && updateTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(updateTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${updateTarget}`); + console.log( + `Invalid issue number in target configuration: ${updateTarget}` + ); continue; } } else { @@ -1067,16 +1400,16 @@ jobs: if (context.payload.issue) { issueNumber = context.payload.issue.number; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else { - console.log('Could not determine 
issue number'); + console.log("Could not determine issue number"); continue; } } if (!issueNumber) { - console.log('Could not determine issue number'); + console.log("Could not determine issue number"); continue; } console.log(`Updating issue #${issueNumber}`); @@ -1085,34 +1418,39 @@ jobs: let hasUpdates = false; if (canUpdateStatus && updateItem.status !== undefined) { // Validate status value - if (updateItem.status === 'open' || updateItem.status === 'closed') { + if (updateItem.status === "open" || updateItem.status === "closed") { updateData.state = updateItem.status; hasUpdates = true; console.log(`Will update status to: ${updateItem.status}`); } else { - console.log(`Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'`); + console.log( + `Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'` + ); } } if (canUpdateTitle && updateItem.title !== undefined) { - if (typeof updateItem.title === 'string' && updateItem.title.trim().length > 0) { + if ( + typeof updateItem.title === "string" && + updateItem.title.trim().length > 0 + ) { updateData.title = updateItem.title.trim(); hasUpdates = true; console.log(`Will update title to: ${updateItem.title.trim()}`); } else { - console.log('Invalid title value: must be a non-empty string'); + console.log("Invalid title value: must be a non-empty string"); } } if (canUpdateBody && updateItem.body !== undefined) { - if (typeof updateItem.body === 'string') { + if (typeof updateItem.body === "string") { updateData.body = updateItem.body; hasUpdates = true; console.log(`Will update body (length: ${updateItem.body.length})`); } else { - console.log('Invalid body value: must be a string'); + console.log("Invalid body value: must be a string"); } } if (!hasUpdates) { - console.log('No valid updates to apply for this item'); + console.log("No valid updates to apply for this item"); continue; } try { @@ -1121,23 +1459,26 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, 
issue_number: issueNumber, - ...updateData + ...updateData, }); - console.log('Updated issue #' + issue.number + ': ' + issue.html_url); + console.log("Updated issue #" + issue.number + ": " + issue.html_url); updatedIssues.push(issue); // Set output for the last updated issue (for backward compatibility) if (i === updateItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to update issue #${issueNumber}:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to update issue #${issueNumber}:`, + error instanceof Error ? error.message : String(error) + ); throw error; } } // Write summary for all updated issues if (updatedIssues.length > 0) { - let summaryContent = '\n\n## Updated Issues\n'; + let summaryContent = "\n\n## Updated Issues\n"; for (const issue of updatedIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/.github/workflows/test-proxy.lock.yml b/.github/workflows/test-proxy.lock.yml index 77899a4740..19f98af3a6 100644 --- a/.github/workflows/test-proxy.lock.yml +++ b/.github/workflows/test-proxy.lock.yml @@ -139,23 +139,23 @@ jobs: with: script: | function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // 
Verify the file was created and is writable if (!fs.existsSync(outputFile)) { throw new Error(`Failed to create output file: ${outputFile}`); } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } main(); - name: Setup Proxy Configuration for MCP Network Restrictions @@ -547,34 +547,37 @@ jobs: * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; // Neutralize @mentions to prevent unintended notifications sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); // Domain filtering for HTTPS URIs @@ -582,16 +585,20 @@ jobs: // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); // Trim excessive whitespace @@ -602,16 +609,22 @@ jobs: * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); } /** * Remove unknown protocols except https @@ -620,10 +633,13 @@ jobs: */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** * Neutralizes @mentions by wrapping them in backticks @@ -632,8 +648,10 @@ jobs: */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** * Neutralizes bot trigger phrases by wrapping them in backticks @@ -642,8 +660,10 @@ jobs: */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } /** @@ -654,65 +674,165 @@ jobs: */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case 
"create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + if (openBraces > closeBraces) { + repaired += "}".repeat(openBraces 
- closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } } } const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -721,25 +841,35 @@ jobs: // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -747,107 +877,247 @@ jobs: item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); continue; } } break; + case "create-pull-request-review-comment": + // Validate required path field + if 
(!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; default: errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); continue; @@ -860,7 +1130,7 @@ jobs: } // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items @@ -869,10 +1139,10 @@ jobs: // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, }; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", 
JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function await main(); @@ -931,24 +1201,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -956,16 +1226,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -973,26 +1243,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 
'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -1009,13 +1290,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + 
lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -1032,29 +1319,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text 
&& text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -1074,22 +1368,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? "❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -1097,31 +1391,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 
'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -1130,8 +1433,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -1146,11 +1452,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if 
(toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. return `${provider}::${method}`; } } @@ -1158,44 +1464,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const 
maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs @@ -1230,30 +1542,35 @@ jobs: // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } console.log(`Found ${commentItems.length} add-issue-comment item(s)`); @@ -1261,18 +1578,27 @@ jobs: const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } const createdComments = []; // Process each comment item for (let i = 0; i < commentItems.length; i++) { 
const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, { bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; let commentEndpoint; @@ -1281,79 +1607,90 @@ jobs: if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = 
"issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } // Extract body from the JSON item let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API const { data: comment } = await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } diff --git a/.github/workflows/weekly-research.lock.yml b/.github/workflows/weekly-research.lock.yml index a0ffb27b64..eb364b3d3c 100644 --- a/.github/workflows/weekly-research.lock.yml +++ b/.github/workflows/weekly-research.lock.yml @@ -302,24 +302,24 @@ jobs: with: script: | function main() { - const fs = require('fs'); + const fs = require("fs"); try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - const logContent = fs.readFileSync(logFile, 'utf8'); + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); // Append to GitHub step summary core.summary.addRaw(markdown).write(); } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -327,16 +327,16 @@ jobs: try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - let markdown = '## 🤖 Commands and Tools\n\n'; + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - 
if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } @@ -344,26 +344,37 @@ jobs: } // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? 
"❌" : "✅"; } // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -380,13 +391,19 @@ jobs: markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; + markdown += "\n## 📊 Information\n\n"; // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } @@ -403,29 +420,36 @@ jobs: const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache 
Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - markdown += '\n## 🤖 Reasoning\n\n'; + markdown += "\n## 🤖 Reasoning\n\n"; // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - markdown += text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -445,22 +469,22 @@ jobs: const toolName = toolUse.name; const input = toolUse.input || {}; // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? 
"❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - let markdown = ''; + let markdown = ""; const statusIcon = getStatusIcon(); switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; + case "Bash": + const command = input.command || ""; + const description = input.description || ""; // Format the command to be single line const formattedCommand = formatBashCommand(command); if (description) { @@ -468,31 +492,40 @@ jobs: } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = input.path || ""; + const 
lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -501,8 +534,11 @@ jobs: const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -517,11 +553,11 @@ jobs: } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if (toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. 
return `${provider}::${method}`; } } @@ -529,44 +565,50 @@ jobs: } function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; + if (keys.length === 0) return ""; const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - return paramStrs.join(', '); + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; + if (!command) return ""; // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); + formatted = formatted.replace(/`/g, "\\`"); // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + 
'...'; + return str.substring(0, maxLength) + "..."; } // Export for testing - if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); - name: Upload agent logs diff --git a/.prettierrc.json b/.prettierrc.json new file mode 100644 index 0000000000..8571b4a9c5 --- /dev/null +++ b/.prettierrc.json @@ -0,0 +1,10 @@ +{ + "parser": "typescript", + "semi": true, + "singleQuote": false, + "tabWidth": 2, + "trailingComma": "es5", + "printWidth": 80, + "bracketSpacing": true, + "arrowParens": "avoid" +} \ No newline at end of file diff --git a/Makefile b/Makefile index aaa6e02c4b..2c25e84e07 100644 --- a/Makefile +++ b/Makefile @@ -99,6 +99,11 @@ validate-workflows: fmt: go fmt ./... +# Format JavaScript (.cjs) files +.PHONY: fmt-cjs +fmt-cjs: + npm run format:cjs + # Run TypeScript compiler on JavaScript files .PHONY: js js: @@ -113,9 +118,19 @@ fmt-check: exit 1; \ fi +# Check JavaScript (.cjs) file formatting +.PHONY: fmt-check-cjs +fmt-check-cjs: + npm run lint:cjs + +# Lint JavaScript (.cjs) files +.PHONY: lint-cjs +lint-cjs: fmt-check-cjs + @echo "✓ JavaScript formatting validated" + # Validate all project files .PHONY: lint -lint: fmt-check golint +lint: fmt-check lint-cjs golint @echo "✓ All validations passed" # Install the binary locally @@ -205,7 +220,7 @@ copy-copilot-to-claude: # Agent should run this task before finishing its turns .PHONY: agent-finish -agent-finish: deps-dev fmt lint js build test-all recompile +agent-finish: deps-dev fmt fmt-cjs lint js build test-all recompile @echo "Agent finished tasks successfully." 
# Help target @@ -222,7 +237,10 @@ help: @echo " deps - Install dependencies" @echo " lint - Run linter" @echo " fmt - Format code" + @echo " fmt-cjs - Format JavaScript (.cjs) files" @echo " fmt-check - Check code formatting" + @echo " fmt-check-cjs - Check JavaScript (.cjs) file formatting" + @echo " lint-cjs - Lint JavaScript (.cjs) files" @echo " validate-workflows - Validate compiled workflow lock files" @echo " validate - Run all validations (fmt-check, lint, validate-workflows)" @echo " install - Install binary locally" diff --git a/docs/safe-outputs.md b/docs/safe-outputs.md index 1e130f525a..1d514971ef 100644 --- a/docs/safe-outputs.md +++ b/docs/safe-outputs.md @@ -27,10 +27,11 @@ For example: ```yaml safe-outputs: create-issue: + create-discussion: add-issue-comment: ``` -This declares that the workflow should create at most one new issue and add at most one comment to the triggering issue or pull request based on the agentic workflow's output. To create multiple issues or comments, use the `max` parameter. +This declares that the workflow should create at most one new issue, at most one new discussion, and add at most one comment to the triggering issue or pull request based on the agentic workflow's output. To create multiple issues, discussions, or comments, use the `max` parameter. ## Available Output Types @@ -66,6 +67,40 @@ Create new issues with your findings. For each issue, provide a title starting w The compiled workflow will have additional prompting describing that, to create issues, it should write the issue details to a file. +### New Discussion Creation (`create-discussion:`) + +Adding discussion creation to the `safe-outputs:` section declares that the workflow should conclude with the creation of GitHub discussions based on the workflow's output. 
+ +**Basic Configuration:** +```yaml +safe-outputs: + create-discussion: +``` + +**With Configuration:** +```yaml +safe-outputs: + create-discussion: + title-prefix: "[ai] " # Optional: prefix for discussion titles + category-id: "DIC_kwDOGFsHUM4BsUn3" # Optional: specific discussion category ID + max: 3 # Optional: maximum number of discussions (default: 1) +``` + +The agentic part of your workflow should describe the discussion(s) it wants created. + +**Example markdown to generate the output:** + +```yaml +# Research Discussion Agent + +Research the latest developments in AI and create discussions to share findings. +Create new discussions with your research findings. For each discussion, provide a title starting with "AI Research Update" and detailed summary of the findings. +``` + +The compiled workflow will have additional prompting describing that, to create discussions, it should write the discussion details to a file. + +**Note:** If no `category-id` is specified, the workflow will use the first available discussion category in the repository. + ### Issue Comment Creation (`add-issue-comment:`) Adding comment creation to the `safe-outputs:` section declares that the workflow should conclude with posting comments based on the workflow's output. By default, comments are posted on the triggering issue or pull request, but this can be configured using the `target` option. @@ -135,6 +170,54 @@ Analyze the latest commit and suggest improvements. 2. Create a pull request for your improvements, with a descriptive title and detailed description of the changes made ``` +### Pull Request Review Comment Creation (`create-pull-request-review-comment:`) + +Adding `create-pull-request-review-comment:` to the `safe-outputs:` section declares that the workflow should conclude with creating review comments on specific lines of code in the current pull request based on the workflow's output. 
+ +**Basic Configuration:** +```yaml +safe-outputs: + create-pull-request-review-comment: +``` + +**With Configuration:** +```yaml +safe-outputs: + create-pull-request-review-comment: + max: 3 # Optional: maximum number of review comments (default: 1) + side: "RIGHT" # Optional: side of the diff ("LEFT" or "RIGHT", default: "RIGHT") +``` + +The agentic part of your workflow should describe the review comment(s) it wants created with specific file paths and line numbers. + +**Example natural language to generate the output:** + +```markdown +# Code Review Agent + +Analyze the pull request changes and provide line-specific feedback. +Create review comments on the pull request with your analysis findings. For each comment, specify: +- The file path +- The line number (required) +- The start line number (optional, for multi-line comments) +- The comment body with specific feedback + +Review comments can target single lines or ranges of lines in the diff. +``` + +The compiled workflow will have additional prompting describing that, to create review comments, it should write the comment details to a special file with the following structure: +- `path`: The file path relative to the repository root +- `line`: The line number where the comment should be placed +- `start_line`: (Optional) The starting line number for multi-line comments +- `side`: (Optional) The side of the diff ("LEFT" for old version, "RIGHT" for new version) +- `body`: The comment content + +**Key Features:** +- Only works in pull request contexts for security +- Supports both single-line and multi-line code comments +- Comments are automatically positioned on the correct side of the diff +- Maximum comment limits prevent spam + ### Label Addition (`add-issue-label:`) Adding `add-issue-label:` to the `safe-outputs:` section of your workflow declares that the workflow should conclude with adding labels to the current issue or pull request based on the coding agent's analysis. 
diff --git a/package-lock.json b/package-lock.json index 1d42712421..822941ba4b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,5 +1,5 @@ { - "name": "gh-aw", + "name": "gh-aw-copilots", "lockfileVersion": 3, "requires": true, "packages": { @@ -16,6 +16,7 @@ "@types/node": "^24.3.0", "@vitest/coverage-v8": "^3.2.4", "@vitest/ui": "^3.2.4", + "prettier": "^3.4.2", "typescript": "^5.9.2", "vitest": "^3.2.4" } @@ -1999,6 +2000,22 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, "node_modules/rollup": { "version": "4.50.0", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.50.0.tgz", diff --git a/package.json b/package.json index 88cd3920fb..c688fbff32 100644 --- a/package.json +++ b/package.json @@ -8,6 +8,7 @@ "@types/node": "^24.3.0", "@vitest/coverage-v8": "^3.2.4", "@vitest/ui": "^3.2.4", + "prettier": "^3.4.2", "typescript": "^5.9.2", "vitest": "^3.2.4" }, @@ -16,7 +17,9 @@ "test": "npm run typecheck && vitest run", "test:js": "vitest run", "test:js-watch": "vitest", - "test:js-coverage": "vitest run --coverage" + "test:js-coverage": "vitest run --coverage", + "format:cjs": "prettier --write 'pkg/workflow/js/**/*.cjs'", + "lint:cjs": "prettier --check 'pkg/workflow/js/**/*.cjs'" }, "dependencies": { "vite": "^7.1.4" diff --git a/pkg/cli/templates/instructions.md b/pkg/cli/templates/instructions.md index 10605d6582..8774d40313 100644 --- a/pkg/cli/templates/instructions.md +++ b/pkg/cli/templates/instructions.md @@ -114,6 +114,14 @@ The YAML frontmatter supports these fields: draft: true # Optional: create as draft PR 
(defaults to true) ``` When using `output.create-pull-request`, the main job does **not** need `contents: write` or `pull-requests: write` permissions since PR creation is handled by a separate job with appropriate permissions. + - `create-pull-request-review-comment:` - Safe PR review comment creation on code lines + ```yaml + safe-outputs: + create-pull-request-review-comment: + max: 3 # Optional: maximum number of review comments (default: 1) + side: "RIGHT" # Optional: side of diff ("LEFT" or "RIGHT", default: "RIGHT") + ``` + When using `safe-outputs.create-pull-request-review-comment`, the main job does **not** need `pull-requests: write` permission since review comment creation is handled by a separate job with appropriate permissions. - `update-issue:` - Safe issue updates ```yaml safe-outputs: diff --git a/pkg/parser/json_path_locator.go b/pkg/parser/json_path_locator.go new file mode 100644 index 0000000000..9b8c29f0d0 --- /dev/null +++ b/pkg/parser/json_path_locator.go @@ -0,0 +1,189 @@ +package parser + +import ( + "regexp" + "strconv" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6" +) + +// JSONPathLocation represents a location in YAML source corresponding to a JSON path +type JSONPathLocation struct { + Line int + Column int + Found bool +} + +// ExtractJSONPathFromValidationError extracts JSON path information from jsonschema validation errors +func ExtractJSONPathFromValidationError(err error) []JSONPathInfo { + var paths []JSONPathInfo + + if validationError, ok := err.(*jsonschema.ValidationError); ok { + // Process each cause (individual validation error) + for _, cause := range validationError.Causes { + path := JSONPathInfo{ + Path: convertInstanceLocationToJSONPath(cause.InstanceLocation), + Message: cause.Error(), + Location: cause.InstanceLocation, + } + paths = append(paths, path) + } + } + + return paths +} + +// JSONPathInfo holds information about a validation error and its path +type JSONPathInfo struct { + Path string // 
JSON path like "/tools/1" or "/age" + Message string // Error message + Location []string // Instance location from jsonschema (e.g., ["tools", "1"]) +} + +// convertInstanceLocationToJSONPath converts jsonschema InstanceLocation to JSON path string +func convertInstanceLocationToJSONPath(location []string) string { + if len(location) == 0 { + return "" + } + + var parts []string + for _, part := range location { + parts = append(parts, "/"+part) + } + return strings.Join(parts, "") +} + +// LocateJSONPathInYAML finds the line/column position of a JSON path in YAML source +func LocateJSONPathInYAML(yamlContent string, jsonPath string) JSONPathLocation { + if jsonPath == "" { + // Root level error - return start of content + return JSONPathLocation{Line: 1, Column: 1, Found: true} + } + + // Parse the path segments + pathSegments := parseJSONPath(jsonPath) + if len(pathSegments) == 0 { + return JSONPathLocation{Line: 1, Column: 1, Found: true} + } + + // For now, use a simple line-by-line approach to find the path + // This is less precise than using the YAML parser's position info, + // but will work as a starting point + location := findPathInYAMLLines(yamlContent, pathSegments) + return location +} + +// findPathInYAMLLines finds a JSON path in YAML content using line-by-line analysis +func findPathInYAMLLines(yamlContent string, pathSegments []PathSegment) JSONPathLocation { + lines := strings.Split(yamlContent, "\n") + + // Start from the beginning + currentLevel := 0 + arrayContexts := make(map[int]int) // level -> current array index + + for lineNum, line := range lines { + lineNumber := lineNum + 1 // 1-based line numbers + trimmedLine := strings.TrimSpace(line) + + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "#") { + continue + } + + // Calculate indentation level + lineLevel := (len(line) - len(strings.TrimLeft(line, " \t"))) / 2 + + // Check if this line matches our path + matches, column := matchesPathAtLevel(line, pathSegments, lineLevel, 
arrayContexts) + if matches { + return JSONPathLocation{Line: lineNumber, Column: column, Found: true} + } + + // Update array contexts for list items + if strings.HasPrefix(trimmedLine, "-") { + arrayContexts[lineLevel]++ + } else if lineLevel <= currentLevel { + // Reset array contexts for deeper levels when we move to a shallower level + for level := lineLevel + 1; level <= currentLevel; level++ { + delete(arrayContexts, level) + } + } + + currentLevel = lineLevel + } + + return JSONPathLocation{Line: 1, Column: 1, Found: false} +} + +// matchesPathAtLevel checks if a line matches the target path at the current level +func matchesPathAtLevel(line string, pathSegments []PathSegment, level int, arrayContexts map[int]int) (bool, int) { + if len(pathSegments) == 0 { + return false, 0 + } + + trimmedLine := strings.TrimSpace(line) + + // For now, implement a simple key matching approach + // This is a simplified version - in a full implementation we'd need to track + // the complete path context as we traverse the YAML + + if level < len(pathSegments) { + segment := pathSegments[level] + + if segment.Type == "key" { + // Look for "key:" pattern + keyPattern := regexp.MustCompile(`^` + regexp.QuoteMeta(segment.Value) + `\s*:`) + if keyPattern.MatchString(trimmedLine) { + // Found the key - return position after the colon + colonIndex := strings.Index(line, ":") + if colonIndex != -1 { + return level == len(pathSegments)-1, colonIndex + 2 + } + } + } else if segment.Type == "index" { + // For array elements, check if this is a list item at the right index + if strings.HasPrefix(trimmedLine, "-") { + currentIndex := arrayContexts[level] + if currentIndex == segment.Index { + return level == len(pathSegments)-1, strings.Index(line, "-") + 2 + } + } + } + } + + return false, 0 +} + +// parseJSONPath parses a JSON path string into segments +func parseJSONPath(path string) []PathSegment { + if path == "" || path == "/" { + return []PathSegment{} + } + + // Remove leading 
slash and split by slash + path = strings.TrimPrefix(path, "/") + parts := strings.Split(path, "/") + + var segments []PathSegment + for _, part := range parts { + if part == "" { + continue + } + + // Check if this is an array index + if index, err := strconv.Atoi(part); err == nil { + segments = append(segments, PathSegment{Type: "index", Value: part, Index: index}) + } else { + segments = append(segments, PathSegment{Type: "key", Value: part}) + } + } + + return segments +} + +// PathSegment represents a segment in a JSON path +type PathSegment struct { + Type string // "key" or "index" + Value string // The raw value + Index int // Parsed index for array elements +} diff --git a/pkg/parser/json_path_locator_test.go b/pkg/parser/json_path_locator_test.go new file mode 100644 index 0000000000..8a14e20ce1 --- /dev/null +++ b/pkg/parser/json_path_locator_test.go @@ -0,0 +1,249 @@ +package parser + +import ( + "encoding/json" + "testing" + + "github.com/santhosh-tekuri/jsonschema/v6" +) + +func TestLocateJSONPathInYAML(t *testing.T) { + yamlContent := `name: John Doe +age: 30 +tools: + - name: tool1 + version: "1.0" + - name: tool2 + description: "second tool" +permissions: + read: true + write: false` + + tests := []struct { + name string + jsonPath string + expectLine int + expectCol int + shouldFind bool + }{ + { + name: "root level", + jsonPath: "", + expectLine: 1, + expectCol: 1, + shouldFind: true, + }, + { + name: "simple key", + jsonPath: "/name", + expectLine: 1, + expectCol: 6, // After "name:" + shouldFind: true, + }, + { + name: "simple key - age", + jsonPath: "/age", + expectLine: 2, + expectCol: 5, // After "age:" + shouldFind: true, + }, + { + name: "array element", + jsonPath: "/tools/0", + expectLine: 4, + expectCol: 4, // Start of first array element + shouldFind: true, + }, + { + name: "nested in array element", + jsonPath: "/tools/1", + expectLine: 6, + expectCol: 4, // Start of second array element + shouldFind: true, + }, + { + name: "nested 
object key", + jsonPath: "/permissions/read", + expectLine: 9, + expectCol: 8, // After "read: " + shouldFind: true, + }, + { + name: "invalid path", + jsonPath: "/nonexistent", + expectLine: 1, + expectCol: 1, + shouldFind: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := LocateJSONPathInYAML(yamlContent, tt.jsonPath) + + if location.Found != tt.shouldFind { + t.Errorf("Expected Found=%v, got Found=%v", tt.shouldFind, location.Found) + } + + if location.Line != tt.expectLine { + t.Errorf("Expected Line=%d, got Line=%d", tt.expectLine, location.Line) + } + + if location.Column != tt.expectCol { + t.Errorf("Expected Column=%d, got Column=%d", tt.expectCol, location.Column) + } + }) + } +} + +func TestExtractJSONPathFromValidationError(t *testing.T) { + // Create a schema with validation errors + schemaJSON := `{ + "type": "object", + "properties": { + "name": {"type": "string"}, + "age": {"type": "number"}, + "tools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"} + }, + "required": ["name"] + } + } + }, + "additionalProperties": false + }` + + // Create invalid data + invalidData := map[string]any{ + "name": "John", + "age": "not-a-number", // Should be number + "invalid_key": "value", // Additional property not allowed + "tools": []any{ + map[string]any{ + "name": "tool1", + }, + map[string]any{ + // Missing required "name" field + "description": "tool without name", + }, + }, + } + + // Compile schema and validate + compiler := jsonschema.NewCompiler() + var schemaDoc any + json.Unmarshal([]byte(schemaJSON), &schemaDoc) + + schemaURL := "http://example.com/schema.json" + compiler.AddResource(schemaURL, schemaDoc) + schema, err := compiler.Compile(schemaURL) + if err != nil { + t.Fatalf("Schema compilation error: %v", err) + } + + err = schema.Validate(invalidData) + if err == nil { + t.Fatal("Expected validation error, got nil") + } + + // Extract JSON path 
information + paths := ExtractJSONPathFromValidationError(err) + + if len(paths) != 3 { + t.Errorf("Expected 3 validation errors, got %d", len(paths)) + } + + // Check that we have the expected paths + expectedPaths := map[string]bool{ + "/tools/1": false, + "/age": false, + "": false, // Root level for additional properties + } + + for _, pathInfo := range paths { + if _, exists := expectedPaths[pathInfo.Path]; exists { + expectedPaths[pathInfo.Path] = true + t.Logf("Found expected path: %s with message: %s", pathInfo.Path, pathInfo.Message) + } else { + t.Errorf("Unexpected path: %s", pathInfo.Path) + } + } + + // Verify all expected paths were found + for path, found := range expectedPaths { + if !found { + t.Errorf("Expected path not found: %s", path) + } + } +} + +func TestParseJSONPath(t *testing.T) { + tests := []struct { + name string + path string + expected []PathSegment + }{ + { + name: "empty path", + path: "", + expected: []PathSegment{}, + }, + { + name: "root path", + path: "/", + expected: []PathSegment{}, + }, + { + name: "simple key", + path: "/name", + expected: []PathSegment{ + {Type: "key", Value: "name"}, + }, + }, + { + name: "array index", + path: "/tools/0", + expected: []PathSegment{ + {Type: "key", Value: "tools"}, + {Type: "index", Value: "0", Index: 0}, + }, + }, + { + name: "complex path", + path: "/tools/1/permissions/read", + expected: []PathSegment{ + {Type: "key", Value: "tools"}, + {Type: "index", Value: "1", Index: 1}, + {Type: "key", Value: "permissions"}, + {Type: "key", Value: "read"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseJSONPath(tt.path) + + if len(result) != len(tt.expected) { + t.Errorf("Expected %d segments, got %d", len(tt.expected), len(result)) + return + } + + for i, expected := range tt.expected { + if result[i].Type != expected.Type { + t.Errorf("Segment %d: expected Type=%s, got Type=%s", i, expected.Type, result[i].Type) + } + if result[i].Value != 
expected.Value { + t.Errorf("Segment %d: expected Value=%s, got Value=%s", i, expected.Value, result[i].Value) + } + if expected.Type == "index" && result[i].Index != expected.Index { + t.Errorf("Segment %d: expected Index=%d, got Index=%d", i, expected.Index, result[i].Index) + } + } + }) + } +} diff --git a/pkg/parser/schema.go b/pkg/parser/schema.go index 13e09affe8..d10fe5e20e 100644 --- a/pkg/parser/schema.go +++ b/pkg/parser/schema.go @@ -116,7 +116,7 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte return nil } - // If there's an error, try to format it with location information + // If there's an error, try to format it with precise location information errorMsg := err.Error() // Check if this is a jsonschema validation error before cleaning @@ -129,6 +129,9 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte // Try to read the actual file content for better context var contextLines []string + var frontmatterContent string + var frontmatterStart = 2 // Default: frontmatter starts at line 2 + if filePath != "" { if content, readErr := os.ReadFile(filePath); readErr == nil { lines := strings.Split(string(content), "\n") @@ -142,6 +145,11 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte break } } + // Extract frontmatter content for path resolution + frontmatterLines := lines[1:endIdx] + frontmatterContent = strings.Join(frontmatterLines, "\n") + frontmatterStart = 2 // Frontmatter content starts at line 2 + // Use the frontmatter lines as context (first few lines) maxLines := min(5, endIdx) for i := 0; i < maxLines; i++ { @@ -158,13 +166,45 @@ func validateWithSchemaAndLocation(frontmatter map[string]any, schemaJSON, conte contextLines = []string{"---", "# (frontmatter validation failed)", "---"} } - // Try to extract useful information from the error + // Try to extract precise location information from the error if isJSONSchemaError { - // Create a compiler 
error with location information + // Extract JSON path information from the validation error + jsonPaths := ExtractJSONPathFromValidationError(err) + + // If we have paths and frontmatter content, try to get precise locations + if len(jsonPaths) > 0 && frontmatterContent != "" { + // Use the first error path for the primary error location + primaryPath := jsonPaths[0] + location := LocateJSONPathInYAML(frontmatterContent, primaryPath.Path) + + if location.Found { + // Adjust line number to account for frontmatter position in file + adjustedLine := location.Line + frontmatterStart - 1 + + // Create a compiler error with precise location information + compilerErr := console.CompilerError{ + Position: console.ErrorPosition{ + File: filePath, + Line: adjustedLine, + Column: location.Column, + }, + Type: "error", + Message: primaryPath.Message, + Context: contextLines, + Hint: "Check the YAML frontmatter against the schema requirements", + } + + // Format and return the error + formattedErr := console.FormatError(compilerErr) + return errors.New(formattedErr) + } + } + + // Fallback: Create a compiler error with basic location information compilerErr := console.CompilerError{ Position: console.ErrorPosition{ File: filePath, - Line: 1, + Line: frontmatterStart, Column: 1, }, Type: "error", diff --git a/pkg/parser/schema_location_integration_test.go b/pkg/parser/schema_location_integration_test.go new file mode 100644 index 0000000000..e96b4c2b0f --- /dev/null +++ b/pkg/parser/schema_location_integration_test.go @@ -0,0 +1,147 @@ +package parser + +import ( + "os" + "strings" + "testing" +) + +func TestValidateWithSchemaAndLocation_PreciseLocation(t *testing.T) { + // Create a test file with invalid frontmatter + testContent := `--- +on: push +permissions: read +age: "not-a-number" +invalid_property: value +tools: + - name: tool1 + - description: missing name +timeout_minutes: 30 +--- + +# Test workflow content` + + tempFile := "/tmp/test_precise_location.md" + err := 
os.WriteFile(tempFile, []byte(testContent), 0644) + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tempFile) + + // Create frontmatter that will trigger validation errors + frontmatter := map[string]any{ + "on": "push", + "permissions": "read", + "age": "not-a-number", // Should trigger error if age field exists in schema + "invalid_property": "value", // Should trigger additional properties error + "tools": []any{ + map[string]any{"name": "tool1"}, + map[string]any{"description": "missing name"}, // Should trigger missing name error + }, + "timeout_minutes": 30, + } + + // Test with main workflow schema + err = ValidateMainWorkflowFrontmatterWithSchemaAndLocation(frontmatter, tempFile) + + // We expect a validation error + if err == nil { + t.Log("No validation error - this might be expected if the schema doesn't validate these fields") + return + } + + errorMsg := err.Error() + t.Logf("Error message: %s", errorMsg) + + // Check that the error contains file path information + if !strings.Contains(errorMsg, tempFile) { + t.Errorf("Error message should contain file path, got: %s", errorMsg) + } + + // Check that the error contains line/column information in VS Code parseable format + // Should have format like "file.md:line:column: error: message" + if !strings.Contains(errorMsg, ":") { + t.Errorf("Error message should contain line:column information, got: %s", errorMsg) + } + + // The error should not contain raw jsonschema prefixes + if strings.Contains(errorMsg, "jsonschema validation failed") { + t.Errorf("Error message should not contain raw jsonschema prefix, got: %s", errorMsg) + } + + // Should contain cleaned error information + lines := strings.Split(errorMsg, "\n") + if len(lines) < 2 { + t.Errorf("Error message should be multi-line with context, got: %s", errorMsg) + } +} + +func TestLocateJSONPathInYAML_RealExample(t *testing.T) { + // Test with a real frontmatter example + yamlContent := `on: push +permissions: 
read +engine: claude +tools: + - name: github + description: GitHub tool + - name: filesystem + description: File operations +timeout_minutes: 30` + + tests := []struct { + name string + jsonPath string + wantLine int + wantCol int + }{ + { + name: "root permission", + jsonPath: "/permissions", + wantLine: 2, + wantCol: 14, // After "permissions: " + }, + { + name: "engine field", + jsonPath: "/engine", + wantLine: 3, + wantCol: 9, // After "engine: " + }, + { + name: "first tool", + jsonPath: "/tools/0", + wantLine: 5, + wantCol: 4, // At "- name: github" + }, + { + name: "second tool", + jsonPath: "/tools/1", + wantLine: 7, + wantCol: 4, // At "- name: filesystem" + }, + { + name: "timeout", + jsonPath: "/timeout_minutes", + wantLine: 9, + wantCol: 18, // After "timeout_minutes: " + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + location := LocateJSONPathInYAML(yamlContent, tt.jsonPath) + + if !location.Found { + t.Errorf("Path %s should be found", tt.jsonPath) + } + + // For this test, we mainly care that we get reasonable line numbers + // The exact column positions might vary based on implementation + if location.Line != tt.wantLine { + t.Errorf("Path %s: expected line %d, got line %d", tt.jsonPath, tt.wantLine, location.Line) + } + + // Log the actual results for reference + t.Logf("Path %s: Line=%d, Column=%d", tt.jsonPath, location.Line, location.Column) + }) + } +} diff --git a/pkg/parser/schema_location_test.go b/pkg/parser/schema_location_test.go index d6db6194bb..b26c08d8ca 100644 --- a/pkg/parser/schema_location_test.go +++ b/pkg/parser/schema_location_test.go @@ -49,7 +49,7 @@ func TestValidateWithSchemaAndLocation(t *testing.T) { filePath: "/test/file.md", wantErr: true, errContains: []string{ - "/test/file.md:1:1:", + "/test/file.md:2:1:", "additional properties 'invalid' not allowed", "hint:", }, @@ -173,7 +173,7 @@ func TestValidateMainWorkflowFrontmatterWithSchemaAndLocation(t *testing.T) { }, filePath: 
"/test/workflow.md", wantErr: true, - errContains: "/test/workflow.md:1:1:", + errContains: "/test/workflow.md:2:1:", }, } @@ -219,7 +219,7 @@ func TestValidateMainWorkflowFrontmatterWithSchemaAndLocation_AdditionalProperti }, filePath: "/test/workflow.md", wantErr: true, - errContains: "/test/workflow.md:1:1:", + errContains: "/test/workflow.md:2:1:", }, { name: "invalid trigger with additional property shows location", @@ -233,7 +233,7 @@ func TestValidateMainWorkflowFrontmatterWithSchemaAndLocation_AdditionalProperti }, filePath: "/test/workflow.md", wantErr: true, - errContains: "/test/workflow.md:1:1:", + errContains: "/test/workflow.md:2:1:", }, { name: "invalid tools configuration with additional property shows location", @@ -247,7 +247,7 @@ func TestValidateMainWorkflowFrontmatterWithSchemaAndLocation_AdditionalProperti }, filePath: "/test/workflow.md", wantErr: true, - errContains: "/test/workflow.md:1:1:", + errContains: "/test/workflow.md:2:1:", }, } diff --git a/pkg/parser/schemas/main_workflow_schema.json b/pkg/parser/schemas/main_workflow_schema.json index b873cd571a..7a3dc06072 100644 --- a/pkg/parser/schemas/main_workflow_schema.json +++ b/pkg/parser/schemas/main_workflow_schema.json @@ -1017,6 +1017,35 @@ } ] }, + "create-discussion": { + "oneOf": [ + { + "type": "object", + "description": "Configuration for creating GitHub discussions from agentic workflow output", + "properties": { + "title-prefix": { + "type": "string", + "description": "Optional prefix for the discussion title" + }, + "category-id": { + "type": "string", + "description": "Optional discussion category ID. 
If not specified, uses the first available category" + }, + "max": { + "type": "integer", + "description": "Maximum number of discussions to create (default: 1)", + "minimum": 1, + "maximum": 100 + } + }, + "additionalProperties": false + }, + { + "type": "null", + "description": "Enable discussion creation with default configuration" + } + ] + }, "add-issue-comment": { "oneOf": [ { @@ -1072,6 +1101,32 @@ } ] }, + "create-pull-request-review-comment": { + "oneOf": [ + { + "type": "object", + "description": "Configuration for creating GitHub pull request review comments from agentic workflow output", + "properties": { + "max": { + "type": "integer", + "description": "Maximum number of review comments to create (default: 1)", + "minimum": 1, + "maximum": 100 + }, + "side": { + "type": "string", + "description": "Side of the diff for comments: 'LEFT' or 'RIGHT' (default: 'RIGHT')", + "enum": ["LEFT", "RIGHT"] + } + }, + "additionalProperties": false + }, + { + "type": "null", + "description": "Enable PR review comment creation with default configuration" + } + ] + }, "add-issue-label": { "oneOf": [ { diff --git a/pkg/workflow/codex_engine.go b/pkg/workflow/codex_engine.go index 9749d0b206..2e6b4d0190 100644 --- a/pkg/workflow/codex_engine.go +++ b/pkg/workflow/codex_engine.go @@ -53,13 +53,14 @@ func (e *CodexEngine) GetExecutionConfig(workflowName string, logFile string, en model = engineConfig.Model } - command := fmt.Sprintf(`INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) + command := fmt.Sprintf(`set -o pipefail +INSTRUCTION=$(cat /tmp/aw-prompts/prompt.txt) export CODEX_HOME=/tmp/mcp-config # Create log directory outside git repo mkdir -p /tmp/aw-logs -# Run codex with log capture +# Run codex with log capture - pipefail ensures codex exit code is preserved codex exec \ -c model=%s \ --full-auto "$INSTRUCTION" 2>&1 | tee %s`, model, logFile) diff --git a/pkg/workflow/codex_engine_test.go b/pkg/workflow/codex_engine_test.go index 6ca93fefc9..3be17cb619 100644 --- 
a/pkg/workflow/codex_engine_test.go +++ b/pkg/workflow/codex_engine_test.go @@ -64,6 +64,11 @@ func TestCodexEngine(t *testing.T) { t.Errorf("Expected command to contain log file name, got '%s'", config.Command) } + // Check that pipefail is enabled to preserve exit codes + if !strings.Contains(config.Command, "set -o pipefail") { + t.Errorf("Expected command to contain 'set -o pipefail' to preserve exit codes, got '%s'", config.Command) + } + // Check environment variables if config.Environment["OPENAI_API_KEY"] != "${{ secrets.OPENAI_API_KEY }}" { t.Errorf("Expected OPENAI_API_KEY environment variable, got '%s'", config.Environment["OPENAI_API_KEY"]) diff --git a/pkg/workflow/compiler.go b/pkg/workflow/compiler.go index 57216d4889..42d4168219 100644 --- a/pkg/workflow/compiler.go +++ b/pkg/workflow/compiler.go @@ -144,13 +144,15 @@ type WorkflowData struct { // SafeOutputsConfig holds configuration for automatic output routes type SafeOutputsConfig struct { - CreateIssues *CreateIssuesConfig `yaml:"create-issue,omitempty"` - AddIssueComments *AddIssueCommentsConfig `yaml:"add-issue-comment,omitempty"` - CreatePullRequests *CreatePullRequestsConfig `yaml:"create-pull-request,omitempty"` - AddIssueLabels *AddIssueLabelsConfig `yaml:"add-issue-label,omitempty"` - UpdateIssues *UpdateIssuesConfig `yaml:"update-issue,omitempty"` - PushToBranch *PushToBranchConfig `yaml:"push-to-branch,omitempty"` - AllowedDomains []string `yaml:"allowed-domains,omitempty"` + CreateIssues *CreateIssuesConfig `yaml:"create-issue,omitempty"` + CreateDiscussions *CreateDiscussionsConfig `yaml:"create-discussion,omitempty"` + AddIssueComments *AddIssueCommentsConfig `yaml:"add-issue-comment,omitempty"` + CreatePullRequests *CreatePullRequestsConfig `yaml:"create-pull-request,omitempty"` + CreatePullRequestReviewComments *CreatePullRequestReviewCommentsConfig `yaml:"create-pull-request-review-comment,omitempty"` + AddIssueLabels *AddIssueLabelsConfig `yaml:"add-issue-label,omitempty"` + 
UpdateIssues *UpdateIssuesConfig `yaml:"update-issue,omitempty"` + PushToBranch *PushToBranchConfig `yaml:"push-to-branch,omitempty"` + AllowedDomains []string `yaml:"allowed-domains,omitempty"` } // CreateIssuesConfig holds configuration for creating GitHub issues from agent output @@ -160,6 +162,13 @@ type CreateIssuesConfig struct { Max int `yaml:"max,omitempty"` // Maximum number of issues to create } +// CreateDiscussionsConfig holds configuration for creating GitHub discussions from agent output +type CreateDiscussionsConfig struct { + TitlePrefix string `yaml:"title-prefix,omitempty"` + CategoryId string `yaml:"category-id,omitempty"` // Discussion category ID + Max int `yaml:"max,omitempty"` // Maximum number of discussions to create +} + // AddIssueCommentConfig holds configuration for creating GitHub issue/PR comments from agent output (deprecated, use AddIssueCommentsConfig) type AddIssueCommentConfig struct { // Empty struct for now, as per requirements, but structured for future expansion @@ -179,6 +188,12 @@ type CreatePullRequestsConfig struct { Max int `yaml:"max,omitempty"` // Maximum number of pull requests to create } +// CreatePullRequestReviewCommentsConfig holds configuration for creating GitHub pull request review comments from agent output +type CreatePullRequestReviewCommentsConfig struct { + Max int `yaml:"max,omitempty"` // Maximum number of review comments to create (default: 1) + Side string `yaml:"side,omitempty"` // Side of the diff: "LEFT" or "RIGHT" (default: "RIGHT") +} + // AddIssueLabelsConfig holds configuration for adding labels to issues/PRs from agent output type AddIssueLabelsConfig struct { Allowed []string `yaml:"allowed,omitempty"` // Optional list of allowed labels. If omitted, any labels are allowed (including creating new ones). 
@@ -1708,6 +1723,17 @@ func (c *Compiler) buildJobs(data *WorkflowData) error { } } + // Build create_discussion job if output.create_discussion is configured + if data.SafeOutputs.CreateDiscussions != nil { + createDiscussionJob, err := c.buildCreateOutputDiscussionJob(data, jobName) + if err != nil { + return fmt.Errorf("failed to build create_discussion job: %w", err) + } + if err := c.jobManager.AddJob(createDiscussionJob); err != nil { + return fmt.Errorf("failed to add create_discussion job: %w", err) + } + } + // Build create_issue_comment job if output.add-issue-comment is configured if data.SafeOutputs.AddIssueComments != nil { createCommentJob, err := c.buildCreateOutputAddIssueCommentJob(data, jobName) @@ -1719,6 +1745,17 @@ func (c *Compiler) buildJobs(data *WorkflowData) error { } } + // Build create_pr_review_comment job if output.create-pull-request-review-comment is configured + if data.SafeOutputs.CreatePullRequestReviewComments != nil { + createPRReviewCommentJob, err := c.buildCreateOutputPullRequestReviewCommentJob(data, jobName) + if err != nil { + return fmt.Errorf("failed to build create_pr_review_comment job: %w", err) + } + if err := c.jobManager.AddJob(createPRReviewCommentJob); err != nil { + return fmt.Errorf("failed to add create_pr_review_comment job: %w", err) + } + } + // Build create_pull_request job if output.create-pull-request is configured if data.SafeOutputs.CreatePullRequests != nil { createPullRequestJob, err := c.buildCreateOutputPullRequestJob(data, jobName) @@ -1953,6 +1990,65 @@ func (c *Compiler) buildCreateOutputIssueJob(data *WorkflowData, mainJobName str return job, nil } +// buildCreateOutputDiscussionJob creates the create_discussion job +func (c *Compiler) buildCreateOutputDiscussionJob(data *WorkflowData, mainJobName string) (*Job, error) { + if data.SafeOutputs == nil || data.SafeOutputs.CreateDiscussions == nil { + return nil, fmt.Errorf("safe-outputs.create-discussion configuration is required") + } + + var 
steps []string + steps = append(steps, " - name: Create Output Discussion\n") + steps = append(steps, " id: create_discussion\n") + steps = append(steps, " uses: actions/github-script@v7\n") + + // Add environment variables + steps = append(steps, " env:\n") + // Pass the agent output content from the main job + steps = append(steps, fmt.Sprintf(" GITHUB_AW_AGENT_OUTPUT: ${{ needs.%s.outputs.output }}\n", mainJobName)) + if data.SafeOutputs.CreateDiscussions.TitlePrefix != "" { + steps = append(steps, fmt.Sprintf(" GITHUB_AW_DISCUSSION_TITLE_PREFIX: %q\n", data.SafeOutputs.CreateDiscussions.TitlePrefix)) + } + if data.SafeOutputs.CreateDiscussions.CategoryId != "" { + steps = append(steps, fmt.Sprintf(" GITHUB_AW_DISCUSSION_CATEGORY_ID: %q\n", data.SafeOutputs.CreateDiscussions.CategoryId)) + } + + steps = append(steps, " with:\n") + steps = append(steps, " script: |\n") + + // Add each line of the script with proper indentation + formattedScript := FormatJavaScriptForYAML(createDiscussionScript) + steps = append(steps, formattedScript...) 
+ + outputs := map[string]string{ + "discussion_number": "${{ steps.create_discussion.outputs.discussion_number }}", + "discussion_url": "${{ steps.create_discussion.outputs.discussion_url }}", + } + + // Determine the job condition based on command configuration + var jobCondition string + if data.Command != "" { + // Build the command trigger condition + commandCondition := buildCommandOnlyCondition(data.Command) + commandConditionStr := commandCondition.Render() + jobCondition = fmt.Sprintf("if: %s", commandConditionStr) + } else { + jobCondition = "" // No conditional execution + } + + job := &Job{ + Name: "create_discussion", + If: jobCondition, + RunsOn: "runs-on: ubuntu-latest", + Permissions: "permissions:\n contents: read\n discussions: write", + TimeoutMinutes: 10, // 10-minute timeout as required + Steps: steps, + Outputs: outputs, + Depends: []string{mainJobName}, // Depend on the main workflow job + } + + return job, nil +} + // buildCreateOutputAddIssueCommentJob creates the create_issue_comment job func (c *Compiler) buildCreateOutputAddIssueCommentJob(data *WorkflowData, mainJobName string) (*Job, error) { if data.SafeOutputs == nil || data.SafeOutputs.AddIssueComments == nil { @@ -2030,6 +2126,70 @@ func (c *Compiler) buildCreateOutputAddIssueCommentJob(data *WorkflowData, mainJ return job, nil } +// buildCreateOutputPullRequestReviewCommentJob creates the create_pr_review_comment job +func (c *Compiler) buildCreateOutputPullRequestReviewCommentJob(data *WorkflowData, mainJobName string) (*Job, error) { + if data.SafeOutputs == nil || data.SafeOutputs.CreatePullRequestReviewComments == nil { + return nil, fmt.Errorf("safe-outputs.create-pull-request-review-comment configuration is required") + } + + var steps []string + steps = append(steps, " - name: Create PR Review Comment\n") + steps = append(steps, " id: create_pr_review_comment\n") + steps = append(steps, " uses: actions/github-script@v7\n") + + // Add environment variables + steps = 
append(steps, " env:\n") + // Pass the agent output content from the main job + steps = append(steps, fmt.Sprintf(" GITHUB_AW_AGENT_OUTPUT: ${{ needs.%s.outputs.output }}\n", mainJobName)) + // Pass the side configuration + if data.SafeOutputs.CreatePullRequestReviewComments.Side != "" { + steps = append(steps, fmt.Sprintf(" GITHUB_AW_PR_REVIEW_COMMENT_SIDE: %q\n", data.SafeOutputs.CreatePullRequestReviewComments.Side)) + } + + steps = append(steps, " with:\n") + steps = append(steps, " script: |\n") + + // Add each line of the script with proper indentation + formattedScript := FormatJavaScriptForYAML(createPRReviewCommentScript) + steps = append(steps, formattedScript...) + + // Create outputs for the job + outputs := map[string]string{ + "review_comment_id": "${{ steps.create_pr_review_comment.outputs.review_comment_id }}", + "review_comment_url": "${{ steps.create_pr_review_comment.outputs.review_comment_url }}", + } + + // Only run in pull request context + baseCondition := "github.event.pull_request.number" + + // If this is a command workflow, combine the command trigger condition with the base condition + var jobCondition string + if data.Command != "" { + // Build the command trigger condition + commandCondition := buildCommandOnlyCondition(data.Command) + commandConditionStr := commandCondition.Render() + + // Combine command condition with base condition using AND + jobCondition = fmt.Sprintf("if: (%s) && (%s)", commandConditionStr, baseCondition) + } else { + // No command trigger, just use the base condition + jobCondition = fmt.Sprintf("if: %s", baseCondition) + } + + job := &Job{ + Name: "create_pr_review_comment", + If: jobCondition, + RunsOn: "runs-on: ubuntu-latest", + Permissions: "permissions:\n contents: read\n pull-requests: write", + TimeoutMinutes: 10, // 10-minute timeout as required + Steps: steps, + Outputs: outputs, + Depends: []string{mainJobName}, // Depend on the main workflow job + } + + return job, nil +} + // 
buildCreateOutputPullRequestJob creates the create_pull_request job func (c *Compiler) buildCreateOutputPullRequestJob(data *WorkflowData, mainJobName string) (*Job, error) { if data.SafeOutputs == nil || data.SafeOutputs.CreatePullRequests == nil { @@ -2800,6 +2960,12 @@ func (c *Compiler) extractSafeOutputsConfig(frontmatter map[string]any) *SafeOut config.CreateIssues = issuesConfig } + // Handle create-discussion + discussionsConfig := c.parseDiscussionsConfig(outputMap) + if discussionsConfig != nil { + config.CreateDiscussions = discussionsConfig + } + // Handle add-issue-comment commentsConfig := c.parseCommentsConfig(outputMap) if commentsConfig != nil { @@ -2812,6 +2978,12 @@ func (c *Compiler) extractSafeOutputsConfig(frontmatter map[string]any) *SafeOut config.CreatePullRequests = pullRequestsConfig } + // Handle create-pull-request-review-comment + prReviewCommentsConfig := c.parsePullRequestReviewCommentsConfig(outputMap) + if prReviewCommentsConfig != nil { + config.CreatePullRequestReviewComments = prReviewCommentsConfig + } + // Parse allowed-domains configuration if allowedDomains, exists := outputMap["allowed-domains"]; exists { if domainsArray, ok := allowedDomains.([]any); ok { @@ -2932,6 +3104,40 @@ func (c *Compiler) parseIssuesConfig(outputMap map[string]any) *CreateIssuesConf return nil } +// parseDiscussionsConfig handles create-discussion configuration +func (c *Compiler) parseDiscussionsConfig(outputMap map[string]any) *CreateDiscussionsConfig { + if configData, exists := outputMap["create-discussion"]; exists { + discussionsConfig := &CreateDiscussionsConfig{Max: 1} // Default max is 1 + + if configMap, ok := configData.(map[string]any); ok { + // Parse title-prefix + if titlePrefix, exists := configMap["title-prefix"]; exists { + if titlePrefixStr, ok := titlePrefix.(string); ok { + discussionsConfig.TitlePrefix = titlePrefixStr + } + } + + // Parse category-id + if categoryId, exists := configMap["category-id"]; exists { + if 
categoryIdStr, ok := categoryId.(string); ok { + discussionsConfig.CategoryId = categoryIdStr + } + } + + // Parse max + if max, exists := configMap["max"]; exists { + if maxInt, ok := c.parseIntValue(max); ok { + discussionsConfig.Max = maxInt + } + } + } + + return discussionsConfig + } + + return nil +} + // parseCommentsConfig handles add-issue-comment configuration func (c *Compiler) parseCommentsConfig(outputMap map[string]any) *AddIssueCommentsConfig { if configData, exists := outputMap["add-issue-comment"]; exists { @@ -3004,6 +3210,37 @@ func (c *Compiler) parsePullRequestsConfig(outputMap map[string]any) *CreatePull return pullRequestsConfig } +// parsePullRequestReviewCommentsConfig handles create-pull-request-review-comment configuration +func (c *Compiler) parsePullRequestReviewCommentsConfig(outputMap map[string]any) *CreatePullRequestReviewCommentsConfig { + if _, exists := outputMap["create-pull-request-review-comment"]; !exists { + return nil + } + + configData := outputMap["create-pull-request-review-comment"] + prReviewCommentsConfig := &CreatePullRequestReviewCommentsConfig{Max: 10, Side: "RIGHT"} // Default max is 10, side is RIGHT + + if configMap, ok := configData.(map[string]any); ok { + // Parse max + if max, exists := configMap["max"]; exists { + if maxInt, ok := c.parseIntValue(max); ok { + prReviewCommentsConfig.Max = maxInt + } + } + + // Parse side + if side, exists := configMap["side"]; exists { + if sideStr, ok := side.(string); ok { + // Validate side value + if sideStr == "LEFT" || sideStr == "RIGHT" { + prReviewCommentsConfig.Side = sideStr + } + } + } + } + + return prReviewCommentsConfig +} + // parseIntValue safely parses various numeric types to int func (c *Compiler) parseIntValue(value any) (int, bool) { switch v := value.(type) { diff --git a/pkg/workflow/compiler_test.go b/pkg/workflow/compiler_test.go index db19b6dd9a..8aee28a231 100644 --- a/pkg/workflow/compiler_test.go +++ b/pkg/workflow/compiler_test.go @@ -4274,8 
+4274,8 @@ engine: claude # Test Workflow Invalid YAML with non-boolean value for permissions.`, - expectedErrorLine: 1, - expectedErrorColumn: 1, + expectedErrorLine: 3, // The permissions field is on line 3 + expectedErrorColumn: 13, // After "permissions:" expectedMessagePart: "value must be one of 'read', 'write', 'none'", // Schema validation catches this description: "invalid boolean values should trigger schema validation error", }, @@ -4336,8 +4336,8 @@ engine: claude # Test Workflow Invalid YAML with invalid number format.`, - expectedErrorLine: 1, - expectedErrorColumn: 1, + expectedErrorLine: 3, // The timeout_minutes field is on line 3 + expectedErrorColumn: 17, // After "timeout_minutes: " expectedMessagePart: "got number, want integer", // Schema validation catches this description: "invalid number format should trigger schema validation error", }, @@ -4389,7 +4389,7 @@ engine: claude # Test Workflow YAML error that demonstrates column position handling.`, - expectedErrorLine: 1, + expectedErrorLine: 2, // The message field is on line 2 of the frontmatter (line 3 of file) expectedErrorColumn: 1, // Schema validation error expectedMessagePart: "additional properties 'message' not allowed", description: "yaml error should be extracted with column information when available", diff --git a/pkg/workflow/js.go b/pkg/workflow/js.go index 8ab49bfbcc..bbb7aef4c5 100644 --- a/pkg/workflow/js.go +++ b/pkg/workflow/js.go @@ -12,9 +12,15 @@ var createPullRequestScript string //go:embed js/create_issue.cjs var createIssueScript string +//go:embed js/create_discussion.cjs +var createDiscussionScript string + //go:embed js/create_comment.cjs var createCommentScript string +//go:embed js/create_pr_review_comment.cjs +var createPRReviewCommentScript string + //go:embed js/compute_text.cjs var computeTextScript string diff --git a/pkg/workflow/js/add_labels.cjs b/pkg/workflow/js/add_labels.cjs index 45cb7fc4dc..1d7dab894c 100644 --- a/pkg/workflow/js/add_labels.cjs +++ 
b/pkg/workflow/js/add_labels.cjs @@ -2,73 +2,91 @@ async function main() { // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the add-issue-label item - const labelsItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'add-issue-label'); + const labelsItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "add-issue-label" + ); if (!labelsItem) { - console.log('No add-issue-label item found in agent output'); + console.log("No add-issue-label item found in agent output"); return; } - console.log('Found add-issue-label item:', { labelsCount: labelsItem.labels.length }); + console.log("Found add-issue-label item:", { + labelsCount: labelsItem.labels.length, + }); // Read the allowed labels from environment variable (optional) const allowedLabelsEnv = process.env.GITHUB_AW_LABELS_ALLOWED; let allowedLabels = null; - 
- if (allowedLabelsEnv && allowedLabelsEnv.trim() !== '') { - allowedLabels = allowedLabelsEnv.split(',').map(label => label.trim()).filter(label => label); + + if (allowedLabelsEnv && allowedLabelsEnv.trim() !== "") { + allowedLabels = allowedLabelsEnv + .split(",") + .map(label => label.trim()) + .filter(label => label); if (allowedLabels.length === 0) { allowedLabels = null; // Treat empty list as no restrictions } } if (allowedLabels) { - console.log('Allowed labels:', allowedLabels); + console.log("Allowed labels:", allowedLabels); } else { - console.log('No label restrictions - any labels are allowed'); + console.log("No label restrictions - any labels are allowed"); } // Read the max limit from environment variable (default: 3) const maxCountEnv = process.env.GITHUB_AW_LABELS_MAX_COUNT; const maxCount = maxCountEnv ? parseInt(maxCountEnv, 10) : 3; if (isNaN(maxCount) || maxCount < 1) { - core.setFailed(`Invalid max value: ${maxCountEnv}. Must be a positive integer`); + core.setFailed( + `Invalid max value: ${maxCountEnv}. 
Must be a positive integer` + ); return; } - console.log('Max count:', maxCount); + console.log("Max count:", maxCount); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; if (!isIssueContext && !isPRContext) { - core.setFailed('Not running in issue or pull request context, skipping label addition'); + core.setFailed( + "Not running in issue or pull request context, skipping label addition" + ); return; } @@ -79,34 +97,38 @@ async function main() { if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - contextType = 'issue'; + contextType = "issue"; } else { - core.setFailed('Issue context detected but no issue found in payload'); + core.setFailed("Issue context detected but no issue found in payload"); return; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - contextType = 'pull request'; + contextType = "pull request"; } else { - core.setFailed('Pull request context detected but no pull request found in payload'); + core.setFailed( + "Pull request context detected but no pull request found in payload" + ); return; } } if (!issueNumber) { - core.setFailed('Could not determine issue or pull request number'); + core.setFailed("Could not determine issue or pull request number"); return; } // Extract labels from the JSON item const requestedLabels = labelsItem.labels || []; - console.log('Requested labels:', requestedLabels); + 
console.log("Requested labels:", requestedLabels); // Check for label removal attempts (labels starting with '-') for (const label of requestedLabels) { - if (label.startsWith('-')) { - core.setFailed(`Label removal is not permitted. Found line starting with '-': ${label}`); + if (label.startsWith("-")) { + core.setFailed( + `Label removal is not permitted. Found line starting with '-': ${label}` + ); return; } } @@ -114,7 +136,9 @@ async function main() { // Validate that all requested labels are in the allowed list (if restrictions are set) let validLabels; if (allowedLabels) { - validLabels = requestedLabels.filter(/** @param {string} label */ label => allowedLabels.includes(label)); + validLabels = requestedLabels.filter( + /** @param {string} label */ label => allowedLabels.includes(label) + ); } else { // No restrictions, all requested labels are valid validLabels = requestedLabels; @@ -125,22 +149,29 @@ async function main() { // Enforce max limit if (uniqueLabels.length > maxCount) { - console.log(`too many labels, keep ${maxCount}`) + console.log(`too many labels, keep ${maxCount}`); uniqueLabels = uniqueLabels.slice(0, maxCount); } if (uniqueLabels.length === 0) { - console.log('No labels to add'); - core.setOutput('labels_added', ''); - await core.summary.addRaw(` + console.log("No labels to add"); + core.setOutput("labels_added", ""); + await core.summary + .addRaw( + ` ## Label Addition No labels were added (no valid labels found in agent output). -`).write(); +` + ) + .write(); return; } - console.log(`Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, uniqueLabels); + console.log( + `Adding ${uniqueLabels.length} labels to ${contextType} #${issueNumber}:`, + uniqueLabels + ); try { // Add labels using GitHub API @@ -148,28 +179,35 @@ No labels were added (no valid labels found in agent output). 
owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - labels: uniqueLabels + labels: uniqueLabels, }); - console.log(`Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}`); + console.log( + `Successfully added ${uniqueLabels.length} labels to ${contextType} #${issueNumber}` + ); // Set output for other jobs to use - core.setOutput('labels_added', uniqueLabels.join('\n')); + core.setOutput("labels_added", uniqueLabels.join("\n")); // Write summary - const labelsListMarkdown = uniqueLabels.map(label => `- \`${label}\``).join('\n'); - await core.summary.addRaw(` + const labelsListMarkdown = uniqueLabels + .map(label => `- \`${label}\``) + .join("\n"); + await core.summary + .addRaw( + ` ## Label Addition Successfully added ${uniqueLabels.length} label(s) to ${contextType} #${issueNumber}: ${labelsListMarkdown} -`).write(); - +` + ) + .write(); } catch (error) { const errorMessage = error instanceof Error ? error.message : String(error); - console.error('Failed to add labels:', errorMessage); + console.error("Failed to add labels:", errorMessage); core.setFailed(`Failed to add labels: ${errorMessage}`); } } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/add_labels.test.cjs b/pkg/workflow/js/add_labels.test.cjs index a267a2650d..fc003bfea1 100644 --- a/pkg/workflow/js/add_labels.test.cjs +++ b/pkg/workflow/js/add_labels.test.cjs @@ -1,6 +1,6 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { @@ -8,29 +8,29 @@ const mockCore = { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; const mockGithub = { rest: { issues: { - addLabels: vi.fn() - } - } + addLabels: vi.fn(), + 
}, + }, }; const mockContext = { - eventName: 'issues', + eventName: "issues", repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { issue: { - number: 123 - } - } + number: 123, + }, + }, }; // Set up global variables @@ -38,736 +38,873 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('add_labels.cjs', () => { +describe("add_labels.cjs", () => { let addLabelsScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset environment variables delete process.env.GITHUB_AW_AGENT_OUTPUT; delete process.env.GITHUB_AW_LABELS_ALLOWED; delete process.env.GITHUB_AW_LABELS_MAX_COUNT; - + // Reset context to default state - global.context.eventName = 'issues'; + global.context.eventName = "issues"; global.context.payload.issue = { number: 123 }; delete global.context.payload.pull_request; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/add_labels.cjs'); - addLabelsScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/add_labels.cjs" + ); + addLabelsScript = fs.readFileSync(scriptPath, "utf8"); }); - describe('Environment variable validation', () => { - it('should skip when no agent output is provided', async () => { - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; + describe("Environment variable validation", () => { + it("should skip when no agent output is provided", async () => { + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; delete process.env.GITHUB_AW_AGENT_OUTPUT; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + + 
expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip when agent output is empty', async () => { - process.env.GITHUB_AW_AGENT_OUTPUT = ' '; - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should skip when agent output is empty", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Agent output content is empty'); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should work when allowed labels are not provided (any labels allowed)', async () => { + it("should work when allowed labels are not provided (any labels allowed)", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'custom-label'] - }] + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "custom-label"], + }, + ], }); delete process.env.GITHUB_AW_LABELS_ALLOWED; - + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No label restrictions - any labels are allowed'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No label restrictions - any labels are allowed" + ); 
expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement', 'custom-label'] + labels: ["bug", "enhancement", "custom-label"], }); - + consoleSpy.mockRestore(); }); - it('should work when allowed labels list is empty (any labels allowed)', async () => { + it("should work when allowed labels list is empty (any labels allowed)", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'custom-label'] - }] + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "custom-label"], + }, + ], }); - process.env.GITHUB_AW_LABELS_ALLOWED = ' '; - + process.env.GITHUB_AW_LABELS_ALLOWED = " "; + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No label restrictions - any labels are allowed'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No label restrictions - any labels are allowed" + ); expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement', 'custom-label'] + labels: ["bug", "enhancement", "custom-label"], }); - + consoleSpy.mockRestore(); }); - it('should enforce allowed labels when restrictions are set', async () => { + it("should enforce allowed labels when restrictions are set", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'custom-label', 'documentation'] - }] + items: [ + { + type: "add-issue-label", + labels: 
["bug", "enhancement", "custom-label", "documentation"], + }, + ], }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Allowed labels:', ['bug', 'enhancement']); + + expect(consoleSpy).toHaveBeenCalledWith("Allowed labels:", [ + "bug", + "enhancement", + ]); expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] // 'custom-label' and 'documentation' filtered out + labels: ["bug", "enhancement"], // 'custom-label' and 'documentation' filtered out }); - + consoleSpy.mockRestore(); }); - it('should fail when max count is invalid', async () => { + it("should fail when max count is invalid", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - process.env.GITHUB_AW_LABELS_MAX_COUNT = 'invalid'; - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + process.env.GITHUB_AW_LABELS_MAX_COUNT = "invalid"; + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Invalid max value: invalid. Must be a positive integer'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Invalid max value: invalid. 
Must be a positive integer" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); - it('should fail when max count is zero', async () => { + it("should fail when max count is zero", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - process.env.GITHUB_AW_LABELS_MAX_COUNT = '0'; - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + process.env.GITHUB_AW_LABELS_MAX_COUNT = "0"; + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Invalid max value: 0. Must be a positive integer'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Invalid max value: 0. Must be a positive integer" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); - it('should use default max count when not specified', async () => { + it("should use default max count when not specified", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'feature', 'documentation'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement,feature,documentation'; + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "feature", "documentation"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = + "bug,enhancement,feature,documentation"; delete process.env.GITHUB_AW_LABELS_MAX_COUNT; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Max count:', 3); + + 
expect(consoleSpy).toHaveBeenCalledWith("Max count:", 3); expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement', 'feature'] // Only first 3 due to default max count + labels: ["bug", "enhancement", "feature"], // Only first 3 due to default max count }); - + consoleSpy.mockRestore(); }); }); - describe('Context validation', () => { - it('should fail when not in issue or PR context', async () => { + describe("Context validation", () => { + it("should fail when not in issue or PR context", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'push'; - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "push"; + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Not running in issue or pull request context, skipping label addition'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Not running in issue or pull request context, skipping label addition" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); - it('should work with issue_comment event', async () => { + it("should work with issue_comment event", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'issue_comment'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + 
process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "issue_comment"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should work with pull_request event', async () => { + it("should work with pull_request event", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'pull_request'; + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "pull_request"; global.context.payload.pull_request = { number: 456 }; delete global.context.payload.issue; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 456, - labels: ['bug'] + labels: ["bug"], }); - + consoleSpy.mockRestore(); }); - it('should work with pull_request_review event', async () => { + it("should work with pull_request_review event", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'pull_request_review'; + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = 
"pull_request_review"; global.context.payload.pull_request = { number: 789 }; delete global.context.payload.issue; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 789, - labels: ['bug'] + labels: ["bug"], }); - + consoleSpy.mockRestore(); }); - it('should fail when issue context detected but no issue in payload', async () => { + it("should fail when issue context detected but no issue in payload", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'issues'; + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "issues"; delete global.context.payload.issue; - + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Issue context detected but no issue found in payload'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Issue context detected but no issue found in payload" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); - it('should fail when PR context detected but no PR in payload', async () => { + it("should fail when PR context detected but no PR in payload", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'pull_request'; + items: [ + { + type: 
"add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "pull_request"; delete global.context.payload.issue; delete global.context.payload.pull_request; - + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Pull request context detected but no pull request found in payload'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Pull request context detected but no pull request found in payload" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); }); - describe('Label parsing and validation', () => { - it('should parse labels from agent output and add valid ones', async () => { + describe("Label parsing and validation", () => { + it("should parse labels from agent output and add valid ones", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'documentation'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement,feature'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "documentation"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement,feature"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] // 'documentation' not in allowed list + labels: ["bug", "enhancement"], // 'documentation' not in allowed list }); - - expect(mockCore.setOutput).toHaveBeenCalledWith('labels_added', 'bug\nenhancement'); + + 
expect(mockCore.setOutput).toHaveBeenCalledWith( + "labels_added", + "bug\nenhancement" + ); expect(mockCore.summary.addRaw).toHaveBeenCalled(); expect(mockCore.summary.write).toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip empty lines in agent output', async () => { + it("should skip empty lines in agent output", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] + labels: ["bug", "enhancement"], }); - + consoleSpy.mockRestore(); }); - it('should fail when line starts with dash (removal indication)', async () => { + it("should fail when line starts with dash (removal indication)", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', '-enhancement'] - }] + items: [ + { + type: "add-issue-label", + labels: ["bug", "-enhancement"], + }, + ], }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(mockCore.setFailed).toHaveBeenCalledWith('Label removal is not permitted. 
Found line starting with \'-\': -enhancement'); + + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Label removal is not permitted. Found line starting with '-': -enhancement" + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); }); - it('should remove duplicate labels', async () => { + it("should remove duplicate labels", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] // Duplicates removed + labels: ["bug", "enhancement"], // Duplicates removed }); - + consoleSpy.mockRestore(); }); - it('should enforce max count limit', async () => { + it("should enforce max count limit", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'feature', 'documentation', 'question'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement,feature,documentation,question'; - process.env.GITHUB_AW_LABELS_MAX_COUNT = '2'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: [ + "bug", + "enhancement", + "feature", + "documentation", + "question", + ], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = 
+ "bug,enhancement,feature,documentation,question"; + process.env.GITHUB_AW_LABELS_MAX_COUNT = "2"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('too many labels, keep 2'); + + expect(consoleSpy).toHaveBeenCalledWith("too many labels, keep 2"); expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] // Only first 2 + labels: ["bug", "enhancement"], // Only first 2 }); - + consoleSpy.mockRestore(); }); - it('should skip when no valid labels found', async () => { + it("should skip when no valid labels found", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['invalid', 'another-invalid'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["invalid", "another-invalid"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No labels to add'); - expect(mockCore.setOutput).toHaveBeenCalledWith('labels_added', ''); - expect(mockCore.summary.addRaw).toHaveBeenCalledWith(expect.stringContaining('No labels were added')); + + expect(consoleSpy).toHaveBeenCalledWith("No labels to add"); + expect(mockCore.setOutput).toHaveBeenCalledWith("labels_added", ""); + expect(mockCore.summary.addRaw).toHaveBeenCalledWith( + expect.stringContaining("No labels were added") + ); expect(mockGithub.rest.issues.addLabels).not.toHaveBeenCalled(); - + 
consoleSpy.mockRestore(); }); }); - describe('GitHub API integration', () => { - it('should successfully add labels to issue', async () => { + describe("GitHub API integration", () => { + it("should successfully add labels to issue", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement,feature'; - + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement,feature"; + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] - }); - - expect(consoleSpy).toHaveBeenCalledWith('Successfully added 2 labels to issue #123'); - expect(mockCore.setOutput).toHaveBeenCalledWith('labels_added', 'bug\nenhancement'); - - const summaryCall = mockCore.summary.addRaw.mock.calls.find(call => - call[0].includes('Successfully added 2 label(s) to issue #123') + labels: ["bug", "enhancement"], + }); + + expect(consoleSpy).toHaveBeenCalledWith( + "Successfully added 2 labels to issue #123" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "labels_added", + "bug\nenhancement" + ); + + const summaryCall = mockCore.summary.addRaw.mock.calls.find(call => + call[0].includes("Successfully added 2 label(s) to issue #123") ); expect(summaryCall).toBeDefined(); - expect(summaryCall[0]).toContain('- `bug`'); - expect(summaryCall[0]).toContain('- `enhancement`'); - + expect(summaryCall[0]).toContain("- `bug`"); + 
expect(summaryCall[0]).toContain("- `enhancement`"); + consoleSpy.mockRestore(); }); - it('should successfully add labels to pull request', async () => { + it("should successfully add labels to pull request", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - global.context.eventName = 'pull_request'; + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + global.context.eventName = "pull_request"; global.context.payload.pull_request = { number: 456 }; delete global.context.payload.issue; - + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Successfully added 1 labels to pull request #456'); - - const summaryCall = mockCore.summary.addRaw.mock.calls.find(call => - call[0].includes('Successfully added 1 label(s) to pull request #456') + + expect(consoleSpy).toHaveBeenCalledWith( + "Successfully added 1 labels to pull request #456" + ); + + const summaryCall = mockCore.summary.addRaw.mock.calls.find(call => + call[0].includes("Successfully added 1 label(s) to pull request #456") ); expect(summaryCall).toBeDefined(); - + consoleSpy.mockRestore(); }); - it('should handle GitHub API errors', async () => { + it("should handle GitHub API errors", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const apiError = new Error('Label does not exist'); + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], 
+ }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const apiError = new Error("Label does not exist"); mockGithub.rest.issues.addLabels.mockRejectedValue(apiError); - - const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - + + const consoleSpy = vi + .spyOn(console, "error") + .mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Failed to add labels:', 'Label does not exist'); - expect(mockCore.setFailed).toHaveBeenCalledWith('Failed to add labels: Label does not exist'); - + + expect(consoleSpy).toHaveBeenCalledWith( + "Failed to add labels:", + "Label does not exist" + ); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Failed to add labels: Label does not exist" + ); + consoleSpy.mockRestore(); }); - it('should handle non-Error objects in catch block', async () => { + it("should handle non-Error objects in catch block", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const stringError = 'Something went wrong'; + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const stringError = "Something went wrong"; mockGithub.rest.issues.addLabels.mockRejectedValue(stringError); - - const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - + + const consoleSpy = vi + .spyOn(console, "error") + .mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Failed to add labels:', 'Something went wrong'); - expect(mockCore.setFailed).toHaveBeenCalledWith('Failed to add labels: Something went wrong'); - + + expect(consoleSpy).toHaveBeenCalledWith( + "Failed to add labels:", + 
"Something went wrong" + ); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Failed to add labels: Something went wrong" + ); + consoleSpy.mockRestore(); }); }); - describe('Output and logging', () => { - it('should log agent output content length', async () => { + describe("Output and logging", () => { + it("should log agent output content length", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Agent output content length:', 69); - + + expect(consoleSpy).toHaveBeenCalledWith( + "Agent output content length:", + 69 + ); + consoleSpy.mockRestore(); }); - it('should log allowed labels and max count', async () => { + it("should log allowed labels and max count", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement,feature'; - process.env.GITHUB_AW_LABELS_MAX_COUNT = '5'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement,feature"; + process.env.GITHUB_AW_LABELS_MAX_COUNT = "5"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - 
expect(consoleSpy).toHaveBeenCalledWith('Allowed labels:', ['bug', 'enhancement', 'feature']); - expect(consoleSpy).toHaveBeenCalledWith('Max count:', 5); - + + expect(consoleSpy).toHaveBeenCalledWith("Allowed labels:", [ + "bug", + "enhancement", + "feature", + ]); + expect(consoleSpy).toHaveBeenCalledWith("Max count:", 5); + consoleSpy.mockRestore(); }); - it('should log requested labels', async () => { + it("should log requested labels", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement', 'invalid'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement", "invalid"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Requested labels:', ['bug', 'enhancement', 'invalid']); - + + expect(consoleSpy).toHaveBeenCalledWith("Requested labels:", [ + "bug", + "enhancement", + "invalid", + ]); + consoleSpy.mockRestore(); }); - it('should log final labels being added', async () => { + it("should log final labels being added", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { 
${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Adding 2 labels to issue #123:', ['bug', 'enhancement']); - + + expect(consoleSpy).toHaveBeenCalledWith( + "Adding 2 labels to issue #123:", + ["bug", "enhancement"] + ); + consoleSpy.mockRestore(); }); }); - describe('Edge cases', () => { - it('should handle whitespace in allowed labels', async () => { + describe("Edge cases", () => { + it("should handle whitespace in allowed labels", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug', 'enhancement'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = ' bug , enhancement , feature '; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: "add-issue-label", + labels: ["bug", "enhancement"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = " bug , enhancement , feature "; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Allowed labels:', ['bug', 'enhancement', 'feature']); + + expect(consoleSpy).toHaveBeenCalledWith("Allowed labels:", [ + "bug", + "enhancement", + "feature", + ]); expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug', 'enhancement'] + labels: ["bug", "enhancement"], }); - + consoleSpy.mockRestore(); }); - it('should handle empty entries in allowed labels', async () => { + it("should handle empty entries in allowed labels", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] - }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,,enhancement,'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + items: [ + { + type: 
"add-issue-label", + labels: ["bug"], + }, + ], + }); + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,,enhancement,"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Allowed labels:', ['bug', 'enhancement']); - + + expect(consoleSpy).toHaveBeenCalledWith("Allowed labels:", [ + "bug", + "enhancement", + ]); + consoleSpy.mockRestore(); }); - it('should handle single label output', async () => { + it("should handle single label output", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-label', - labels: ['bug'] - }] + items: [ + { + type: "add-issue-label", + labels: ["bug"], + }, + ], }); - process.env.GITHUB_AW_LABELS_ALLOWED = 'bug,enhancement'; - + process.env.GITHUB_AW_LABELS_ALLOWED = "bug,enhancement"; + mockGithub.rest.issues.addLabels.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${addLabelsScript} })()`); - + expect(mockGithub.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - labels: ['bug'] + labels: ["bug"], }); - - expect(mockCore.setOutput).toHaveBeenCalledWith('labels_added', 'bug'); - + + expect(mockCore.setOutput).toHaveBeenCalledWith("labels_added", "bug"); + consoleSpy.mockRestore(); }); }); diff --git a/pkg/workflow/js/add_reaction.cjs b/pkg/workflow/js/add_reaction.cjs index e456db7b79..66ed1c7852 100644 --- a/pkg/workflow/js/add_reaction.cjs +++ b/pkg/workflow/js/add_reaction.cjs @@ -1,13 +1,24 @@ async function main() { // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + const reaction = 
process.env.GITHUB_AW_REACTION || "eyes"; - console.log('Reaction type:', reaction); + console.log("Reaction type:", reaction); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(", ")}` + ); return; } @@ -19,39 +30,39 @@ async function main() { try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } endpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } endpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; break; - case 'pull_request': - case 'pull_request_target': + case "pull_request": + case "pull_request_target": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint endpoint = `/repos/${owner}/${repo}/issues/${prNumber}/reactions`; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found 
in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } endpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -62,13 +73,12 @@ async function main() { return; } - console.log('API endpoint:', endpoint); + console.log("API endpoint:", endpoint); await addReaction(endpoint, reaction); - } catch (error) { const errorMessage = error instanceof Error ? error.message : String(error); - console.error('Failed to add reaction:', errorMessage); + console.error("Failed to add reaction:", errorMessage); core.setFailed(`Failed to add reaction: ${errorMessage}`); } } @@ -79,21 +89,21 @@ async function main() { * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/add_reaction.test.cjs b/pkg/workflow/js/add_reaction.test.cjs index 0f2c334ce2..640e34b20e 100644 --- a/pkg/workflow/js/add_reaction.test.cjs +++ b/pkg/workflow/js/add_reaction.test.cjs @@ -1,6 +1,6 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const 
mockCore = { @@ -8,25 +8,25 @@ const mockCore = { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; const mockGithub = { - request: vi.fn() + request: vi.fn(), }; const mockContext = { - eventName: 'issues', + eventName: "issues", repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { issue: { - number: 123 - } - } + number: 123, + }, + }, }; // Set up global variables @@ -34,286 +34,349 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('add_reaction.cjs', () => { +describe("add_reaction.cjs", () => { let addReactionScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset environment variables delete process.env.GITHUB_AW_REACTION; - + // Reset context to default global.context = { - eventName: 'issues', + eventName: "issues", repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { issue: { - number: 123 - } - } + number: 123, + }, + }, }; // Load the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/add_reaction.cjs'); - addReactionScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/add_reaction.cjs" + ); + addReactionScript = fs.readFileSync(scriptPath, "utf8"); }); - describe('Environment variable validation', () => { - it('should use default values when environment variables are not set', async () => { + describe("Environment variable validation", () => { + it("should use default values when environment variables are not set", async () => { mockGithub.request.mockResolvedValue({ - data: { id: 123, content: 'eyes' } + data: { id: 123, content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { 
${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Reaction type:', 'eyes'); - + expect(consoleSpy).toHaveBeenCalledWith("Reaction type:", "eyes"); + consoleSpy.mockRestore(); }); - it('should fail with invalid reaction type', async () => { - process.env.GITHUB_AW_REACTION = 'invalid'; + it("should fail with invalid reaction type", async () => { + process.env.GITHUB_AW_REACTION = "invalid"; await eval(`(async () => { ${addReactionScript} })()`); expect(mockCore.setFailed).toHaveBeenCalledWith( - 'Invalid reaction type: invalid. Valid reactions are: +1, -1, laugh, confused, heart, hooray, rocket, eyes' + "Invalid reaction type: invalid. Valid reactions are: +1, -1, laugh, confused, heart, hooray, rocket, eyes" ); }); - it('should accept all valid reaction types', async () => { - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; - + it("should accept all valid reaction types", async () => { + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; + for (const reaction of validReactions) { vi.clearAllMocks(); process.env.GITHUB_AW_REACTION = reaction; - + mockGithub.request.mockResolvedValue({ - data: { id: 123, content: reaction } + data: { id: 123, content: reaction }, }); await eval(`(async () => { ${addReactionScript} })()`); expect(mockCore.setFailed).not.toHaveBeenCalled(); - expect(mockCore.setOutput).toHaveBeenCalledWith('reaction-id', '123'); + expect(mockCore.setOutput).toHaveBeenCalledWith("reaction-id", "123"); } }); }); - describe('Event context handling', () => { - it('should handle issues event', async () => { - global.context.eventName = 'issues'; + describe("Event context handling", () => { + it("should handle issues event", async () => { + global.context.eventName = "issues"; global.context.payload = { issue: { number: 123 } }; - + mockGithub.request.mockResolvedValue({ - data: { id: 456, content: 'eyes' } + data: { id: 456, 
content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('API endpoint:', '/repos/testowner/testrepo/issues/123/reactions'); - expect(mockGithub.request).toHaveBeenCalledWith('POST /repos/testowner/testrepo/issues/123/reactions', { - content: 'eyes', - headers: { 'Accept': 'application/vnd.github+json' } - }); - + expect(consoleSpy).toHaveBeenCalledWith( + "API endpoint:", + "/repos/testowner/testrepo/issues/123/reactions" + ); + expect(mockGithub.request).toHaveBeenCalledWith( + "POST /repos/testowner/testrepo/issues/123/reactions", + { + content: "eyes", + headers: { Accept: "application/vnd.github+json" }, + } + ); + consoleSpy.mockRestore(); }); - it('should handle issue_comment event', async () => { - global.context.eventName = 'issue_comment'; + it("should handle issue_comment event", async () => { + global.context.eventName = "issue_comment"; global.context.payload = { comment: { id: 789 } }; - + mockGithub.request.mockResolvedValue({ - data: { id: 456, content: 'eyes' } + data: { id: 456, content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('API endpoint:', '/repos/testowner/testrepo/issues/comments/789/reactions'); - expect(mockGithub.request).toHaveBeenCalledWith('POST /repos/testowner/testrepo/issues/comments/789/reactions', { - content: 'eyes', - headers: { 'Accept': 'application/vnd.github+json' } - }); - + expect(consoleSpy).toHaveBeenCalledWith( + "API endpoint:", + "/repos/testowner/testrepo/issues/comments/789/reactions" + ); + expect(mockGithub.request).toHaveBeenCalledWith( + "POST 
/repos/testowner/testrepo/issues/comments/789/reactions", + { + content: "eyes", + headers: { Accept: "application/vnd.github+json" }, + } + ); + consoleSpy.mockRestore(); }); - it('should handle pull_request event', async () => { - global.context.eventName = 'pull_request'; + it("should handle pull_request event", async () => { + global.context.eventName = "pull_request"; global.context.payload = { pull_request: { number: 456 } }; - + mockGithub.request.mockResolvedValue({ - data: { id: 789, content: 'eyes' } + data: { id: 789, content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('API endpoint:', '/repos/testowner/testrepo/issues/456/reactions'); - expect(mockGithub.request).toHaveBeenCalledWith('POST /repos/testowner/testrepo/issues/456/reactions', { - content: 'eyes', - headers: { 'Accept': 'application/vnd.github+json' } - }); - + expect(consoleSpy).toHaveBeenCalledWith( + "API endpoint:", + "/repos/testowner/testrepo/issues/456/reactions" + ); + expect(mockGithub.request).toHaveBeenCalledWith( + "POST /repos/testowner/testrepo/issues/456/reactions", + { + content: "eyes", + headers: { Accept: "application/vnd.github+json" }, + } + ); + consoleSpy.mockRestore(); }); - it('should handle pull_request_review_comment event', async () => { - global.context.eventName = 'pull_request_review_comment'; + it("should handle pull_request_review_comment event", async () => { + global.context.eventName = "pull_request_review_comment"; global.context.payload = { comment: { id: 321 } }; - + mockGithub.request.mockResolvedValue({ - data: { id: 654, content: 'eyes' } + data: { id: 654, content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await 
eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('API endpoint:', '/repos/testowner/testrepo/pulls/comments/321/reactions'); - expect(mockGithub.request).toHaveBeenCalledWith('POST /repos/testowner/testrepo/pulls/comments/321/reactions', { - content: 'eyes', - headers: { 'Accept': 'application/vnd.github+json' } - }); - + expect(consoleSpy).toHaveBeenCalledWith( + "API endpoint:", + "/repos/testowner/testrepo/pulls/comments/321/reactions" + ); + expect(mockGithub.request).toHaveBeenCalledWith( + "POST /repos/testowner/testrepo/pulls/comments/321/reactions", + { + content: "eyes", + headers: { Accept: "application/vnd.github+json" }, + } + ); + consoleSpy.mockRestore(); }); - it('should fail on unsupported event type', async () => { - global.context.eventName = 'unsupported'; + it("should fail on unsupported event type", async () => { + global.context.eventName = "unsupported"; await eval(`(async () => { ${addReactionScript} })()`); - expect(mockCore.setFailed).toHaveBeenCalledWith('Unsupported event type: unsupported'); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Unsupported event type: unsupported" + ); }); - it('should fail when issue number is missing', async () => { - global.context.eventName = 'issues'; + it("should fail when issue number is missing", async () => { + global.context.eventName = "issues"; global.context.payload = {}; await eval(`(async () => { ${addReactionScript} })()`); - expect(mockCore.setFailed).toHaveBeenCalledWith('Issue number not found in event payload'); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Issue number not found in event payload" + ); }); - it('should fail when comment ID is missing', async () => { - global.context.eventName = 'issue_comment'; + it("should fail when comment ID is missing", async () => { + global.context.eventName = "issue_comment"; global.context.payload = {}; await eval(`(async () => { ${addReactionScript} })()`); - 
expect(mockCore.setFailed).toHaveBeenCalledWith('Comment ID not found in event payload'); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Comment ID not found in event payload" + ); }); }); - describe('Add reaction functionality', () => { - it('should successfully add reaction with direct response', async () => { - process.env.GITHUB_AW_REACTION = 'heart'; - + describe("Add reaction functionality", () => { + it("should successfully add reaction with direct response", async () => { + process.env.GITHUB_AW_REACTION = "heart"; + mockGithub.request.mockResolvedValue({ - data: { id: 123, content: 'heart' } + data: { id: 123, content: "heart" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Successfully added reaction: heart (id: 123)'); - expect(mockCore.setOutput).toHaveBeenCalledWith('reaction-id', '123'); - + expect(consoleSpy).toHaveBeenCalledWith( + "Successfully added reaction: heart (id: 123)" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("reaction-id", "123"); + consoleSpy.mockRestore(); }); - it('should handle response without ID', async () => { - process.env.GITHUB_AW_REACTION = 'rocket'; - + it("should handle response without ID", async () => { + process.env.GITHUB_AW_REACTION = "rocket"; + mockGithub.request.mockResolvedValue({ - data: { content: 'rocket' } + data: { content: "rocket" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Successfully added reaction: rocket'); - expect(mockCore.setOutput).toHaveBeenCalledWith('reaction-id', ''); - + expect(consoleSpy).toHaveBeenCalledWith( + "Successfully added reaction: rocket" + ); + 
expect(mockCore.setOutput).toHaveBeenCalledWith("reaction-id", ""); + consoleSpy.mockRestore(); }); }); - describe('Error handling', () => { - it('should handle API errors gracefully', async () => { + describe("Error handling", () => { + it("should handle API errors gracefully", async () => { // Mock the GitHub request to fail - mockGithub.request.mockRejectedValue(new Error('API Error')); + mockGithub.request.mockRejectedValue(new Error("API Error")); - const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + const consoleSpy = vi + .spyOn(console, "error") + .mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Failed to add reaction:', 'API Error'); - expect(mockCore.setFailed).toHaveBeenCalledWith('Failed to add reaction: API Error'); - + expect(consoleSpy).toHaveBeenCalledWith( + "Failed to add reaction:", + "API Error" + ); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Failed to add reaction: API Error" + ); + consoleSpy.mockRestore(); }); - it('should handle non-Error objects in catch block', async () => { + it("should handle non-Error objects in catch block", async () => { // Mock the GitHub request to fail with string error - mockGithub.request.mockRejectedValue('String error'); + mockGithub.request.mockRejectedValue("String error"); - const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + const consoleSpy = vi + .spyOn(console, "error") + .mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Failed to add reaction:', 'String error'); - expect(mockCore.setFailed).toHaveBeenCalledWith('Failed to add reaction: String error'); - + expect(consoleSpy).toHaveBeenCalledWith( + "Failed to add reaction:", + "String error" + ); + expect(mockCore.setFailed).toHaveBeenCalledWith( + "Failed to add reaction: String error" + ); + consoleSpy.mockRestore(); }); }); - 
describe('Output and logging', () => { - it('should log reaction type', async () => { - process.env.GITHUB_AW_REACTION = 'rocket'; - + describe("Output and logging", () => { + it("should log reaction type", async () => { + process.env.GITHUB_AW_REACTION = "rocket"; + mockGithub.request.mockResolvedValue({ - data: { id: 123, content: 'rocket' } + data: { id: 123, content: "rocket" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Reaction type:', 'rocket'); - + expect(consoleSpy).toHaveBeenCalledWith("Reaction type:", "rocket"); + consoleSpy.mockRestore(); }); - it('should log API endpoint', async () => { + it("should log API endpoint", async () => { mockGithub.request.mockResolvedValue({ - data: { id: 123, content: 'eyes' } + data: { id: 123, content: "eyes" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); await eval(`(async () => { ${addReactionScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('API endpoint:', '/repos/testowner/testrepo/issues/123/reactions'); - + expect(consoleSpy).toHaveBeenCalledWith( + "API endpoint:", + "/repos/testowner/testrepo/issues/123/reactions" + ); + consoleSpy.mockRestore(); }); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/add_reaction_and_edit_comment.cjs b/pkg/workflow/js/add_reaction_and_edit_comment.cjs index 6eb76996ec..a39d3e0e0b 100644 --- a/pkg/workflow/js/add_reaction_and_edit_comment.cjs +++ b/pkg/workflow/js/add_reaction_and_edit_comment.cjs @@ -1,21 +1,32 @@ async function main() { - // Read inputs from environment variables - const reaction = process.env.GITHUB_AW_REACTION || 'eyes'; + // Read inputs from environment variables + const reaction = process.env.GITHUB_AW_REACTION 
|| "eyes"; const alias = process.env.GITHUB_AW_ALIAS; // Only present for alias workflows const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`; - console.log('Reaction type:', reaction); - console.log('Alias name:', alias || 'none'); - console.log('Run ID:', runId); - console.log('Run URL:', runUrl); + console.log("Reaction type:", reaction); + console.log("Alias name:", alias || "none"); + console.log("Run ID:", runId); + console.log("Run URL:", runUrl); // Validate reaction type - const validReactions = ['+1', '-1', 'laugh', 'confused', 'heart', 'hooray', 'rocket', 'eyes']; + const validReactions = [ + "+1", + "-1", + "laugh", + "confused", + "heart", + "hooray", + "rocket", + "eyes", + ]; if (!validReactions.includes(reaction)) { - core.setFailed(`Invalid reaction type: ${reaction}. Valid reactions are: ${validReactions.join(', ')}`); + core.setFailed( + `Invalid reaction type: ${reaction}. 
Valid reactions are: ${validReactions.join(", ")}` + ); return; } @@ -29,10 +40,10 @@ async function main() { try { switch (eventName) { - case 'issues': + case "issues": const issueNumber = context.payload?.issue?.number; if (!issueNumber) { - core.setFailed('Issue number not found in event payload'); + core.setFailed("Issue number not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/${issueNumber}/reactions`; @@ -40,10 +51,10 @@ async function main() { shouldEditComment = false; break; - case 'issue_comment': + case "issue_comment": const commentId = context.payload?.comment?.id; if (!commentId) { - core.setFailed('Comment ID not found in event payload'); + core.setFailed("Comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/issues/comments/${commentId}/reactions`; @@ -52,10 +63,10 @@ async function main() { shouldEditComment = alias ? true : false; break; - case 'pull_request': + case "pull_request": const prNumber = context.payload?.pull_request?.number; if (!prNumber) { - core.setFailed('Pull request number not found in event payload'); + core.setFailed("Pull request number not found in event payload"); return; } // PRs are "issues" for the reactions endpoint @@ -64,10 +75,10 @@ async function main() { shouldEditComment = false; break; - case 'pull_request_review_comment': + case "pull_request_review_comment": const reviewCommentId = context.payload?.comment?.id; if (!reviewCommentId) { - core.setFailed('Review comment ID not found in event payload'); + core.setFailed("Review comment ID not found in event payload"); return; } reactionEndpoint = `/repos/${owner}/${repo}/pulls/comments/${reviewCommentId}/reactions`; @@ -81,27 +92,30 @@ async function main() { return; } - console.log('Reaction API endpoint:', reactionEndpoint); + console.log("Reaction API endpoint:", reactionEndpoint); // Add reaction first await addReaction(reactionEndpoint, reaction); // Then edit comment if 
applicable and if it's a comment event if (shouldEditComment && commentUpdateEndpoint) { - console.log('Comment update endpoint:', commentUpdateEndpoint); + console.log("Comment update endpoint:", commentUpdateEndpoint); await editCommentWithWorkflowLink(commentUpdateEndpoint, runUrl); } else { if (!alias && commentUpdateEndpoint) { - console.log('Skipping comment edit - only available for alias workflows'); + console.log( + "Skipping comment edit - only available for alias workflows" + ); } else { - console.log('Skipping comment edit for event type:', eventName); + console.log("Skipping comment edit for event type:", eventName); } } - } catch (error) { const errorMessage = error instanceof Error ? error.message : String(error); - console.error('Failed to process reaction and comment edit:', errorMessage); - core.setFailed(`Failed to process reaction and comment edit: ${errorMessage}`); + console.error("Failed to process reaction and comment edit:", errorMessage); + core.setFailed( + `Failed to process reaction and comment edit: ${errorMessage}` + ); } } @@ -111,20 +125,20 @@ async function main() { * @param {string} reaction - The reaction type to add */ async function addReaction(endpoint, reaction) { - const response = await github.request('POST ' + endpoint, { + const response = await github.request("POST " + endpoint, { content: reaction, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); const reactionId = response.data?.id; if (reactionId) { console.log(`Successfully added reaction: ${reaction} (id: ${reactionId})`); - core.setOutput('reaction-id', reactionId.toString()); + core.setOutput("reaction-id", reactionId.toString()); } else { console.log(`Successfully added reaction: ${reaction}`); - core.setOutput('reaction-id', ''); + core.setOutput("reaction-id", ""); } } @@ -136,39 +150,42 @@ async function addReaction(endpoint, reaction) { async function editCommentWithWorkflowLink(endpoint, runUrl) { try 
{ // First, get the current comment content - const getResponse = await github.request('GET ' + endpoint, { + const getResponse = await github.request("GET " + endpoint, { headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); - const originalBody = getResponse.data.body || ''; + const originalBody = getResponse.data.body || ""; const workflowLinkText = `\n\n---\n*🤖 [Workflow run](${runUrl}) triggered by this comment*`; - + // Check if we've already added a workflow link to avoid duplicates - if (originalBody.includes('*🤖 [Workflow run](')) { - console.log('Comment already contains a workflow run link, skipping edit'); + if (originalBody.includes("*🤖 [Workflow run](")) { + console.log( + "Comment already contains a workflow run link, skipping edit" + ); return; } const updatedBody = originalBody + workflowLinkText; // Update the comment - const updateResponse = await github.request('PATCH ' + endpoint, { + const updateResponse = await github.request("PATCH " + endpoint, { body: updatedBody, headers: { - 'Accept': 'application/vnd.github+json' - } + Accept: "application/vnd.github+json", + }, }); console.log(`Successfully updated comment with workflow link`); console.log(`Comment ID: ${updateResponse.data.id}`); - } catch (error) { // Don't fail the entire job if comment editing fails - just log it const errorMessage = error instanceof Error ? 
error.message : String(error); - console.warn('Failed to edit comment with workflow link:', errorMessage); - console.warn('This is not critical - the reaction was still added successfully'); + console.warn("Failed to edit comment with workflow link:", errorMessage); + console.warn( + "This is not critical - the reaction was still added successfully" + ); } } diff --git a/pkg/workflow/js/check_team_member.cjs b/pkg/workflow/js/check_team_member.cjs index e4e7e4a238..8db70a6e13 100644 --- a/pkg/workflow/js/check_team_member.cjs +++ b/pkg/workflow/js/check_team_member.cjs @@ -4,27 +4,31 @@ async function main() { // Check if the actor has repository access (admin, maintain permissions) try { - console.log(`Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}`); + console.log( + `Checking if user '${actor}' is admin or maintainer of ${owner}/${repo}` + ); + + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); - const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - - if (permission === 'admin' || permission === 'maintain') { + + if (permission === "admin" || permission === "maintain") { console.log(`User has ${permission} access to repository`); - core.setOutput('is_team_member', 'true'); + core.setOutput("is_team_member", "true"); return; } } catch (repoError) { - const errorMessage = repoError instanceof Error ? repoError.message : String(repoError); + const errorMessage = + repoError instanceof Error ? 
repoError.message : String(repoError); console.log(`Repository permission check failed: ${errorMessage}`); } - core.setOutput('is_team_member', 'false'); + core.setOutput("is_team_member", "false"); } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/check_team_member.test.cjs b/pkg/workflow/js/check_team_member.test.cjs index 0cd95ac556..3071a90a54 100644 --- a/pkg/workflow/js/check_team_member.test.cjs +++ b/pkg/workflow/js/check_team_member.test.cjs @@ -1,26 +1,26 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { - setOutput: vi.fn() + setOutput: vi.fn(), }; const mockGithub = { rest: { repos: { - getCollaboratorPermissionLevel: vi.fn() - } - } + getCollaboratorPermissionLevel: vi.fn(), + }, + }, }; const mockContext = { - actor: 'testuser', + actor: "testuser", repo: { - owner: 'testowner', - repo: 'testrepo' - } + owner: "testowner", + repo: "testrepo", + }, }; // Set up global variables @@ -28,244 +28,305 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('check_team_member.cjs', () => { +describe("check_team_member.cjs", () => { let checkTeamMemberScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset context to default state - global.context.actor = 'testuser'; + global.context.actor = "testuser"; global.context.repo = { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/check_team_member.cjs'); - checkTeamMemberScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/check_team_member.cjs" + ); + checkTeamMemberScript = 
fs.readFileSync(scriptPath, "utf8"); }); - it('should set is_team_member to true for admin permission', async () => { + it("should set is_team_member to true for admin permission", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'admin' } + data: { permission: "admin" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission level: admin'); - expect(consoleSpy).toHaveBeenCalledWith('User has admin access to repository'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'true'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission level: admin" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "User has admin access to repository" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "true"); consoleSpy.mockRestore(); }); - it('should set is_team_member to true for maintain permission', async () => { + it("should set is_team_member to true for maintain permission", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'maintain' } + data: { permission: "maintain" }, }); - const consoleSpy = vi.spyOn(console, 
'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission level: maintain'); - expect(consoleSpy).toHaveBeenCalledWith('User has maintain access to repository'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'true'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission level: maintain" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "User has maintain access to repository" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "true"); consoleSpy.mockRestore(); }); - it('should set is_team_member to false for write permission', async () => { + it("should set is_team_member to false for write permission", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'write' } + data: { permission: "write" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + 
mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission level: write'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission level: write" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); - it('should set is_team_member to false for read permission', async () => { + it("should set is_team_member to false for read permission", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'read' } + data: { permission: "read" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission level: read'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + 
expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission level: read" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); - it('should set is_team_member to false for none permission', async () => { + it("should set is_team_member to false for none permission", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'none' } + data: { permission: "none" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission level: none'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission level: none" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); - it('should handle API errors and set is_team_member to false', async () => { - const apiError = new Error('API Error: Not Found'); - mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue(apiError); + it("should handle API errors and set is_team_member to false", async () => { + const apiError = new Error("API Error: Not Found"); + 
mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue( + apiError + ); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of testowner/testrepo'); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission check failed: API Error: Not Found'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of testowner/testrepo" + ); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission check failed: API Error: Not Found" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); - it('should handle different actor names correctly', async () => { - global.context.actor = 'different-user'; - + it("should handle different actor names correctly", async () => { + global.context.actor = "different-user"; + mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'admin' } + data: { permission: "admin" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 
'testrepo', - username: 'different-user' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + username: "different-user", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'different-user\' is admin or maintainer of testowner/testrepo'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'true'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'different-user' is admin or maintainer of testowner/testrepo" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "true"); consoleSpy.mockRestore(); }); - it('should handle different repository contexts correctly', async () => { + it("should handle different repository contexts correctly", async () => { global.context.repo = { - owner: 'different-owner', - repo: 'different-repo' + owner: "different-owner", + repo: "different-repo", }; - + mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'maintain' } + data: { permission: "maintain" }, }); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(mockGithub.rest.repos.getCollaboratorPermissionLevel).toHaveBeenCalledWith({ - owner: 'different-owner', - repo: 'different-repo', - username: 'testuser' + expect( + mockGithub.rest.repos.getCollaboratorPermissionLevel + ).toHaveBeenCalledWith({ + owner: "different-owner", + repo: "different-repo", + username: "testuser", }); - expect(consoleSpy).toHaveBeenCalledWith('Checking if user \'testuser\' is admin or maintainer of different-owner/different-repo'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'true'); + expect(consoleSpy).toHaveBeenCalledWith( + "Checking if user 'testuser' is admin or maintainer of 
different-owner/different-repo" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "true"); consoleSpy.mockRestore(); }); - it('should handle authentication errors gracefully', async () => { - const authError = new Error('Bad credentials'); + it("should handle authentication errors gracefully", async () => { + const authError = new Error("Bad credentials"); authError.status = 401; - mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue(authError); + mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue( + authError + ); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission check failed: Bad credentials'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission check failed: Bad credentials" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); - it('should handle rate limiting errors gracefully', async () => { - const rateLimitError = new Error('API rate limit exceeded'); + it("should handle rate limiting errors gracefully", async () => { + const rateLimitError = new Error("API rate limit exceeded"); rateLimitError.status = 403; - mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue(rateLimitError); + mockGithub.rest.repos.getCollaboratorPermissionLevel.mockRejectedValue( + rateLimitError + ); - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); // Execute the script await eval(`(async () => { ${checkTeamMemberScript} })()`); - expect(consoleSpy).toHaveBeenCalledWith('Repository permission check failed: API 
rate limit exceeded'); - expect(mockCore.setOutput).toHaveBeenCalledWith('is_team_member', 'false'); + expect(consoleSpy).toHaveBeenCalledWith( + "Repository permission check failed: API rate limit exceeded" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("is_team_member", "false"); consoleSpy.mockRestore(); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/collect_ndjson_output.cjs b/pkg/workflow/js/collect_ndjson_output.cjs index 240c8cb07c..6ac8066a45 100644 --- a/pkg/workflow/js/collect_ndjson_output.cjs +++ b/pkg/workflow/js/collect_ndjson_output.cjs @@ -1,29 +1,32 @@ async function main() { const fs = require("fs"); - + /** * Sanitizes content for safe output in GitHub Actions * @param {string} content - The content to sanitize * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; @@ -32,15 +35,15 @@ async function main() { sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" sanitized = sanitizeUrlProtocols(sanitized); @@ -51,18 +54,22 @@ async function main() { // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); @@ -76,18 +83,24 @@ async function main() { * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - return 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - - return isAllowed ? match : '(redacted)'; - }); + return s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + + return isAllowed ? match : "(redacted)"; + } + ); } /** @@ -97,10 +110,13 @@ async function main() { */ function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** @@ -110,8 +126,10 @@ async function main() { */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** @@ -121,11 +139,13 @@ async function main() { */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } - + /** * Gets the maximum allowed count for a given output type * @param {string} itemType - The output item type @@ -134,75 +154,189 @@ async function main() { */ function getMaxAllowedForType(itemType, config) { // Check if max is explicitly specified in config - if (config && config[itemType] && typeof config[itemType] === 'object' && config[itemType].max) { + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { return config[itemType].max; } - + // Use default limits for plural-supported types switch (itemType) { - case 'create-issue': + case "create-issue": return 1; // Only one issue allowed - case 'add-issue-comment': + case "add-issue-comment": return 1; // Only one comment allowed - case 'create-pull-request': - return 1; // Only one pull request allowed - case 'add-issue-label': - return 5; // Only one labels operation allowed - case 'update-issue': - return 1; // Only one issue update allowed - case 'push-to-branch': - return 1; // Only one push to branch allowed + case "create-pull-request": + return 1; 
// Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed default: - return 1; // Default to single item for unknown types + return 1; // Default to single item for unknown types } } + + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + + // Fix single quotes to double quotes (must be done first) + repaired = repaired.replace(/'/g, '"'); + + // Fix missing quotes around object keys + repaired = repaired.replace( + /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, + '$1"$2":' + ); + + // Fix newlines and tabs inside strings by escaping them + repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { + if ( + content.includes("\n") || + content.includes("\r") || + content.includes("\t") + ) { + const escaped = content + .replace(/\\/g, "\\\\") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r") + .replace(/\t/g, "\\t"); + return `"${escaped}"`; + } + return match; + }); + + // Fix unescaped quotes inside string values + repaired = repaired.replace( + /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, + (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` + ); + + // Fix wrong bracket/brace types - arrays should end with ] not } + repaired = repaired.replace( + /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, + "$1]" + ); + + // Fix missing closing braces/brackets + const openBraces = (repaired.match(/\{/g) || []).length; + const closeBraces = (repaired.match(/\}/g) || []).length; + + if (openBraces 
> closeBraces) { + repaired += "}".repeat(openBraces - closeBraces); + } else if (closeBraces > openBraces) { + repaired = "{".repeat(closeBraces - openBraces) + repaired; + } + + // Fix missing closing brackets for arrays + const openBrackets = (repaired.match(/\[/g) || []).length; + const closeBrackets = (repaired.match(/\]/g) || []).length; + + if (openBrackets > closeBrackets) { + repaired += "]".repeat(openBrackets - closeBrackets); + } else if (closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + + return repaired; + } + + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, print error to console and return undefined + console.log( + `JSON parsing failed. Original: ${originalError.message}. 
After repair: ${repairError.message}` + ); + return undefined; + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; - + if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); return; } - console.log('Raw output content length:', outputContent.length); + console.log("Raw output content length:", outputContent.length); // Parse the safe-outputs configuration let expectedOutputTypes = {}; if (safeOutputsConfig) { try { expectedOutputTypes = JSON.parse(safeOutputsConfig); - console.log('Expected output types:', Object.keys(expectedOutputTypes)); + console.log("Expected output types:", Object.keys(expectedOutputTypes)); } catch (error) { - console.log('Warning: Could not parse safe-outputs config:', error.message); + console.log( + "Warning: Could not parse safe-outputs config:", + error.message + ); } } // Parse JSONL content - const lines = outputContent.trim().split('\n'); + const lines = outputContent.trim().split("\n"); const parsedItems = []; const errors = []; for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - if (line === '') continue; // Skip empty lines - + if (line === "") continue; // Skip empty lines try { - const item = 
JSON.parse(line); - + const item = parseJsonWithRepair(line); + + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field if (!item.type) { errors.push(`Line ${i + 1}: Missing required 'type' field`); @@ -212,27 +346,37 @@ async function main() { // Validate against expected output types const itemType = item.type; if (!expectedOutputTypes[itemType]) { - errors.push(`Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(', ')}`); + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); continue; } // Check for too many items of the same type - const typeCount = parsedItems.filter(existing => existing.type === itemType).length; + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); if (typeCount >= maxAllowed) { - errors.push(`Line ${i + 1}: Too many items of type '${itemType}'. Maximum allowed: ${maxAllowed}.`); + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); continue; } // Basic validation based on type switch (itemType) { - case 'create-issue': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'title' string field`); + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-issue requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); continue; } // Sanitize text content @@ -240,111 +384,251 @@ async function main() { item.body = sanitizeContent(item.body); // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-comment': - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: add-issue-comment requires a 'body' string field`); + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); continue; } // Sanitize text content item.body = sanitizeContent(item.body); break; - case 'create-pull-request': - if (!item.title || typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'title' string field`); + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); continue; } - if (!item.body || typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: create-pull-request requires a 'body' string field`); + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); continue; } // Sanitize text content item.title = sanitizeContent(item.title); item.body = sanitizeContent(item.body); // Sanitize branch name if present - if (item.branch && typeof item.branch === 'string') { + if (item.branch && typeof item.branch === "string") { item.branch = sanitizeContent(item.branch); } // Sanitize labels if present if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map(label => typeof label === 'string' ? sanitizeContent(label) : label); + item.labels = item.labels.map(label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); } break; - case 'add-issue-label': + case "add-issue-label": if (!item.labels || !Array.isArray(item.labels)) { - errors.push(`Line ${i + 1}: add-issue-label requires a 'labels' array field`); + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); continue; } - if (item.labels.some(label => typeof label !== 'string')) { - errors.push(`Line ${i + 1}: add-issue-label labels array must contain only strings`); + if (item.labels.some(label => typeof label !== "string")) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); continue; } // Sanitize label strings item.labels = item.labels.map(label => sanitizeContent(label)); break; - case 'update-issue': + case "update-issue": // Check that at least one updateable field is provided - const hasValidField = (item.status !== undefined) || - (item.title !== undefined) || - (item.body !== undefined); + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; if (!hasValidField) { - errors.push(`Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields`); + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); continue; } // Validate status if provided if (item.status !== undefined) { - if (typeof item.status !== 'string' || (item.status !== 'open' && item.status !== 'closed')) { - errors.push(`Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'`); + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); continue; } } // Validate title if provided if (item.title !== undefined) { - if (typeof item.title !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'title' must be a string`); + if (typeof item.title !== "string") { + 
errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); continue; } item.title = sanitizeContent(item.title); } // Validate body if provided if (item.body !== undefined) { - if (typeof item.body !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'body' must be a string`); + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); continue; } item.body = sanitizeContent(item.body); } // Validate issue_number if provided (for target "*") if (item.issue_number !== undefined) { - if (typeof item.issue_number !== 'number' && typeof item.issue_number !== 'string') { - errors.push(`Line ${i + 1}: update-issue 'issue_number' must be a number or string`); + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); continue; } } break; - case 'push-to-branch': + case "push-to-branch": // Validate message if provided (optional) if (item.message !== undefined) { - if (typeof item.message !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'message' must be a string`); + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-branch 'message' must be a string` + ); continue; } item.message = sanitizeContent(item.message); } // Validate pull_request_number if provided (for target "*") if (item.pull_request_number !== undefined) { - if (typeof item.pull_request_number !== 'number' && typeof item.pull_request_number !== 'string') { - errors.push(`Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string`); + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path 
field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); continue; } + const startLineNumber = + typeof item.start_line === "string" + ? 
parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); break; default: @@ -354,7 +638,6 @@ async function main() { console.log(`Line ${i + 1}: Valid ${itemType} item`); parsedItems.push(item); - } catch (error) { errors.push(`Line ${i + 1}: Invalid JSON - ${error.message}`); } @@ -362,9 +645,9 @@ async function main() { // Report validation results if (errors.length > 0) { - console.log('Validation errors found:'); + console.log("Validation errors found:"); errors.forEach(error => console.log(` - ${error}`)); - + // For now, we'll continue with valid items but log the errors // In the future, we might want to fail the workflow for invalid items } @@ -374,11 +657,11 @@ async function main() { // Set the parsed and validated items as output const validatedOutput = { items: parsedItems, - errors: errors + errors: errors, 
}; - core.setOutput('output', JSON.stringify(validatedOutput)); - core.setOutput('raw_output', outputContent); + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); } // Call the main function diff --git a/pkg/workflow/js/collect_ndjson_output.test.cjs b/pkg/workflow/js/collect_ndjson_output.test.cjs index f3177c63af..3e263023d7 100644 --- a/pkg/workflow/js/collect_ndjson_output.test.cjs +++ b/pkg/workflow/js/collect_ndjson_output.test.cjs @@ -1,30 +1,30 @@ -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; -describe('collect_ndjson_output.cjs', () => { +describe("collect_ndjson_output.cjs", () => { let mockCore; let collectScript; beforeEach(() => { // Save original console before mocking global.originalConsole = global.console; - + // Mock console methods global.console = { log: vi.fn(), - error: vi.fn() + error: vi.fn(), }; // Mock core actions methods mockCore = { - setOutput: vi.fn() + setOutput: vi.fn(), }; global.core = mockCore; // Read the script file - const scriptPath = path.join(__dirname, 'collect_ndjson_output.cjs'); - collectScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join(__dirname, "collect_ndjson_output.cjs"); + collectScript = fs.readFileSync(scriptPath, "utf8"); // Make fs available globally for the evaluated script global.fs = fs; @@ -32,7 +32,7 @@ describe('collect_ndjson_output.cjs', () => { afterEach(() => { // Clean up any test files - const testFiles = ['/tmp/test-ndjson-output.txt']; + const testFiles = ["/tmp/test-ndjson-output.txt"]; testFiles.forEach(file => { try { if (fs.existsSync(file)) { @@ -44,7 +44,7 @@ describe('collect_ndjson_output.cjs', () => { }); // Clean up globals safely - don't delete console as vitest may still need it - if (typeof global !== 
'undefined') { + if (typeof global !== "undefined") { delete global.fs; delete global.core; // Restore original console instead of deleting @@ -55,209 +55,1015 @@ describe('collect_ndjson_output.cjs', () => { } }); - it('should handle missing GITHUB_AW_SAFE_OUTPUTS environment variable', async () => { + it("should handle missing GITHUB_AW_SAFE_OUTPUTS environment variable", async () => { delete process.env.GITHUB_AW_SAFE_OUTPUTS; - + await eval(`(async () => { ${collectScript} })()`); - - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - expect(console.log).toHaveBeenCalledWith('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); + + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + expect(console.log).toHaveBeenCalledWith( + "GITHUB_AW_SAFE_OUTPUTS not set, no output to collect" + ); }); - it('should handle missing output file', async () => { - process.env.GITHUB_AW_SAFE_OUTPUTS = '/tmp/nonexistent-file.txt'; - + it("should handle missing output file", async () => { + process.env.GITHUB_AW_SAFE_OUTPUTS = "/tmp/nonexistent-file.txt"; + await eval(`(async () => { ${collectScript} })()`); - - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - expect(console.log).toHaveBeenCalledWith('Output file does not exist:', '/tmp/nonexistent-file.txt'); + + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + expect(console.log).toHaveBeenCalledWith( + "Output file does not exist:", + "/tmp/nonexistent-file.txt" + ); }); - it('should handle empty output file', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; - fs.writeFileSync(testFile, ''); + it("should handle empty output file", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + fs.writeFileSync(testFile, ""); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - + await eval(`(async () => { ${collectScript} })()`); - - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - expect(console.log).toHaveBeenCalledWith('Output file is 
empty'); + + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + expect(console.log).toHaveBeenCalledWith("Output file is empty"); }); - it('should validate and parse valid JSONL content', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should validate and parse valid JSONL content", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body"} {"type": "add-issue-comment", "body": "Test comment"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true, "add-issue-comment": true}'; - + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-issue": true, "add-issue-comment": true}'; + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(2); - expect(parsedOutput.items[0].type).toBe('create-issue'); - expect(parsedOutput.items[1].type).toBe('add-issue-comment'); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[1].type).toBe("add-issue-comment"); expect(parsedOutput.errors).toHaveLength(0); }); - it('should reject items with unexpected output types', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should reject items with unexpected output types", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body"} {"type": "unexpected-type", "data": "some data"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; 
process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; - + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(1); - expect(parsedOutput.items[0].type).toBe('create-issue'); + expect(parsedOutput.items[0].type).toBe("create-issue"); expect(parsedOutput.errors).toHaveLength(1); - expect(parsedOutput.errors[0]).toContain('Unexpected output type'); + expect(parsedOutput.errors[0]).toContain("Unexpected output type"); }); - it('should validate required fields for create-issue type', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should validate required fields for create-issue type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "Test Issue"} {"type": "create-issue", "body": "Test body"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; - + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(0); expect(parsedOutput.errors).toHaveLength(2); - expect(parsedOutput.errors[0]).toContain('requires a \'body\' string field'); - expect(parsedOutput.errors[1]).toContain('requires a \'title\' string field'); + expect(parsedOutput.errors[0]).toContain("requires a 'body' string field"); + 
expect(parsedOutput.errors[1]).toContain("requires a 'title' string field"); }); - it('should validate required fields for add-issue-label type', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should validate required fields for add-issue-label type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "add-issue-label", "labels": ["bug", "enhancement"]} {"type": "add-issue-label", "labels": "not-an-array"} {"type": "add-issue-label", "labels": [1, 2, 3]}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": true}'; - + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(1); - expect(parsedOutput.items[0].labels).toEqual(['bug', 'enhancement']); + expect(parsedOutput.items[0].labels).toEqual(["bug", "enhancement"]); expect(parsedOutput.errors).toHaveLength(2); }); - it('should handle invalid JSON lines', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should handle invalid JSON lines", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body"} {invalid json} {"type": "add-issue-comment", "body": "Test comment"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true, "add-issue-comment": true}'; - + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-issue": true, "add-issue-comment": true}'; + await eval(`(async () => { ${collectScript} })()`); 
- + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(2); expect(parsedOutput.errors).toHaveLength(1); - expect(parsedOutput.errors[0]).toContain('Invalid JSON'); + expect(parsedOutput.errors[0]).toContain("Invalid JSON"); }); - it('should allow multiple items of supported types up to limits', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should allow multiple items of supported types up to limits", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "First Issue", "body": "First body"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; - + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(1); // Both items should be allowed - expect(parsedOutput.items[0].title).toBe('First Issue'); + expect(parsedOutput.items[0].title).toBe("First Issue"); expect(parsedOutput.errors).toHaveLength(0); // No errors for multiple items within limits }); - it('should respect max limits from config', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should respect max limits from config", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "First Issue", "body": "First body"} {"type": "create-issue", 
"title": "Second Issue", "body": "Second body"} {"type": "create-issue", "title": "Third Issue", "body": "Third body"}`; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; // Set max to 2 for create-issue process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": {"max": 2}}'; - + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(2); // Only first 2 items should be allowed - expect(parsedOutput.items[0].title).toBe('First Issue'); - expect(parsedOutput.items[1].title).toBe('Second Issue'); + expect(parsedOutput.items[0].title).toBe("First Issue"); + expect(parsedOutput.items[1].title).toBe("Second Issue"); expect(parsedOutput.errors).toHaveLength(1); // Error for the third item exceeding max - expect(parsedOutput.errors[0]).toContain('Too many items of type \'create-issue\'. Maximum allowed: 2'); + expect(parsedOutput.errors[0]).toContain( + "Too many items of type 'create-issue'. 
Maximum allowed: 2" + ); }); - it('should skip empty lines', async () => { - const testFile = '/tmp/test-ndjson-output.txt'; + it("should validate required fields for create-discussion type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-discussion", "title": "Test Discussion"} +{"type": "create-discussion", "body": "Test body"} +{"type": "create-discussion", "title": "Valid Discussion", "body": "Valid body"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-discussion": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); // Only the valid one + expect(parsedOutput.items[0].title).toBe("Valid Discussion"); + expect(parsedOutput.items[0].body).toBe("Valid body"); + expect(parsedOutput.errors).toHaveLength(2); + expect(parsedOutput.errors[0]).toContain("requires a 'body' string field"); + expect(parsedOutput.errors[1]).toContain("requires a 'title' string field"); + }); + + it("should skip empty lines", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body"} {"type": "add-issue-comment", "body": "Test comment"} `; - + fs.writeFileSync(testFile, ndjsonContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true, "add-issue-comment": true}'; - + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-issue": true, "add-issue-comment": true}'; + await eval(`(async () => { ${collectScript} })()`); - + const setOutputCalls = mockCore.setOutput.mock.calls; - const outputCall = 
setOutputCalls.find(call => call[0] === 'output'); + const outputCall = setOutputCalls.find(call => call[0] === "output"); expect(outputCall).toBeDefined(); - + const parsedOutput = JSON.parse(outputCall[1]); expect(parsedOutput.items).toHaveLength(2); expect(parsedOutput.errors).toHaveLength(0); }); + + it("should validate required fields for create-pull-request-review-comment type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": 10, "body": "Good code"} +{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": "invalid", "body": "Comment"} +{"type": "create-pull-request-review-comment", "path": "src/file.js", "body": "Missing line"} +{"type": "create-pull-request-review-comment", "line": 15} +{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": 20, "start_line": 25, "body": "Invalid range"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-pull-request-review-comment": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); // Only the first valid item + expect(parsedOutput.items[0].path).toBe("src/file.js"); + expect(parsedOutput.items[0].line).toBe(10); + expect(parsedOutput.items[0].body).toBeDefined(); + expect(parsedOutput.errors).toHaveLength(4); // 4 invalid items + expect( + parsedOutput.errors.some(e => + e.includes("line' must be a positive integer") + ) + ).toBe(true); + expect( + parsedOutput.errors.some(e => e.includes("requires a 'line' number")) + ).toBe(true); + expect( + parsedOutput.errors.some(e => 
e.includes("requires a 'path' string")) + ).toBe(true); + expect( + parsedOutput.errors.some(e => + e.includes("start_line' must be less than or equal to 'line'") + ) + ).toBe(true); + }); + + it("should validate optional fields for create-pull-request-review-comment type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": 20, "start_line": 15, "side": "LEFT", "body": "Multi-line comment"} +{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": 25, "side": "INVALID", "body": "Invalid side"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-pull-request-review-comment": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); // Only the first valid item + expect(parsedOutput.items[0].side).toBe("LEFT"); + expect(parsedOutput.items[0].start_line).toBe(15); + expect(parsedOutput.errors).toHaveLength(1); // 1 invalid side + expect(parsedOutput.errors[0]).toContain("side' must be 'LEFT' or 'RIGHT'"); + }); + + it("should respect max limits for create-pull-request-review-comment from config", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const items = []; + for (let i = 1; i <= 12; i++) { + items.push( + `{"type": "create-pull-request-review-comment", "path": "src/file.js", "line": ${i}, "body": "Comment ${i}"}` + ); + } + const ndjsonContent = items.join("\n"); + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + // Set max to 5 for create-pull-request-review-comment + 
process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-pull-request-review-comment": {"max": 5}}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(5); // Only first 5 items should be allowed + expect(parsedOutput.errors).toHaveLength(7); // 7 items exceeding max + expect( + parsedOutput.errors.every(e => + e.includes( + "Too many items of type 'create-pull-request-review-comment'. Maximum allowed: 5" + ) + ) + ).toBe(true); + }); + + describe("JSON repair functionality", () => { + it("should repair JSON with unescaped quotes in string values", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Issue with "quotes" inside", "body": "Test body"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].title).toContain("quotes"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with missing quotes around object keys", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: "create-issue", title: "Test Issue", body: "Test body"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await 
eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with trailing commas", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body",}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with single quotes", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{'type': 'create-issue', 'title': 'Test Issue', 'body': 'Test body'}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + 
expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with missing closing braces", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test body"`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with missing opening braces", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `"type": "create-issue", "title": "Test Issue", "body": "Test body"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with newlines in string values", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + // Real JSONL would have actual \n in the string, not real newlines + const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Line 1\\nLine 2\\nLine 3"}`; + + 
fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].body).toContain("Line 1"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with tabs and special characters", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Test\tbody"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with array syntax issues", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "add-issue-label", "labels": ["bug", "enhancement",}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + 
expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].labels).toEqual(["bug", "enhancement"]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should handle complex repair scenarios with multiple issues", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + // Make this a more realistic test case for JSON repair without real newlines breaking JSONL + const ndjsonContent = `{type: 'create-issue', title: 'Issue with "quotes" and trailing,', body: 'Multi\\nline\\ntext',`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should handle JSON broken across multiple lines (real multiline scenario)", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + // This simulates what happens when LLMs output JSON with actual newlines + // The parser should treat this as one broken JSON item, not multiple lines + // For now, we'll test that it fails gracefully and reports an error + const ndjsonContent = `{"type": "create-issue", "title": "Test Issue", "body": "Line 1 +Line 2 +Line 3"} +{"type": "add-issue-comment", "body": "This is a valid line"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = + '{"create-issue": true, "add-issue-comment": true}'; + + await eval(`(async () => { ${collectScript} 
})()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // The first broken JSON should produce errors, but the last valid line should work + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("add-issue-comment"); + expect(parsedOutput.errors.length).toBeGreaterThan(0); + expect( + parsedOutput.errors.some(error => error.includes("JSON parsing failed")) + ).toBe(true); + }); + + it("should still report error if repair fails completely", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{completely broken json with no hope: of repair [[[}}}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + }); + + it("should preserve valid JSON without modification", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Perfect JSON", "body": "This should not be modified"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + 
expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].title).toBe("Perfect JSON"); + expect(parsedOutput.items[0].body).toBe("This should not be modified"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair mixed quote types in same object", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": 'create-issue', "title": 'Mixed quotes', 'body': "Test body"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].title).toBe("Mixed quotes"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair arrays ending with wrong bracket type", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "add-issue-label", "labels": ["bug", "feature", "enhancement"}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].labels).toEqual([ + "bug", + "feature", + 
"enhancement", + ]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should handle simple missing closing brackets with graceful repair", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "add-issue-label", "labels": ["bug", "feature"`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // This case may be too complex for the current repair logic + if (parsedOutput.items.length === 1) { + expect(parsedOutput.items[0].type).toBe("add-issue-label"); + expect(parsedOutput.items[0].labels).toEqual(["bug", "feature"]); + expect(parsedOutput.errors).toHaveLength(0); + } else { + // If repair fails, it should report an error + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + } + }); + + it("should repair nested objects with multiple issues", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Nested test', body: 'Body text', labels: ['bug', 'priority',}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + 
expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].labels).toEqual(["bug", "priority"]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with Unicode characters and escape sequences", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Unicode test \u00e9\u00f1', body: 'Body with \\u0040 symbols',`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].title).toContain("é"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair JSON with numbers, booleans, and null values", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Complex types test', body: 'Body text', priority: 5, urgent: true, assignee: null,}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].priority).toBe(5); + 
expect(parsedOutput.items[0].urgent).toBe(true); + expect(parsedOutput.items[0].assignee).toBe(null); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should attempt repair but fail gracefully with excessive malformed JSON", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{,type: 'create-issue',, title: 'Extra commas', body: 'Test',, labels: ['bug',,],}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // This JSON is too malformed to repair reliably, so we expect it to fail + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + }); + + it("should repair very long strings with multiple issues", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const longBody = + 'This is a very long body text that contains "quotes" and other\\nspecial characters including tabs\\t and newlines\\r\\n and more text that goes on and on.'; + const ndjsonContent = `{type: 'create-issue', title: 'Long string test', body: '${longBody}',}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + 
expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].body).toContain("very long body"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair deeply nested structures", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Nested test', body: 'Body', metadata: {project: 'test', tags: ['important', 'urgent',}, version: 1.0,}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].metadata).toBeDefined(); + expect(parsedOutput.items[0].metadata.project).toBe("test"); + expect(parsedOutput.items[0].metadata.tags).toEqual([ + "important", + "urgent", + ]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should handle complex backslash scenarios with graceful failure", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Escape test with "quotes" and \\\\backslashes', body: 'Test body',}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // This complex escape case might 
fail due to the embedded quotes and backslashes + // The repair function may not handle this level of complexity + if (parsedOutput.items.length === 1) { + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].title).toContain("quotes"); + expect(parsedOutput.errors).toHaveLength(0); + } else { + // If repair fails, it should report an error + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + } + }); + + it("should repair JSON with carriage returns and form feeds", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'create-issue', title: 'Special chars', body: 'Text with\\rcarriage\\fform feed',}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should gracefully handle repair attempts on fundamentally broken JSON", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{{{[[[type]]]}}} === "broken" &&& title ??? 
'impossible to repair' @@@ body`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + }); + + it("should handle repair of JSON with missing property separators", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type 'create-issue', title 'Missing colons', body 'Test body'}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // This should likely fail to repair since the repair function doesn't handle missing colons + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + }); + + it("should repair arrays with mixed bracket types in complex structures", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: 'add-issue-label', labels: ['priority', 'bug', 'urgent'}, extra: ['data', 'here'}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": 
true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("add-issue-label"); + expect(parsedOutput.items[0].labels).toEqual([ + "priority", + "bug", + "urgent", + ]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should gracefully handle cases with multiple trailing commas", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "create-issue", "title": "Test", "body": "Test body",,,}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + // Multiple consecutive commas might be too complex for the repair function + if (parsedOutput.items.length === 1) { + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].title).toBe("Test"); + expect(parsedOutput.errors).toHaveLength(0); + } else { + // If repair fails, it should report an error + expect(parsedOutput.items).toHaveLength(0); + expect(parsedOutput.errors).toHaveLength(1); + expect(parsedOutput.errors[0]).toContain("JSON parsing failed"); + } + }); + + it("should repair JSON with simple missing closing brackets", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{"type": "add-issue-label", "labels": ["bug", "feature"]}`; + + fs.writeFileSync(testFile, ndjsonContent); + 
process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"add-issue-label": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("add-issue-label"); + expect(parsedOutput.items[0].labels).toEqual(["bug", "feature"]); + expect(parsedOutput.errors).toHaveLength(0); + }); + + it("should repair combination of unquoted keys and trailing commas", async () => { + const testFile = "/tmp/test-ndjson-output.txt"; + const ndjsonContent = `{type: "create-issue", title: "Combined issues", body: "Test body", priority: 1,}`; + + fs.writeFileSync(testFile, ndjsonContent); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; + process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG = '{"create-issue": true}'; + + await eval(`(async () => { ${collectScript} })()`); + + const setOutputCalls = mockCore.setOutput.mock.calls; + const outputCall = setOutputCalls.find(call => call[0] === "output"); + expect(outputCall).toBeDefined(); + + const parsedOutput = JSON.parse(outputCall[1]); + expect(parsedOutput.items).toHaveLength(1); + expect(parsedOutput.items[0].type).toBe("create-issue"); + expect(parsedOutput.items[0].title).toBe("Combined issues"); + expect(parsedOutput.items[0].priority).toBe(1); + expect(parsedOutput.errors).toHaveLength(0); + }); + }); }); diff --git a/pkg/workflow/js/compute_text.cjs b/pkg/workflow/js/compute_text.cjs index 94a5205cce..ad6a1faabf 100644 --- a/pkg/workflow/js/compute_text.cjs +++ b/pkg/workflow/js/compute_text.cjs @@ -4,23 +4,26 @@ * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { 
+ return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; @@ -29,15 +32,15 @@ function sanitizeContent(content) { sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" // Step 1: Temporarily mark HTTPS URLs to protect them @@ -50,18 +53,21 @@ function sanitizeContent(content) { // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + 
sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); @@ -75,19 +81,25 @@ function sanitizeContent(content) { * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - s = s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - - return isAllowed ? match : '(redacted)'; - }); - + s = s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + + return isAllowed ? match : "(redacted)"; + } + ); + return s; } @@ -99,10 +111,13 @@ function sanitizeContent(content) { function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns // This covers URLs like https://example.com, javascript:alert(), mailto:user@domain.com, etc. 
- return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); } /** @@ -112,8 +127,10 @@ function sanitizeContent(content) { */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** @@ -123,96 +140,100 @@ function sanitizeContent(content) { */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } async function main() { - let text = ''; + let text = ""; const actor = context.actor; const { owner, repo } = context.repo; // Check if the actor has repository access (admin, maintain permissions) - const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor - }); - + const repoPermission = await github.rest.repos.getCollaboratorPermissionLevel( + { + owner: owner, + repo: repo, + username: actor, + } + ); + const permission = repoPermission.data.permission; console.log(`Repository permission level: ${permission}`); - - if (permission !== 'admin' && permission !== 'maintain') { - core.setOutput('text', ''); + + if (permission !== "admin" && permission !== "maintain") { + core.setOutput("text", ""); return; } - + // Determine current body text based on event context switch (context.eventName) { - case 'issues': + case "issues": // For issues: title + body if (context.payload.issue) { - const title = context.payload.issue.title || ''; - const body = context.payload.issue.body || ''; + const title = context.payload.issue.title || ""; + const body = context.payload.issue.body || ""; text = `${title}\n\n${body}`; } break; - - case 'pull_request': + + case "pull_request": // For pull requests: title + body if (context.payload.pull_request) { - const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - - case 'pull_request_target': + + case "pull_request_target": // For pull request target events: title + body if 
(context.payload.pull_request) { - const title = context.payload.pull_request.title || ''; - const body = context.payload.pull_request.body || ''; + const title = context.payload.pull_request.title || ""; + const body = context.payload.pull_request.body || ""; text = `${title}\n\n${body}`; } break; - - case 'issue_comment': + + case "issue_comment": // For issue comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - - case 'pull_request_review_comment': + + case "pull_request_review_comment": // For PR review comments: comment body if (context.payload.comment) { - text = context.payload.comment.body || ''; + text = context.payload.comment.body || ""; } break; - - case 'pull_request_review': + + case "pull_request_review": // For PR reviews: review body if (context.payload.review) { - text = context.payload.review.body || ''; + text = context.payload.review.body || ""; } break; - + default: // Default: empty text - text = ''; + text = ""; break; } - + // Sanitize the text before output const sanitizedText = sanitizeContent(text); - + // Display sanitized text in logs console.log(`text: ${sanitizedText}`); // Set the sanitized text as output - core.setOutput('text', sanitizedText); + core.setOutput("text", sanitizedText); } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/compute_text.test.cjs b/pkg/workflow/js/compute_text.test.cjs index 73a492b1a4..ba243e69ac 100644 --- a/pkg/workflow/js/compute_text.test.cjs +++ b/pkg/workflow/js/compute_text.test.cjs @@ -1,28 +1,28 @@ -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { - setOutput: vi.fn() + setOutput: vi.fn(), }; 
const mockGithub = { rest: { repos: { - getCollaboratorPermissionLevel: vi.fn() - } - } + getCollaboratorPermissionLevel: vi.fn(), + }, + }, }; const mockContext = { - actor: 'test-user', + actor: "test-user", repo: { - owner: 'test-owner', - repo: 'test-repo' + owner: "test-owner", + repo: "test-repo", }, - eventName: 'issues', - payload: {} + eventName: "issues", + payload: {}, }; // Set up global variables @@ -30,283 +30,299 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('compute_text.cjs', () => { +describe("compute_text.cjs", () => { let computeTextScript; let sanitizeContentFunction; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset context - mockContext.eventName = 'issues'; + mockContext.eventName = "issues"; mockContext.payload = {}; - + // Reset environment variables delete process.env.GITHUB_AW_ALLOWED_DOMAINS; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/compute_text.cjs'); - computeTextScript = fs.readFileSync(scriptPath, 'utf8'); - + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/compute_text.cjs" + ); + computeTextScript = fs.readFileSync(scriptPath, "utf8"); + // Extract sanitizeContent function for unit testing // We need to eval the script to get access to the function const scriptWithExport = computeTextScript.replace( - 'await main();', - 'global.testSanitizeContent = sanitizeContent; global.testMain = main;' + "await main();", + "global.testSanitizeContent = sanitizeContent; global.testMain = main;" ); eval(scriptWithExport); sanitizeContentFunction = global.testSanitizeContent; }); - describe('sanitizeContent function', () => { - it('should handle null and undefined inputs', () => { - expect(sanitizeContentFunction(null)).toBe(''); - expect(sanitizeContentFunction(undefined)).toBe(''); - expect(sanitizeContentFunction('')).toBe(''); + describe("sanitizeContent function", () => { + it("should handle null and 
undefined inputs", () => { + expect(sanitizeContentFunction(null)).toBe(""); + expect(sanitizeContentFunction(undefined)).toBe(""); + expect(sanitizeContentFunction("")).toBe(""); }); - it('should neutralize @mentions by wrapping in backticks', () => { - const input = 'Hello @user and @org/team'; + it("should neutralize @mentions by wrapping in backticks", () => { + const input = "Hello @user and @org/team"; const result = sanitizeContentFunction(input); - expect(result).toContain('`@user`'); - expect(result).toContain('`@org/team`'); + expect(result).toContain("`@user`"); + expect(result).toContain("`@org/team`"); }); - it('should neutralize bot trigger phrases', () => { - const input = 'This fixes #123 and closes #456'; + it("should neutralize bot trigger phrases", () => { + const input = "This fixes #123 and closes #456"; const result = sanitizeContentFunction(input); - expect(result).toContain('`fixes #123`'); - expect(result).toContain('`closes #456`'); + expect(result).toContain("`fixes #123`"); + expect(result).toContain("`closes #456`"); }); - it('should remove control characters', () => { - const input = 'Hello\x00\x01\x08world\x7F'; + it("should remove control characters", () => { + const input = "Hello\x00\x01\x08world\x7F"; const result = sanitizeContentFunction(input); - expect(result).toBe('Helloworld'); + expect(result).toBe("Helloworld"); }); - it('should escape XML characters', () => { + it("should escape XML characters", () => { const input = 'Test content & "quotes"'; const result = sanitizeContentFunction(input); - expect(result).toContain('<tag>'); - expect(result).toContain('&'); - expect(result).toContain('"quotes"'); + expect(result).toContain("<tag>"); + expect(result).toContain("&"); + expect(result).toContain(""quotes""); }); - it('should redact non-https protocols', () => { - const input = 'Visit http://example.com or ftp://files.com'; + it("should redact non-https protocols", () => { + const input = "Visit http://example.com or 
ftp://files.com"; const result = sanitizeContentFunction(input); - expect(result).toContain('(redacted)'); - expect(result).not.toContain('http://example.com'); + expect(result).toContain("(redacted)"); + expect(result).not.toContain("http://example.com"); }); - it('should allow github.com domains', () => { - const input = 'Visit https://github.com/user/repo'; + it("should allow github.com domains", () => { + const input = "Visit https://github.com/user/repo"; const result = sanitizeContentFunction(input); - expect(result).toContain('https://github.com/user/repo'); + expect(result).toContain("https://github.com/user/repo"); }); - it('should redact unknown domains', () => { - const input = 'Visit https://evil.com/malware'; + it("should redact unknown domains", () => { + const input = "Visit https://evil.com/malware"; const result = sanitizeContentFunction(input); - expect(result).toContain('(redacted)'); - expect(result).not.toContain('evil.com'); + expect(result).toContain("(redacted)"); + expect(result).not.toContain("evil.com"); }); - it('should truncate long content', () => { - const longContent = 'a'.repeat(600000); // Exceed 524288 limit + it("should truncate long content", () => { + const longContent = "a".repeat(600000); // Exceed 524288 limit const result = sanitizeContentFunction(longContent); expect(result.length).toBeLessThan(600000); - expect(result).toContain('[Content truncated due to length]'); + expect(result).toContain("[Content truncated due to length]"); }); - it('should truncate too many lines', () => { - const manyLines = Array(70000).fill('line').join('\n'); // Exceed 65000 limit + it("should truncate too many lines", () => { + const manyLines = Array(70000).fill("line").join("\n"); // Exceed 65000 limit const result = sanitizeContentFunction(manyLines); - expect(result.split('\n').length).toBeLessThan(70000); - expect(result).toContain('[Content truncated due to line count]'); + expect(result.split("\n").length).toBeLessThan(70000); + 
expect(result).toContain("[Content truncated due to line count]"); }); - it('should remove ANSI escape sequences', () => { - const input = 'Hello \u001b[31mred\u001b[0m world'; + it("should remove ANSI escape sequences", () => { + const input = "Hello \u001b[31mred\u001b[0m world"; const result = sanitizeContentFunction(input); // ANSI sequences should be removed, allowing for possible differences in regex matching expect(result).toMatch(/Hello.*red.*world/); expect(result).not.toMatch(/\u001b\[/); }); - it('should respect custom allowed domains', () => { - process.env.GITHUB_AW_ALLOWED_DOMAINS = 'example.com,trusted.org'; - const input = 'Visit https://example.com and https://trusted.org and https://evil.com'; + it("should respect custom allowed domains", () => { + process.env.GITHUB_AW_ALLOWED_DOMAINS = "example.com,trusted.org"; + const input = + "Visit https://example.com and https://trusted.org and https://evil.com"; const result = sanitizeContentFunction(input); - expect(result).toContain('https://example.com'); - expect(result).toContain('https://trusted.org'); - expect(result).toContain('(redacted)'); // for evil.com + expect(result).toContain("https://example.com"); + expect(result).toContain("https://trusted.org"); + expect(result).toContain("(redacted)"); // for evil.com }); }); - describe('main function', () => { + describe("main function", () => { let testMain; beforeEach(() => { // Set up default successful permission check mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'admin' } + data: { permission: "admin" }, }); - + // Get the main function from global scope testMain = global.testMain; }); - it('should extract text from issue payload', async () => { - mockContext.eventName = 'issues'; + it("should extract text from issue payload", async () => { + mockContext.eventName = "issues"; mockContext.payload = { issue: { - title: 'Test Issue', - body: 'Issue description' - } + title: "Test Issue", + body: 
"Issue description", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'Test Issue\n\nIssue description'); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "text", + "Test Issue\n\nIssue description" + ); }); - it('should extract text from pull request payload', async () => { - mockContext.eventName = 'pull_request'; + it("should extract text from pull request payload", async () => { + mockContext.eventName = "pull_request"; mockContext.payload = { pull_request: { - title: 'Test PR', - body: 'PR description' - } + title: "Test PR", + body: "PR description", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'Test PR\n\nPR description'); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "text", + "Test PR\n\nPR description" + ); }); - it('should extract text from issue comment payload', async () => { - mockContext.eventName = 'issue_comment'; + it("should extract text from issue comment payload", async () => { + mockContext.eventName = "issue_comment"; mockContext.payload = { comment: { - body: 'This is a comment' - } + body: "This is a comment", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'This is a comment'); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "text", + "This is a comment" + ); }); - it('should extract text from pull request target payload', async () => { - mockContext.eventName = 'pull_request_target'; + it("should extract text from pull request target payload", async () => { + mockContext.eventName = "pull_request_target"; mockContext.payload = { pull_request: { - title: 'Test PR Target', - body: 'PR target description' - } + title: "Test PR Target", + body: "PR target description", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'Test PR Target\n\nPR target description'); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "text", + "Test PR Target\n\nPR target description" + ); }); - it('should 
extract text from pull request review comment payload', async () => { - mockContext.eventName = 'pull_request_review_comment'; + it("should extract text from pull request review comment payload", async () => { + mockContext.eventName = "pull_request_review_comment"; mockContext.payload = { comment: { - body: 'Review comment' - } + body: "Review comment", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'Review comment'); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", "Review comment"); }); - it('should extract text from pull request review payload', async () => { - mockContext.eventName = 'pull_request_review'; + it("should extract text from pull request review payload", async () => { + mockContext.eventName = "pull_request_review"; mockContext.payload = { review: { - body: 'Review body' - } + body: "Review body", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 'Review body'); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", "Review body"); }); - it('should handle unknown event types', async () => { - mockContext.eventName = 'unknown_event'; + it("should handle unknown event types", async () => { + mockContext.eventName = "unknown_event"; mockContext.payload = {}; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', ''); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", ""); }); - it('should deny access for non-admin/maintain users', async () => { + it("should deny access for non-admin/maintain users", async () => { mockGithub.rest.repos.getCollaboratorPermissionLevel.mockResolvedValue({ - data: { permission: 'read' } + data: { permission: "read" }, }); - mockContext.eventName = 'issues'; + mockContext.eventName = "issues"; mockContext.payload = { issue: { - title: 'Test Issue', - body: 'Issue description' - } + title: "Test Issue", + body: "Issue description", + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', 
''); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", ""); }); - it('should sanitize extracted text before output', async () => { - mockContext.eventName = 'issues'; + it("should sanitize extracted text before output", async () => { + mockContext.eventName = "issues"; mockContext.payload = { issue: { - title: 'Test @user fixes #123', - body: 'Visit https://evil.com' - } + title: "Test @user fixes #123", + body: "Visit https://evil.com", + }, }; await testMain(); const outputCall = mockCore.setOutput.mock.calls[0]; - expect(outputCall[1]).toContain('`@user`'); - expect(outputCall[1]).toContain('`fixes #123`'); - expect(outputCall[1]).toContain('(redacted)'); + expect(outputCall[1]).toContain("`@user`"); + expect(outputCall[1]).toContain("`fixes #123`"); + expect(outputCall[1]).toContain("(redacted)"); }); - it('should handle missing title and body gracefully', async () => { - mockContext.eventName = 'issues'; + it("should handle missing title and body gracefully", async () => { + mockContext.eventName = "issues"; mockContext.payload = { - issue: {} // No title or body + issue: {}, // No title or body }; await testMain(); // Since empty strings get sanitized/trimmed, expect empty string - expect(mockCore.setOutput).toHaveBeenCalledWith('text', ''); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", ""); }); - it('should handle null values in payload', async () => { - mockContext.eventName = 'issue_comment'; + it("should handle null values in payload", async () => { + mockContext.eventName = "issue_comment"; mockContext.payload = { comment: { - body: null - } + body: null, + }, }; await testMain(); - expect(mockCore.setOutput).toHaveBeenCalledWith('text', ''); + expect(mockCore.setOutput).toHaveBeenCalledWith("text", ""); }); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/create_comment.cjs b/pkg/workflow/js/create_comment.cjs index 1bfcf5a79d..b2e153414b 100644 --- a/pkg/workflow/js/create_comment.cjs +++ 
b/pkg/workflow/js/create_comment.cjs @@ -2,35 +2,40 @@ async function main() { // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'add-issue-comment'); + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); if (commentItems.length === 0) { - console.log('No add-issue-comment items found in agent output'); + console.log("No add-issue-comment items found in agent output"); return; } @@ -41,12 +46,18 @@ async function main() { console.log(`Comment target configuration: ${commentTarget}`); // Check if we're in an issue or pull request context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; - const isPRContext = context.eventName === 'pull_request' || context.eventName === 'pull_request_review' || context.eventName === 'pull_request_review_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; // Validate context based on target configuration if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - console.log('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + console.log( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); return; } @@ -55,7 +66,10 @@ async function main() { // Process each comment item for (let i = 0; i < commentItems.length; i++) { const commentItem = commentItems[i]; - console.log(`Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, 
{ bodyLength: commentItem.body.length }); + console.log( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}:`, + { bodyLength: commentItem.body.length } + ); // Determine the issue/PR number and comment endpoint for this comment let issueNumber; @@ -66,45 +80,53 @@ async function main() { if (commentItem.issue_number) { issueNumber = parseInt(commentItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${commentItem.issue_number}`); + console.log( + `Invalid issue number specified: ${commentItem.issue_number}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Target is "*" but no issue_number specified in comment item'); + console.log( + 'Target is "*" but no issue_number specified in comment item' + ); continue; } } else if (commentTarget && commentTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(commentTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${commentTarget}`); + console.log( + `Invalid issue number in target configuration: ${commentTarget}` + ); continue; } - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { // Default behavior: use triggering issue/PR if (isIssueContext) { if (context.payload.issue) { issueNumber = context.payload.issue.number; - commentEndpoint = 'issues'; + commentEndpoint = "issues"; } else { - console.log('Issue context detected but no issue found in payload'); + console.log("Issue context detected but no issue found in payload"); continue; } } else if (isPRContext) { if (context.payload.pull_request) { issueNumber = context.payload.pull_request.number; - commentEndpoint = 'issues'; // PR comments use the issues API endpoint + commentEndpoint = "issues"; // PR comments use the issues API endpoint } else { - console.log('Pull request context detected but no pull request 
found in payload'); + console.log( + "Pull request context detected but no pull request found in payload" + ); continue; } } } if (!issueNumber) { - console.log('Could not determine issue or pull request number'); + console.log("Could not determine issue or pull request number"); continue; } @@ -112,13 +134,13 @@ async function main() { let body = commentItem.body.trim(); // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; + : `https://github.com/actions/runs/${runId}`; body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; console.log(`Creating comment on ${commentEndpoint} #${issueNumber}`); - console.log('Comment content length:', body.length); + console.log("Comment content length:", body.length); try { // Create the comment using GitHub API @@ -126,26 +148,29 @@ async function main() { owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - body: body + body: body, }); - console.log('Created comment #' + comment.id + ': ' + comment.html_url); + console.log("Created comment #" + comment.id + ": " + comment.html_url); createdComments.push(comment); // Set output for the last created comment (for backward compatibility) if (i === commentItems.length - 1) { - core.setOutput('comment_id', comment.id); - core.setOutput('comment_url', comment.html_url); + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); } } catch (error) { - console.error(`✗ Failed to create comment:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create comment:`, + error instanceof Error ? 
error.message : String(error) + ); throw error; } } // Write summary for all created comments if (createdComments.length > 0) { - let summaryContent = '\n\n## GitHub Comments\n'; + let summaryContent = "\n\n## GitHub Comments\n"; for (const comment of createdComments) { summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; } @@ -154,6 +179,5 @@ async function main() { console.log(`Successfully created ${createdComments.length} comment(s)`); return createdComments; - } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/create_comment.test.cjs b/pkg/workflow/js/create_comment.test.cjs index 0d9c3552cf..3bb24a0661 100644 --- a/pkg/workflow/js/create_comment.test.cjs +++ b/pkg/workflow/js/create_comment.test.cjs @@ -1,39 +1,39 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; const mockGithub = { rest: { issues: { - createComment: vi.fn() - } - } + createComment: vi.fn(), + }, + }, }; const mockContext = { - eventName: 'issues', + eventName: "issues", runId: 12345, repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { issue: { - number: 123 + number: 123, }, repository: { - html_url: 'https://github.com/testowner/testrepo' - } - } + html_url: "https://github.com/testowner/testrepo", + }, + }, }; // Set up global variables @@ -41,174 +41,203 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('create_comment.cjs', () => { +describe("create_comment.cjs", () => { let createCommentScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset 
environment variables delete process.env.GITHUB_AW_AGENT_OUTPUT; - + // Reset context to default state - global.context.eventName = 'issues'; + global.context.eventName = "issues"; global.context.payload.issue = { number: 123 }; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/create_comment.cjs'); - createCommentScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/create_comment.cjs" + ); + createCommentScript = fs.readFileSync(scriptPath, "utf8"); }); - it('should skip when no agent output is provided', async () => { + it("should skip when no agent output is provided", async () => { // Remove the output content environment variable delete process.env.GITHUB_AW_AGENT_OUTPUT; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); expect(mockGithub.rest.issues.createComment).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip when agent output is empty', async () => { - process.env.GITHUB_AW_AGENT_OUTPUT = ' '; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should skip when agent output is empty", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Agent output content is empty'); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); 
expect(mockGithub.rest.issues.createComment).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip when not in issue or PR context', async () => { + it("should skip when not in issue or PR context", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-comment', - body: 'Test comment content' - }] + items: [ + { + type: "add-issue-comment", + body: "Test comment content", + }, + ], }); - global.context.eventName = 'push'; // Not an issue or PR event - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + global.context.eventName = "push"; // Not an issue or PR event + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Target is "triggering" but not running in issue or pull request context, skipping comment creation'); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); expect(mockGithub.rest.issues.createComment).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should create comment on issue successfully', async () => { + it("should create comment on issue successfully", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-comment', - body: 'Test comment content' - }] + items: [ + { + type: "add-issue-comment", + body: "Test comment content", + }, + ], }); - global.context.eventName = 'issues'; - + global.context.eventName = "issues"; + const mockComment = { id: 456, - html_url: 'https://github.com/testowner/testrepo/issues/123#issuecomment-456' + html_url: + "https://github.com/testowner/testrepo/issues/123#issuecomment-456", }; - - mockGithub.rest.issues.createComment.mockResolvedValue({ data: mockComment }); - - const consoleSpy = vi.spyOn(console, 
'log').mockImplementation(() => {}); - + + mockGithub.rest.issues.createComment.mockResolvedValue({ + data: mockComment, + }); + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - + expect(mockGithub.rest.issues.createComment).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - body: expect.stringContaining('Test comment content') + body: expect.stringContaining("Test comment content"), }); - - expect(mockCore.setOutput).toHaveBeenCalledWith('comment_id', 456); - expect(mockCore.setOutput).toHaveBeenCalledWith('comment_url', mockComment.html_url); + + expect(mockCore.setOutput).toHaveBeenCalledWith("comment_id", 456); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "comment_url", + mockComment.html_url + ); expect(mockCore.summary.addRaw).toHaveBeenCalled(); expect(mockCore.summary.write).toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should create comment on pull request successfully', async () => { + it("should create comment on pull request successfully", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-comment', - body: 'Test PR comment content' - }] + items: [ + { + type: "add-issue-comment", + body: "Test PR comment content", + }, + ], }); - global.context.eventName = 'pull_request'; + global.context.eventName = "pull_request"; global.context.payload.pull_request = { number: 789 }; delete global.context.payload.issue; // Remove issue from payload - + const mockComment = { id: 789, - html_url: 'https://github.com/testowner/testrepo/issues/789#issuecomment-789' + html_url: + "https://github.com/testowner/testrepo/issues/789#issuecomment-789", }; - - mockGithub.rest.issues.createComment.mockResolvedValue({ data: mockComment }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + 
mockGithub.rest.issues.createComment.mockResolvedValue({ + data: mockComment, + }); + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - + expect(mockGithub.rest.issues.createComment).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 789, - body: expect.stringContaining('Test PR comment content') + body: expect.stringContaining("Test PR comment content"), }); - + consoleSpy.mockRestore(); }); - it('should include run information in comment body', async () => { + it("should include run information in comment body", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'add-issue-comment', - body: 'Test content' - }] + items: [ + { + type: "add-issue-comment", + body: "Test content", + }, + ], }); - global.context.eventName = 'issues'; + global.context.eventName = "issues"; global.context.payload.issue = { number: 123 }; // Make sure issue context is properly set - + const mockComment = { id: 456, - html_url: 'https://github.com/testowner/testrepo/issues/123#issuecomment-456' + html_url: + "https://github.com/testowner/testrepo/issues/123#issuecomment-456", }; - - mockGithub.rest.issues.createComment.mockResolvedValue({ data: mockComment }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + mockGithub.rest.issues.createComment.mockResolvedValue({ + data: mockComment, + }); + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createCommentScript} })()`); - + expect(mockGithub.rest.issues.createComment).toHaveBeenCalled(); expect(mockGithub.rest.issues.createComment.mock.calls).toHaveLength(1); - + const callArgs = mockGithub.rest.issues.createComment.mock.calls[0][0]; - expect(callArgs.body).toContain('Test content'); - 
expect(callArgs.body).toContain('Generated by Agentic Workflow Run'); - expect(callArgs.body).toContain('[12345]'); - expect(callArgs.body).toContain('https://github.com/testowner/testrepo/actions/runs/12345'); - + expect(callArgs.body).toContain("Test content"); + expect(callArgs.body).toContain("Generated by Agentic Workflow Run"); + expect(callArgs.body).toContain("[12345]"); + expect(callArgs.body).toContain( + "https://github.com/testowner/testrepo/actions/runs/12345" + ); + consoleSpy.mockRestore(); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/create_discussion.cjs b/pkg/workflow/js/create_discussion.cjs new file mode 100644 index 0000000000..fea11ea237 --- /dev/null +++ b/pkg/workflow/js/create_discussion.cjs @@ -0,0 +1,178 @@ +async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + + console.log("Agent output content length:", outputContent.length); + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + + // Find all create-discussion items + const createDiscussionItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-discussion" + ); + if (createDiscussionItems.length === 0) { + console.log("No create-discussion items found in agent output"); + return; + } + + console.log( + `Found ${createDiscussionItems.length} create-discussion item(s)` + ); + + // Get discussion categories using REST API + let discussionCategories = []; + try { + const { data: categories } = await github.request( + "GET /repos/{owner}/{repo}/discussions/categories", + { + owner: context.repo.owner, + repo: context.repo.repo, + } + ); + discussionCategories = categories || []; + console.log( + "Available categories:", + discussionCategories.map(cat => ({ name: cat.name, id: cat.id })) + ); + } catch (error) { + console.error( + "Failed to get discussion categories:", + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + + // Determine category ID + let categoryId = process.env.GITHUB_AW_DISCUSSION_CATEGORY_ID; + if (!categoryId && discussionCategories.length > 0) { + // Default to the first category if none specified + categoryId = discussionCategories[0].id; + console.log( + `No category-id specified, using default category: ${discussionCategories[0].name} (${categoryId})` + ); + } + if (!categoryId) { + console.error( + "No discussion category available and none specified in configuration" + ); + throw new Error("Discussion category is required but not available"); + } + + const createdDiscussions = []; + + // Process each create-discussion item + for (let i = 0; i < createDiscussionItems.length; i++) { + const createDiscussionItem = createDiscussionItems[i]; + console.log( + `Processing create-discussion item ${i + 1}/${createDiscussionItems.length}:`, + { + title: createDiscussionItem.title, + bodyLength: createDiscussionItem.body.length, + } + ); + + // Extract title and body from the JSON item + let title = createDiscussionItem.title + ? createDiscussionItem.title.trim() + : ""; + let bodyLines = createDiscussionItem.body.split("\n"); + + // If no title was found, use the body content as title (or a default) + if (!title) { + title = createDiscussionItem.body || "Agent Output"; + } + + // Apply title prefix if provided via environment variable + const titlePrefix = process.env.GITHUB_AW_DISCUSSION_TITLE_PREFIX; + if (titlePrefix && !title.startsWith(titlePrefix)) { + title = titlePrefix + title; + } + + // Add AI disclaimer with workflow run information + const runId = context.runId; + const runUrl = context.payload.repository + ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); + + // Prepare the body content + const body = bodyLines.join("\n").trim(); + + console.log("Creating discussion with title:", title); + console.log("Category ID:", categoryId); + console.log("Body length:", body.length); + + try { + // Create the discussion using GitHub REST API + const { data: discussion } = await github.request( + "POST /repos/{owner}/{repo}/discussions", + { + owner: context.repo.owner, + repo: context.repo.repo, + title: title, + body: body, + category_id: categoryId, + } + ); + + console.log( + "Created discussion #" + discussion.number + ": " + discussion.html_url + ); + createdDiscussions.push(discussion); + + // Set output for the last created discussion (for backward compatibility) + if (i === createDiscussionItems.length - 1) { + core.setOutput("discussion_number", discussion.number); + core.setOutput("discussion_url", discussion.html_url); + } + } catch (error) { + console.error( + `✗ Failed to create discussion "${title}":`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + } + + // Write summary for all created discussions + if (createdDiscussions.length > 0) { + let summaryContent = "\n\n## GitHub Discussions\n"; + for (const discussion of createdDiscussions) { + summaryContent += `- Discussion #${discussion.number}: [${discussion.title}](${discussion.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + console.log( + `Successfully created ${createdDiscussions.length} discussion(s)` + ); +} +await main(); diff --git a/pkg/workflow/js/create_discussion.test.cjs b/pkg/workflow/js/create_discussion.test.cjs new file mode 100644 index 0000000000..ba9262710c --- /dev/null +++ b/pkg/workflow/js/create_discussion.test.cjs @@ -0,0 +1,273 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; + +// Mock the global objects that GitHub Actions provides +const mockCore = { + setOutput: vi.fn(), + summary: { + addRaw: vi.fn().mockReturnThis(), + write: vi.fn(), + }, +}; + +const mockGithub = { + request: vi.fn(), +}; + +const mockContext = { + runId: 12345, + repo: { + owner: "testowner", + repo: "testrepo", + }, + payload: { + repository: { + html_url: "https://github.com/testowner/testrepo", + }, + }, +}; + +// Set up global variables +global.core = mockCore; +global.github = mockGithub; +global.context = mockContext; + +describe("create_discussion.cjs", () => { + let createDiscussionScript; + + beforeEach(() => { + // Reset all mocks + vi.clearAllMocks(); + + // Reset environment variables + delete process.env.GITHUB_AW_AGENT_OUTPUT; + delete process.env.GITHUB_AW_DISCUSSION_TITLE_PREFIX; + delete process.env.GITHUB_AW_DISCUSSION_CATEGORY_ID; + + // Read the script content + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/create_discussion.cjs" + ); + createDiscussionScript = fs.readFileSync(scriptPath, "utf8"); + }); + + it("should handle missing GITHUB_AW_AGENT_OUTPUT environment 
variable", async () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); + consoleSpy.mockRestore(); + }); + + it("should handle empty agent output", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; // Use spaces instead of empty string + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); + consoleSpy.mockRestore(); + }); + + it("should handle invalid JSON in agent output", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = "invalid json"; + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + // Check that it logs the content length first, then the error + expect(consoleSpy).toHaveBeenCalledWith("Agent output content length:", 12); + expect(consoleSpy).toHaveBeenCalledWith( + "Error parsing agent output JSON:", + expect.stringContaining("Unexpected token") + ); + consoleSpy.mockRestore(); + }); + + it("should handle missing create-discussion items", async () => { + const validOutput = { + items: [{ type: "create-issue", title: "Test Issue", body: "Test body" }], + }; + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(validOutput); + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + expect(consoleSpy).toHaveBeenCalledWith( + "No create-discussion items found in agent output" + ); + consoleSpy.mockRestore(); + }); + + it("should create discussions successfully with basic configuration", async () => { + // Mock the REST 
API responses + mockGithub.request + .mockResolvedValueOnce({ + // Discussion categories response + data: [{ id: "DIC_test456", name: "General", slug: "general" }], + }) + .mockResolvedValueOnce({ + // Create discussion response + data: { + id: "D_test789", + number: 1, + title: "Test Discussion", + html_url: "https://github.com/testowner/testrepo/discussions/1", + }, + }); + + const validOutput = { + items: [ + { + type: "create-discussion", + title: "Test Discussion", + body: "Test discussion body", + }, + ], + }; + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(validOutput); + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + // Verify REST API calls + expect(mockGithub.request).toHaveBeenCalledTimes(2); + + // Verify discussion categories request + expect(mockGithub.request).toHaveBeenNthCalledWith( + 1, + "GET /repos/{owner}/{repo}/discussions/categories", + { owner: "testowner", repo: "testrepo" } + ); + + // Verify create discussion request + expect(mockGithub.request).toHaveBeenNthCalledWith( + 2, + "POST /repos/{owner}/{repo}/discussions", + { + owner: "testowner", + repo: "testrepo", + category_id: "DIC_test456", + title: "Test Discussion", + body: expect.stringContaining("Test discussion body"), + } + ); + + // Verify outputs were set + expect(mockCore.setOutput).toHaveBeenCalledWith("discussion_number", 1); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "discussion_url", + "https://github.com/testowner/testrepo/discussions/1" + ); + + // Verify summary was written + expect(mockCore.summary.addRaw).toHaveBeenCalledWith( + expect.stringContaining("## GitHub Discussions") + ); + expect(mockCore.summary.write).toHaveBeenCalled(); + + consoleSpy.mockRestore(); + }); + + it("should apply title prefix when configured", async () => { + // Mock the REST API responses + mockGithub.request + .mockResolvedValueOnce({ + data: [{ id: 
"DIC_test456", name: "General", slug: "general" }], + }) + .mockResolvedValueOnce({ + data: { + id: "D_test789", + number: 1, + title: "[ai] Test Discussion", + html_url: "https://github.com/testowner/testrepo/discussions/1", + }, + }); + + const validOutput = { + items: [ + { + type: "create-discussion", + title: "Test Discussion", + body: "Test discussion body", + }, + ], + }; + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(validOutput); + process.env.GITHUB_AW_DISCUSSION_TITLE_PREFIX = "[ai] "; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + // Verify the title was prefixed + expect(mockGithub.request).toHaveBeenNthCalledWith( + 2, + "POST /repos/{owner}/{repo}/discussions", + expect.objectContaining({ + title: "[ai] Test Discussion", + }) + ); + + consoleSpy.mockRestore(); + }); + + it("should use specified category ID when configured", async () => { + // Mock the REST API responses + mockGithub.request + .mockResolvedValueOnce({ + data: [ + { id: "DIC_test456", name: "General", slug: "general" }, + { id: "DIC_custom789", name: "Custom", slug: "custom" }, + ], + }) + .mockResolvedValueOnce({ + data: { + id: "D_test789", + number: 1, + title: "Test Discussion", + html_url: "https://github.com/testowner/testrepo/discussions/1", + }, + }); + + const validOutput = { + items: [ + { + type: "create-discussion", + title: "Test Discussion", + body: "Test discussion body", + }, + ], + }; + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify(validOutput); + process.env.GITHUB_AW_DISCUSSION_CATEGORY_ID = "DIC_custom789"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + // Execute the script + await eval(`(async () => { ${createDiscussionScript} })()`); + + // Verify the specified category was used + expect(mockGithub.request).toHaveBeenNthCalledWith( + 2, + "POST /repos/{owner}/{repo}/discussions", + 
expect.objectContaining({ + category_id: "DIC_custom789", + }) + ); + + consoleSpy.mockRestore(); + }); +}); diff --git a/pkg/workflow/js/create_issue.cjs b/pkg/workflow/js/create_issue.cjs index bc64ee700e..ff240dea16 100644 --- a/pkg/workflow/js/create_issue.cjs +++ b/pkg/workflow/js/create_issue.cjs @@ -2,34 +2,39 @@ async function main() { // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - - console.log('Agent output content length:', outputContent.length); - + + console.log("Agent output content length:", outputContent.length); + // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all create-issue items - const createIssueItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'create-issue'); + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); if (createIssueItems.length === 0) { - console.log('No create-issue items found in agent output'); + console.log("No create-issue items found in agent output"); return; } @@ -37,17 +42,25 @@ async function main() { // Check if we're in an issue context (triggered by an issue event) const parentIssueNumber = context.payload?.issue?.number; - + // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; - + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; + const createdIssues = []; - + // Process each create-issue item for (let i = 0; i < createIssueItems.length; i++) { const createIssueItem = createIssueItems[i]; - console.log(`Processing create-issue item ${i + 1}/${createIssueItems.length}:`, { title: createIssueItem.title, bodyLength: createIssueItem.body.length }); + console.log( + `Processing create-issue item ${i + 1}/${createIssueItems.length}:`, + { title: createIssueItem.title, bodyLength: createIssueItem.body.length } + ); // Merge environment labels with item-specific labels let labels = [...envLabels]; @@ -56,12 +69,12 @@ async function main() { } // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ''; - let bodyLines = createIssueItem.body.split('\n'); + let title = createIssueItem.title ? createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); // If no title was found, use the body content as title (or a default) if (!title) { - title = createIssueItem.body || 'Agent Output'; + title = createIssueItem.body || "Agent Output"; } // Apply title prefix if provided via environment variable @@ -71,7 +84,7 @@ async function main() { } if (parentIssueNumber) { - console.log('Detected issue context, parent issue #' + parentIssueNumber); + console.log("Detected issue context, parent issue #" + parentIssueNumber); // Add reference to parent issue in the child issue body bodyLines.push(`Related to #${parentIssueNumber}`); @@ -80,17 +93,22 @@ async function main() { // Add AI disclaimer with run id, run htmlurl // Add AI disclaimer with workflow run information const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); + const body = bodyLines.join("\n").trim(); - console.log('Creating issue with title:', title); - console.log('Labels:', labels); - console.log('Body length:', body.length); + console.log("Creating issue with title:", title); + console.log("Labels:", labels); + console.log("Body length:", body.length); try { // Create the issue using GitHub API @@ -99,10 +117,10 @@ async function main() { repo: context.repo.repo, title: title, body: body, - labels: labels + labels: labels, }); - console.log('Created issue #' + issue.number + ': ' + issue.html_url); + console.log("Created issue #" + issue.number + ": " + issue.html_url); createdIssues.push(issue); // If we have a parent issue, add a comment to it referencing the new child issue @@ -112,28 +130,34 @@ async function main() { owner: context.repo.owner, repo: context.repo.repo, issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}` + body: `Created related issue: #${issue.number}`, }); - console.log('Added comment to parent issue #' + parentIssueNumber); + console.log("Added comment to parent issue #" + parentIssueNumber); } catch (error) { - console.log('Warning: Could not add comment to parent issue:', error instanceof Error ? error.message : String(error)); + console.log( + "Warning: Could not add comment to parent issue:", + error instanceof Error ? 
error.message : String(error) + ); } } // Set output for the last created issue (for backward compatibility) if (i === createIssueItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to create issue "${title}":`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to create issue "${title}":`, + error instanceof Error ? error.message : String(error) + ); throw error; } } // Write summary for all created issues if (createdIssues.length > 0) { - let summaryContent = '\n\n## GitHub Issues\n'; + let summaryContent = "\n\n## GitHub Issues\n"; for (const issue of createdIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } @@ -142,4 +166,4 @@ async function main() { console.log(`Successfully created ${createdIssues.length} issue(s)`); } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/create_issue.test.cjs b/pkg/workflow/js/create_issue.test.cjs index dff41ed00e..bbd6b35cba 100644 --- a/pkg/workflow/js/create_issue.test.cjs +++ b/pkg/workflow/js/create_issue.test.cjs @@ -1,36 +1,36 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; const mockGithub = { rest: { issues: { create: vi.fn(), - createComment: vi.fn() - } - } + createComment: vi.fn(), + }, + }, }; const mockContext = { runId: 12345, repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { 
repository: { - html_url: 'https://github.com/testowner/testrepo' - } - } + html_url: "https://github.com/testowner/testrepo", + }, + }, }; // Set up global variables @@ -38,293 +38,319 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('create_issue.cjs', () => { +describe("create_issue.cjs", () => { let createIssueScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset environment variables delete process.env.GITHUB_AW_AGENT_OUTPUT; delete process.env.GITHUB_AW_ISSUE_LABELS; delete process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; - + // Reset context delete global.context.payload.issue; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/create_issue.cjs'); - createIssueScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/create_issue.cjs" + ); + createIssueScript = fs.readFileSync(scriptPath, "utf8"); }); - it('should skip when no agent output is provided', async () => { + it("should skip when no agent output is provided", async () => { delete process.env.GITHUB_AW_AGENT_OUTPUT; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); expect(mockGithub.rest.issues.create).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip when agent output is empty', async () => { - process.env.GITHUB_AW_AGENT_OUTPUT = ' '; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should skip when agent output is empty", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; + + 
const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Agent output content is empty'); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); expect(mockGithub.rest.issues.create).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should create issue with default title when only body content provided', async () => { + it("should create issue with default title when only body content provided", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - body: 'This is the issue body content' - }] + items: [ + { + type: "create-issue", + body: "This is the issue body content", + }, + ], }); - + const mockIssue = { number: 456, - html_url: 'https://github.com/testowner/testrepo/issues/456' + html_url: "https://github.com/testowner/testrepo/issues/456", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + expect(mockGithub.rest.issues.create).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - title: 'This is the issue body content', - body: expect.stringContaining('Generated by Agentic Workflow Run'), - labels: [] + owner: "testowner", + repo: "testrepo", + title: "This is the issue body content", + body: expect.stringContaining("Generated by Agentic Workflow Run"), + labels: [], }); - - expect(mockCore.setOutput).toHaveBeenCalledWith('issue_number', 456); - expect(mockCore.setOutput).toHaveBeenCalledWith('issue_url', mockIssue.html_url); - + + expect(mockCore.setOutput).toHaveBeenCalledWith("issue_number", 456); + expect(mockCore.setOutput).toHaveBeenCalledWith( + 
"issue_url", + mockIssue.html_url + ); + consoleSpy.mockRestore(); }); - it('should extract title from markdown heading', async () => { + it("should extract title from markdown heading", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Bug Report', - body: 'This is a detailed bug description\n\nSteps to reproduce:\n1. Step one' - }] + items: [ + { + type: "create-issue", + title: "Bug Report", + body: "This is a detailed bug description\n\nSteps to reproduce:\n1. Step one", + }, + ], }); - + const mockIssue = { number: 789, - html_url: 'https://github.com/testowner/testrepo/issues/789' + html_url: "https://github.com/testowner/testrepo/issues/789", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(callArgs.title).toBe('Bug Report'); - expect(callArgs.body).toContain('This is a detailed bug description'); - expect(callArgs.body).toContain('Steps to reproduce:'); - + expect(callArgs.title).toBe("Bug Report"); + expect(callArgs.body).toContain("This is a detailed bug description"); + expect(callArgs.body).toContain("Steps to reproduce:"); + consoleSpy.mockRestore(); }); - it('should handle labels from environment variable', async () => { + it("should handle labels from environment variable", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Issue with labels', - body: 'Issue with labels' - }] + items: [ + { + type: "create-issue", + title: "Issue with labels", + body: "Issue with labels", + }, + ], }); - process.env.GITHUB_AW_ISSUE_LABELS = 'bug, enhancement, high-priority'; - + 
process.env.GITHUB_AW_ISSUE_LABELS = "bug, enhancement, high-priority"; + const mockIssue = { number: 101, - html_url: 'https://github.com/testowner/testrepo/issues/101' + html_url: "https://github.com/testowner/testrepo/issues/101", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(callArgs.labels).toEqual(['bug', 'enhancement', 'high-priority']); - + expect(callArgs.labels).toEqual(["bug", "enhancement", "high-priority"]); + consoleSpy.mockRestore(); }); - it('should apply title prefix when provided', async () => { + it("should apply title prefix when provided", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Simple issue title', - body: 'Simple issue title' - }] + items: [ + { + type: "create-issue", + title: "Simple issue title", + body: "Simple issue title", + }, + ], }); - process.env.GITHUB_AW_ISSUE_TITLE_PREFIX = '[AUTO] '; - + process.env.GITHUB_AW_ISSUE_TITLE_PREFIX = "[AUTO] "; + const mockIssue = { number: 202, - html_url: 'https://github.com/testowner/testrepo/issues/202' + html_url: "https://github.com/testowner/testrepo/issues/202", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(callArgs.title).toBe('[AUTO] Simple issue title'); - + expect(callArgs.title).toBe("[AUTO] Simple issue title"); + 
consoleSpy.mockRestore(); }); - it('should not duplicate title prefix when already present', async () => { + it("should not duplicate title prefix when already present", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: '[AUTO] Issue title already prefixed', - body: 'Issue body content' - }] + items: [ + { + type: "create-issue", + title: "[AUTO] Issue title already prefixed", + body: "Issue body content", + }, + ], }); - process.env.GITHUB_AW_ISSUE_TITLE_PREFIX = '[AUTO] '; - + process.env.GITHUB_AW_ISSUE_TITLE_PREFIX = "[AUTO] "; + const mockIssue = { number: 203, - html_url: 'https://github.com/testowner/testrepo/issues/203' + html_url: "https://github.com/testowner/testrepo/issues/203", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(callArgs.title).toBe('[AUTO] Issue title already prefixed'); // Should not be duplicated - + expect(callArgs.title).toBe("[AUTO] Issue title already prefixed"); // Should not be duplicated + consoleSpy.mockRestore(); }); - it('should handle parent issue context and create comment', async () => { + it("should handle parent issue context and create comment", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Child issue content', - body: 'Child issue content' - }] + items: [ + { + type: "create-issue", + title: "Child issue content", + body: "Child issue content", + }, + ], }); global.context.payload.issue = { number: 555 }; - + const mockIssue = { number: 666, - html_url: 'https://github.com/testowner/testrepo/issues/666' + html_url: 
"https://github.com/testowner/testrepo/issues/666", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); mockGithub.rest.issues.createComment.mockResolvedValue({}); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + // Should create the child issue with reference to parent const createArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(createArgs.body).toContain('Related to #555'); - + expect(createArgs.body).toContain("Related to #555"); + // Should create comment on parent issue expect(mockGithub.rest.issues.createComment).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 555, - body: 'Created related issue: #666' + body: "Created related issue: #666", }); - + consoleSpy.mockRestore(); }); - it('should handle empty labels gracefully', async () => { + it("should handle empty labels gracefully", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Issue without labels', - body: 'Issue without labels' - }] + items: [ + { + type: "create-issue", + title: "Issue without labels", + body: "Issue without labels", + }, + ], }); - process.env.GITHUB_AW_ISSUE_LABELS = ' , , '; - + process.env.GITHUB_AW_ISSUE_LABELS = " , , "; + const mockIssue = { number: 303, - html_url: 'https://github.com/testowner/testrepo/issues/303' + html_url: "https://github.com/testowner/testrepo/issues/303", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = 
mockGithub.rest.issues.create.mock.calls[0][0]; expect(callArgs.labels).toEqual([]); - + consoleSpy.mockRestore(); }); - it('should include run information in issue body', async () => { + it("should include run information in issue body", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-issue', - title: 'Test issue content', - body: 'Test issue content' - }] + items: [ + { + type: "create-issue", + title: "Test issue content", + body: "Test issue content", + }, + ], }); - + const mockIssue = { number: 404, - html_url: 'https://github.com/testowner/testrepo/issues/404' + html_url: "https://github.com/testowner/testrepo/issues/404", }; - + mockGithub.rest.issues.create.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${createIssueScript} })()`); - + const callArgs = mockGithub.rest.issues.create.mock.calls[0][0]; - expect(callArgs.body).toContain('Generated by Agentic Workflow Run'); - expect(callArgs.body).toContain('[12345]'); - expect(callArgs.body).toContain('https://github.com/testowner/testrepo/actions/runs/12345'); - + expect(callArgs.body).toContain("Generated by Agentic Workflow Run"); + expect(callArgs.body).toContain("[12345]"); + expect(callArgs.body).toContain( + "https://github.com/testowner/testrepo/actions/runs/12345" + ); + consoleSpy.mockRestore(); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/create_pr_review_comment.cjs b/pkg/workflow/js/create_pr_review_comment.cjs new file mode 100644 index 0000000000..a5352348c5 --- /dev/null +++ b/pkg/workflow/js/create_pr_review_comment.cjs @@ -0,0 +1,210 @@ +async function main() { + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + 
console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); + return; + } + + console.log("Agent output content length:", outputContent.length); + + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); + return; + } + + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + console.log("No valid items found in agent output"); + return; + } + + // Find all create-pull-request-review-comment items + const reviewCommentItems = validatedOutput.items.filter( + /** @param {any} item */ item => + item.type === "create-pull-request-review-comment" + ); + if (reviewCommentItems.length === 0) { + console.log( + "No create-pull-request-review-comment items found in agent output" + ); + return; + } + + console.log( + `Found ${reviewCommentItems.length} create-pull-request-review-comment item(s)` + ); + + // Get the side configuration from environment variable + const defaultSide = process.env.GITHUB_AW_PR_REVIEW_COMMENT_SIDE || "RIGHT"; + console.log(`Default comment side configuration: ${defaultSide}`); + + // Check if we're in a pull request context + const isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; + + if (!isPRContext) { + console.log( + "Not running in pull request context, skipping review comment creation" + ); + return; + } + + if (!context.payload.pull_request) { + console.log( + "Pull request context detected but no pull request found in payload" + ); + return; + } + + const pullRequestNumber = context.payload.pull_request.number; + console.log(`Creating review comments on PR #${pullRequestNumber}`); + + const createdComments = []; + + 
// Process each review comment item + for (let i = 0; i < reviewCommentItems.length; i++) { + const commentItem = reviewCommentItems[i]; + console.log( + `Processing create-pull-request-review-comment item ${i + 1}/${reviewCommentItems.length}:`, + { + bodyLength: commentItem.body ? commentItem.body.length : "undefined", + path: commentItem.path, + line: commentItem.line, + startLine: commentItem.start_line, + } + ); + + // Validate required fields + if (!commentItem.path) { + console.log('Missing required field "path" in review comment item'); + continue; + } + + if ( + !commentItem.line || + (typeof commentItem.line !== "number" && + typeof commentItem.line !== "string") + ) { + console.log( + 'Missing or invalid required field "line" in review comment item' + ); + continue; + } + + if (!commentItem.body || typeof commentItem.body !== "string") { + console.log( + 'Missing or invalid required field "body" in review comment item' + ); + continue; + } + + // Parse line numbers + const line = parseInt(commentItem.line, 10); + if (isNaN(line) || line <= 0) { + console.log(`Invalid line number: ${commentItem.line}`); + continue; + } + + let startLine = undefined; + if (commentItem.start_line) { + startLine = parseInt(commentItem.start_line, 10); + if (isNaN(startLine) || startLine <= 0 || startLine > line) { + console.log( + `Invalid start_line number: ${commentItem.start_line} (must be <= line: ${line})` + ); + continue; + } + } + + // Determine side (LEFT or RIGHT) + const side = commentItem.side || defaultSide; + if (side !== "LEFT" && side !== "RIGHT") { + console.log(`Invalid side value: ${side} (must be LEFT or RIGHT)`); + continue; + } + + // Extract body from the JSON item + let body = commentItem.body.trim(); + + // Add AI disclaimer with run id, run htmlurl + const runId = context.runId; + const runUrl = context.payload.repository + ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + body += `\n\n> Generated by Agentic Workflow Run [${runId}](${runUrl})\n`; + + console.log( + `Creating review comment on PR #${pullRequestNumber} at ${commentItem.path}:${line}${startLine ? ` (lines ${startLine}-${line})` : ""} [${side}]` + ); + console.log("Comment content length:", body.length); + + try { + // Prepare the request parameters + const requestParams = { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pullRequestNumber, + body: body, + path: commentItem.path, + line: line, + side: side, + }; + + // Add start_line for multi-line comments + if (startLine !== undefined) { + requestParams.start_line = startLine; + requestParams.start_side = side; // start_side should match side for consistency + } + + // Create the review comment using GitHub API + const { data: comment } = + await github.rest.pulls.createReviewComment(requestParams); + + console.log( + "Created review comment #" + comment.id + ": " + comment.html_url + ); + createdComments.push(comment); + + // Set output for the last created comment (for backward compatibility) + if (i === reviewCommentItems.length - 1) { + core.setOutput("review_comment_id", comment.id); + core.setOutput("review_comment_url", comment.html_url); + } + } catch (error) { + console.error( + `✗ Failed to create review comment:`, + error instanceof Error ? 
error.message : String(error) + ); + throw error; + } + } + + // Write summary for all created comments + if (createdComments.length > 0) { + let summaryContent = "\n\n## GitHub PR Review Comments\n"; + for (const comment of createdComments) { + summaryContent += `- Review Comment #${comment.id}: [View Comment](${comment.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + + console.log( + `Successfully created ${createdComments.length} review comment(s)` + ); + return createdComments; +} +await main(); diff --git a/pkg/workflow/js/create_pr_review_comment.test.cjs b/pkg/workflow/js/create_pr_review_comment.test.cjs new file mode 100644 index 0000000000..501aabf744 --- /dev/null +++ b/pkg/workflow/js/create_pr_review_comment.test.cjs @@ -0,0 +1,376 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; + +// Mock the global objects that GitHub Actions provides +const mockCore = { + setOutput: vi.fn(), + summary: { + addRaw: vi.fn().mockReturnThis(), + write: vi.fn(), + }, +}; + +const mockGithub = { + rest: { + pulls: { + createReviewComment: vi.fn(), + }, + }, +}; + +const mockContext = { + eventName: "pull_request", + runId: 12345, + repo: { + owner: "testowner", + repo: "testrepo", + }, + payload: { + pull_request: { + number: 123, + }, + repository: { + html_url: "https://github.com/testowner/testrepo", + }, + }, +}; + +// Set up global variables +global.core = mockCore; +global.github = mockGithub; +global.context = mockContext; + +describe("create_pr_review_comment.cjs", () => { + let createPRReviewCommentScript; + + beforeEach(() => { + // Reset all mocks + vi.clearAllMocks(); + + // Read the script file + const scriptPath = path.join(__dirname, "create_pr_review_comment.cjs"); + createPRReviewCommentScript = fs.readFileSync(scriptPath, "utf8"); + + // Reset environment variables + delete process.env.GITHUB_AW_AGENT_OUTPUT; + delete 
process.env.GITHUB_AW_PR_REVIEW_COMMENT_SIDE; + + // Reset global context to default PR context + global.context = mockContext; + }); + + it("should create a single PR review comment with basic configuration", async () => { + // Mock the API response + mockGithub.rest.pulls.createReviewComment.mockResolvedValue({ + data: { + id: 456, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r456", + }, + }); + + // Set up environment + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + body: "Consider using const instead of let here.", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify the API was called correctly + expect(mockGithub.rest.pulls.createReviewComment).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + pull_number: 123, + body: expect.stringContaining( + "Consider using const instead of let here." 
+ ), + path: "src/main.js", + line: 10, + side: "RIGHT", + }); + + // Verify outputs were set + expect(mockCore.setOutput).toHaveBeenCalledWith("review_comment_id", 456); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "review_comment_url", + "https://github.com/testowner/testrepo/pull/123#discussion_r456" + ); + }); + + it("should create a multi-line PR review comment", async () => { + // Mock the API response + mockGithub.rest.pulls.createReviewComment.mockResolvedValue({ + data: { + id: 789, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r789", + }, + }); + + // Set up environment with multi-line comment + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/utils.js", + line: 25, + start_line: 20, + side: "LEFT", + body: "This entire function could be simplified using modern JS features.", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify the API was called with multi-line parameters + expect(mockGithub.rest.pulls.createReviewComment).toHaveBeenCalledWith({ + owner: "testowner", + repo: "testrepo", + pull_number: 123, + body: expect.stringContaining( + "This entire function could be simplified using modern JS features." 
+ ), + path: "src/utils.js", + line: 25, + start_line: 20, + side: "LEFT", + start_side: "LEFT", + }); + }); + + it("should handle multiple review comments", async () => { + // Mock multiple API responses + mockGithub.rest.pulls.createReviewComment + .mockResolvedValueOnce({ + data: { + id: 111, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r111", + }, + }) + .mockResolvedValueOnce({ + data: { + id: 222, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r222", + }, + }); + + // Set up environment with multiple comments + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + body: "First comment", + }, + { + type: "create-pull-request-review-comment", + path: "src/utils.js", + line: 25, + body: "Second comment", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify both API calls were made + expect(mockGithub.rest.pulls.createReviewComment).toHaveBeenCalledTimes(2); + + // Verify outputs were set for the last comment + expect(mockCore.setOutput).toHaveBeenCalledWith("review_comment_id", 222); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "review_comment_url", + "https://github.com/testowner/testrepo/pull/123#discussion_r222" + ); + }); + + it("should use configured side from environment variable", async () => { + // Mock the API response + mockGithub.rest.pulls.createReviewComment.mockResolvedValue({ + data: { + id: 333, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r333", + }, + }); + + // Set up environment with custom side + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + body: "Comment on left side", + }, + ], + }); + process.env.GITHUB_AW_PR_REVIEW_COMMENT_SIDE = "LEFT"; + + // Execute the script + await 
eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify the configured side was used + expect(mockGithub.rest.pulls.createReviewComment).toHaveBeenCalledWith( + expect.objectContaining({ + side: "LEFT", + }) + ); + }); + + it("should skip when not in pull request context", async () => { + // Change context to non-PR event + global.context = { + ...mockContext, + eventName: "issues", + payload: { + issue: { number: 123 }, + repository: mockContext.payload.repository, + }, + }; + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + body: "This should not be created", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify no API calls were made + expect(mockGithub.rest.pulls.createReviewComment).not.toHaveBeenCalled(); + expect(mockCore.setOutput).not.toHaveBeenCalled(); + }); + + it("should validate required fields and skip invalid items", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + // Missing path + line: 10, + body: "Missing path", + }, + { + type: "create-pull-request-review-comment", + path: "src/main.js", + // Missing line + body: "Missing line", + }, + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + // Missing body + }, + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: "invalid", + body: "Invalid line number", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify no API calls were made due to validation failures + expect(mockGithub.rest.pulls.createReviewComment).not.toHaveBeenCalled(); + expect(mockCore.setOutput).not.toHaveBeenCalled(); + }); + + it("should validate start_line is not greater than line", async () => { + 
process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + start_line: 15, // Invalid: start_line > line + body: "Invalid range", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify no API calls were made due to validation failure + expect(mockGithub.rest.pulls.createReviewComment).not.toHaveBeenCalled(); + }); + + it("should validate side values", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + side: "INVALID_SIDE", + body: "Invalid side value", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify no API calls were made due to validation failure + expect(mockGithub.rest.pulls.createReviewComment).not.toHaveBeenCalled(); + }); + + it("should include AI disclaimer in comment body", async () => { + mockGithub.rest.pulls.createReviewComment.mockResolvedValue({ + data: { + id: 999, + html_url: + "https://github.com/testowner/testrepo/pull/123#discussion_r999", + }, + }); + + process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ + items: [ + { + type: "create-pull-request-review-comment", + path: "src/main.js", + line: 10, + body: "Original comment", + }, + ], + }); + + // Execute the script + await eval(`(async () => { ${createPRReviewCommentScript} })()`); + + // Verify the body includes the AI disclaimer + expect(mockGithub.rest.pulls.createReviewComment).toHaveBeenCalledWith( + expect.objectContaining({ + body: expect.stringMatching( + /Original comment[\s\S]*Generated by Agentic Workflow Run/ + ), + }) + ); + }); +}); diff --git a/pkg/workflow/js/create_pull_request.cjs b/pkg/workflow/js/create_pull_request.cjs index 52ab8cac31..1ac4626af4 100644 --- a/pkg/workflow/js/create_pull_request.cjs +++ 
b/pkg/workflow/js/create_pull_request.cjs @@ -5,67 +5,84 @@ const crypto = require("crypto"); const { execSync } = require("child_process"); async function main() { - // Environment validation - fail early if required variables are missing const workflowId = process.env.GITHUB_AW_WORKFLOW_ID; if (!workflowId) { - throw new Error('GITHUB_AW_WORKFLOW_ID environment variable is required'); + throw new Error("GITHUB_AW_WORKFLOW_ID environment variable is required"); } const baseBranch = process.env.GITHUB_AW_BASE_BRANCH; if (!baseBranch) { - throw new Error('GITHUB_AW_BASE_BRANCH environment variable is required'); + throw new Error("GITHUB_AW_BASE_BRANCH environment variable is required"); } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); } // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - throw new Error('No patch file found - cannot create pull request without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + throw new Error( + "No patch file found - cannot create pull request without changes" + ); } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || patchContent.includes('Failed to generate patch')) { - throw new Error('Patch file is empty or contains error message - cannot create pull request without changes'); + const patchContent = fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + throw new Error( + "Patch file is empty or contains error message - cannot create pull request without changes" + ); } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); + console.log("Agent output content length:", 
outputContent.length); + console.log("Patch content validation passed"); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the create-pull-request item - const pullRequestItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'create-pull-request'); + const pullRequestItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "create-pull-request" + ); if (!pullRequestItem) { - console.log('No create-pull-request item found in agent output'); + console.log("No create-pull-request item found in agent output"); return; } - console.log('Found create-pull-request item:', { title: pullRequestItem.title, bodyLength: pullRequestItem.body.length }); + console.log("Found create-pull-request item:", { + title: pullRequestItem.title, + bodyLength: pullRequestItem.body.length, + }); // Extract title, body, and branch from the JSON item let title = pullRequestItem.title.trim(); - let bodyLines = pullRequestItem.body.split('\n'); - let branchName = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + let bodyLines = pullRequestItem.body.split("\n"); + let branchName = pullRequestItem.branch + ? 
pullRequestItem.branch.trim() + : null; // If no title was found, use a default if (!title) { - title = 'Agent Output'; + title = "Agent Output"; } // Apply title prefix if provided via environment variable @@ -76,71 +93,92 @@ async function main() { // Add AI disclaimer with run id, run htmlurl const runId = context.runId; - const runUrl = context.payload.repository + const runUrl = context.payload.repository ? `${context.payload.repository.html_url}/actions/runs/${runId}` : `https://github.com/actions/runs/${runId}`; - bodyLines.push(``, ``, `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, ''); + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow Run [${runId}](${runUrl})`, + "" + ); // Prepare the body content - const body = bodyLines.join('\n').trim(); + const body = bodyLines.join("\n").trim(); // Parse labels from environment variable (comma-separated string) const labelsEnv = process.env.GITHUB_AW_PR_LABELS; - const labels = labelsEnv ? labelsEnv.split(',').map(/** @param {string} label */ label => label.trim()).filter(/** @param {string} label */ label => label) : []; + const labels = labelsEnv + ? labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; // Parse draft setting from environment variable (defaults to true) const draftEnv = process.env.GITHUB_AW_PR_DRAFT; - const draft = draftEnv ? draftEnv.toLowerCase() === 'true' : true; + const draft = draftEnv ? 
draftEnv.toLowerCase() === "true" : true; - console.log('Creating pull request with title:', title); - console.log('Labels:', labels); - console.log('Draft:', draft); - console.log('Body length:', body.length); + console.log("Creating pull request with title:", title); + console.log("Labels:", labels); + console.log("Draft:", draft); + console.log("Body length:", body.length); // Use branch name from JSONL if provided, otherwise generate unique branch name if (!branchName) { - console.log('No branch name provided in JSONL, generating unique branch name'); + console.log( + "No branch name provided in JSONL, generating unique branch name" + ); // Generate unique branch name using cryptographic random hex - const randomHex = crypto.randomBytes(8).toString('hex'); + const randomHex = crypto.randomBytes(8).toString("hex"); branchName = `${workflowId}/${randomHex}`; } else { - console.log('Using branch name from JSONL:', branchName); + console.log("Using branch name from JSONL:", branchName); } - console.log('Generated branch name:', branchName); - console.log('Base branch:', baseBranch); + console.log("Generated branch name:", branchName); + console.log("Base branch:", baseBranch); // Create a new branch using git CLI // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Handle branch creation/checkout - const branchFromJsonl = pullRequestItem.branch ? pullRequestItem.branch.trim() : null; + const branchFromJsonl = pullRequestItem.branch + ? 
pullRequestItem.branch.trim() + : null; if (branchFromJsonl) { - console.log('Checking if branch from JSONL exists:', branchFromJsonl); - - console.log('Branch does not exist locally, creating new branch:', branchFromJsonl); - execSync(`git checkout -b ${branchFromJsonl}`, { stdio: 'inherit' }); - console.log('Using existing/created branch:', branchFromJsonl); + console.log("Checking if branch from JSONL exists:", branchFromJsonl); + + console.log( + "Branch does not exist locally, creating new branch:", + branchFromJsonl + ); + execSync(`git checkout -b ${branchFromJsonl}`, { stdio: "inherit" }); + console.log("Using existing/created branch:", branchFromJsonl); } else { // Create and checkout new branch with generated name - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); - console.log('Created and checked out new branch:', branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); + console.log("Created and checked out new branch:", branchName); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); // Apply the patch using git apply - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); - console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); - execSync(`git commit -m "Add agent output: ${title}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed'); + execSync("git add .", { stdio: "inherit" }); + execSync(`git commit -m "Add agent output: ${title}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed"); // Create the pull request const { data: pullRequest } = await github.rest.pulls.create({ @@ -150,10 +188,12 @@ async function 
main() { body: body, head: branchName, base: baseBranch, - draft: draft + draft: draft, }); - console.log('Created pull request #' + pullRequest.number + ': ' + pullRequest.html_url); + console.log( + "Created pull request #" + pullRequest.number + ": " + pullRequest.html_url + ); // Add labels if specified if (labels.length > 0) { @@ -161,24 +201,27 @@ async function main() { owner: context.repo.owner, repo: context.repo.repo, issue_number: pullRequest.number, - labels: labels + labels: labels, }); - console.log('Added labels to pull request:', labels); + console.log("Added labels to pull request:", labels); } // Set output for other jobs to use - core.setOutput('pull_request_number', pullRequest.number); - core.setOutput('pull_request_url', pullRequest.html_url); - core.setOutput('branch_name', branchName); + core.setOutput("pull_request_number", pullRequest.number); + core.setOutput("pull_request_url", pullRequest.html_url); + core.setOutput("branch_name", branchName); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Pull Request - **Pull Request**: [#${pullRequest.number}](${pullRequest.html_url}) - **Branch**: \`${branchName}\` - **Base Branch**: \`${baseBranch}\` -`).write(); +` + ) + .write(); } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/create_pull_request.test.cjs b/pkg/workflow/js/create_pull_request.test.cjs index 1d69af16ba..c8bf75a888 100644 --- a/pkg/workflow/js/create_pull_request.test.cjs +++ b/pkg/workflow/js/create_pull_request.test.cjs @@ -1,17 +1,19 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import { readFileSync } from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { readFileSync } from "fs"; +import path from "path"; // Create standalone test functions by extracting parts of the script -const createTestableFunction = (scriptContent) => { +const createTestableFunction = 
scriptContent => { // Extract just the main function content and wrap it properly - const mainFunctionMatch = scriptContent.match(/async function main\(\) \{([\s\S]*?)\}\s*await main\(\);?$/); + const mainFunctionMatch = scriptContent.match( + /async function main\(\) \{([\s\S]*?)\}\s*await main\(\);?\s*$/ + ); if (!mainFunctionMatch) { - throw new Error('Could not extract main function from script'); + throw new Error("Could not extract main function from script"); } - + const mainFunctionBody = mainFunctionMatch[1]; - + // Create a testable function that has the same logic but can be called with dependencies return new Function(` const { fs, crypto, execSync, github, core, context, process, console } = arguments[0]; @@ -22,288 +24,361 @@ const createTestableFunction = (scriptContent) => { `); }; -describe('create_pull_request.cjs', () => { +describe("create_pull_request.cjs", () => { let createMainFunction; let mockDependencies; beforeEach(() => { // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/create_pull_request.cjs'); - const scriptContent = readFileSync(scriptPath, 'utf8'); - + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/create_pull_request.cjs" + ); + const scriptContent = readFileSync(scriptPath, "utf8"); + // Create testable function createMainFunction = createTestableFunction(scriptContent); - + // Set up mock dependencies mockDependencies = { fs: { existsSync: vi.fn().mockReturnValue(true), - readFileSync: vi.fn().mockReturnValue('diff --git a/file.txt b/file.txt\n+new content') + readFileSync: vi + .fn() + .mockReturnValue("diff --git a/file.txt b/file.txt\n+new content"), }, crypto: { - randomBytes: vi.fn().mockReturnValue(Buffer.from('1234567890abcdef', 'hex')) + randomBytes: vi + .fn() + .mockReturnValue(Buffer.from("1234567890abcdef", "hex")), }, execSync: vi.fn(), github: { rest: { pulls: { - create: vi.fn() + create: vi.fn(), }, issues: { - addLabels: vi.fn() - } - } + addLabels: 
vi.fn(), + }, + }, }, core: { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }, context: { runId: 12345, repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { repository: { - html_url: 'https://github.com/testowner/testrepo' - } - } + html_url: "https://github.com/testowner/testrepo", + }, + }, }, process: { - env: {} + env: {}, }, console: { - log: vi.fn() - } + log: vi.fn(), + }, }; }); - it('should throw error when GITHUB_AW_WORKFLOW_ID is missing', async () => { + it("should throw error when GITHUB_AW_WORKFLOW_ID is missing", async () => { const mainFunction = createMainFunction(mockDependencies); - - await expect(mainFunction()).rejects.toThrow('GITHUB_AW_WORKFLOW_ID environment variable is required'); + + await expect(mainFunction()).rejects.toThrow( + "GITHUB_AW_WORKFLOW_ID environment variable is required" + ); }); - it('should throw error when GITHUB_AW_BASE_BRANCH is missing', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - + it("should throw error when GITHUB_AW_BASE_BRANCH is missing", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + const mainFunction = createMainFunction(mockDependencies); - - await expect(mainFunction()).rejects.toThrow('GITHUB_AW_BASE_BRANCH environment variable is required'); + + await expect(mainFunction()).rejects.toThrow( + "GITHUB_AW_BASE_BRANCH environment variable is required" + ); }); - it('should throw error when patch file does not exist', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should throw error when patch file does not exist", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; 
mockDependencies.fs.existsSync.mockReturnValue(false); - + const mainFunction = createMainFunction(mockDependencies); - - await expect(mainFunction()).rejects.toThrow('No patch file found - cannot create pull request without changes'); + + await expect(mainFunction()).rejects.toThrow( + "No patch file found - cannot create pull request without changes" + ); }); - it('should throw error when patch file is empty', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; - mockDependencies.fs.readFileSync.mockReturnValue(' '); - + it("should throw error when patch file is empty", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; + mockDependencies.fs.readFileSync.mockReturnValue(" "); + const mainFunction = createMainFunction(mockDependencies); - - await expect(mainFunction()).rejects.toThrow('Patch file is empty or contains error message - cannot create pull request without changes'); + + await expect(mainFunction()).rejects.toThrow( + "Patch file is empty or contains error message - cannot create pull request without changes" + ); }); - it('should create pull request successfully with valid input', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should create pull request successfully with valid input", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: 'New Feature', - body: 'This adds a new feature to the codebase.' 
- }] + items: [ + { + type: "create-pull-request", + title: "New Feature", + body: "This adds a new feature to the codebase.", + }, + ], }); - + const mockPullRequest = { number: 123, - html_url: 'https://github.com/testowner/testrepo/pull/123' + html_url: "https://github.com/testowner/testrepo/pull/123", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); - + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + // Verify git operations - expect(mockDependencies.execSync).toHaveBeenCalledWith('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git checkout -b test-workflow/1234567890abcdef', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git apply /tmp/aw.patch', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git add .', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git commit -m "Add agent output: New Feature"', { stdio: 'inherit' }); - expect(mockDependencies.execSync).toHaveBeenCalledWith('git push origin test-workflow/1234567890abcdef', { stdio: 'inherit' }); - + expect(mockDependencies.execSync).toHaveBeenCalledWith( + 'git config --global user.email "action@github.com"', + { stdio: "inherit" } + ); + expect(mockDependencies.execSync).toHaveBeenCalledWith( + 'git config --global user.name "GitHub Action"', + { stdio: "inherit" } + ); + expect(mockDependencies.execSync).toHaveBeenCalledWith( + "git checkout -b test-workflow/1234567890abcdef", + { stdio: "inherit" } + ); + expect(mockDependencies.execSync).toHaveBeenCalledWith( + "git apply /tmp/aw.patch", + { stdio: "inherit" } + 
); + expect(mockDependencies.execSync).toHaveBeenCalledWith("git add .", { + stdio: "inherit", + }); + expect(mockDependencies.execSync).toHaveBeenCalledWith( + 'git commit -m "Add agent output: New Feature"', + { stdio: "inherit" } + ); + expect(mockDependencies.execSync).toHaveBeenCalledWith( + "git push origin test-workflow/1234567890abcdef", + { stdio: "inherit" } + ); + // Verify PR creation expect(mockDependencies.github.rest.pulls.create).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', - title: 'New Feature', - body: expect.stringContaining('This adds a new feature to the codebase.'), - head: 'test-workflow/1234567890abcdef', - base: 'main', - draft: true // default value + owner: "testowner", + repo: "testrepo", + title: "New Feature", + body: expect.stringContaining("This adds a new feature to the codebase."), + head: "test-workflow/1234567890abcdef", + base: "main", + draft: true, // default value }); - - expect(mockDependencies.core.setOutput).toHaveBeenCalledWith('pull_request_number', 123); - expect(mockDependencies.core.setOutput).toHaveBeenCalledWith('pull_request_url', mockPullRequest.html_url); - expect(mockDependencies.core.setOutput).toHaveBeenCalledWith('branch_name', 'test-workflow/1234567890abcdef'); + + expect(mockDependencies.core.setOutput).toHaveBeenCalledWith( + "pull_request_number", + 123 + ); + expect(mockDependencies.core.setOutput).toHaveBeenCalledWith( + "pull_request_url", + mockPullRequest.html_url + ); + expect(mockDependencies.core.setOutput).toHaveBeenCalledWith( + "branch_name", + "test-workflow/1234567890abcdef" + ); }); - it('should handle labels correctly', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should handle labels correctly", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; 
mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: 'PR with labels', - body: 'PR with labels' - }] + items: [ + { + type: "create-pull-request", + title: "PR with labels", + body: "PR with labels", + }, + ], }); - mockDependencies.process.env.GITHUB_AW_PR_LABELS = 'enhancement, automated, needs-review'; - + mockDependencies.process.env.GITHUB_AW_PR_LABELS = + "enhancement, automated, needs-review"; + const mockPullRequest = { number: 456, - html_url: 'https://github.com/testowner/testrepo/pull/456' + html_url: "https://github.com/testowner/testrepo/pull/456", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); mockDependencies.github.rest.issues.addLabels.mockResolvedValue({}); - + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + // Verify labels were added expect(mockDependencies.github.rest.issues.addLabels).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 456, - labels: ['enhancement', 'automated', 'needs-review'] + labels: ["enhancement", "automated", "needs-review"], }); }); - it('should respect draft setting from environment', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should respect draft setting from environment", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: 'Non-draft PR', - body: 'Non-draft PR' - }] + items: [ + { + type: "create-pull-request", + title: "Non-draft PR", + body: "Non-draft PR", + }, + ], 
}); - mockDependencies.process.env.GITHUB_AW_PR_DRAFT = 'false'; - + mockDependencies.process.env.GITHUB_AW_PR_DRAFT = "false"; + const mockPullRequest = { number: 789, - html_url: 'https://github.com/testowner/testrepo/pull/789' + html_url: "https://github.com/testowner/testrepo/pull/789", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); - + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + const callArgs = mockDependencies.github.rest.pulls.create.mock.calls[0][0]; expect(callArgs.draft).toBe(false); }); - it('should include run information in PR body', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should include run information in PR body", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: 'Test PR Title', - body: 'Test PR content with detailed body information.' 
- }] + items: [ + { + type: "create-pull-request", + title: "Test PR Title", + body: "Test PR content with detailed body information.", + }, + ], }); - + const mockPullRequest = { number: 202, - html_url: 'https://github.com/testowner/testrepo/pull/202' + html_url: "https://github.com/testowner/testrepo/pull/202", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); - + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + const callArgs = mockDependencies.github.rest.pulls.create.mock.calls[0][0]; - expect(callArgs.title).toBe('Test PR Title'); - expect(callArgs.body).toContain('Test PR content with detailed body information.'); - expect(callArgs.body).toContain('Generated by Agentic Workflow Run'); - expect(callArgs.body).toContain('[12345]'); - expect(callArgs.body).toContain('https://github.com/testowner/testrepo/actions/runs/12345'); + expect(callArgs.title).toBe("Test PR Title"); + expect(callArgs.body).toContain( + "Test PR content with detailed body information." 
+ ); + expect(callArgs.body).toContain("Generated by Agentic Workflow Run"); + expect(callArgs.body).toContain("[12345]"); + expect(callArgs.body).toContain( + "https://github.com/testowner/testrepo/actions/runs/12345" + ); }); - it('should apply title prefix when provided', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should apply title prefix when provided", async () => { + mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: 'Simple PR title', - body: 'Simple PR body content' - }] + items: [ + { + type: "create-pull-request", + title: "Simple PR title", + body: "Simple PR body content", + }, + ], }); - mockDependencies.process.env.GITHUB_AW_PR_TITLE_PREFIX = '[BOT] '; - + mockDependencies.process.env.GITHUB_AW_PR_TITLE_PREFIX = "[BOT] "; + const mockPullRequest = { number: 987, - html_url: 'https://github.com/testowner/testrepo/pull/987' + html_url: "https://github.com/testowner/testrepo/pull/987", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); - + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + const callArgs = mockDependencies.github.rest.pulls.create.mock.calls[0][0]; - expect(callArgs.title).toBe('[BOT] Simple PR title'); + expect(callArgs.title).toBe("[BOT] Simple PR title"); }); - it('should not duplicate title prefix when already present', async () => { - mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = 'test-workflow'; - mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = 'main'; + it("should not duplicate title prefix when already present", async () => { + 
mockDependencies.process.env.GITHUB_AW_WORKFLOW_ID = "test-workflow"; + mockDependencies.process.env.GITHUB_AW_BASE_BRANCH = "main"; mockDependencies.process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'create-pull-request', - title: '[BOT] PR title already prefixed', - body: 'PR body content' - }] + items: [ + { + type: "create-pull-request", + title: "[BOT] PR title already prefixed", + body: "PR body content", + }, + ], }); - mockDependencies.process.env.GITHUB_AW_PR_TITLE_PREFIX = '[BOT] '; - + mockDependencies.process.env.GITHUB_AW_PR_TITLE_PREFIX = "[BOT] "; + const mockPullRequest = { number: 988, - html_url: 'https://github.com/testowner/testrepo/pull/988' + html_url: "https://github.com/testowner/testrepo/pull/988", }; - - mockDependencies.github.rest.pulls.create.mockResolvedValue({ data: mockPullRequest }); - + + mockDependencies.github.rest.pulls.create.mockResolvedValue({ + data: mockPullRequest, + }); + const mainFunction = createMainFunction(mockDependencies); - + await mainFunction(); - + const callArgs = mockDependencies.github.rest.pulls.create.mock.calls[0][0]; - expect(callArgs.title).toBe('[BOT] PR title already prefixed'); // Should not be duplicated + expect(callArgs.title).toBe("[BOT] PR title already prefixed"); // Should not be duplicated }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/parse_claude_log.cjs b/pkg/workflow/js/parse_claude_log.cjs index 2c824d90f2..07b3ff1af1 100644 --- a/pkg/workflow/js/parse_claude_log.cjs +++ b/pkg/workflow/js/parse_claude_log.cjs @@ -1,27 +1,26 @@ function main() { - const fs = require('fs'); - + const fs = require("fs"); + try { // Get the log file path from environment const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } - + if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - - const logContent = 
fs.readFileSync(logFile, 'utf8'); + + const logContent = fs.readFileSync(logFile, "utf8"); const markdown = parseClaudeLog(logContent); - + // Append to GitHub step summary core.summary.addRaw(markdown).write(); - } catch (error) { - console.error('Error parsing Claude log:', error.message); + console.error("Error parsing Claude log:", error.message); core.setFailed(error.message); } } @@ -30,49 +29,60 @@ function parseClaudeLog(logContent) { try { const logEntries = JSON.parse(logContent); if (!Array.isArray(logEntries)) { - return '## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n'; + return "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n"; } - - let markdown = '## 🤖 Commands and Tools\n\n'; + + let markdown = "## 🤖 Commands and Tools\n\n"; const toolUsePairs = new Map(); // Map tool_use_id to tool_result const commandSummary = []; // For the succinct summary - + // First pass: collect tool results by tool_use_id for (const entry of logEntries) { - if (entry.type === 'user' && entry.message?.content) { + if (entry.type === "user" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_result' && content.tool_use_id) { + if (content.type === "tool_result" && content.tool_use_id) { toolUsePairs.set(content.tool_use_id, content); } } } } - + // Collect all tool uses for summary for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'tool_use') { + if (content.type === "tool_use") { const toolName = content.name; const input = content.input || {}; - + // Skip internal tools - only show external commands and API calls - if (['Read', 'Write', 'Edit', 'MultiEdit', 'LS', 'Grep', 'Glob', 'TodoWrite'].includes(toolName)) { + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + 
"Glob", + "TodoWrite", + ].includes(toolName) + ) { continue; // Skip internal file operations and searches } - + // Find the corresponding tool result to get status const toolResult = toolUsePairs.get(content.id); - let statusIcon = '❓'; + let statusIcon = "❓"; if (toolResult) { - statusIcon = toolResult.is_error === true ? '❌' : '✅'; + statusIcon = toolResult.is_error === true ? "❌" : "✅"; } - + // Add to command summary (only external tools) - if (toolName === 'Bash') { - const formattedCommand = formatBashCommand(input.command || ''); + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith('mcp__')) { + } else if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); } else { @@ -83,67 +93,80 @@ function parseClaudeLog(logContent) { } } } - + // Add command summary if (commandSummary.length > 0) { for (const cmd of commandSummary) { markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } - + // Add Information section from the last entry with result metadata - markdown += '\n## 📊 Information\n\n'; - + markdown += "\n## 📊 Information\n\n"; + // Find the last entry with metadata const lastEntry = logEntries[logEntries.length - 1]; - if (lastEntry && (lastEntry.num_turns || lastEntry.duration_ms || lastEntry.total_cost_usd || lastEntry.usage)) { + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { if (lastEntry.num_turns) { markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; } - + if (lastEntry.duration_ms) { const durationSec = Math.round(lastEntry.duration_ms / 1000); const minutes = Math.floor(durationSec / 60); const seconds = durationSec % 60; markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; 
} - + if (lastEntry.total_cost_usd) { markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; } - + if (lastEntry.usage) { const usage = lastEntry.usage; if (usage.input_tokens || usage.output_tokens) { markdown += `**Token Usage:**\n`; - if (usage.input_tokens) markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if (usage.cache_creation_input_tokens) markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += '\n'; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if (usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; } } - - if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) { + + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; } } - - markdown += '\n## 🤖 Reasoning\n\n'; - + + markdown += "\n## 🤖 Reasoning\n\n"; + // Second pass: process assistant messages in sequence for (const entry of logEntries) { - if (entry.type === 'assistant' && entry.message?.content) { + if (entry.type === "assistant" && entry.message?.content) { for (const content of entry.message.content) { - if (content.type === 'text' && content.text) { + if (content.type === "text" && content.text) { // Add reasoning text directly (no header) const text = content.text.trim(); if (text && text.length > 0) { - markdown += 
text + '\n\n'; + markdown += text + "\n\n"; } - } else if (content.type === 'tool_use') { + } else if (content.type === "tool_use") { // Process tool use with its result const toolResult = toolUsePairs.get(content.id); const toolMarkdown = formatToolUse(content, toolResult); @@ -154,9 +177,8 @@ function parseClaudeLog(logContent) { } } } - + return markdown; - } catch (error) { return `## Agent Log Summary\n\nError parsing Claude log: ${error.message}\n`; } @@ -165,67 +187,76 @@ function parseClaudeLog(logContent) { function formatToolUse(toolUse, toolResult) { const toolName = toolUse.name; const input = toolUse.input || {}; - + // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === 'TodoWrite') { - return ''; // Skip for now, would need global context to find the last one + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one } - + // Helper function to determine status icon function getStatusIcon() { if (toolResult) { - return toolResult.is_error === true ? '❌' : '✅'; + return toolResult.is_error === true ? 
"❌" : "✅"; } - return '❓'; // Unknown by default + return "❓"; // Unknown by default } - - let markdown = ''; + + let markdown = ""; const statusIcon = getStatusIcon(); - + switch (toolName) { - case 'Bash': - const command = input.command || ''; - const description = input.description || ''; - + case "Bash": + const command = input.command || ""; + const description = input.description || ""; + // Format the command to be single line const formattedCommand = formatBashCommand(command); - + if (description) { markdown += `${description}:\n\n`; } markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; break; - case 'Read': - const filePath = input.file_path || input.path || ''; - const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); // Remove /home/runner/work/repo/repo/ prefix + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; break; - case 'Write': - case 'Edit': - case 'MultiEdit': - const writeFilePath = input.file_path || input.path || ''; - const writeRelativePath = writeFilePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); - + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; break; - case 'Grep': - case 'Glob': - const query = input.query || input.pattern || ''; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; break; - case 'LS': - const lsPath = input.path || ''; - const lsRelativePath = lsPath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, ''); + case "LS": + const lsPath = 
input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; break; default: // Handle MCP calls and other tools - if (toolName.startsWith('mcp__')) { + if (toolName.startsWith("mcp__")) { const mcpName = formatMcpName(toolName); const params = formatMcpParameters(input); markdown += `${statusIcon} ${mcpName}(${params})\n\n`; @@ -234,9 +265,12 @@ function formatToolUse(toolUse, toolResult) { const keys = Object.keys(input); if (keys.length > 0) { // Try to find the most important parameter - const mainParam = keys.find(k => ['query', 'command', 'path', 'file_path', 'content'].includes(k)) || keys[0]; - const value = String(input[mainParam] || ''); - + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || ""); + if (value) { markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; } else { @@ -247,17 +281,17 @@ function formatToolUse(toolUse, toolResult) { } } } - + return markdown; } function formatMcpName(toolName) { // Convert mcp__github__search_issues to github::search_issues - if (toolName.startsWith('mcp__')) { - const parts = toolName.split('__'); + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); if (parts.length >= 3) { const provider = parts[1]; // github, etc. - const method = parts.slice(2).join('_'); // search_issues, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. 
return `${provider}::${method}`; } } @@ -266,54 +300,60 @@ function formatMcpName(toolName) { function formatMcpParameters(input) { const keys = Object.keys(input); - if (keys.length === 0) return ''; - + if (keys.length === 0) return ""; + const paramStrs = []; - for (const key of keys.slice(0, 4)) { // Show up to 4 parameters - const value = String(input[key] || ''); + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); paramStrs.push(`${key}: ${truncateString(value, 40)}`); } - + if (keys.length > 4) { - paramStrs.push('...'); + paramStrs.push("..."); } - - return paramStrs.join(', '); + + return paramStrs.join(", "); } function formatBashCommand(command) { - if (!command) return ''; - + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces // and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace - + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); - + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } - + return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= 
maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing -if (typeof module !== 'undefined' && module.exports) { - module.exports = { parseClaudeLog, formatToolUse, formatBashCommand, truncateString }; +if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatBashCommand, + truncateString, + }; } main(); diff --git a/pkg/workflow/js/parse_codex_log.cjs b/pkg/workflow/js/parse_codex_log.cjs index 751e89d6f3..0209d4dc1e 100644 --- a/pkg/workflow/js/parse_codex_log.cjs +++ b/pkg/workflow/js/parse_codex_log.cjs @@ -1,26 +1,26 @@ function main() { - const fs = require('fs'); - + const fs = require("fs"); + try { const logFile = process.env.AGENT_LOG_FILE; if (!logFile) { - console.log('No agent log file specified'); + console.log("No agent log file specified"); return; } - + if (!fs.existsSync(logFile)) { console.log(`Log file not found: ${logFile}`); return; } - - const content = fs.readFileSync(logFile, 'utf8'); + + const content = fs.readFileSync(logFile, "utf8"); const parsedLog = parseCodexLog(content); - + if (parsedLog) { core.summary.addRaw(parsedLog).write(); - console.log('Codex log parsed successfully'); + console.log("Codex log parsed successfully"); } else { - console.log('Failed to parse Codex log'); + console.log("Failed to parse Codex log"); } } catch (error) { core.setFailed(error.message); @@ -29,81 +29,90 @@ function main() { function parseCodexLog(logContent) { try { - const lines = logContent.split('\n'); - let markdown = '## 🤖 Commands and Tools\n\n'; - + const lines = logContent.split("\n"); + let markdown = "## 🤖 Commands and Tools\n\n"; + const commandSummary = []; - + // First pass: collect commands for summary for (let i = 0; i < lines.length; i++) { const line = lines[i]; - + // Detect tool usage and exec commands - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") 
&& line.includes("(")) { // Extract tool name const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; - + // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - - if (toolName.includes('.')) { + + if (toolName.includes(".")) { // Format as provider::method - const parts = toolName.split('.'); + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); - commandSummary.push(`* ${statusIcon} \`${provider}::${method}(...)\``); + const method = parts.slice(1).join("_"); + commandSummary.push( + `* ${statusIcon} \`${provider}::${method}(...)\`` + ); } else { commandSummary.push(`* ${statusIcon} \`${toolName}(...)\``); } } - } else if (line.includes('] exec ')) { + } else if (line.includes("] exec ")) { // Extract exec command const execMatch = line.match(/exec (.+?) 
in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); - + // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } - + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); } } } - + // Add command summary if (commandSummary.length > 0) { for (const cmd of commandSummary) { markdown += `${cmd}\n`; } } else { - markdown += 'No commands or tools used.\n'; + markdown += "No commands or tools used.\n"; } - + // Add Information section - markdown += '\n## 📊 Information\n\n'; - + markdown += "\n## 📊 Information\n\n"; + // Extract metadata from Codex logs let totalTokens = 0; const tokenMatches = logContent.match(/tokens used: (\d+)/g); @@ -113,70 +122,81 @@ function parseCodexLog(logContent) { totalTokens += tokens; } } - + if (totalTokens > 0) { markdown += `**Total Tokens Used:** ${totalTokens.toLocaleString()}\n\n`; } - + // Count tool calls and exec commands const toolCalls = (logContent.match(/\] tool /g) || []).length; const execCommands = (logContent.match(/\] exec /g) || []).length; - + if (toolCalls > 0) { markdown += `**Tool Calls:** ${toolCalls}\n\n`; } - + if (execCommands > 0) { markdown += `**Commands Executed:** ${execCommands}\n\n`; } - - markdown += '\n## 🤖 Reasoning\n\n'; - + + markdown += "\n## 🤖 Reasoning\n\n"; + // Second pass: process full conversation flow with interleaved reasoning, tools, and commands let inThinkingSection = false; - + for (let i = 0; i < lines.length; i++) { const line = lines[i]; - + // Skip 
metadata lines - if (line.includes('OpenAI Codex') || line.startsWith('--------') || - line.includes('workdir:') || line.includes('model:') || - line.includes('provider:') || line.includes('approval:') || - line.includes('sandbox:') || line.includes('reasoning effort:') || - line.includes('reasoning summaries:') || line.includes('tokens used:')) { + if ( + line.includes("OpenAI Codex") || + line.startsWith("--------") || + line.includes("workdir:") || + line.includes("model:") || + line.includes("provider:") || + line.includes("approval:") || + line.includes("sandbox:") || + line.includes("reasoning effort:") || + line.includes("reasoning summaries:") || + line.includes("tokens used:") + ) { continue; } - + // Process thinking sections - if (line.includes('] thinking')) { + if (line.includes("] thinking")) { inThinkingSection = true; continue; } - + // Process tool calls - if (line.includes('] tool ') && line.includes('(')) { + if (line.includes("] tool ") && line.includes("(")) { inThinkingSection = false; const toolMatch = line.match(/\] tool ([^(]+)\(/); if (toolMatch) { const toolName = toolMatch[1]; - + // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('success in')) { - statusIcon = '✅'; + if (nextLine.includes("success in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failure in') || nextLine.includes('error in') || nextLine.includes('failed in')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failure in") || + nextLine.includes("error in") || + nextLine.includes("failed in") + ) { + statusIcon = "❌"; break; } } - - if (toolName.includes('.')) { - const parts = toolName.split('.'); + + if (toolName.includes(".")) { + const parts = toolName.split("."); const provider = parts[0]; - const method = parts.slice(1).join('_'); + const method = 
parts.slice(1).join("_"); markdown += `${statusIcon} ${provider}::${method}(...)\n\n`; } else { markdown += `${statusIcon} ${toolName}(...)\n\n`; @@ -184,79 +204,86 @@ function parseCodexLog(logContent) { } continue; } - + // Process exec commands - if (line.includes('] exec ')) { + if (line.includes("] exec ")) { inThinkingSection = false; const execMatch = line.match(/exec (.+?) in/); if (execMatch) { const formattedCommand = formatBashCommand(execMatch[1]); - + // Look ahead to find the result status - let statusIcon = '❓'; // Unknown by default + let statusIcon = "❓"; // Unknown by default for (let j = i + 1; j < Math.min(i + 5, lines.length); j++) { const nextLine = lines[j]; - if (nextLine.includes('succeeded in')) { - statusIcon = '✅'; + if (nextLine.includes("succeeded in")) { + statusIcon = "✅"; break; - } else if (nextLine.includes('failed in') || nextLine.includes('error')) { - statusIcon = '❌'; + } else if ( + nextLine.includes("failed in") || + nextLine.includes("error") + ) { + statusIcon = "❌"; break; } } - + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; } continue; } - + // Process thinking content - if (inThinkingSection && line.trim().length > 20 && !line.startsWith('[2025-')) { + if ( + inThinkingSection && + line.trim().length > 20 && + !line.startsWith("[2025-") + ) { const trimmed = line.trim(); // Add thinking content directly markdown += `${trimmed}\n\n`; } } - + return markdown; } catch (error) { - console.error('Error parsing Codex log:', error); - return '## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n'; + console.error("Error parsing Codex log:", error); + return "## 🤖 Commands and Tools\n\nError parsing log content.\n\n## 🤖 Reasoning\n\nUnable to parse reasoning from log.\n\n"; } } function formatBashCommand(command) { - if (!command) return ''; - + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces // 
and collapsing multiple spaces let formatted = command - .replace(/\n/g, ' ') // Replace newlines with spaces - .replace(/\r/g, ' ') // Replace carriage returns with spaces - .replace(/\t/g, ' ') // Replace tabs with spaces - .replace(/\s+/g, ' ') // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace - + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, '\\`'); - + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) const maxLength = 80; if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + '...'; + formatted = formatted.substring(0, maxLength) + "..."; } - + return formatted; } function truncateString(str, maxLength) { - if (!str) return ''; + if (!str) return ""; if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + '...'; + return str.substring(0, maxLength) + "..."; } // Export for testing -if (typeof module !== 'undefined' && module.exports) { +if (typeof module !== "undefined" && module.exports) { module.exports = { parseCodexLog, formatBashCommand, truncateString }; } diff --git a/pkg/workflow/js/push_to_branch.cjs b/pkg/workflow/js/push_to_branch.cjs index 0e60618d87..8c79fbd192 100644 --- a/pkg/workflow/js/push_to_branch.cjs +++ b/pkg/workflow/js/push_to_branch.cjs @@ -6,138 +6,163 @@ async function main() { // Environment validation - fail early if required variables are missing const branchName = process.env.GITHUB_AW_PUSH_BRANCH; if (!branchName) { - core.setFailed('GITHUB_AW_PUSH_BRANCH environment variable is required'); + core.setFailed("GITHUB_AW_PUSH_BRANCH environment variable is required"); 
return; } const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT || ""; - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } const target = process.env.GITHUB_AW_PUSH_TARGET || "triggering"; // Check if patch file exists and has valid content - if (!fs.existsSync('/tmp/aw.patch')) { - core.setFailed('No patch file found - cannot push without changes'); + if (!fs.existsSync("/tmp/aw.patch")) { + core.setFailed("No patch file found - cannot push without changes"); return; } - const patchContent = fs.readFileSync('/tmp/aw.patch', 'utf8'); - if (!patchContent || !patchContent.trim() || patchContent.includes('Failed to generate patch')) { - core.setFailed('Patch file is empty or contains error message - cannot push without changes'); + const patchContent = fs.readFileSync("/tmp/aw.patch", "utf8"); + if ( + !patchContent || + !patchContent.trim() || + patchContent.includes("Failed to generate patch") + ) { + core.setFailed( + "Patch file is empty or contains error message - cannot push without changes" + ); return; } - console.log('Agent output content length:', outputContent.length); - console.log('Patch content validation passed'); - console.log('Target branch:', branchName); - console.log('Target configuration:', target); + console.log("Agent output content length:", outputContent.length); + console.log("Patch content validation passed"); + console.log("Target branch:", branchName); + console.log("Target configuration:", target); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? 
error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find the push-to-branch item - const pushItem = validatedOutput.items.find(/** @param {any} item */ item => item.type === 'push-to-branch'); + const pushItem = validatedOutput.items.find( + /** @param {any} item */ item => item.type === "push-to-branch" + ); if (!pushItem) { - console.log('No push-to-branch item found in agent output'); + console.log("No push-to-branch item found in agent output"); return; } - console.log('Found push-to-branch item'); + console.log("Found push-to-branch item"); // Validate target configuration for pull request context if (target !== "*" && target !== "triggering") { // If target is a specific number, validate it's a valid pull request number const targetNumber = parseInt(target, 10); if (isNaN(targetNumber)) { - core.setFailed('Invalid target configuration: must be "triggering", "*", or a valid pull request number'); + core.setFailed( + 'Invalid target configuration: must be "triggering", "*", or a valid pull request number' + ); return; } } // Check if we're in a pull request context when required if (target === "triggering" && !context.payload.pull_request) { - core.setFailed('push-to-branch with target "triggering" requires pull request context'); + core.setFailed( + 'push-to-branch with target "triggering" requires pull request context' + ); return; } // Configure git (required for commits) - execSync('git config --global user.email "action@github.com"', { stdio: 'inherit' }); - execSync('git config --global user.name "GitHub Action"', { stdio: 'inherit' }); + execSync('git config --global user.email "action@github.com"', { + stdio: "inherit", + }); + execSync('git config --global user.name "GitHub Action"', { + stdio: "inherit", + }); // Switch to or create the target branch - 
console.log('Switching to branch:', branchName); + console.log("Switching to branch:", branchName); try { // Try to checkout existing branch first - execSync('git fetch origin', { stdio: 'inherit' }); - execSync(`git checkout ${branchName}`, { stdio: 'inherit' }); - console.log('Checked out existing branch:', branchName); + execSync("git fetch origin", { stdio: "inherit" }); + execSync(`git checkout ${branchName}`, { stdio: "inherit" }); + console.log("Checked out existing branch:", branchName); } catch (error) { // Branch doesn't exist, create it - console.log('Branch does not exist, creating new branch:', branchName); - execSync(`git checkout -b ${branchName}`, { stdio: 'inherit' }); + console.log("Branch does not exist, creating new branch:", branchName); + execSync(`git checkout -b ${branchName}`, { stdio: "inherit" }); } // Apply the patch using git CLI - console.log('Applying patch...'); + console.log("Applying patch..."); try { - execSync('git apply /tmp/aw.patch', { stdio: 'inherit' }); - console.log('Patch applied successfully'); + execSync("git apply /tmp/aw.patch", { stdio: "inherit" }); + console.log("Patch applied successfully"); } catch (error) { - console.error('Failed to apply patch:', error instanceof Error ? error.message : String(error)); - core.setFailed('Failed to apply patch'); + console.error( + "Failed to apply patch:", + error instanceof Error ? 
error.message : String(error) + ); + core.setFailed("Failed to apply patch"); return; } // Commit and push the changes - execSync('git add .', { stdio: 'inherit' }); - + execSync("git add .", { stdio: "inherit" }); + // Check if there are changes to commit try { - execSync('git diff --cached --exit-code', { stdio: 'ignore' }); - console.log('No changes to commit'); + execSync("git diff --cached --exit-code", { stdio: "ignore" }); + console.log("No changes to commit"); return; } catch (error) { // Exit code != 0 means there are changes to commit, which is what we want } - const commitMessage = pushItem.message || 'Apply agent changes'; - execSync(`git commit -m "${commitMessage}"`, { stdio: 'inherit' }); - execSync(`git push origin ${branchName}`, { stdio: 'inherit' }); - console.log('Changes committed and pushed to branch:', branchName); + const commitMessage = pushItem.message || "Apply agent changes"; + execSync(`git commit -m "${commitMessage}"`, { stdio: "inherit" }); + execSync(`git push origin ${branchName}`, { stdio: "inherit" }); + console.log("Changes committed and pushed to branch:", branchName); // Get commit SHA - const commitSha = execSync('git rev-parse HEAD', { encoding: 'utf8' }).trim(); - const pushUrl = context.payload.repository + const commitSha = execSync("git rev-parse HEAD", { encoding: "utf8" }).trim(); + const pushUrl = context.payload.repository ? 
`${context.payload.repository.html_url}/tree/${branchName}` : `https://github.com/${context.repo.owner}/${context.repo.repo}/tree/${branchName}`; // Set outputs - core.setOutput('branch_name', branchName); - core.setOutput('commit_sha', commitSha); - core.setOutput('push_url', pushUrl); + core.setOutput("branch_name", branchName); + core.setOutput("commit_sha", commitSha); + core.setOutput("push_url", pushUrl); // Write summary to GitHub Actions summary await core.summary - .addRaw(` + .addRaw( + ` ## Push to Branch - **Branch**: \`${branchName}\` - **Commit**: [${commitSha.substring(0, 7)}](${pushUrl}) - **URL**: [${pushUrl}](${pushUrl}) -`).write(); +` + ) + .write(); } await main(); diff --git a/pkg/workflow/js/push_to_branch.test.cjs b/pkg/workflow/js/push_to_branch.test.cjs index d960023c1b..83b851a2b6 100644 --- a/pkg/workflow/js/push_to_branch.test.cjs +++ b/pkg/workflow/js/push_to_branch.test.cjs @@ -1,8 +1,8 @@ -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; -describe('push_to_branch.cjs', () => { +describe("push_to_branch.cjs", () => { let mockCore; beforeEach(() => { @@ -12,19 +12,19 @@ describe('push_to_branch.cjs', () => { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; global.core = mockCore; // Mock context object global.context = { - eventName: 'pull_request', + eventName: "pull_request", payload: { pull_request: { number: 123 }, - repository: { html_url: 'https://github.com/testowner/testrepo' } + repository: { html_url: "https://github.com/testowner/testrepo" }, }, - repo: { owner: 'testowner', repo: 'testrepo' } + repo: { owner: "testowner", repo: "testrepo" }, }; // Clear environment variables @@ -35,63 +35,63 @@ describe('push_to_branch.cjs', () => { afterEach(() => { // Clean up globals 
safely - if (typeof global !== 'undefined') { + if (typeof global !== "undefined") { delete global.core; delete global.context; } }); - describe('Script validation', () => { - it('should have valid JavaScript syntax', () => { - const scriptPath = path.join(__dirname, 'push_to_branch.cjs'); - const scriptContent = fs.readFileSync(scriptPath, 'utf8'); - + describe("Script validation", () => { + it("should have valid JavaScript syntax", () => { + const scriptPath = path.join(__dirname, "push_to_branch.cjs"); + const scriptContent = fs.readFileSync(scriptPath, "utf8"); + // Basic syntax validation - should not contain obvious errors - expect(scriptContent).toContain('async function main()'); - expect(scriptContent).toContain('GITHUB_AW_PUSH_BRANCH'); - expect(scriptContent).toContain('core.setFailed'); - expect(scriptContent).toContain('/tmp/aw.patch'); - expect(scriptContent).toContain('await main()'); + expect(scriptContent).toContain("async function main()"); + expect(scriptContent).toContain("GITHUB_AW_PUSH_BRANCH"); + expect(scriptContent).toContain("core.setFailed"); + expect(scriptContent).toContain("/tmp/aw.patch"); + expect(scriptContent).toContain("await main()"); }); - it('should export a main function', () => { - const scriptPath = path.join(__dirname, 'push_to_branch.cjs'); - const scriptContent = fs.readFileSync(scriptPath, 'utf8'); - + it("should export a main function", () => { + const scriptPath = path.join(__dirname, "push_to_branch.cjs"); + const scriptContent = fs.readFileSync(scriptPath, "utf8"); + // Check that the script has the expected structure expect(scriptContent).toMatch(/async function main\(\) \{[\s\S]*\}/); }); - it('should handle required environment variables', () => { - const scriptPath = path.join(__dirname, 'push_to_branch.cjs'); - const scriptContent = fs.readFileSync(scriptPath, 'utf8'); - + it("should handle required environment variables", () => { + const scriptPath = path.join(__dirname, "push_to_branch.cjs"); + const 
scriptContent = fs.readFileSync(scriptPath, "utf8"); + // Check that environment variables are handled - expect(scriptContent).toContain('process.env.GITHUB_AW_PUSH_BRANCH'); - expect(scriptContent).toContain('process.env.GITHUB_AW_AGENT_OUTPUT'); - expect(scriptContent).toContain('process.env.GITHUB_AW_PUSH_TARGET'); + expect(scriptContent).toContain("process.env.GITHUB_AW_PUSH_BRANCH"); + expect(scriptContent).toContain("process.env.GITHUB_AW_AGENT_OUTPUT"); + expect(scriptContent).toContain("process.env.GITHUB_AW_PUSH_TARGET"); }); - it('should handle patch file operations', () => { - const scriptPath = path.join(__dirname, 'push_to_branch.cjs'); - const scriptContent = fs.readFileSync(scriptPath, 'utf8'); - + it("should handle patch file operations", () => { + const scriptPath = path.join(__dirname, "push_to_branch.cjs"); + const scriptContent = fs.readFileSync(scriptPath, "utf8"); + // Check that patch operations are included - expect(scriptContent).toContain('fs.existsSync'); - expect(scriptContent).toContain('fs.readFileSync'); - expect(scriptContent).toContain('git apply'); - expect(scriptContent).toContain('git commit'); - expect(scriptContent).toContain('git push'); + expect(scriptContent).toContain("fs.existsSync"); + expect(scriptContent).toContain("fs.readFileSync"); + expect(scriptContent).toContain("git apply"); + expect(scriptContent).toContain("git commit"); + expect(scriptContent).toContain("git push"); }); - it('should validate branch operations', () => { - const scriptPath = path.join(__dirname, 'push_to_branch.cjs'); - const scriptContent = fs.readFileSync(scriptPath, 'utf8'); - + it("should validate branch operations", () => { + const scriptPath = path.join(__dirname, "push_to_branch.cjs"); + const scriptContent = fs.readFileSync(scriptPath, "utf8"); + // Check that git branch operations are handled - expect(scriptContent).toContain('git checkout'); - expect(scriptContent).toContain('git fetch'); - expect(scriptContent).toContain('git 
config'); + expect(scriptContent).toContain("git checkout"); + expect(scriptContent).toContain("git fetch"); + expect(scriptContent).toContain("git config"); }); }); }); diff --git a/pkg/workflow/js/sanitize_output.cjs b/pkg/workflow/js/sanitize_output.cjs index 8386cfbee9..a9f0e78c4b 100644 --- a/pkg/workflow/js/sanitize_output.cjs +++ b/pkg/workflow/js/sanitize_output.cjs @@ -4,23 +4,26 @@ * @returns {string} The sanitized content */ function sanitizeContent(content) { - if (!content || typeof content !== 'string') { - return ''; + if (!content || typeof content !== "string") { + return ""; } // Read allowed domains from environment variable const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; const defaultAllowedDomains = [ - 'github.com', - 'github.io', - 'githubusercontent.com', - 'githubassets.com', - 'github.dev', - 'codespaces.new' + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", ]; const allowedDomains = allowedDomainsEnv - ? allowedDomainsEnv.split(',').map(d => d.trim()).filter(d => d) + ? 
allowedDomainsEnv + .split(",") + .map(d => d.trim()) + .filter(d => d) : defaultAllowedDomains; let sanitized = content; @@ -29,15 +32,15 @@ function sanitizeContent(content) { sanitized = neutralizeMentions(sanitized); // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ''); + sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); // XML character escaping sanitized = sanitized - .replace(/&/g, '&') // Must be first to avoid double-escaping - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/'/g, '''); + .replace(/&/g, "&") // Must be first to avoid double-escaping + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); // URI filtering - replace non-https protocols with "(redacted)" // Step 1: Temporarily mark HTTPS URLs to protect them @@ -50,18 +53,21 @@ function sanitizeContent(content) { // Limit total length to prevent DoS (0.5MB max) const maxLength = 524288; if (sanitized.length > maxLength) { - sanitized = sanitized.substring(0, maxLength) + '\n[Content truncated due to length]'; + sanitized = + sanitized.substring(0, maxLength) + "\n[Content truncated due to length]"; } // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split('\n'); + const lines = sanitized.split("\n"); const maxLines = 65000; if (lines.length > maxLines) { - sanitized = lines.slice(0, maxLines).join('\n') + '\n[Content truncated due to line count]'; + sanitized = + lines.slice(0, maxLines).join("\n") + + "\n[Content truncated due to line count]"; } // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ''); + sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); // Neutralize common bot trigger phrases sanitized = neutralizeBotTriggers(sanitized); @@ -75,19 +81,25 @@ function sanitizeContent(content) { * @returns {string} The string with unknown domains redacted */ function sanitizeUrlDomains(s) { - s = 
s.replace(/\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain => { - const normalizedAllowed = allowedDomain.toLowerCase(); - return hostname === normalizedAllowed || hostname.endsWith('.' + normalizedAllowed); - }); - - return isAllowed ? match : '(redacted)'; - }); - + s = s.replace( + /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, + (match, domain) => { + // Extract the hostname part (before first slash, colon, or other delimiter) + const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); + + // Check if this domain or any parent domain is in the allowlist + const isAllowed = allowedDomains.some(allowedDomain => { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + + return isAllowed ? match : "(redacted)"; + } + ); + return s; } @@ -99,10 +111,13 @@ function sanitizeContent(content) { function sanitizeUrlProtocols(s) { // Match both protocol:// and protocol: patterns // This covers URLs like https://example.com, javascript:alert(), mailto:user@domain.com, etc. - return s.replace(/\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === 'https' ? match : '(redacted)'; - }); + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? 
match : "(redacted)"; + } + ); } /** @@ -112,8 +127,10 @@ function sanitizeContent(content) { */ function neutralizeMentions(s) { // Replace @name or @org/team outside code with `@name` - return s.replace(/(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\``); + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); } /** @@ -123,8 +140,10 @@ function sanitizeContent(content) { */ function neutralizeBotTriggers(s) { // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. - return s.replace(/\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\``); + return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); } } @@ -132,26 +151,30 @@ async function main() { const fs = require("fs"); const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; if (!outputFile) { - console.log('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - core.setOutput('output', ''); + console.log("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); return; } if (!fs.existsSync(outputFile)) { - console.log('Output file does not exist:', outputFile); - core.setOutput('output', ''); + console.log("Output file does not exist:", outputFile); + core.setOutput("output", ""); return; } - const outputContent = fs.readFileSync(outputFile, 'utf8'); - if (outputContent.trim() === '') { - console.log('Output file is empty'); - core.setOutput('output', ''); + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + console.log("Output file is empty"); + core.setOutput("output", ""); } else { const sanitizedContent = sanitizeContent(outputContent); - console.log('Collected agentic output (sanitized):', 
sanitizedContent.substring(0, 200) + (sanitizedContent.length > 200 ? '...' : '')); - core.setOutput('output', sanitizedContent); + console.log( + "Collected agentic output (sanitized):", + sanitizedContent.substring(0, 200) + + (sanitizedContent.length > 200 ? "..." : "") + ); + core.setOutput("output", sanitizedContent); } } -await main(); \ No newline at end of file +await main(); diff --git a/pkg/workflow/js/sanitize_output.test.cjs b/pkg/workflow/js/sanitize_output.test.cjs index 54162e52ed..af63b8d5aa 100644 --- a/pkg/workflow/js/sanitize_output.test.cjs +++ b/pkg/workflow/js/sanitize_output.test.cjs @@ -1,172 +1,180 @@ -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { - setOutput: vi.fn() + setOutput: vi.fn(), }; // Set up global variables global.core = mockCore; -describe('sanitize_output.cjs', () => { +describe("sanitize_output.cjs", () => { let sanitizeScript; let sanitizeContentFunction; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset environment variables delete process.env.GITHUB_AW_SAFE_OUTPUTS; delete process.env.GITHUB_AW_ALLOWED_DOMAINS; - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/sanitize_output.cjs'); - sanitizeScript = fs.readFileSync(scriptPath, 'utf8'); - + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/sanitize_output.cjs" + ); + sanitizeScript = fs.readFileSync(scriptPath, "utf8"); + // Extract sanitizeContent function for unit testing // We need to eval the script to get access to the function const scriptWithExport = sanitizeScript.replace( - 'await main();', - 'global.testSanitizeContent = sanitizeContent;' + "await main();", + "global.testSanitizeContent = sanitizeContent;" ); 
eval(scriptWithExport); sanitizeContentFunction = global.testSanitizeContent; }); - describe('sanitizeContent function', () => { - it('should handle null and undefined inputs', () => { - expect(sanitizeContentFunction(null)).toBe(''); - expect(sanitizeContentFunction(undefined)).toBe(''); - expect(sanitizeContentFunction('')).toBe(''); + describe("sanitizeContent function", () => { + it("should handle null and undefined inputs", () => { + expect(sanitizeContentFunction(null)).toBe(""); + expect(sanitizeContentFunction(undefined)).toBe(""); + expect(sanitizeContentFunction("")).toBe(""); }); - it('should neutralize @mentions by wrapping in backticks', () => { - const input = 'Hello @user and @org/team'; + it("should neutralize @mentions by wrapping in backticks", () => { + const input = "Hello @user and @org/team"; const result = sanitizeContentFunction(input); - expect(result).toContain('`@user`'); - expect(result).toContain('`@org/team`'); + expect(result).toContain("`@user`"); + expect(result).toContain("`@org/team`"); }); - it('should not neutralize @mentions inside code blocks', () => { - const input = 'Check `@user` in code and @realuser outside'; + it("should not neutralize @mentions inside code blocks", () => { + const input = "Check `@user` in code and @realuser outside"; const result = sanitizeContentFunction(input); - expect(result).toContain('`@user`'); // Already in backticks, stays as is - expect(result).toContain('`@realuser`'); // Gets wrapped + expect(result).toContain("`@user`"); // Already in backticks, stays as is + expect(result).toContain("`@realuser`"); // Gets wrapped }); - it('should neutralize bot trigger phrases', () => { - const input = 'This fixes #123 and closes #456. Also resolves #789'; + it("should neutralize bot trigger phrases", () => { + const input = "This fixes #123 and closes #456. 
Also resolves #789"; const result = sanitizeContentFunction(input); - expect(result).toContain('`fixes #123`'); - expect(result).toContain('`closes #456`'); - expect(result).toContain('`resolves #789`'); + expect(result).toContain("`fixes #123`"); + expect(result).toContain("`closes #456`"); + expect(result).toContain("`resolves #789`"); }); - it('should remove control characters except newlines and tabs', () => { - const input = 'Hello\x00world\x0C\nNext line\t\x1Fbad'; + it("should remove control characters except newlines and tabs", () => { + const input = "Hello\x00world\x0C\nNext line\t\x1Fbad"; const result = sanitizeContentFunction(input); - expect(result).not.toContain('\x00'); - expect(result).not.toContain('\x0C'); - expect(result).not.toContain('\x1F'); - expect(result).toContain('\n'); - expect(result).toContain('\t'); + expect(result).not.toContain("\x00"); + expect(result).not.toContain("\x0C"); + expect(result).not.toContain("\x1F"); + expect(result).toContain("\n"); + expect(result).toContain("\t"); }); - it('should escape XML characters', () => { + it("should escape XML characters", () => { const input = ' & more'; const result = sanitizeContentFunction(input); - expect(result).toContain('<script>'); - expect(result).toContain('"test"'); - expect(result).toContain('& more'); + expect(result).toContain("<script>"); + expect(result).toContain(""test""); + expect(result).toContain("& more"); }); - it('should block HTTP URLs while preserving HTTPS URLs', () => { - const input = 'HTTP: http://bad.com and HTTPS: https://github.com'; + it("should block HTTP URLs while preserving HTTPS URLs", () => { + const input = "HTTP: http://bad.com and HTTPS: https://github.com"; const result = sanitizeContentFunction(input); - expect(result).toContain('(redacted)'); // HTTP URL blocked - expect(result).toContain('https://github.com'); // HTTPS URL preserved - expect(result).not.toContain('http://bad.com'); + expect(result).toContain("(redacted)"); // HTTP URL 
blocked + expect(result).toContain("https://github.com"); // HTTPS URL preserved + expect(result).not.toContain("http://bad.com"); }); - it('should block various unsafe protocols', () => { - const input = 'Bad: ftp://file.com javascript:alert(1) file://local data:text/html,]]> @@ -385,38 +399,44 @@ Special chars: \x00\x1F & "quotes" 'apostrophes' `; const result = sanitizeContentFunction(input); - - expect(result).toContain('<xml attr="value & 'quotes'">'); - expect(result).toContain('<![CDATA[<script>alert("xss")</script>]]>'); - expect(result).toContain('<!-- comment with "quotes" & 'apostrophes' -->'); - expect(result).toContain('</xml>'); + + expect(result).toContain( + "<xml attr="value & 'quotes'">" + ); + expect(result).toContain( + "<![CDATA[<script>alert("xss")</script>]]>" + ); + expect(result).toContain( + "<!-- comment with "quotes" & 'apostrophes' -->" + ); + expect(result).toContain("</xml>"); }); - it('should handle non-string inputs robustly', () => { - expect(sanitizeContentFunction(123)).toBe(''); - expect(sanitizeContentFunction({})).toBe(''); - expect(sanitizeContentFunction([])).toBe(''); - expect(sanitizeContentFunction(true)).toBe(''); - expect(sanitizeContentFunction(false)).toBe(''); + it("should handle non-string inputs robustly", () => { + expect(sanitizeContentFunction(123)).toBe(""); + expect(sanitizeContentFunction({})).toBe(""); + expect(sanitizeContentFunction([])).toBe(""); + expect(sanitizeContentFunction(true)).toBe(""); + expect(sanitizeContentFunction(false)).toBe(""); }); - it('should preserve line breaks and tabs in content structure', () => { + it("should preserve line breaks and tabs in content structure", () => { const input = `Line 1 \t\tIndented line \n\nDouble newline \tTab at start`; const result = sanitizeContentFunction(input); - - expect(result).toContain('\n'); - expect(result).toContain('\t'); - expect(result.split('\n').length).toBeGreaterThan(1); - expect(result).toContain('Line 1'); - 
expect(result).toContain('Indented line'); - expect(result).toContain('Tab at start'); + + expect(result).toContain("\n"); + expect(result).toContain("\t"); + expect(result.split("\n").length).toBeGreaterThan(1); + expect(result).toContain("Line 1"); + expect(result).toContain("Indented line"); + expect(result).toContain("Tab at start"); }); - it('should handle simultaneous protocol and domain filtering', () => { + it("should handle simultaneous protocol and domain filtering", () => { const input = ` Good HTTPS: https://github.com/repo Bad HTTPS: https://evil.com/malware @@ -424,25 +444,25 @@ Special chars: \x00\x1F & "quotes" 'apostrophes' Mixed: https://evil.com/path?goto=https://github.com/safe `; const result = sanitizeContentFunction(input); - - expect(result).toContain('https://github.com/repo'); - expect(result).toContain('(redacted)'); // For evil.com and http://github.com - expect(result).not.toContain('https://evil.com'); - expect(result).not.toContain('http://github.com'); - + + expect(result).toContain("https://github.com/repo"); + expect(result).toContain("(redacted)"); // For evil.com and http://github.com + expect(result).not.toContain("https://evil.com"); + expect(result).not.toContain("http://github.com"); + // The safe URL in query param should still be preserved - expect(result).toContain('https://github.com/safe'); + expect(result).toContain("https://github.com/safe"); }); }); - describe('main function', () => { + describe("main function", () => { beforeEach(() => { // Clean up any test files - const testFile = '/tmp/test-output.txt'; + const testFile = "/tmp/test-output.txt"; if (fs.existsSync(testFile)) { fs.unlinkSync(testFile); } - + // Make fs available globally for the evaluated script global.fs = fs; }); @@ -452,120 +472,131 @@ Special chars: \x00\x1F & "quotes" 'apostrophes' delete global.fs; }); - it('should handle missing GITHUB_AW_SAFE_OUTPUTS environment variable', async () => { + it("should handle missing GITHUB_AW_SAFE_OUTPUTS 
environment variable", async () => { delete process.env.GITHUB_AW_SAFE_OUTPUTS; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('GITHUB_AW_SAFE_OUTPUTS not set, no output to collect'); - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - + + expect(consoleSpy).toHaveBeenCalledWith( + "GITHUB_AW_SAFE_OUTPUTS not set, no output to collect" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + consoleSpy.mockRestore(); }); - it('should handle non-existent output file', async () => { - process.env.GITHUB_AW_SAFE_OUTPUTS = '/tmp/non-existent-file.txt'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should handle non-existent output file", async () => { + process.env.GITHUB_AW_SAFE_OUTPUTS = "/tmp/non-existent-file.txt"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Output file does not exist:', '/tmp/non-existent-file.txt'); - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - + + expect(consoleSpy).toHaveBeenCalledWith( + "Output file does not exist:", + "/tmp/non-existent-file.txt" + ); + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + consoleSpy.mockRestore(); }); - it('should handle empty output file', async () => { - const testFile = '/tmp/test-empty-output.txt'; - fs.writeFileSync(testFile, ' \n \t \n '); + it("should handle empty output file", async () => { + const testFile = "/tmp/test-empty-output.txt"; + fs.writeFileSync(testFile, " \n \t \n "); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const 
consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Output file is empty'); - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - + + expect(consoleSpy).toHaveBeenCalledWith("Output file is empty"); + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); - it('should process and sanitize output file content', async () => { - const testContent = 'Hello @user! This fixes #123. Link: http://bad.com and https://github.com/repo'; - const testFile = '/tmp/test-output.txt'; + it("should process and sanitize output file content", async () => { + const testContent = + "Hello @user! This fixes #123. Link: http://bad.com and https://github.com/repo"; + const testFile = "/tmp/test-output.txt"; fs.writeFileSync(testFile, testContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - + expect(consoleSpy).toHaveBeenCalledWith( - 'Collected agentic output (sanitized):', - expect.stringContaining('`@user`') + "Collected agentic output (sanitized):", + expect.stringContaining("`@user`") + ); + + const outputCall = mockCore.setOutput.mock.calls.find( + call => call[0] === "output" ); - - const outputCall = mockCore.setOutput.mock.calls.find(call => call[0] === 'output'); expect(outputCall).toBeDefined(); const sanitizedOutput = outputCall[1]; - + // Verify sanitization occurred - expect(sanitizedOutput).toContain('`@user`'); - expect(sanitizedOutput).toContain('`fixes #123`'); - expect(sanitizedOutput).toContain('(redacted)'); // HTTP URL - expect(sanitizedOutput).toContain('https://github.com/repo'); // HTTPS URL preserved - + 
expect(sanitizedOutput).toContain("`@user`"); + expect(sanitizedOutput).toContain("`fixes #123`"); + expect(sanitizedOutput).toContain("(redacted)"); // HTTP URL + expect(sanitizedOutput).toContain("https://github.com/repo"); // HTTPS URL preserved + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); - it('should truncate log output for very long content', async () => { - const longContent = 'x'.repeat(250); // More than 200 chars to trigger truncation - const testFile = '/tmp/test-long-output.txt'; + it("should truncate log output for very long content", async () => { + const longContent = "x".repeat(250); // More than 200 chars to trigger truncation + const testFile = "/tmp/test-long-output.txt"; fs.writeFileSync(testFile, longContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - + const logCalls = consoleSpy.mock.calls; - const outputLogCall = logCalls.find(call => - call[0] && call[0].includes('Collected agentic output (sanitized):') + const outputLogCall = logCalls.find( + call => + call[0] && call[0].includes("Collected agentic output (sanitized):") ); - + expect(outputLogCall).toBeDefined(); - expect(outputLogCall[1]).toContain('...'); + expect(outputLogCall[1]).toContain("..."); expect(outputLogCall[1].length).toBeLessThan(longContent.length); - + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); - it('should handle file read errors gracefully', async () => { + it("should handle file read errors gracefully", async () => { // Create a file and then remove read permissions - const testFile = '/tmp/test-no-read.txt'; - fs.writeFileSync(testFile, 'test content'); - + const testFile = "/tmp/test-no-read.txt"; + fs.writeFileSync(testFile, "test content"); + // Mock readFileSync to throw an error const 
originalReadFileSync = fs.readFileSync; - const readFileSyncSpy = vi.spyOn(fs, 'readFileSync').mockImplementation(() => { - throw new Error('Permission denied'); - }); - + const readFileSyncSpy = vi + .spyOn(fs, "readFileSync") + .mockImplementation(() => { + throw new Error("Permission denied"); + }); + process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + let thrownError = null; try { // Execute the script - it should throw but we catch it @@ -573,110 +604,118 @@ Special chars: \x00\x1F & "quotes" 'apostrophes' } catch (error) { thrownError = error; } - + expect(thrownError).toBeTruthy(); - expect(thrownError.message).toContain('Permission denied'); - + expect(thrownError.message).toContain("Permission denied"); + // Restore spies readFileSyncSpy.mockRestore(); consoleSpy.mockRestore(); - + // Clean up if (fs.existsSync(testFile)) { fs.unlinkSync(testFile); } }); - it('should handle binary file content', async () => { - const binaryData = Buffer.from([0x00, 0x01, 0x02, 0xFF, 0xFE, 0xFD]); - const testFile = '/tmp/test-binary.txt'; + it("should handle binary file content", async () => { + const binaryData = Buffer.from([0x00, 0x01, 0x02, 0xff, 0xfe, 0xfd]); + const testFile = "/tmp/test-binary.txt"; fs.writeFileSync(testFile, binaryData); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - + // Should handle binary data gracefully - const outputCall = mockCore.setOutput.mock.calls.find(call => call[0] === 'output'); + const outputCall = mockCore.setOutput.mock.calls.find( + call => call[0] === "output" + ); expect(outputCall).toBeDefined(); - + consoleSpy.mockRestore(); 
fs.unlinkSync(testFile); }); - it('should handle content with only whitespace', async () => { - const whitespaceContent = ' \n\n\t\t \r\n '; - const testFile = '/tmp/test-whitespace.txt'; + it("should handle content with only whitespace", async () => { + const whitespaceContent = " \n\n\t\t \r\n "; + const testFile = "/tmp/test-whitespace.txt"; fs.writeFileSync(testFile, whitespaceContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Output file is empty'); - expect(mockCore.setOutput).toHaveBeenCalledWith('output', ''); - + + expect(consoleSpy).toHaveBeenCalledWith("Output file is empty"); + expect(mockCore.setOutput).toHaveBeenCalledWith("output", ""); + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); - it('should handle very large files with mixed content', async () => { + it("should handle very large files with mixed content", async () => { // Create content that will trigger both length and line truncation - const lineContent = 'This is a line with @user and https://evil.com plus \n'; + const lineContent = + 'This is a line with @user and https://evil.com plus \n'; const repeatedContent = lineContent.repeat(70000); // Will exceed line limit - - const testFile = '/tmp/test-large-mixed.txt'; + + const testFile = "/tmp/test-large-mixed.txt"; fs.writeFileSync(testFile, repeatedContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - - const outputCall = mockCore.setOutput.mock.calls.find(call => call[0] === 'output'); + + const outputCall = 
mockCore.setOutput.mock.calls.find( + call => call[0] === "output" + ); expect(outputCall).toBeDefined(); const result = outputCall[1]; - + // Should be truncated (could be due to line count or length limit) - expect(result).toMatch(/\[Content truncated due to (line count|length)\]/); - + expect(result).toMatch( + /\[Content truncated due to (line count|length)\]/ + ); + // But should still sanitize what it processes - expect(result).toContain('`@user`'); - expect(result).toContain('(redacted)'); // evil.com - expect(result).toContain('<script>'); // XML escaping - + expect(result).toContain("`@user`"); + expect(result).toContain("(redacted)"); // evil.com + expect(result).toContain("<script>"); // XML escaping + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); - it('should preserve log message format for short content', async () => { - const shortContent = 'Short message with @user'; - const testFile = '/tmp/test-short.txt'; + it("should preserve log message format for short content", async () => { + const shortContent = "Short message with @user"; + const testFile = "/tmp/test-short.txt"; fs.writeFileSync(testFile, shortContent); process.env.GITHUB_AW_SAFE_OUTPUTS = testFile; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${sanitizeScript} })()`); - + const logCalls = consoleSpy.mock.calls; - const outputLogCall = logCalls.find(call => - call[0] && call[0].includes('Collected agentic output (sanitized):') + const outputLogCall = logCalls.find( + call => + call[0] && call[0].includes("Collected agentic output (sanitized):") ); - + expect(outputLogCall).toBeDefined(); // Should not have ... 
for short content - expect(outputLogCall[1]).not.toContain('...'); - expect(outputLogCall[1]).toContain('`@user`'); - + expect(outputLogCall[1]).not.toContain("..."); + expect(outputLogCall[1]).toContain("`@user`"); + consoleSpy.mockRestore(); fs.unlinkSync(testFile); }); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/setup_agent_output.cjs b/pkg/workflow/js/setup_agent_output.cjs index 52521fdcc2..a236db35ce 100644 --- a/pkg/workflow/js/setup_agent_output.cjs +++ b/pkg/workflow/js/setup_agent_output.cjs @@ -1,14 +1,14 @@ function main() { - const fs = require('fs'); - const crypto = require('crypto'); + const fs = require("fs"); + const crypto = require("crypto"); // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString('hex'); + const randomId = crypto.randomBytes(8).toString("hex"); const outputFile = `/tmp/aw_output_${randomId}.txt`; // Ensure the /tmp directory exists and create empty output file - fs.mkdirSync('/tmp', { recursive: true }); - fs.writeFileSync(outputFile, '', { mode: 0o644 }); + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(outputFile, "", { mode: 0o644 }); // Verify the file was created and is writable if (!fs.existsSync(outputFile)) { @@ -16,11 +16,11 @@ function main() { } // Set the environment variable for subsequent steps - core.exportVariable('GITHUB_AW_SAFE_OUTPUTS', outputFile); - console.log('Created agentic output file:', outputFile); + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + console.log("Created agentic output file:", outputFile); // Also set as step output for reference - core.setOutput('output_file', outputFile); + core.setOutput("output_file", outputFile); } -main(); \ No newline at end of file +main(); diff --git a/pkg/workflow/js/setup_agent_output.test.cjs b/pkg/workflow/js/setup_agent_output.test.cjs index 444de0ef41..89c7637b9b 100644 --- a/pkg/workflow/js/setup_agent_output.test.cjs +++ 
b/pkg/workflow/js/setup_agent_output.test.cjs @@ -1,135 +1,145 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { exportVariable: vi.fn(), - setOutput: vi.fn() + setOutput: vi.fn(), }; // Set up global variables global.core = mockCore; -describe('setup_agent_output.cjs', () => { +describe("setup_agent_output.cjs", () => { let setupScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Read the script content - const scriptPath = path.join(process.cwd(), 'pkg/workflow/js/setup_agent_output.cjs'); - setupScript = fs.readFileSync(scriptPath, 'utf8'); - + const scriptPath = path.join( + process.cwd(), + "pkg/workflow/js/setup_agent_output.cjs" + ); + setupScript = fs.readFileSync(scriptPath, "utf8"); + // Make fs available globally for the evaluated script global.fs = fs; }); afterEach(() => { // Clean up any test files - const files = fs.readdirSync('/tmp').filter(file => file.startsWith('aw_output_')); + const files = fs + .readdirSync("/tmp") + .filter(file => file.startsWith("aw_output_")); files.forEach(file => { try { - fs.unlinkSync(path.join('/tmp', file)); + fs.unlinkSync(path.join("/tmp", file)); } catch (e) { // Ignore cleanup errors } }); - + // Clean up globals delete global.fs; }); - describe('main function', () => { - it('should create output file and set environment variables', async () => { - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + describe("main function", () => { + it("should create output file and set environment variables", async () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${setupScript} })()`); - + // Check that exportVariable was called with the correct 
pattern expect(mockCore.exportVariable).toHaveBeenCalledWith( - 'GITHUB_AW_SAFE_OUTPUTS', + "GITHUB_AW_SAFE_OUTPUTS", expect.stringMatching(/^\/tmp\/aw_output_[0-9a-f]{16}\.txt$/) ); - + // Check that setOutput was called with the same file path const exportCall = mockCore.exportVariable.mock.calls[0]; const outputCall = mockCore.setOutput.mock.calls[0]; - expect(outputCall[0]).toBe('output_file'); + expect(outputCall[0]).toBe("output_file"); expect(outputCall[1]).toBe(exportCall[1]); - + // Check that the file was actually created const outputFile = exportCall[1]; expect(fs.existsSync(outputFile)).toBe(true); - + // Check that console.log was called with the correct message - expect(consoleSpy).toHaveBeenCalledWith('Created agentic output file:', outputFile); - + expect(consoleSpy).toHaveBeenCalledWith( + "Created agentic output file:", + outputFile + ); + // Check that the file is empty (as expected) - const content = fs.readFileSync(outputFile, 'utf8'); - expect(content).toBe(''); - + const content = fs.readFileSync(outputFile, "utf8"); + expect(content).toBe(""); + consoleSpy.mockRestore(); }); - it('should create unique output file names on multiple runs', async () => { - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should create unique output file names on multiple runs", async () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script multiple times await eval(`(async () => { ${setupScript} })()`); const firstFile = mockCore.exportVariable.mock.calls[0][1]; - + // Reset mocks for second run mockCore.exportVariable.mockClear(); mockCore.setOutput.mockClear(); - + await eval(`(async () => { ${setupScript} })()`); const secondFile = mockCore.exportVariable.mock.calls[0][1]; - + // Files should be different expect(firstFile).not.toBe(secondFile); - + // Both files should exist expect(fs.existsSync(firstFile)).toBe(true); expect(fs.existsSync(secondFile)).toBe(true); - + 
consoleSpy.mockRestore(); }); - it('should handle file creation failure gracefully', async () => { + it("should handle file creation failure gracefully", async () => { // Mock fs.writeFileSync to throw an error const originalWriteFileSync = fs.writeFileSync; fs.writeFileSync = vi.fn().mockImplementation(() => { - throw new Error('Permission denied'); + throw new Error("Permission denied"); }); - + try { await eval(`(async () => { ${setupScript} })()`); - expect.fail('Should have thrown an error'); + expect.fail("Should have thrown an error"); } catch (error) { - expect(error.message).toBe('Permission denied'); + expect(error.message).toBe("Permission denied"); } - + // Restore original function fs.writeFileSync = originalWriteFileSync; }); - it('should verify file existence and throw error if file creation fails', async () => { + it("should verify file existence and throw error if file creation fails", async () => { // Mock fs.existsSync to return false (simulating failed file creation) const originalExistsSync = fs.existsSync; fs.existsSync = vi.fn().mockReturnValue(false); - + try { await eval(`(async () => { ${setupScript} })()`); - expect.fail('Should have thrown an error'); + expect.fail("Should have thrown an error"); } catch (error) { - expect(error.message).toMatch(/^Failed to create output file: \/tmp\/aw_output_[0-9a-f]{16}\.txt$/); + expect(error.message).toMatch( + /^Failed to create output file: \/tmp\/aw_output_[0-9a-f]{16}\.txt$/ + ); } - + // Restore original function fs.existsSync = originalExistsSync; }); }); -}); \ No newline at end of file +}); diff --git a/pkg/workflow/js/update_issue.cjs b/pkg/workflow/js/update_issue.cjs index 6b52a491c3..4f34d79825 100644 --- a/pkg/workflow/js/update_issue.cjs +++ b/pkg/workflow/js/update_issue.cjs @@ -2,35 +2,40 @@ async function main() { // Read the validated output content from environment variable const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; if (!outputContent) { - console.log('No 
GITHUB_AW_AGENT_OUTPUT environment variable found'); + console.log("No GITHUB_AW_AGENT_OUTPUT environment variable found"); return; } - if (outputContent.trim() === '') { - console.log('Agent output content is empty'); + if (outputContent.trim() === "") { + console.log("Agent output content is empty"); return; } - console.log('Agent output content length:', outputContent.length); + console.log("Agent output content length:", outputContent.length); // Parse the validated output JSON let validatedOutput; try { validatedOutput = JSON.parse(outputContent); } catch (error) { - console.log('Error parsing agent output JSON:', error instanceof Error ? error.message : String(error)); + console.log( + "Error parsing agent output JSON:", + error instanceof Error ? error.message : String(error) + ); return; } if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - console.log('No valid items found in agent output'); + console.log("No valid items found in agent output"); return; } // Find all update-issue items - const updateItems = validatedOutput.items.filter(/** @param {any} item */ item => item.type === 'update-issue'); + const updateItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "update-issue" + ); if (updateItems.length === 0) { - console.log('No update-issue items found in agent output'); + console.log("No update-issue items found in agent output"); return; } @@ -38,19 +43,24 @@ async function main() { // Get the configuration from environment variables const updateTarget = process.env.GITHUB_AW_UPDATE_TARGET || "triggering"; - const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === 'true'; - const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === 'true'; - const canUpdateBody = process.env.GITHUB_AW_UPDATE_BODY === 'true'; + const canUpdateStatus = process.env.GITHUB_AW_UPDATE_STATUS === "true"; + const canUpdateTitle = process.env.GITHUB_AW_UPDATE_TITLE === "true"; + const canUpdateBody = 
process.env.GITHUB_AW_UPDATE_BODY === "true"; console.log(`Update target configuration: ${updateTarget}`); - console.log(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}`); + console.log( + `Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}` + ); // Check if we're in an issue context - const isIssueContext = context.eventName === 'issues' || context.eventName === 'issue_comment'; + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; // Validate context based on target configuration if (updateTarget === "triggering" && !isIssueContext) { - console.log('Target is "triggering" but not running in issue context, skipping issue update'); + console.log( + 'Target is "triggering" but not running in issue context, skipping issue update' + ); return; } @@ -69,18 +79,24 @@ async function main() { if (updateItem.issue_number) { issueNumber = parseInt(updateItem.issue_number, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number specified: ${updateItem.issue_number}`); + console.log( + `Invalid issue number specified: ${updateItem.issue_number}` + ); continue; } } else { - console.log('Target is "*" but no issue_number specified in update item'); + console.log( + 'Target is "*" but no issue_number specified in update item' + ); continue; } } else if (updateTarget && updateTarget !== "triggering") { // Explicit issue number specified in target issueNumber = parseInt(updateTarget, 10); if (isNaN(issueNumber) || issueNumber <= 0) { - console.log(`Invalid issue number in target configuration: ${updateTarget}`); + console.log( + `Invalid issue number in target configuration: ${updateTarget}` + ); continue; } } else { @@ -89,17 +105,17 @@ async function main() { if (context.payload.issue) { issueNumber = context.payload.issue.number; } else { - console.log('Issue context detected but no issue found in payload'); + 
console.log("Issue context detected but no issue found in payload"); continue; } } else { - console.log('Could not determine issue number'); + console.log("Could not determine issue number"); continue; } } if (!issueNumber) { - console.log('Could not determine issue number'); + console.log("Could not determine issue number"); continue; } @@ -111,37 +127,42 @@ async function main() { if (canUpdateStatus && updateItem.status !== undefined) { // Validate status value - if (updateItem.status === 'open' || updateItem.status === 'closed') { + if (updateItem.status === "open" || updateItem.status === "closed") { updateData.state = updateItem.status; hasUpdates = true; console.log(`Will update status to: ${updateItem.status}`); } else { - console.log(`Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'`); + console.log( + `Invalid status value: ${updateItem.status}. Must be 'open' or 'closed'` + ); } } if (canUpdateTitle && updateItem.title !== undefined) { - if (typeof updateItem.title === 'string' && updateItem.title.trim().length > 0) { + if ( + typeof updateItem.title === "string" && + updateItem.title.trim().length > 0 + ) { updateData.title = updateItem.title.trim(); hasUpdates = true; console.log(`Will update title to: ${updateItem.title.trim()}`); } else { - console.log('Invalid title value: must be a non-empty string'); + console.log("Invalid title value: must be a non-empty string"); } } if (canUpdateBody && updateItem.body !== undefined) { - if (typeof updateItem.body === 'string') { + if (typeof updateItem.body === "string") { updateData.body = updateItem.body; hasUpdates = true; console.log(`Will update body (length: ${updateItem.body.length})`); } else { - console.log('Invalid body value: must be a string'); + console.log("Invalid body value: must be a string"); } } if (!hasUpdates) { - console.log('No valid updates to apply for this item'); + console.log("No valid updates to apply for this item"); continue; } @@ -151,26 +172,29 @@ async 
function main() { owner: context.repo.owner, repo: context.repo.repo, issue_number: issueNumber, - ...updateData + ...updateData, }); - console.log('Updated issue #' + issue.number + ': ' + issue.html_url); + console.log("Updated issue #" + issue.number + ": " + issue.html_url); updatedIssues.push(issue); // Set output for the last updated issue (for backward compatibility) if (i === updateItems.length - 1) { - core.setOutput('issue_number', issue.number); - core.setOutput('issue_url', issue.html_url); + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); } } catch (error) { - console.error(`✗ Failed to update issue #${issueNumber}:`, error instanceof Error ? error.message : String(error)); + console.error( + `✗ Failed to update issue #${issueNumber}:`, + error instanceof Error ? error.message : String(error) + ); throw error; } } // Write summary for all updated issues if (updatedIssues.length > 0) { - let summaryContent = '\n\n## Updated Issues\n'; + let summaryContent = "\n\n## Updated Issues\n"; for (const issue of updatedIssues) { summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; } diff --git a/pkg/workflow/js/update_issue.test.cjs b/pkg/workflow/js/update_issue.test.cjs index 3f5351d91e..d365e3b7c3 100644 --- a/pkg/workflow/js/update_issue.test.cjs +++ b/pkg/workflow/js/update_issue.test.cjs @@ -1,6 +1,6 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import fs from 'fs'; -import path from 'path'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import fs from "fs"; +import path from "path"; // Mock the global objects that GitHub Actions provides const mockCore = { @@ -8,29 +8,29 @@ const mockCore = { setOutput: vi.fn(), summary: { addRaw: vi.fn().mockReturnThis(), - write: vi.fn() - } + write: vi.fn(), + }, }; const mockGithub = { rest: { issues: { - update: vi.fn() - } - } + update: vi.fn(), + }, + }, }; const mockContext = { - eventName: 
'issues', + eventName: "issues", repo: { - owner: 'testowner', - repo: 'testrepo' + owner: "testowner", + repo: "testrepo", }, payload: { issue: { - number: 123 - } - } + number: 123, + }, + }, }; // Set up global variables @@ -38,261 +38,286 @@ global.core = mockCore; global.github = mockGithub; global.context = mockContext; -describe('update_issue.cjs', () => { +describe("update_issue.cjs", () => { let updateIssueScript; beforeEach(() => { // Reset all mocks vi.clearAllMocks(); - + // Reset environment variables delete process.env.GITHUB_AW_AGENT_OUTPUT; delete process.env.GITHUB_AW_UPDATE_STATUS; delete process.env.GITHUB_AW_UPDATE_TITLE; delete process.env.GITHUB_AW_UPDATE_BODY; delete process.env.GITHUB_AW_UPDATE_TARGET; - + // Set default values - process.env.GITHUB_AW_UPDATE_STATUS = 'false'; - process.env.GITHUB_AW_UPDATE_TITLE = 'false'; - process.env.GITHUB_AW_UPDATE_BODY = 'false'; - + process.env.GITHUB_AW_UPDATE_STATUS = "false"; + process.env.GITHUB_AW_UPDATE_TITLE = "false"; + process.env.GITHUB_AW_UPDATE_BODY = "false"; + // Read the script - const scriptPath = path.join(__dirname, 'update_issue.cjs'); - updateIssueScript = fs.readFileSync(scriptPath, 'utf8'); + const scriptPath = path.join(__dirname, "update_issue.cjs"); + updateIssueScript = fs.readFileSync(scriptPath, "utf8"); }); - it('should skip when no agent output is provided', async () => { - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should skip when no agent output is provided", async () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No GITHUB_AW_AGENT_OUTPUT environment variable found'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No GITHUB_AW_AGENT_OUTPUT environment variable found" + ); expect(mockGithub.rest.issues.update).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - 
it('should skip when agent output is empty', async () => { - process.env.GITHUB_AW_AGENT_OUTPUT = ' '; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + it("should skip when agent output is empty", async () => { + process.env.GITHUB_AW_AGENT_OUTPUT = " "; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Agent output content is empty'); + + expect(consoleSpy).toHaveBeenCalledWith("Agent output content is empty"); expect(mockGithub.rest.issues.update).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should skip when not in issue context for triggering target', async () => { + it("should skip when not in issue context for triggering target", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - title: 'Updated title' - }] + items: [ + { + type: "update-issue", + title: "Updated title", + }, + ], }); - process.env.GITHUB_AW_UPDATE_TITLE = 'true'; - global.context.eventName = 'push'; // Not an issue event - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + process.env.GITHUB_AW_UPDATE_TITLE = "true"; + global.context.eventName = "push"; // Not an issue event + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Target is "triggering" but not running in issue context, skipping issue update'); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Target is "triggering" but not running in issue context, skipping issue update' + ); expect(mockGithub.rest.issues.update).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should update issue title successfully', async () => { + it("should update issue title successfully", async () => { 
process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - title: 'Updated issue title' - }] + items: [ + { + type: "update-issue", + title: "Updated issue title", + }, + ], }); - process.env.GITHUB_AW_UPDATE_TITLE = 'true'; - global.context.eventName = 'issues'; - + process.env.GITHUB_AW_UPDATE_TITLE = "true"; + global.context.eventName = "issues"; + const mockIssue = { number: 123, - title: 'Updated issue title', - html_url: 'https://github.com/testowner/testrepo/issues/123' + title: "Updated issue title", + html_url: "https://github.com/testowner/testrepo/issues/123", }; - + mockGithub.rest.issues.update.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - + expect(mockGithub.rest.issues.update).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - title: 'Updated issue title' + title: "Updated issue title", }); - - expect(mockCore.setOutput).toHaveBeenCalledWith('issue_number', 123); - expect(mockCore.setOutput).toHaveBeenCalledWith('issue_url', mockIssue.html_url); + + expect(mockCore.setOutput).toHaveBeenCalledWith("issue_number", 123); + expect(mockCore.setOutput).toHaveBeenCalledWith( + "issue_url", + mockIssue.html_url + ); expect(mockCore.summary.addRaw).toHaveBeenCalled(); expect(mockCore.summary.write).toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should update issue status successfully', async () => { + it("should update issue status successfully", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - status: 'closed' - }] + items: [ + { + type: "update-issue", + status: "closed", + }, + ], }); - process.env.GITHUB_AW_UPDATE_STATUS = 'true'; - 
global.context.eventName = 'issues'; - + process.env.GITHUB_AW_UPDATE_STATUS = "true"; + global.context.eventName = "issues"; + const mockIssue = { number: 123, - html_url: 'https://github.com/testowner/testrepo/issues/123' + html_url: "https://github.com/testowner/testrepo/issues/123", }; - + mockGithub.rest.issues.update.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - + expect(mockGithub.rest.issues.update).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - state: 'closed' + state: "closed", }); - + consoleSpy.mockRestore(); }); - it('should update multiple fields successfully', async () => { + it("should update multiple fields successfully", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - title: 'New title', - body: 'New body content', - status: 'open' - }] + items: [ + { + type: "update-issue", + title: "New title", + body: "New body content", + status: "open", + }, + ], }); - process.env.GITHUB_AW_UPDATE_TITLE = 'true'; - process.env.GITHUB_AW_UPDATE_BODY = 'true'; - process.env.GITHUB_AW_UPDATE_STATUS = 'true'; - global.context.eventName = 'issues'; - + process.env.GITHUB_AW_UPDATE_TITLE = "true"; + process.env.GITHUB_AW_UPDATE_BODY = "true"; + process.env.GITHUB_AW_UPDATE_STATUS = "true"; + global.context.eventName = "issues"; + const mockIssue = { number: 123, - html_url: 'https://github.com/testowner/testrepo/issues/123' + html_url: "https://github.com/testowner/testrepo/issues/123", }; - + mockGithub.rest.issues.update.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, 
"log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - + expect(mockGithub.rest.issues.update).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 123, - title: 'New title', - body: 'New body content', - state: 'open' + title: "New title", + body: "New body content", + state: "open", }); - + consoleSpy.mockRestore(); }); it('should handle explicit issue number with target "*"', async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - issue_number: 456, - title: 'Updated title' - }] + items: [ + { + type: "update-issue", + issue_number: 456, + title: "Updated title", + }, + ], }); - process.env.GITHUB_AW_UPDATE_TITLE = 'true'; - process.env.GITHUB_AW_UPDATE_TARGET = '*'; - global.context.eventName = 'push'; // Not an issue event, but should work with explicit target - + process.env.GITHUB_AW_UPDATE_TITLE = "true"; + process.env.GITHUB_AW_UPDATE_TARGET = "*"; + global.context.eventName = "push"; // Not an issue event, but should work with explicit target + const mockIssue = { number: 456, - html_url: 'https://github.com/testowner/testrepo/issues/456' + html_url: "https://github.com/testowner/testrepo/issues/456", }; - + mockGithub.rest.issues.update.mockResolvedValue({ data: mockIssue }); - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - + expect(mockGithub.rest.issues.update).toHaveBeenCalledWith({ - owner: 'testowner', - repo: 'testrepo', + owner: "testowner", + repo: "testrepo", issue_number: 456, - title: 'Updated title' + title: "Updated title", }); - + consoleSpy.mockRestore(); }); - it('should skip when no valid updates are provided', async () => { + it("should skip when no valid updates are 
provided", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - title: 'New title' - }] + items: [ + { + type: "update-issue", + title: "New title", + }, + ], }); // All update flags are false - process.env.GITHUB_AW_UPDATE_STATUS = 'false'; - process.env.GITHUB_AW_UPDATE_TITLE = 'false'; - process.env.GITHUB_AW_UPDATE_BODY = 'false'; - global.context.eventName = 'issues'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + process.env.GITHUB_AW_UPDATE_STATUS = "false"; + process.env.GITHUB_AW_UPDATE_TITLE = "false"; + process.env.GITHUB_AW_UPDATE_BODY = "false"; + global.context.eventName = "issues"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('No valid updates to apply for this item'); + + expect(consoleSpy).toHaveBeenCalledWith( + "No valid updates to apply for this item" + ); expect(mockGithub.rest.issues.update).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); - it('should validate status values', async () => { + it("should validate status values", async () => { process.env.GITHUB_AW_AGENT_OUTPUT = JSON.stringify({ - items: [{ - type: 'update-issue', - status: 'invalid' - }] + items: [ + { + type: "update-issue", + status: "invalid", + }, + ], }); - process.env.GITHUB_AW_UPDATE_STATUS = 'true'; - global.context.eventName = 'issues'; - - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - + process.env.GITHUB_AW_UPDATE_STATUS = "true"; + global.context.eventName = "issues"; + + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + // Execute the script await eval(`(async () => { ${updateIssueScript} })()`); - - expect(consoleSpy).toHaveBeenCalledWith('Invalid status value: invalid. 
Must be \'open\' or \'closed\''); + + expect(consoleSpy).toHaveBeenCalledWith( + "Invalid status value: invalid. Must be 'open' or 'closed'" + ); expect(mockGithub.rest.issues.update).not.toHaveBeenCalled(); - + consoleSpy.mockRestore(); }); }); diff --git a/pkg/workflow/output_config_test.go b/pkg/workflow/output_config_test.go index f68ac20889..be77c5daf1 100644 --- a/pkg/workflow/output_config_test.go +++ b/pkg/workflow/output_config_test.go @@ -114,3 +114,111 @@ func TestAllowedDomainsInWorkflow(t *testing.T) { } } } + +func TestCreateDiscussionConfigParsing(t *testing.T) { + tests := []struct { + name string + frontmatter map[string]any + expectedTitlePrefix string + expectedCategoryId string + expectedMax int + expectConfig bool + }{ + { + name: "no create-discussion config", + frontmatter: map[string]any{ + "engine": "claude", + }, + expectConfig: false, + }, + { + name: "basic create-discussion config", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-discussion": map[string]any{}, + }, + }, + expectedTitlePrefix: "", + expectedCategoryId: "", + expectedMax: 1, // default + expectConfig: true, + }, + { + name: "create-discussion with title-prefix", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-discussion": map[string]any{ + "title-prefix": "[ai] ", + }, + }, + }, + expectedTitlePrefix: "[ai] ", + expectedCategoryId: "", + expectedMax: 1, + expectConfig: true, + }, + { + name: "create-discussion with category-id", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-discussion": map[string]any{ + "category-id": "DIC_kwDOGFsHUM4BsUn3", + }, + }, + }, + expectedTitlePrefix: "", + expectedCategoryId: "DIC_kwDOGFsHUM4BsUn3", + expectedMax: 1, + expectConfig: true, + }, + { + name: "create-discussion with all options", + frontmatter: map[string]any{ + "safe-outputs": map[string]any{ + "create-discussion": map[string]any{ + "title-prefix": "[research] ", + "category-id": 
"DIC_kwDOGFsHUM4BsUn3", + "max": 3, + }, + }, + }, + expectedTitlePrefix: "[research] ", + expectedCategoryId: "DIC_kwDOGFsHUM4BsUn3", + expectedMax: 3, + expectConfig: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCompiler(false, "", "test") + config := c.extractSafeOutputsConfig(tt.frontmatter) + + if !tt.expectConfig { + if config != nil && config.CreateDiscussions != nil { + t.Errorf("Expected no create-discussion config, but got one") + } + return + } + + if config == nil || config.CreateDiscussions == nil { + t.Errorf("Expected create-discussion config, but got nil") + return + } + + discussionConfig := config.CreateDiscussions + + if discussionConfig.TitlePrefix != tt.expectedTitlePrefix { + t.Errorf("Expected title prefix %q, but got %q", tt.expectedTitlePrefix, discussionConfig.TitlePrefix) + } + + if discussionConfig.CategoryId != tt.expectedCategoryId { + t.Errorf("Expected category ID %q, but got %q", tt.expectedCategoryId, discussionConfig.CategoryId) + } + + if discussionConfig.Max != tt.expectedMax { + t.Errorf("Expected max %d, but got %d", tt.expectedMax, discussionConfig.Max) + } + }) + } +} diff --git a/pkg/workflow/output_pr_review_comment_test.go b/pkg/workflow/output_pr_review_comment_test.go new file mode 100644 index 0000000000..62e0d57e8a --- /dev/null +++ b/pkg/workflow/output_pr_review_comment_test.go @@ -0,0 +1,269 @@ +package workflow + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestPRReviewCommentConfigParsing(t *testing.T) { + // Create temporary directory for test files + tmpDir, err := os.MkdirTemp("", "output-pr-review-comment-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + t.Run("basic PR review comment configuration", func(t *testing.T) { + // Test case with basic create-pull-request-review-comment configuration + testContent := `--- +on: pull_request +permissions: + contents: read + pull-requests: write +engine: claude 
+safe-outputs: + create-pull-request-review-comment: +--- + +# Test PR Review Comment Configuration + +This workflow tests the create-pull-request-review-comment configuration parsing. +` + + testFile := filepath.Join(tmpDir, "test-pr-review-comment-basic.md") + if err := os.WriteFile(testFile, []byte(testContent), 0644); err != nil { + t.Fatal(err) + } + + compiler := NewCompiler(false, "", "test") + + // Parse the workflow data + workflowData, err := compiler.parseWorkflowFile(testFile) + if err != nil { + t.Fatalf("Unexpected error parsing workflow with PR review comment config: %v", err) + } + + // Verify output configuration is parsed correctly + if workflowData.SafeOutputs == nil { + t.Fatal("Expected safe-outputs configuration to be parsed") + } + + if workflowData.SafeOutputs.CreatePullRequestReviewComments == nil { + t.Fatal("Expected create-pull-request-review-comment configuration to be parsed") + } + + // Check default values + config := workflowData.SafeOutputs.CreatePullRequestReviewComments + if config.Max != 10 { + t.Errorf("Expected default max to be 10, got %d", config.Max) + } + + if config.Side != "RIGHT" { + t.Errorf("Expected default side to be RIGHT, got %s", config.Side) + } + }) + + t.Run("PR review comment configuration with custom values", func(t *testing.T) { + // Test case with custom PR review comment configuration + testContent := `--- +on: pull_request +engine: claude +safe-outputs: + create-pull-request-review-comment: + max: 5 + side: "LEFT" +--- + +# Test PR Review Comment Configuration with Custom Values + +This workflow tests custom configuration values. 
+` + + testFile := filepath.Join(tmpDir, "test-pr-review-comment-custom.md") + if err := os.WriteFile(testFile, []byte(testContent), 0644); err != nil { + t.Fatal(err) + } + + compiler := NewCompiler(false, "", "test") + + // Parse the workflow data + workflowData, err := compiler.parseWorkflowFile(testFile) + if err != nil { + t.Fatalf("Unexpected error parsing workflow with custom PR review comment config: %v", err) + } + + // Verify custom configuration values + if workflowData.SafeOutputs == nil || workflowData.SafeOutputs.CreatePullRequestReviewComments == nil { + t.Fatal("Expected create-pull-request-review-comment configuration to be parsed") + } + + config := workflowData.SafeOutputs.CreatePullRequestReviewComments + if config.Max != 5 { + t.Errorf("Expected max to be 5, got %d", config.Max) + } + + if config.Side != "LEFT" { + t.Errorf("Expected side to be LEFT, got %s", config.Side) + } + }) + + t.Run("PR review comment configuration with null value", func(t *testing.T) { + // Test case with null PR review comment configuration + testContent := `--- +on: pull_request +engine: claude +safe-outputs: + create-pull-request-review-comment: null +--- + +# Test PR Review Comment Configuration with Null + +This workflow tests null configuration. 
+` + + testFile := filepath.Join(tmpDir, "test-pr-review-comment-null.md") + if err := os.WriteFile(testFile, []byte(testContent), 0644); err != nil { + t.Fatal(err) + } + + compiler := NewCompiler(false, "", "test") + + // Parse the workflow data + workflowData, err := compiler.parseWorkflowFile(testFile) + if err != nil { + t.Fatalf("Unexpected error parsing workflow with null PR review comment config: %v", err) + } + + // Verify null configuration is handled correctly (should create default config) + if workflowData.SafeOutputs == nil || workflowData.SafeOutputs.CreatePullRequestReviewComments == nil { + t.Fatal("Expected create-pull-request-review-comment configuration to be parsed even with null value") + } + + config := workflowData.SafeOutputs.CreatePullRequestReviewComments + if config.Max != 10 { + t.Errorf("Expected default max to be 10 for null config, got %d", config.Max) + } + + if config.Side != "RIGHT" { + t.Errorf("Expected default side to be RIGHT for null config, got %s", config.Side) + } + }) + + t.Run("PR review comment configuration rejects invalid side values", func(t *testing.T) { + // Test case with invalid side value (should be rejected by schema validation) + testContent := `--- +on: pull_request +engine: claude +safe-outputs: + create-pull-request-review-comment: + max: 2 + side: "INVALID_SIDE" +--- + +# Test PR Review Comment Configuration with Invalid Side + +This workflow tests invalid side value handling. 
+` + + testFile := filepath.Join(tmpDir, "test-pr-review-comment-invalid-side.md") + if err := os.WriteFile(testFile, []byte(testContent), 0644); err != nil { + t.Fatal(err) + } + + compiler := NewCompiler(false, "", "test") + + // Parse the workflow data - this should fail due to schema validation + _, err := compiler.parseWorkflowFile(testFile) + if err == nil { + t.Fatal("Expected error parsing workflow with invalid side value, but got none") + } + + // Verify error message mentions the invalid side value + if !strings.Contains(err.Error(), "value must be one of 'LEFT', 'RIGHT'") { + t.Errorf("Expected error message to mention valid side values, got: %v", err) + } + }) +} + +func TestPRReviewCommentJobGeneration(t *testing.T) { + // Create temporary directory for test files + tmpDir, err := os.MkdirTemp("", "pr-review-comment-job-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + t.Run("generate PR review comment job", func(t *testing.T) { + testContent := `--- +on: pull_request +engine: claude +safe-outputs: + create-pull-request-review-comment: + max: 3 + side: "LEFT" +--- + +# Test PR Review Comment Job Generation + +This workflow tests job generation for PR review comments. 
+` + + testFile := filepath.Join(tmpDir, "test-pr-review-comment-job.md") + if err := os.WriteFile(testFile, []byte(testContent), 0644); err != nil { + t.Fatal(err) + } + + compiler := NewCompiler(false, "", "test") + + // Compile the workflow + err := compiler.CompileWorkflow(testFile) + if err != nil { + t.Fatalf("Unexpected error compiling workflow: %v", err) + } + + // Check that the output file exists + outputFile := filepath.Join(tmpDir, "test-pr-review-comment-job.lock.yml") + if _, err := os.Stat(outputFile); os.IsNotExist(err) { + t.Fatal("Expected output file to be created") + } + + // Read the output content + content, err := os.ReadFile(outputFile) + if err != nil { + t.Fatal(err) + } + + workflowContent := string(content) + + // Verify the PR review comment job is generated + if !strings.Contains(workflowContent, "create_pr_review_comment:") { + t.Error("Expected create_pr_review_comment job to be generated") + } + + // Verify job condition is correct for PR context + if !strings.Contains(workflowContent, "if: github.event.pull_request.number") { + t.Error("Expected job condition to check for pull request context") + } + + // Verify correct permissions are set + if !strings.Contains(workflowContent, "pull-requests: write") { + t.Error("Expected pull-requests: write permission to be set") + } + + // Verify environment variables are passed + if !strings.Contains(workflowContent, "GITHUB_AW_AGENT_OUTPUT:") { + t.Error("Expected GITHUB_AW_AGENT_OUTPUT environment variable to be passed") + } + + if !strings.Contains(workflowContent, `GITHUB_AW_PR_REVIEW_COMMENT_SIDE: "LEFT"`) { + t.Error("Expected GITHUB_AW_PR_REVIEW_COMMENT_SIDE environment variable to be set to LEFT") + } + + // Verify the JavaScript script is embedded + if !strings.Contains(workflowContent, "create-pull-request-review-comment") { + t.Error("Expected PR review comment script to be embedded") + } + }) +}