diff --git a/pkg/cli/logs_download_test.go b/pkg/cli/logs_download_test.go
new file mode 100644
index 0000000000..5df10f43a6
--- /dev/null
+++ b/pkg/cli/logs_download_test.go
@@ -0,0 +1,537 @@
+package cli
+
+import (
+	"archive/zip"
+	"context"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/githubnext/gh-aw/pkg/cli/fileutil"
+	"github.com/githubnext/gh-aw/pkg/testutil"
+)
+
+func TestDownloadWorkflowLogs(t *testing.T) {
+	t.Skip("Skipping slow network-dependent test")
+
+	// Test the DownloadWorkflowLogs function
+	// This should either fail with auth error (if not authenticated)
+	// or succeed with no results (if authenticated but no workflows match)
+	err := DownloadWorkflowLogs(context.Background(), "", 1, "", "", "./test-logs", "", "", 0, 0, "", false, false, false, false, false, false, false, 0, false, "summary.json", "")
+
+	// If GitHub CLI is authenticated, the function may succeed but find no results
+	// If not authenticated, it should return an auth error
+	if err != nil {
+		// If there's an error, it should be an authentication or workflow-related error
+		errMsg := strings.ToLower(err.Error())
+		if !strings.Contains(errMsg, "authentication required") &&
+			!strings.Contains(errMsg, "failed to list workflow runs") &&
+			!strings.Contains(errMsg, "exit status 1") {
+			t.Errorf("Expected authentication error, workflow listing error, or no error, got: %v", err)
+		}
+	}
+	// If err is nil, that's also acceptable (authenticated case with no results)
+
+	// Clean up
+	os.RemoveAll("./test-logs")
+}
+
+func TestDirExists(t *testing.T) {
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Test existing directory
+	if !fileutil.DirExists(tmpDir) {
+		t.Errorf("DirExists should return true for existing directory")
+	}
+
+	// Test non-existing directory
+	nonExistentDir := filepath.Join(tmpDir, "does-not-exist")
+	if fileutil.DirExists(nonExistentDir) {
+		t.Errorf("DirExists should return false for non-existing directory")
+	}
+
+	// Test file vs directory
+	testFile := filepath.Join(tmpDir, "testfile")
+	err := os.WriteFile(testFile, []byte("test"), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test file: %v", err)
+	}
+
+	if fileutil.DirExists(testFile) {
+		t.Errorf("DirExists should return false for a file")
+	}
+}
+
+func TestIsDirEmpty(t *testing.T) {
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Test empty directory
+	emptyDir := filepath.Join(tmpDir, "empty")
+	err := os.Mkdir(emptyDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create empty directory: %v", err)
+	}
+
+	if !fileutil.IsDirEmpty(emptyDir) {
+		t.Errorf("IsDirEmpty should return true for empty directory")
+	}
+
+	// Test directory with files
+	nonEmptyDir := filepath.Join(tmpDir, "nonempty")
+	err = os.Mkdir(nonEmptyDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create non-empty directory: %v", err)
+	}
+
+	testFile := filepath.Join(nonEmptyDir, "testfile")
+	err = os.WriteFile(testFile, []byte("test"), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test file: %v", err)
+	}
+
+	if fileutil.IsDirEmpty(nonEmptyDir) {
+		t.Errorf("IsDirEmpty should return false for directory with files")
+	}
+
+	// Test non-existing directory
+	nonExistentDir := filepath.Join(tmpDir, "does-not-exist")
+	if !fileutil.IsDirEmpty(nonExistentDir) {
+		t.Errorf("IsDirEmpty should return true for non-existing directory")
+	}
+}
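Reviewer note: the `fileutil` helpers exercised above are not part of this diff. A minimal sketch of what they plausibly look like, inferred from the assertions (note the test requires a non-existent directory to count as empty); the real package may differ:

```go
package fileutil

import "os"

// DirExists reports whether path exists and is a directory.
func DirExists(path string) bool {
	info, err := os.Stat(path)
	return err == nil && info.IsDir()
}

// IsDirEmpty reports whether a directory has no entries. Matching the
// test's expectation, a missing or unreadable directory counts as empty.
func IsDirEmpty(path string) bool {
	entries, err := os.ReadDir(path)
	if err != nil {
		return true
	}
	return len(entries) == 0
}
```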
+
+func TestErrNoArtifacts(t *testing.T) {
+	// Test that ErrNoArtifacts is properly defined and can be used with errors.Is
+	err := ErrNoArtifacts
+	if !errors.Is(err, ErrNoArtifacts) {
+		t.Errorf("errors.Is should return true for ErrNoArtifacts")
+	}
+
+	// Test that building a new error from the message does not wrap the sentinel
+	wrappedErr := errors.New("wrapped: " + ErrNoArtifacts.Error())
+	if errors.Is(wrappedErr, ErrNoArtifacts) {
+		t.Errorf("errors.Is should return false for an error that copies the message instead of wrapping the sentinel")
+	}
+}
+
+func TestListWorkflowRunsWithPagination(t *testing.T) {
+	// Test that listWorkflowRunsWithPagination properly adds beforeDate filter
+	// Since we can't easily mock the GitHub CLI, we'll test with known auth issues
+
+	// This should fail with authentication error (if not authenticated)
+	// or succeed with empty results (if authenticated but no workflows match)
+	runs, _, err := listWorkflowRunsWithPagination(ListWorkflowRunsOptions{
+		WorkflowName:   "nonexistent-workflow",
+		Limit:          5,
+		BeforeDate:     "2024-01-01T00:00:00Z",
+		ProcessedCount: 0,
+		TargetCount:    5,
+		Verbose:        false,
+	})
+
+	if err != nil {
+		// If there's an error, it should be an authentication error or workflow not found
+		if !strings.Contains(err.Error(), "authentication required") && !strings.Contains(err.Error(), "failed to list workflow runs") {
+			t.Errorf("Expected authentication error or workflow error, got: %v", err)
+		}
+	} else {
+		// If no error, should return empty results for nonexistent workflow
+		if len(runs) > 0 {
+			t.Errorf("Expected empty results for nonexistent workflow, got %d runs", len(runs))
+		}
+	}
+}
+
+func TestIterativeAlgorithmConstants(t *testing.T) {
+	// Test that our constants are reasonable
+	if MaxIterations <= 0 {
+		t.Errorf("MaxIterations should be positive, got %d", MaxIterations)
+	}
+	if MaxIterations > 20 {
+		t.Errorf("MaxIterations seems too high (%d), could cause performance issues", MaxIterations)
+	}
+
+	if BatchSize <= 0 {
+		t.Errorf("BatchSize should be positive, got %d", BatchSize)
+	}
+	if BatchSize > 100 {
+		t.Errorf("BatchSize seems too high (%d), might hit GitHub API limits", BatchSize)
+	}
+}
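The pagination and constants tests above imply an iterative fetch loop bounded by `MaxIterations` and `BatchSize`. A hedged sketch of that shape — `fetchUntilCount` is a hypothetical name, and the real loop in this package almost certainly threads more options through `ListWorkflowRunsOptions`:

```go
// Hypothetical sketch of the iterative fetch loop the constants govern.
// fetch is a stand-in for one GitHub CLI page request of up to BatchSize runs.
func fetchUntilCount(target int, fetch func(limit int) ([]WorkflowRun, error)) ([]WorkflowRun, error) {
	var collected []WorkflowRun
	for i := 0; i < MaxIterations && len(collected) < target; i++ {
		batch, err := fetch(BatchSize)
		if err != nil {
			return nil, err
		}
		if len(batch) == 0 {
			break // no more runs available
		}
		collected = append(collected, batch...)
	}
	if len(collected) > target {
		collected = collected[:target]
	}
	return collected, nil
}
```

The `MaxIterations` cap is what keeps this loop from hammering the API when few runs have artifacts, which is why the test treats a value above 20 as suspicious.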
+
+func TestDownloadWorkflowLogsWithEngineFilter(t *testing.T) {
+	t.Skip("Skipping slow network-dependent test")
+
+	// Test that the engine filter parameter is properly validated and passed through
+	tests := []struct {
+		name        string
+		engine      string
+		expectError bool
+		errorText   string
+	}{
+		{
+			name:        "valid claude engine",
+			engine:      "claude",
+			expectError: false,
+		},
+		{
+			name:        "valid codex engine",
+			engine:      "codex",
+			expectError: false,
+		},
+		{
+			name:        "valid copilot engine",
+			engine:      "copilot",
+			expectError: false,
+		},
+		{
+			name:        "empty engine (no filter)",
+			engine:      "",
+			expectError: false,
+		},
+		{
+			name:        "invalid engine",
+			engine:      "gpt",
+			expectError: true,
+			errorText:   "invalid engine value 'gpt'",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// This function should validate the engine parameter
+			// If invalid, it would exit in the actual command but we can't test that easily
+			// So we just test that valid engines don't cause immediate errors
+			if !tt.expectError {
+				// For valid engines, test that the function can be called without panic
+				// It may still fail with auth errors, which is expected
+				err := DownloadWorkflowLogs(context.Background(), "", 1, "", "", "./test-logs", tt.engine, "", 0, 0, "", false, false, false, false, false, false, false, 0, false, "summary.json", "")
+
+				// Clean up any created directories
+				os.RemoveAll("./test-logs")
+
+				// If there's an error, it should be auth or workflow-related, not parameter validation
+				if err != nil {
+					errMsg := strings.ToLower(err.Error())
+					if strings.Contains(errMsg, "invalid engine") {
+						t.Errorf("Got engine validation error for valid engine '%s': %v", tt.engine, err)
+					}
+				}
+			}
+		})
+	}
+}
+
+func TestUnzipFile(t *testing.T) {
+	// Create a temporary directory for the test
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Create a test zip file
+	zipPath := filepath.Join(tmpDir, "test.zip")
+	zipFile, err := os.Create(zipPath)
+	if err != nil {
+		t.Fatalf("Failed to create test zip file: %v", err)
+	}
+
+	zipWriter := zip.NewWriter(zipFile)
+
+	// Add a test file to the zip
+	testContent := "This is test content for workflow logs"
+	writer, err := zipWriter.Create("test-log.txt")
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to create file in zip: %v", err)
+	}
+	_, err = writer.Write([]byte(testContent))
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to write content to zip: %v", err)
+	}
+
+	// Add a subdirectory with a file
+	writer, err = zipWriter.Create("logs/job-1.txt")
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to create subdirectory file in zip: %v", err)
+	}
+	_, err = writer.Write([]byte("Job 1 logs"))
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to write subdirectory content to zip: %v", err)
+	}
+
+	// Close the zip writer
+	err = zipWriter.Close()
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to close zip writer: %v", err)
+	}
+	zipFile.Close()
+
+	// Create a destination directory
+	destDir := filepath.Join(tmpDir, "extracted")
+	err = os.MkdirAll(destDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create destination directory: %v", err)
+	}
+
+	// Test the unzipFile function
+	err = unzipFile(zipPath, destDir, false)
+	if err != nil {
+		t.Fatalf("unzipFile failed: %v", err)
+	}
+
+	// Verify the extracted files
+	extractedFile := filepath.Join(destDir, "test-log.txt")
+	content, err := os.ReadFile(extractedFile)
+	if err != nil {
+		t.Fatalf("Failed to read extracted file: %v", err)
+	}
+
+	if string(content) != testContent {
+		t.Errorf("Extracted content mismatch: got %q, want %q", string(content), testContent)
+	}
+
+	// Verify subdirectory file
+	subdirFile := filepath.Join(destDir, "logs", "job-1.txt")
+	content, err = os.ReadFile(subdirFile)
+	if err != nil {
+		t.Fatalf("Failed to read extracted subdirectory file: %v", err)
+	}
+
+	if string(content) != "Job 1 logs" {
+		t.Errorf("Extracted subdirectory content mismatch: got %q, want %q", string(content), "Job 1 logs")
+	}
+}
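`unzipFile` itself is not part of this diff. A sketch of the extraction loop it plausibly performs, including the zip-slip guard that the next test expects to fail with "invalid file path" (assumed shape under those two tests' contracts, not the actual implementation; the real function also takes a verbose flag):

```go
package main

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// unzipFileSketch extracts a zip archive into destDir, rejecting entries
// whose cleaned path would escape the destination (zip-slip).
func unzipFileSketch(zipPath, destDir string) error {
	r, err := zip.OpenReader(zipPath)
	if err != nil {
		return err
	}
	defer r.Close()

	for _, f := range r.File {
		// Zip-slip guard: filepath.Join cleans "..", so an escaping entry
		// resolves outside destDir and fails the prefix check.
		target := filepath.Join(destDir, f.Name)
		if !strings.HasPrefix(target, filepath.Clean(destDir)+string(os.PathSeparator)) {
			return fmt.Errorf("invalid file path: %s", f.Name)
		}
		if f.FileInfo().IsDir() {
			if err := os.MkdirAll(target, 0755); err != nil {
				return err
			}
			continue
		}
		// Preserve nested structure such as logs/job-1.txt.
		if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
			return err
		}
		src, err := f.Open()
		if err != nil {
			return err
		}
		dst, err := os.Create(target)
		if err != nil {
			src.Close()
			return err
		}
		_, err = io.Copy(dst, src)
		src.Close()
		dst.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
```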
+
+func TestUnzipFileZipSlipPrevention(t *testing.T) {
+	// Create a temporary directory for the test
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Create a test zip file with a malicious path
+	zipPath := filepath.Join(tmpDir, "malicious.zip")
+	zipFile, err := os.Create(zipPath)
+	if err != nil {
+		t.Fatalf("Failed to create test zip file: %v", err)
+	}
+
+	zipWriter := zip.NewWriter(zipFile)
+
+	// Try to create a file that escapes the destination directory
+	writer, err := zipWriter.Create("../../../etc/passwd")
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to create malicious file in zip: %v", err)
+	}
+	_, err = writer.Write([]byte("malicious content"))
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to write malicious content to zip: %v", err)
+	}
+
+	err = zipWriter.Close()
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to close zip writer: %v", err)
+	}
+	zipFile.Close()
+
+	// Create a destination directory
+	destDir := filepath.Join(tmpDir, "safe-extraction")
+	err = os.MkdirAll(destDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create destination directory: %v", err)
+	}
+
+	// Test that unzipFile rejects the malicious path
+	err = unzipFile(zipPath, destDir, false)
+	if err == nil {
+		t.Fatal("Expected unzipFile to reject malicious path, but it succeeded")
+	}
+
+	if !strings.Contains(err.Error(), "invalid file path") {
+		t.Errorf("Expected error about invalid file path, got: %v", err)
+	}
+}
+
+func TestDownloadWorkflowRunLogsStructure(t *testing.T) {
+	// This test verifies that workflow logs are extracted into a workflow-logs subdirectory
+	// Note: This test cannot fully test downloadWorkflowRunLogs without GitHub CLI authentication
+	// So we test the directory creation and unzipFile behavior that mimics the workflow
+
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Create a mock workflow logs zip file similar to what GitHub API returns
+	zipPath := filepath.Join(tmpDir, "workflow-logs.zip")
+	zipFile, err := os.Create(zipPath)
+	if err != nil {
+		t.Fatalf("Failed to create test zip file: %v", err)
+	}
+
+	zipWriter := zip.NewWriter(zipFile)
+
+	// Add files that simulate GitHub Actions workflow logs structure
+	logFiles := map[string]string{
+		"1_job1.txt":        "Job 1 execution logs",
+		"2_job2.txt":        "Job 2 execution logs",
+		"3_build/build.txt": "Build step logs",
+		"4_test/test-1.txt": "Test step 1 logs",
+		"4_test/test-2.txt": "Test step 2 logs",
+	}
+
+	for filename, content := range logFiles {
+		writer, err := zipWriter.Create(filename)
+		if err != nil {
+			zipFile.Close()
+			t.Fatalf("Failed to create file %s in zip: %v", filename, err)
+		}
+		_, err = writer.Write([]byte(content))
+		if err != nil {
+			zipFile.Close()
+			t.Fatalf("Failed to write content to %s: %v", filename, err)
+		}
+	}
+
+	err = zipWriter.Close()
+	if err != nil {
+		zipFile.Close()
+		t.Fatalf("Failed to close zip writer: %v", err)
+	}
+	zipFile.Close()
+
+	// Create a run directory (simulating logs/run-12345)
+	runDir := filepath.Join(tmpDir, "run-12345")
+	err = os.MkdirAll(runDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create run directory: %v", err)
+	}
+
+	// Create some other artifacts in the run directory (to verify they don't get mixed with logs)
+	err = os.WriteFile(filepath.Join(runDir, "aw_info.json"), []byte(`{"engine_id": "claude"}`), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create aw_info.json: %v", err)
+	}
+
+	// Create the workflow-logs subdirectory and extract logs there
+	workflowLogsDir := filepath.Join(runDir, "workflow-logs")
+	err = os.MkdirAll(workflowLogsDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create workflow-logs directory: %v", err)
+	}
+
+	// Extract logs into the workflow-logs subdirectory (mimics downloadWorkflowRunLogs behavior)
+	err = unzipFile(zipPath, workflowLogsDir, false)
+	if err != nil {
+		t.Fatalf("Failed to extract logs: %v", err)
+	}
+
+	// Verify that workflow-logs directory exists
+	if !fileutil.DirExists(workflowLogsDir) {
+		t.Error("workflow-logs directory should exist")
+	}
+
+	// Verify that log files are in the workflow-logs subdirectory, not in run root
+	for filename := range logFiles {
+		expectedPath := filepath.Join(workflowLogsDir, filename)
+		if !fileutil.FileExists(expectedPath) {
+			t.Errorf("Expected log file %s to be in workflow-logs subdirectory", filename)
+		}
+
+		// Verify the file is NOT in the run directory root
+		wrongPath := filepath.Join(runDir, filename)
+		if fileutil.FileExists(wrongPath) {
+			t.Errorf("Log file %s should not be in run directory root", filename)
+		}
+	}
+
+	// Verify that other artifacts remain in the run directory root
+	awInfoPath := filepath.Join(runDir, "aw_info.json")
+	if !fileutil.FileExists(awInfoPath) {
+		t.Error("aw_info.json should remain in run directory root")
+	}
+
+	// Verify the content of one of the extracted log files
+	testLogPath := filepath.Join(workflowLogsDir, "1_job1.txt")
+	content, err := os.ReadFile(testLogPath)
+	if err != nil {
+		t.Fatalf("Failed to read extracted log file: %v", err)
+	}
+
+	expectedContent := "Job 1 execution logs"
+	if string(content) != expectedContent {
+		t.Errorf("Log file content mismatch: got %q, want %q", string(content), expectedContent)
+	}
+
+	// Verify nested directory structure is preserved
+	nestedLogPath := filepath.Join(workflowLogsDir, "3_build", "build.txt")
+	if !fileutil.FileExists(nestedLogPath) {
+		t.Error("Nested log directory structure should be preserved")
+	}
+}
+
+// TestCountParameterBehavior verifies that the count parameter limits matching results
+// not the number of runs fetched when date filters are specified
+func TestCountParameterBehavior(t *testing.T) {
+	// This test documents the expected behavior:
+	// 1. When date filters (startDate/endDate) are specified, fetch ALL runs in that range
+	// 2. Apply post-download filters (engine, staged, etc.)
+	// 3. Limit final output to 'count' matching runs
+	//
+	// Without date filters:
+	// 1. Fetch runs iteratively until we have 'count' runs with artifacts
+	// 2. Apply filters during iteration (old behavior for backward compatibility)
+
+	// Note: This is a documentation test - the actual logic is tested via integration tests
+	// that require GitHub CLI authentication and a real repository
+
+	tests := []struct {
+		name             string
+		startDate        string
+		endDate          string
+		count            int
+		expectedFetchAll bool
+	}{
+		{
+			name:             "with startDate should fetch all in range",
+			startDate:        "2024-01-01",
+			endDate:          "",
+			count:            10,
+			expectedFetchAll: true,
+		},
+		{
+			name:             "with endDate should fetch all in range",
+			startDate:        "",
+			endDate:          "2024-12-31",
+			count:            10,
+			expectedFetchAll: true,
+		},
+		{
+			name:             "with both dates should fetch all in range",
+			startDate:        "2024-01-01",
+			endDate:          "2024-12-31",
+			count:            10,
+			expectedFetchAll: true,
+		},
+		{
+			name:             "without dates should use count as fetch limit",
+			startDate:        "",
+			endDate:          "",
+			count:            10,
+			expectedFetchAll: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// This documents the logic: when startDate or endDate is set, we fetch all
+			fetchAllInRange := tt.startDate != "" || tt.endDate != ""
+
+			if fetchAllInRange != tt.expectedFetchAll {
+				t.Errorf("Expected fetchAllInRange=%v for startDate=%q endDate=%q, got %v",
+					tt.expectedFetchAll, tt.startDate, tt.endDate, fetchAllInRange)
+			}
+		})
+	}
+}
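The decision this documentation test encodes reduces to a single predicate; stated as a sketch (hypothetical helper name, logic taken directly from the test):

```go
// shouldFetchAllInRange mirrors the documented rule: any date bound switches
// the downloader into fetch-all-in-range mode, and count then caps only the
// final matching results rather than the number of runs fetched.
func shouldFetchAllInRange(startDate, endDate string) bool {
	return startDate != "" || endDate != ""
}
```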
diff --git a/pkg/cli/logs_filtering_test.go b/pkg/cli/logs_filtering_test.go
new file mode 100644
index 0000000000..1e1f9e06c0
--- /dev/null
+++ b/pkg/cli/logs_filtering_test.go
@@ -0,0 +1,444 @@
+package cli
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/githubnext/gh-aw/pkg/testutil"
+	"github.com/githubnext/gh-aw/pkg/workflow"
+)
+
+func TestLogsCommandFlags(t *testing.T) {
+	// Test that the logs command has the expected flags including the new engine flag
+	cmd := NewLogsCommand()
+
+	// Check that all expected flags are present
+	expectedFlags := []string{"count", "start-date", "end-date", "output", "engine", "ref", "before-run-id", "after-run-id"}
+
+	for _, flagName := range expectedFlags {
+		flag := cmd.Flags().Lookup(flagName)
+		if flag == nil {
+			t.Errorf("Expected flag '%s' not found in logs command", flagName)
+		}
+	}
+
+	// Test ref flag specifically
+	refFlag := cmd.Flags().Lookup("ref")
+	if refFlag == nil {
+		t.Fatal("Ref flag not found")
+	}
+
+	if refFlag.Usage != "Filter runs by branch or tag name (e.g., main, v1.0.0)" {
+		t.Errorf("Unexpected ref flag usage text: %s", refFlag.Usage)
+	}
+
+	if refFlag.DefValue != "" {
+		t.Errorf("Expected ref flag default value to be empty, got: %s", refFlag.DefValue)
+	}
+
+	// Test before-run-id flag
+	beforeRunIDFlag := cmd.Flags().Lookup("before-run-id")
+	if beforeRunIDFlag == nil {
+		t.Fatal("Before-run-id flag not found")
+	}
+
+	if beforeRunIDFlag.Usage != "Filter runs with database ID before this value (exclusive)" {
+		t.Errorf("Unexpected before-run-id flag usage text: %s", beforeRunIDFlag.Usage)
+	}
+
+	// Test after-run-id flag
+	afterRunIDFlag := cmd.Flags().Lookup("after-run-id")
+	if afterRunIDFlag == nil {
+		t.Fatal("After-run-id flag not found")
+	}
+
+	if afterRunIDFlag.Usage != "Filter runs with database ID after this value (exclusive)" {
+		t.Errorf("Unexpected after-run-id flag usage text: %s", afterRunIDFlag.Usage)
+	}
+
+	// Test engine flag specifically
+	engineFlag := cmd.Flags().Lookup("engine")
+	if engineFlag == nil {
+		t.Fatal("Engine flag not found")
+	}
+
+	if engineFlag.Usage != "Filter logs by AI engine (claude, codex, copilot, custom)" {
+		t.Errorf("Unexpected engine flag usage text: %s", engineFlag.Usage)
+	}
+
+	if engineFlag.DefValue != "" {
+		t.Errorf("Expected engine flag default value to be empty, got: %s", engineFlag.DefValue)
+	}
+
+	// Test that engine flag has the -e shorthand for consistency with other commands
+	if engineFlag.Shorthand != "e" {
+		t.Errorf("Expected engine flag shorthand to be 'e', got: %s", engineFlag.Shorthand)
+	}
+}
+
+func TestRunIDFilteringLogic(t *testing.T) {
+	// Test the run ID filtering logic in isolation
+	testRuns := []WorkflowRun{
+		{DatabaseID: 1000, WorkflowName: "Test Workflow"},
+		{DatabaseID: 1500, WorkflowName: "Test Workflow"},
+		{DatabaseID: 2000, WorkflowName: "Test Workflow"},
+		{DatabaseID: 2500, WorkflowName: "Test Workflow"},
+		{DatabaseID: 3000, WorkflowName: "Test Workflow"},
+	}
+
+	// Test before-run-id filter (exclusive)
+	var filteredRuns []WorkflowRun
+	beforeRunID := int64(2000)
+	for _, run := range testRuns {
+		if beforeRunID > 0 && run.DatabaseID >= beforeRunID {
+			continue
+		}
+		filteredRuns = append(filteredRuns, run)
+	}
+
+	if len(filteredRuns) != 2 {
+		t.Errorf("Expected 2 runs before ID 2000 (exclusive), got %d", len(filteredRuns))
+	}
+	if filteredRuns[0].DatabaseID != 1000 || filteredRuns[1].DatabaseID != 1500 {
+		t.Errorf("Expected runs 1000 and 1500, got %d and %d", filteredRuns[0].DatabaseID, filteredRuns[1].DatabaseID)
+	}
+
+	// Test after-run-id filter (exclusive)
+	filteredRuns = nil
+	afterRunID := int64(2000)
+	for _, run := range testRuns {
+		if afterRunID > 0 && run.DatabaseID <= afterRunID {
+			continue
+		}
+		filteredRuns = append(filteredRuns, run)
+	}
+
+	if len(filteredRuns) != 2 {
+		t.Errorf("Expected 2 runs after ID 2000 (exclusive), got %d", len(filteredRuns))
+	}
+	if filteredRuns[0].DatabaseID != 2500 || filteredRuns[1].DatabaseID != 3000 {
+		t.Errorf("Expected runs 2500 and 3000, got %d and %d", filteredRuns[0].DatabaseID, filteredRuns[1].DatabaseID)
+	}
+
+	// Test range filter (both before and after)
+	filteredRuns = nil
+	beforeRunID = int64(2500)
+	afterRunID = int64(1000)
+	for _, run := range testRuns {
+		// Apply before-run-id filter (exclusive)
+		if beforeRunID > 0 && run.DatabaseID >= beforeRunID {
+			continue
+		}
+		// Apply after-run-id filter (exclusive)
+		if afterRunID > 0 && run.DatabaseID <= afterRunID {
+			continue
+		}
+		filteredRuns = append(filteredRuns, run)
+	}
+
+	if len(filteredRuns) != 2 {
+		t.Errorf("Expected 2 runs in range (1000, 2500), got %d", len(filteredRuns))
+	}
+	if filteredRuns[0].DatabaseID != 1500 || filteredRuns[1].DatabaseID != 2000 {
+		t.Errorf("Expected runs 1500 and 2000, got %d and %d", filteredRuns[0].DatabaseID, filteredRuns[1].DatabaseID)
+	}
+}
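The exclusive-bounds filter is inlined three times above; factored out, it would look like this (hypothetical helper, mirroring the test's logic exactly — a zero bound means "no bound"):

```go
// filterRunsByID keeps runs whose DatabaseID falls strictly inside
// (afterRunID, beforeRunID); both bounds are exclusive and optional.
func filterRunsByID(runs []WorkflowRun, beforeRunID, afterRunID int64) []WorkflowRun {
	var filtered []WorkflowRun
	for _, run := range runs {
		if beforeRunID > 0 && run.DatabaseID >= beforeRunID {
			continue // at or above the exclusive upper bound
		}
		if afterRunID > 0 && run.DatabaseID <= afterRunID {
			continue // at or below the exclusive lower bound
		}
		filtered = append(filtered, run)
	}
	return filtered
}
```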
+
+func TestRefFilteringWithGitHubCLI(t *testing.T) {
+	// Test that ref filtering is properly added to GitHub CLI args
+	// This is a unit test for the args construction, not a network test
+
+	// Simulate args construction for ref filtering
+	args := []string{"run", "list", "--json", "databaseId,number,url,status,conclusion,workflowName,createdAt,startedAt,updatedAt,event,headBranch,headSha,displayTitle"}
+
+	ref := "feature-branch"
+	if ref != "" {
+		args = append(args, "--branch", ref)
+	}
+
+	// Verify that the ref filter was added correctly (uses --branch flag under the hood)
+	found := false
+	for i, arg := range args {
+		if arg == "--branch" && i+1 < len(args) && args[i+1] == "feature-branch" {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		t.Errorf("Expected ref filter '--branch feature-branch' not found in args: %v", args)
+	}
+}
+
+func TestFindAgentLogFile(t *testing.T) {
+	// Create a temporary directory structure for testing
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Test 1: Copilot engine with agent_output directory
+	t.Run("Copilot engine uses agent_output", func(t *testing.T) {
+		copilotEngine := workflow.NewCopilotEngine()
+
+		// Create agent_output directory with a log file
+		agentOutputDir := filepath.Join(tmpDir, "copilot_test", "agent_output")
+		err := os.MkdirAll(agentOutputDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create agent_output directory: %v", err)
+		}
+
+		logFile := filepath.Join(agentOutputDir, "debug.log")
+		err = os.WriteFile(logFile, []byte("test log content"), 0644)
+		if err != nil {
+			t.Fatalf("Failed to create log file: %v", err)
+		}
+
+		// Create agent-stdio.log as well (should be ignored for Copilot)
+		stdioLog := filepath.Join(tmpDir, "copilot_test", "agent-stdio.log")
+		err = os.WriteFile(stdioLog, []byte("stdio content"), 0644)
+		if err != nil {
+			t.Fatalf("Failed to create agent-stdio.log: %v", err)
+		}
+
+		// Test findAgentLogFile
+		found, ok := findAgentLogFile(filepath.Join(tmpDir, "copilot_test"), copilotEngine)
+		if !ok {
+			t.Errorf("Expected to find agent log file for Copilot engine")
+		}
+
+		// Should find the file in agent_output directory
+		if !strings.Contains(found, "agent_output") {
+			t.Errorf("Expected to find file in agent_output directory, got: %s", found)
+		}
+	})
+
+	// Test Copilot engine with flattened agent_outputs artifact
+	// After flattening, session logs are at sandbox/agent/logs/ in the root
+	t.Run("copilot_engine_flattened_location", func(t *testing.T) {
+		copilotDir := filepath.Join(tmpDir, "copilot_flattened_test")
+		err := os.MkdirAll(copilotDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create test directory: %v", err)
+		}
+
+		// Create flattened session logs directory (after flattenAgentOutputsArtifact)
+		sessionLogsDir := filepath.Join(copilotDir, "sandbox", "agent", "logs")
+		err = os.MkdirAll(sessionLogsDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create flattened session logs directory: %v", err)
+		}
+
+		// Create a test session log file
+		sessionLog := filepath.Join(sessionLogsDir, "session-test-123.log")
+		err = os.WriteFile(sessionLog, []byte("test session log content"), 0644)
+		if err != nil {
+			t.Fatalf("Failed to create session log file: %v", err)
+		}
+
+		copilotEngine := workflow.NewCopilotEngine()
+
+		// Test findAgentLogFile - should find the session log in flattened location
+		found, ok := findAgentLogFile(copilotDir, copilotEngine)
+		if !ok {
+			t.Errorf("Expected to find agent log file for Copilot engine in flattened location")
+		}
+
+		// Should find the session log file
+		if !strings.HasSuffix(found, "session-test-123.log") {
+			t.Errorf("Expected to find session-test-123.log, but found %s", found)
+		}
+
+		// Verify the path is correct
+		expectedPath := filepath.Join(copilotDir, "sandbox", "agent", "logs", "session-test-123.log")
+		if found != expectedPath {
+			t.Errorf("Expected path %s, got %s", expectedPath, found)
+		}
+	})
"session-test-123.log") + err = os.WriteFile(sessionLog, []byte("test session log content"), 0644) + if err != nil { + t.Fatalf("Failed to create session log file: %v", err) + } + + copilotEngine := workflow.NewCopilotEngine() + + // Test findAgentLogFile - should find the session log in flattened location + found, ok := findAgentLogFile(copilotDir, copilotEngine) + if !ok { + t.Errorf("Expected to find agent log file for Copilot engine in flattened location") + } + + // Should find the session log file + if !strings.HasSuffix(found, "session-test-123.log") { + t.Errorf("Expected to find session-test-123.log, but found %s", found) + } + + // Verify the path is correct + expectedPath := filepath.Join(copilotDir, "sandbox", "agent", "logs", "session-test-123.log") + if found != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, found) + } + }) + + // Test Copilot engine with session log directly in run directory (recursive search) + // This handles cases where artifact structure differs from expected + t.Run("copilot_engine_recursive_search", func(t *testing.T) { + copilotDir := filepath.Join(tmpDir, "copilot_recursive_test") + err := os.MkdirAll(copilotDir, 0755) + if err != nil { + t.Fatalf("Failed to create test directory: %v", err) + } + + // Create session log directly in the run directory + // This simulates the case where the artifact was flattened differently + sessionLog := filepath.Join(copilotDir, "session-direct-456.log") + err = os.WriteFile(sessionLog, []byte("test session log content"), 0644) + if err != nil { + t.Fatalf("Failed to create session log file: %v", err) + } + + copilotEngine := workflow.NewCopilotEngine() + + // Test findAgentLogFile - should find via recursive search + found, ok := findAgentLogFile(copilotDir, copilotEngine) + if !ok { + t.Errorf("Expected to find agent log file via recursive search") + } + + // Should find the session log file + if !strings.HasSuffix(found, "session-direct-456.log") { + t.Errorf("Expected to find session-direct-456.log, but found %s", found) + } + + // Verify the path is correct + if found != sessionLog { + t.Errorf("Expected path %s, got %s", sessionLog, found) + } + }) + + // Test Copilot engine with process log (new naming convention) + // Copilot changed from session-*.log to process-*.log + t.Run("copilot_engine_process_log", func(t *testing.T) { + copilotDir := filepath.Join(tmpDir, "copilot_process_test") + err := os.MkdirAll(copilotDir, 0755) + if err != nil { + t.Fatalf("Failed to create test directory: %v", err) + } + + // Create process log directly in the run directory + // This simulates the new naming convention for Copilot logs + processLog := filepath.Join(copilotDir, "process-12345.log") + err = os.WriteFile(processLog, []byte("test process log content"), 0644) + if err != nil { + t.Fatalf("Failed to create process log file: %v", err) + } + + copilotEngine := workflow.NewCopilotEngine() + + // Test findAgentLogFile - should find via recursive search + found, ok := findAgentLogFile(copilotDir, copilotEngine) + if !ok { + t.Errorf("Expected to find agent log file via recursive search") + } + + // Should find the process log file + if !strings.HasSuffix(found, "process-12345.log") { + t.Errorf("Expected to find process-12345.log, but found %s", found) + } + + // Verify the path is correct + if found != processLog { + t.Errorf("Expected path %s, got %s", processLog, found) + } + }) + + // Test Copilot engine with process log in nested directory + t.Run("copilot_engine_process_log_nested", func(t 
+
+	// Test 2: Claude engine with agent-stdio.log
+	t.Run("Claude engine uses agent-stdio.log", func(t *testing.T) {
+		claudeEngine := workflow.NewClaudeEngine()
+
+		// Create only agent-stdio.log
+		claudeDir := filepath.Join(tmpDir, "claude_test")
+		err := os.MkdirAll(claudeDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create claude test directory: %v", err)
+		}
+
+		stdioLog := filepath.Join(claudeDir, "agent-stdio.log")
+		err = os.WriteFile(stdioLog, []byte("stdio content"), 0644)
+		if err != nil {
+			t.Fatalf("Failed to create agent-stdio.log: %v", err)
+		}
+
+		// Test findAgentLogFile
+		found, ok := findAgentLogFile(claudeDir, claudeEngine)
+		if !ok {
+			t.Errorf("Expected to find agent log file for Claude engine")
+		}
+
+		// Should find agent-stdio.log
+		if !strings.Contains(found, "agent-stdio.log") {
+			t.Errorf("Expected to find agent-stdio.log, got: %s", found)
+		}
+	})
+
+	// Test 3: No logs found
+	t.Run("No logs found returns false", func(t *testing.T) {
+		emptyDir := filepath.Join(tmpDir, "empty_test")
+		err := os.MkdirAll(emptyDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create empty test directory: %v", err)
+		}
+
+		claudeEngine := workflow.NewClaudeEngine()
+		_, ok := findAgentLogFile(emptyDir, claudeEngine)
+		if ok {
+			t.Errorf("Expected to not find agent log file in empty directory")
+		}
+	})
+
+	// Test 4: Codex engine with agent-stdio.log
+	t.Run("Codex engine uses agent-stdio.log", func(t *testing.T) {
+		codexEngine := workflow.NewCodexEngine()
+
+		// Create only agent-stdio.log
+		codexDir := filepath.Join(tmpDir, "codex_test")
+		err := os.MkdirAll(codexDir, 0755)
+		if err != nil {
+			t.Fatalf("Failed to create codex test directory: %v", err)
+		}
+
+		stdioLog := filepath.Join(codexDir, "agent-stdio.log")
+		err = os.WriteFile(stdioLog, []byte("stdio content"), 0644)
+		if err != nil {
+			t.Fatalf("Failed to create agent-stdio.log: %v", err)
+		}
+
+		// Test findAgentLogFile
+		found, ok := findAgentLogFile(codexDir, codexEngine)
+		if !ok {
+			t.Errorf("Expected to find agent log file for Codex engine")
+		}
+
+		// Should find agent-stdio.log
+		if !strings.Contains(found, "agent-stdio.log") {
+			t.Errorf("Expected to find agent-stdio.log, got: %s", found)
+		}
+	})
+}
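The Copilot subtests rely on a recursive fallback that matches both the old `session-*.log` and the new `process-*.log` names. A sketch of that walk (assumed; the real `findAgentLogFile` also checks engine-specific locations such as `agent_output/` and `sandbox/agent/logs/` first, and falls back to `agent-stdio.log` for Claude and Codex):

```go
import (
	"io/fs"
	"path/filepath"
	"strings"
)

// findCopilotLogByWalk returns the first session-*.log or process-*.log
// found anywhere under runDir, stopping at the first match.
func findCopilotLogByWalk(runDir string) (string, bool) {
	var found string
	_ = filepath.WalkDir(runDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return nil
		}
		name := d.Name()
		if (strings.HasPrefix(name, "session-") || strings.HasPrefix(name, "process-")) &&
			strings.HasSuffix(name, ".log") {
			found = path
			return fs.SkipAll // stop walking once a log is found
		}
		return nil
	})
	return found, found != ""
}
```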
t.Errorf("Expected to find agent-stdio.log, got: %s", found) + } + }) +} diff --git a/pkg/cli/logs_formatting_test.go b/pkg/cli/logs_formatting_test.go new file mode 100644 index 0000000000..64e0438935 --- /dev/null +++ b/pkg/cli/logs_formatting_test.go @@ -0,0 +1,68 @@ +package cli + +import ( + "testing" + + "github.com/githubnext/gh-aw/pkg/console" +) + +func TestFormatNumber(t *testing.T) { + tests := []struct { + input int + expected string + }{ + {0, "0"}, + {5, "5"}, + {42, "42"}, + {999, "999"}, + {1000, "1.00k"}, + {1200, "1.20k"}, + {1234, "1.23k"}, + {12000, "12.0k"}, + {12300, "12.3k"}, + {123000, "123k"}, + {999999, "1000k"}, + {1000000, "1.00M"}, + {1200000, "1.20M"}, + {1234567, "1.23M"}, + {12000000, "12.0M"}, + {12300000, "12.3M"}, + {123000000, "123M"}, + {999999999, "1000M"}, + {1000000000, "1.00B"}, + {1200000000, "1.20B"}, + {1234567890, "1.23B"}, + {12000000000, "12.0B"}, + {123000000000, "123B"}, + } + + for _, test := range tests { + result := console.FormatNumber(test.input) + if result != test.expected { + t.Errorf("console.FormatNumber(%d) = %s, expected %s", test.input, result, test.expected) + } + } +} + +func TestFormatFileSize(t *testing.T) { + tests := []struct { + size int64 + expected string + }{ + {0, "0 B"}, + {100, "100 B"}, + {1024, "1.0 KB"}, + {1536, "1.5 KB"}, // 1.5 * 1024 + {1048576, "1.0 MB"}, // 1024 * 1024 + {2097152, "2.0 MB"}, // 2 * 1024 * 1024 + {1073741824, "1.0 GB"}, // 1024^3 + {1099511627776, "1.0 TB"}, // 1024^4 + } + + for _, tt := range tests { + result := console.FormatFileSize(tt.size) + if result != tt.expected { + t.Errorf("console.FormatFileSize(%d) = %q, expected %q", tt.size, result, tt.expected) + } + } +} diff --git a/pkg/cli/logs_parsing_test.go b/pkg/cli/logs_parsing_test.go new file mode 100644 index 0000000000..031074ecba --- /dev/null +++ b/pkg/cli/logs_parsing_test.go @@ -0,0 +1,760 @@ +package cli + +import ( + "os" + "path/filepath" + "testing" + + "github.com/githubnext/gh-aw/pkg/testutil" + "github.com/githubnext/gh-aw/pkg/workflow" +) + +func TestParseLogFileWithoutAwInfo(t *testing.T) { + // Create a temporary log file + tmpDir := testutil.TempDir(t, "test-*") + logFile := filepath.Join(tmpDir, "test.log") + + logContent := `2024-01-15T10:30:00Z Starting workflow execution +2024-01-15T10:30:15Z Claude API request initiated +2024-01-15T10:30:45Z Input tokens: 1250 +2024-01-15T10:30:45Z Output tokens: 850 +2024-01-15T10:30:45Z Total tokens used: 2100 +2024-01-15T10:30:45Z Cost: $0.025 +2024-01-15T10:31:30Z Workflow completed successfully` + + err := os.WriteFile(logFile, []byte(logContent), 0644) + if err != nil { + t.Fatalf("Failed to create test log file: %v", err) + } + + // Test parseLogFileWithEngine without an engine (simulates no aw_info.json) + metrics, err := parseLogFileWithEngine(logFile, nil, false, false) + if err != nil { + t.Fatalf("parseLogFileWithEngine failed: %v", err) + } + + // Without aw_info.json, should return empty metrics + if metrics.TokenUsage != 0 { + t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage) + } + + // Check cost - should be 0 without engine-specific parsing + if metrics.EstimatedCost != 0 { + t.Errorf("Expected cost 0 (no aw_info.json), got %f", metrics.EstimatedCost) + } + + // Duration is no longer extracted from logs - using GitHub API timestamps instead +} + +func TestExtractJSONMetrics(t *testing.T) { + tests := []struct { + name string + line string + expectedTokens int + expectedCost float64 + }{ + { + name: "Claude streaming format with 
usage", + line: `{"type": "message_delta", "delta": {"usage": {"input_tokens": 123, "output_tokens": 456}}}`, + expectedTokens: 579, // 123 + 456 + }, + { + name: "Simple token count (timestamp ignored)", + line: `{"tokens": 1234, "timestamp": "2024-01-15T10:30:00Z"}`, + expectedTokens: 1234, + }, + { + name: "Cost information", + line: `{"cost": 0.045, "price": 0.01}`, + expectedCost: 0.045, // Should pick up the first one found + }, + { + name: "Usage object with cost", + line: `{"usage": {"total_tokens": 999}, "billing": {"cost": 0.123}}`, + expectedTokens: 999, + expectedCost: 0.123, + }, + { + name: "Claude result format with total_cost_usd", + line: `{"type": "result", "total_cost_usd": 0.8606770999999999, "usage": {"input_tokens": 126, "output_tokens": 7685}}`, + expectedTokens: 7811, // 126 + 7685 + expectedCost: 0.8606770999999999, + }, + { + name: "Claude result format with cache tokens", + line: `{"type": "result", "total_cost_usd": 0.86, "usage": {"input_tokens": 126, "cache_creation_input_tokens": 100034, "cache_read_input_tokens": 1232098, "output_tokens": 7685}}`, + expectedTokens: 1339943, // 126 + 100034 + 1232098 + 7685 + expectedCost: 0.86, + }, + { + name: "Not JSON", + line: "regular log line with tokens: 123", + expectedTokens: 0, // Should return zero since it's not JSON + }, + { + name: "Invalid JSON", + line: `{"invalid": json}`, + expectedTokens: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := extractJSONMetrics(tt.line, false) + + if metrics.TokenUsage != tt.expectedTokens { + t.Errorf("Expected tokens %d, got %d", tt.expectedTokens, metrics.TokenUsage) + } + + if metrics.EstimatedCost != tt.expectedCost { + t.Errorf("Expected cost %f, got %f", tt.expectedCost, metrics.EstimatedCost) + } + }) + } +} + +func TestParseLogFileWithJSON(t *testing.T) { + // Create a temporary log file with mixed JSON and text format + tmpDir := testutil.TempDir(t, "test-*") + logFile := filepath.Join(tmpDir, "test-mixed.log") + + logContent := `2024-01-15T10:30:00Z Starting workflow execution +{"type": "message_start"} +{"type": "content_block_delta", "delta": {"type": "text", "text": "Hello"}} +{"type": "message_delta", "delta": {"usage": {"input_tokens": 150, "output_tokens": 200}}} +Regular log line: tokens: 1000 +{"cost": 0.035} +2024-01-15T10:31:30Z Workflow completed successfully` + + err := os.WriteFile(logFile, []byte(logContent), 0644) + if err != nil { + t.Fatalf("Failed to create test log file: %v", err) + } + + metrics, err := parseLogFileWithEngine(logFile, nil, false, false) + if err != nil { + t.Fatalf("parseLogFileWithEngine failed: %v", err) + } + + // Without aw_info.json and specific engine, should return empty metrics + if metrics.TokenUsage != 0 { + t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage) + } + + // Should have no cost without engine-specific parsing + if metrics.EstimatedCost != 0 { + t.Errorf("Expected cost 0 (no aw_info.json), got %f", metrics.EstimatedCost) + } + + // Duration is no longer extracted from logs - using GitHub API timestamps instead +} + +func TestConvertToInt(t *testing.T) { + tests := []struct { + value any + expected int + }{ + {123, 123}, + {int64(456), 456}, + {789.0, 789}, + {"123", 123}, + {"invalid", 0}, + {nil, 0}, + } + + for _, tt := range tests { + result := workflow.ConvertToInt(tt.value) + if result != tt.expected { + t.Errorf("ConvertToInt(%v) = %d, expected %d", tt.value, result, tt.expected) + } + } +} + +func TestConvertToFloat(t *testing.T) { 
+
+func TestConvertToFloat(t *testing.T) {
+	tests := []struct {
+		value    any
+		expected float64
+	}{
+		{123.45, 123.45},
+		{123, 123.0},
+		{int64(456), 456.0},
+		{"123.45", 123.45},
+		{"invalid", 0.0},
+		{nil, 0.0},
+	}
+
+	for _, tt := range tests {
+		result := workflow.ConvertToFloat(tt.value)
+		if result != tt.expected {
+			t.Errorf("ConvertToFloat(%v) = %f, expected %f", tt.value, result, tt.expected)
+		}
+	}
+}
+
+func TestExtractJSONCost(t *testing.T) {
+	tests := []struct {
+		name     string
+		data     map[string]any
+		expected float64
+	}{
+		{
+			name:     "total_cost_usd field",
+			data:     map[string]any{"total_cost_usd": 0.8606770999999999},
+			expected: 0.8606770999999999,
+		},
+		{
+			name:     "traditional cost field",
+			data:     map[string]any{"cost": 1.23},
+			expected: 1.23,
+		},
+		{
+			name:     "nested billing cost",
+			data:     map[string]any{"billing": map[string]any{"cost": 2.45}},
+			expected: 2.45,
+		},
+		{
+			name:     "no cost fields",
+			data:     map[string]any{"tokens": 1000},
+			expected: 0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := workflow.ExtractJSONCost(tt.data)
+			if result != tt.expected {
+				t.Errorf("ExtractJSONCost() = %f, expected %f", result, tt.expected)
+			}
+		})
+	}
+}
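The four cases pin down a lookup order; a sketch of a conforming implementation (assumed — the real `workflow.ExtractJSONCost` may resolve precedence differently when multiple fields are present):

```go
// extractJSONCostSketch checks total_cost_usd first, then the traditional
// cost field, then a nested billing.cost, returning 0 when none is set.
func extractJSONCostSketch(data map[string]any) float64 {
	if v := workflow.ConvertToFloat(data["total_cost_usd"]); v != 0 {
		return v
	}
	if v := workflow.ConvertToFloat(data["cost"]); v != 0 {
		return v
	}
	if billing, ok := data["billing"].(map[string]any); ok {
		return workflow.ConvertToFloat(billing["cost"])
	}
	return 0
}
```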
+
+func TestParseLogFileWithClaudeResult(t *testing.T) {
+	// Create a temporary log file with the exact Claude result format from the issue
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-claude.log")
+
+	// This is the exact JSON format provided in the issue (compacted to single line)
+	claudeResultJSON := `{"type": "result", "subtype": "success", "is_error": false, "duration_ms": 145056, "duration_api_ms": 142970, "num_turns": 66, "result": "**Integration test execution complete. All objectives achieved successfully.** 🎯", "session_id": "d0a2839f-3569-42e9-9ccb-70835de4e760", "total_cost_usd": 0.8606770999999999, "usage": {"input_tokens": 126, "cache_creation_input_tokens": 100034, "cache_read_input_tokens": 1232098, "output_tokens": 7685, "server_tool_use": {"web_search_requests": 0}, "service_tier": "standard"}}`
+
+	logContent := `2024-01-15T10:30:00Z Starting Claude workflow execution
+Claude processing request...
+` + claudeResultJSON + `
+2024-01-15T10:32:30Z Workflow completed successfully`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Test with Claude engine to parse Claude-specific logs
+	claudeEngine := workflow.NewClaudeEngine()
+	metrics, err := parseLogFileWithEngine(logFile, claudeEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Check total token usage includes all token types from Claude
+	expectedTokens := 126 + 100034 + 1232098 + 7685 // input + cache_creation + cache_read + output
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage)
+	}
+
+	// Check cost extraction from total_cost_usd
+	expectedCost := 0.8606770999999999
+	if metrics.EstimatedCost != expectedCost {
+		t.Errorf("Expected cost %f, got %f", expectedCost, metrics.EstimatedCost)
+	}
+
+	// Check turns extraction from num_turns
+	expectedTurns := 66
+	if metrics.Turns != expectedTurns {
+		t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns)
+	}
+
+	// Duration is no longer extracted from logs - using GitHub API timestamps instead
+}
+
+func TestParseLogFileWithCodexFormat(t *testing.T) {
+	// Create a temporary log file with the Codex output format from the issue
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-codex.log")
+
+	// This is the exact Codex format provided in the issue with thinking sections added
+	logContent := `[2025-08-13T00:24:45] Starting Codex workflow execution
+[2025-08-13T00:24:50] thinking
+I need to analyze the pull request details first.
+[2025-08-13T00:24:50] codex
+
+I'm ready to generate a Codex PR summary, but I need the pull request number to fetch its details. Could you please share the PR number (and confirm the repo/owner if it isn't ` + "`githubnext/gh-aw`" + `)?
+[2025-08-13T00:24:50] thinking
+Now I need to wait for the user's response.
+[2025-08-13T00:24:50] tokens used: 13934
+[2025-08-13T00:24:55] Workflow completed successfully`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Test with Codex engine to parse Codex-specific logs
+	codexEngine := workflow.NewCodexEngine()
+	metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Check token usage extraction from Codex format
+	expectedTokens := 13934
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage)
+	}
+
+	// Check turns extraction from thinking sections
+	expectedTurns := 2 // Two thinking sections in the test data
+	if metrics.Turns != expectedTurns {
+		t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns)
+	}
+
+	// Duration is no longer extracted from logs - using GitHub API timestamps instead
+}
+
+func TestParseLogFileWithCodexTokenSumming(t *testing.T) {
+	// Create a temporary log file with multiple Codex token entries
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-codex-tokens.log")
+
+	// Simulate the exact Codex format from the issue
+	logContent := ` ]
+}
+[2025-08-13T04:38:03] tokens used: 32169
+[2025-08-13T04:38:06] codex
+I've posted the PR summary comment with analysis and recommendations. Let me know if you'd like to adjust any details or add further insights!
+[2025-08-13T04:38:06] tokens used: 28828
+[2025-08-13T04:38:10] Processing complete
+[2025-08-13T04:38:15] tokens used: 5000`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Test with Codex engine
+	codexEngine := workflow.NewCodexEngine()
+	metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Check that all token entries are summed
+	expectedTokens := 32169 + 28828 + 5000 // 65997
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected summed token usage %d, got %d", expectedTokens, metrics.TokenUsage)
+	}
+}
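The summing behavior pinned down here amounts to accumulating every `tokens used: N` line. A sketch (the real Codex parser may anchor the pattern more strictly, e.g. to the bracketed-timestamp prefix):

```go
import (
	"regexp"
	"strconv"
)

var codexTokensRe = regexp.MustCompile(`tokens used: (\d+)`)

// sumCodexTokens adds up every "tokens used: N" occurrence in the log.
func sumCodexTokens(log string) int {
	total := 0
	for _, m := range codexTokensRe.FindAllStringSubmatch(log, -1) {
		if n, err := strconv.Atoi(m[1]); err == nil {
			total += n
		}
	}
	return total
}
```

On the fixture above this returns 32169 + 28828 + 5000 = 65997, the test's expected value.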
+
+func TestParseLogFileWithCodexRustFormat(t *testing.T) {
+	// Create a temporary log file with the new Rust-based Codex format
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-codex-rust.log")
+
+	// This simulates the new Rust format from the Codex engine
+	logContent := `2025-01-15T10:30:00.123456Z INFO codex: Starting codex execution
+2025-01-15T10:30:00.234567Z DEBUG codex_core: Initializing MCP servers
+2025-01-15T10:30:01.123456Z INFO codex: Session initialized
+thinking
+Let me fetch the list of pull requests first to see what we're working with.
+2025-01-15T10:30:02.123456Z DEBUG codex_exec: Executing tool call
+tool github.list_pull_requests({"state": "closed", "per_page": 5})
+2025-01-15T10:30:03.456789Z DEBUG codex_core: Tool execution started
+2025-01-15T10:30:04.567890Z INFO codex: github.list_pull_requests(...) success in 2.1s
+thinking
+Now I need to get details for each PR to write a comprehensive summary.
+2025-01-15T10:30:05.123456Z DEBUG codex_exec: Executing tool call
+tool github.get_pull_request({"pull_number": 123})
+2025-01-15T10:30:06.234567Z INFO codex: github.get_pull_request(...) success in 0.8s
+2025-01-15T10:30:07.345678Z DEBUG codex_core: Processing response
+thinking
+I have all the information I need. Let me create a summary issue.
+2025-01-15T10:30:08.456789Z DEBUG codex_exec: Executing tool call
+tool safe_outputs.create_issue({"title": "PR Summary", "body": "..."})
+2025-01-15T10:30:09.567890Z INFO codex: safe_outputs.create_issue(...) success in 1.2s
+2025-01-15T10:30:10.123456Z DEBUG codex_core: Workflow completing
+tokens used: 15234
+2025-01-15T10:30:10.234567Z INFO codex: Execution complete`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Test with Codex engine to parse new Rust format
+	codexEngine := workflow.NewCodexEngine()
+	metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Check token usage extraction from Rust format
+	expectedTokens := 15234
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage)
+	}
+
+	// Check turns extraction from thinking sections (new Rust format uses standalone "thinking" lines)
+	expectedTurns := 3 // Three thinking sections in the test data
+	if metrics.Turns != expectedTurns {
+		t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns)
+	}
+
+	// Check tool calls extraction from new Rust format
+	expectedToolCount := 3
+	if len(metrics.ToolCalls) != expectedToolCount {
+		t.Errorf("Expected %d tool calls, got %d", expectedToolCount, len(metrics.ToolCalls))
+	}
+
+	// Verify the specific tools were detected
+	toolNames := make(map[string]bool)
+	for _, tool := range metrics.ToolCalls {
+		toolNames[tool.Name] = true
+	}
+
+	expectedTools := []string{"github_list_pull_requests", "github_get_pull_request", "safe_outputs_create_issue"}
+	for _, expectedTool := range expectedTools {
+		if !toolNames[expectedTool] {
+			t.Errorf("Expected tool %s not found in tool calls", expectedTool)
+		}
+	}
+}
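The expected names show dotted tool identifiers (`github.list_pull_requests`) recorded with underscores (`github_list_pull_requests`); the mapping is presumably as simple as the sketch below, though the real parser may also strip the argument payload first:

```go
import "strings"

// normalizeToolName converts a dotted tool identifier from the Codex log
// into the underscore form the metrics expect.
func normalizeToolName(raw string) string {
	return strings.ReplaceAll(raw, ".", "_")
}
```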
+
+func TestParseLogFileWithCodexMixedFormats(t *testing.T) {
+	// Create a temporary log file with mixed old TypeScript and new Rust formats
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-codex-mixed.log")
+
+	// Mix both formats to test backward compatibility
+	logContent := `[2025-08-13T00:24:45] Starting Codex workflow execution
+[2025-08-13T00:24:50] thinking
+Old format thinking section
+[2025-08-13T00:24:50] tool github.list_repos({"org": "test"})
+[2025-08-13T00:24:51] codex
+Response from old format
+2025-01-15T10:30:00.123456Z INFO codex: Starting execution
+thinking
+New Rust format thinking section
+tool github.create_issue({"title": "Test", "body": "Body"})
+2025-01-15T10:30:05.567890Z INFO codex: github.create_issue(...) success in 1.2s
+[2025-08-13T00:24:52] tokens used: 5000
+tokens used: 10000
+2025-01-15T10:30:10.234567Z INFO codex: Execution complete`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Test with Codex engine to parse mixed formats
+	codexEngine := workflow.NewCodexEngine()
+	metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Check token usage is summed from both formats
+	expectedTokens := 15000 // 5000 + 10000
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected token usage %d (summed from both formats), got %d", expectedTokens, metrics.TokenUsage)
+	}
+
+	// Check turns from both formats
+	expectedTurns := 2 // One from old format, one from new format
+	if metrics.Turns != expectedTurns {
+		t.Errorf("Expected turns %d (from both formats), got %d", expectedTurns, metrics.Turns)
+	}
+
+	// Check tool calls from both formats
+	expectedToolCount := 2
+	if len(metrics.ToolCalls) != expectedToolCount {
+		t.Errorf("Expected %d tool calls, got %d", expectedToolCount, len(metrics.ToolCalls))
+	}
+
+	// Verify the specific tools were detected from both formats
+	toolNames := make(map[string]bool)
+	for _, tool := range metrics.ToolCalls {
+		toolNames[tool.Name] = true
+	}
+
+	expectedTools := []string{"github_list_repos", "github_create_issue"}
+	for _, expectedTool := range expectedTools {
+		if !toolNames[expectedTool] {
+			t.Errorf("Expected tool %s not found in tool calls", expectedTool)
+		}
+	}
+}
+
+func TestParseLogFileWithMixedTokenFormats(t *testing.T) {
+	// Create a temporary log file with mixed token formats
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-mixed-tokens.log")
+
+	// Mix of Codex and non-Codex formats - should prioritize Codex summing
+	logContent := `[2025-08-13T04:38:03] tokens used: 1000
+tokens: 5000
+[2025-08-13T04:38:06] tokens used: 2000
+token_count: 10000`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Get the Codex engine for testing
+	registry := workflow.NewEngineRegistry()
+	codexEngine, err := registry.GetEngine("codex")
+	if err != nil {
+		t.Fatalf("Failed to get Codex engine: %v", err)
+	}
+
+	metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFile failed: %v", err)
+	}
+
+	// Should sum Codex entries: 1000 + 2000 = 3000 (ignoring non-Codex formats)
+	expectedTokens := 1000 + 2000
+	if metrics.TokenUsage != expectedTokens {
+		t.Errorf("Expected token usage %d (sum of Codex entries), got %d", expectedTokens, metrics.TokenUsage)
+	}
+}
+
+func TestExtractEngineFromAwInfoNestedDirectory(t *testing.T) {
+	tmpDir := testutil.TempDir(t, "test-*")
+
+	// Test Case 1: aw_info.json as a regular file
+	awInfoFile := filepath.Join(tmpDir, "aw_info.json")
+	awInfoContent := `{
+		"engine_id": "claude",
+		"engine_name": "Claude",
+		"model": "claude-3-sonnet",
+		"version": "20240620",
+		"workflow_name": "Test Claude",
+		"experimental": false,
+		"supports_tools_allowlist": true,
+		"supports_http_transport": false,
+		"run_id": 123456789,
+		"run_number": 42,
+		"run_attempt": "1",
+		"repository": "githubnext/gh-aw",
+		"ref": "refs/heads/main",
+		"sha": "abc123",
+		"actor": "testuser",
+		"event_name": "workflow_dispatch",
+		"created_at": "2025-08-13T13:36:39.704Z"
+	}`
+
+	err := os.WriteFile(awInfoFile, []byte(awInfoContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create aw_info.json file: %v", err)
+	}
+
+	// Test regular file extraction
+	engine := extractEngineFromAwInfo(awInfoFile, true)
+	if engine == nil {
+		t.Errorf("Expected to extract engine from regular aw_info.json file, got nil")
+	} else if engine.GetID() != "claude" {
+		t.Errorf("Expected engine ID 'claude', got '%s'", engine.GetID())
+	}
+
+	// Clean up for next test
+	os.Remove(awInfoFile)
+
+	// Test Case 2: aw_info.json as a directory containing the actual file
+	awInfoDir := filepath.Join(tmpDir, "aw_info.json")
+	err = os.Mkdir(awInfoDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create aw_info.json directory: %v", err)
+	}
+
+	// Create the nested aw_info.json file inside the directory
+	nestedAwInfoFile := filepath.Join(awInfoDir, "aw_info.json")
+	awInfoContentCodex := `{
+		"engine_id": "codex",
+		"engine_name": "Codex",
+		"model": "o4-mini",
+		"version": "",
+		"workflow_name": "Test Codex",
+		"experimental": true,
+		"supports_tools_allowlist": true,
+		"supports_http_transport": false,
+		"run_id": 987654321,
+		"run_number": 7,
+		"run_attempt": "1",
+		"repository": "githubnext/gh-aw",
+		"ref": "refs/heads/copilot/fix-24",
+		"sha": "def456",
+		"actor": "testuser2",
+		"event_name": "workflow_dispatch",
+		"created_at": "2025-08-13T13:36:39.704Z"
+	}`
+
+	err = os.WriteFile(nestedAwInfoFile, []byte(awInfoContentCodex), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create nested aw_info.json file: %v", err)
+	}
+
+	// Test directory-based extraction (the main fix)
+	engine = extractEngineFromAwInfo(awInfoDir, true)
+	if engine == nil {
+		t.Errorf("Expected to extract engine from aw_info.json directory, got nil")
+	} else if engine.GetID() != "codex" {
+		t.Errorf("Expected engine ID 'codex', got '%s'", engine.GetID())
+	}
+
+	// Test Case 3: Non-existent aw_info.json should return nil
+	nonExistentPath := filepath.Join(tmpDir, "nonexistent", "aw_info.json")
+	engine = extractEngineFromAwInfo(nonExistentPath, false)
+	if engine != nil {
+		t.Errorf("Expected nil for non-existent aw_info.json, got engine: %s", engine.GetID())
+	}
+
+	// Test Case 4: Directory without nested aw_info.json should return nil
+	emptyDir := filepath.Join(tmpDir, "empty_aw_info.json")
+	err = os.Mkdir(emptyDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create empty directory: %v", err)
+	}
+
+	engine = extractEngineFromAwInfo(emptyDir, false)
+	if engine != nil {
+		t.Errorf("Expected nil for directory without nested aw_info.json, got engine: %s", engine.GetID())
+	}
+
+	// Test Case 5: Invalid JSON should return nil
+	invalidAwInfoFile := filepath.Join(tmpDir, "invalid_aw_info.json")
+	invalidContent := `{invalid json content`
+	err = os.WriteFile(invalidAwInfoFile, []byte(invalidContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create invalid aw_info.json file: %v", err)
+	}
+
+	engine = extractEngineFromAwInfo(invalidAwInfoFile, false)
+	if engine != nil {
+		t.Errorf("Expected nil for invalid JSON aw_info.json, got engine: %s", engine.GetID())
+	}
+
+	// Test Case 6: Missing engine_id should return nil
+	missingEngineIDFile := filepath.Join(tmpDir, "missing_engine_id_aw_info.json")
+	missingEngineIDContent := `{
+		"workflow_name": "Test Workflow",
+		"run_id": 123456789
+	}`
+	err = os.WriteFile(missingEngineIDFile, []byte(missingEngineIDContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create aw_info.json file without engine_id: %v", err)
+	}
+
+	engine = extractEngineFromAwInfo(missingEngineIDFile, false)
+	if engine != nil {
+		t.Errorf("Expected nil for aw_info.json without engine_id, got engine: %s", engine.GetID())
+	}
+}
+
+func TestParseLogFileWithNonCodexTokensOnly(t *testing.T) {
+	// Create a temporary log file with only non-Codex token formats
+	tmpDir := testutil.TempDir(t, "test-*")
+	logFile := filepath.Join(tmpDir, "test-generic-tokens.log")
+
+	// Only non-Codex formats - should keep maximum behavior
+	logContent := `tokens: 5000
+token_count: 10000
+input_tokens: 2000`
+
+	err := os.WriteFile(logFile, []byte(logContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create test log file: %v", err)
+	}
+
+	// Without aw_info.json and specific engine, should return empty metrics
+	metrics, err := parseLogFileWithEngine(logFile, nil, false, false)
+	if err != nil {
+		t.Fatalf("parseLogFileWithEngine failed: %v", err)
+	}
+
+	// Without engine-specific parsing, should return 0
+	if metrics.TokenUsage != 0 {
+		t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage)
+	}
+}
+
+func TestExtractLogMetricsWithAwOutputFile(t *testing.T) {
+	// Create a temporary directory structure
+	tmpDir := testutil.TempDir(t, "test-*")
+	logsDir := filepath.Join(tmpDir, "run-123456")
+	err := os.Mkdir(logsDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create logs directory: %v", err)
+	}
+
+	// Create aw-output.json file
+	awOutputFile := filepath.Join(logsDir, "aw-output.json")
+	awOutputContent := `{
+		"status": "success",
+		"duration_ms": 45000,
+		"token_usage": 5432,
+		"estimated_cost": 0.123,
+		"turns": 8
+	}`
+	err = os.WriteFile(awOutputFile, []byte(awOutputContent), 0644)
+	if err != nil {
+		t.Fatalf("Failed to create aw-output.json file: %v", err)
+	}
+
+	// Test metrics extraction from aw-output.json
+	metrics, err := extractLogMetrics(logsDir, false)
+	if err != nil {
+		t.Fatalf("extractLogMetrics failed: %v", err)
+	}
+
+	// Verify metrics from aw-output.json
+	if metrics.TokenUsage != 5432 {
+		t.Errorf("Expected token usage 5432 from aw-output.json, got %d", metrics.TokenUsage)
+	}
+
+	if metrics.EstimatedCost != 0.123 {
+		t.Errorf("Expected cost 0.123 from aw-output.json, got %f", metrics.EstimatedCost)
+	}
+
+	if metrics.Turns != 8 {
+		t.Errorf("Expected turns 8 from aw-output.json, got %d", metrics.Turns)
+	}
+}
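The directory fallback covered above ("the main fix") could be handled by resolving the path before reading; a hypothetical sketch of that resolution step — the real `extractEngineFromAwInfo` then parses the JSON and looks up `engine_id` in the engine registry:

```go
import (
	"os"
	"path/filepath"
)

// resolveAwInfoPath handles the case where aw_info.json is itself a
// directory (as happens with some artifact layouts) by looking for an
// aw_info.json file nested inside it.
func resolveAwInfoPath(path string) (string, bool) {
	info, err := os.Stat(path)
	if err != nil {
		return "", false // Test Case 3: non-existent path
	}
	if info.IsDir() {
		nested := filepath.Join(path, "aw_info.json")
		if _, err := os.Stat(nested); err != nil {
			return "", false // Test Case 4: directory without the nested file
		}
		return nested, true // Test Case 2: directory wrapping the real file
	}
	return path, true // Test Case 1: plain file
}
```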
+Tokens used: 3456 +Estimated cost: $0.067 +2024-01-15T10:31:30Z Workflow completed` + + err := os.WriteFile(logFile, []byte(logContent), 0644) + if err != nil { + t.Fatalf("Failed to create test log file: %v", err) + } + + // Test with custom engine + customEngine := workflow.NewCustomEngine() + metrics, err := parseLogFileWithEngine(logFile, customEngine, false, false) + if err != nil { + t.Fatalf("parseLogFileWithEngine failed: %v", err) + } + + // Custom engine should extract basic token info + // Note: The actual behavior depends on the custom engine implementation + if metrics.TokenUsage < 0 { + t.Errorf("Expected non-negative token usage, got %d", metrics.TokenUsage) + } +} diff --git a/pkg/cli/logs_test.go b/pkg/cli/logs_test.go deleted file mode 100644 index 3f3fcb9ed4..0000000000 --- a/pkg/cli/logs_test.go +++ /dev/null @@ -1,1803 +0,0 @@ -package cli - -import ( - "archive/zip" - "context" - "errors" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/githubnext/gh-aw/pkg/testutil" - - "github.com/githubnext/gh-aw/pkg/cli/fileutil" - "github.com/githubnext/gh-aw/pkg/console" - "github.com/githubnext/gh-aw/pkg/workflow" -) - -func TestDownloadWorkflowLogs(t *testing.T) { - t.Skip("Skipping slow network-dependent test") - - // Test the DownloadWorkflowLogs function - // This should either fail with auth error (if not authenticated) - // or succeed with no results (if authenticated but no workflows match) - err := DownloadWorkflowLogs(context.Background(), "", 1, "", "", "./test-logs", "", "", 0, 0, "", false, false, false, false, false, false, false, 0, false, "summary.json", "") - - // If GitHub CLI is authenticated, the function may succeed but find no results - // If not authenticated, it should return an auth error - if err != nil { - // If there's an error, it should be an authentication or workflow-related error - errMsg := strings.ToLower(err.Error()) - if !strings.Contains(errMsg, "authentication required") && - !strings.Contains(errMsg, "failed to list workflow runs") && - !strings.Contains(errMsg, "exit status 1") { - t.Errorf("Expected authentication error, workflow listing error, or no error, got: %v", err) - } - } - // If err is nil, that's also acceptable (authenticated case with no results) - - // Clean up - os.RemoveAll("./test-logs") -} - -func TestFormatNumber(t *testing.T) { - tests := []struct { - input int - expected string - }{ - {0, "0"}, - {5, "5"}, - {42, "42"}, - {999, "999"}, - {1000, "1.00k"}, - {1200, "1.20k"}, - {1234, "1.23k"}, - {12000, "12.0k"}, - {12300, "12.3k"}, - {123000, "123k"}, - {999999, "1000k"}, - {1000000, "1.00M"}, - {1200000, "1.20M"}, - {1234567, "1.23M"}, - {12000000, "12.0M"}, - {12300000, "12.3M"}, - {123000000, "123M"}, - {999999999, "1000M"}, - {1000000000, "1.00B"}, - {1200000000, "1.20B"}, - {1234567890, "1.23B"}, - {12000000000, "12.0B"}, - {123000000000, "123B"}, - } - - for _, test := range tests { - result := console.FormatNumber(test.input) - if result != test.expected { - t.Errorf("console.FormatNumber(%d) = %s, expected %s", test.input, result, test.expected) - } - } -} - -func TestParseLogFileWithoutAwInfo(t *testing.T) { - // Create a temporary log file - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test.log") - - logContent := `2024-01-15T10:30:00Z Starting workflow execution -2024-01-15T10:30:15Z Claude API request initiated -2024-01-15T10:30:45Z Input tokens: 1250 -2024-01-15T10:30:45Z Output tokens: 850 -2024-01-15T10:30:45Z Total tokens used: 2100 -2024-01-15T10:30:45Z 
Cost: $0.025 -2024-01-15T10:31:30Z Workflow completed successfully` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test parseLogFileWithEngine without an engine (simulates no aw_info.json) - metrics, err := parseLogFileWithEngine(logFile, nil, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Without aw_info.json, should return empty metrics - if metrics.TokenUsage != 0 { - t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage) - } - - // Check cost - should be 0 without engine-specific parsing - if metrics.EstimatedCost != 0 { - t.Errorf("Expected cost 0 (no aw_info.json), got %f", metrics.EstimatedCost) - } - - // Duration is no longer extracted from logs - using GitHub API timestamps instead -} - -func TestExtractJSONMetrics(t *testing.T) { - tests := []struct { - name string - line string - expectedTokens int - expectedCost float64 - }{ - { - name: "Claude streaming format with usage", - line: `{"type": "message_delta", "delta": {"usage": {"input_tokens": 123, "output_tokens": 456}}}`, - expectedTokens: 579, // 123 + 456 - }, - { - name: "Simple token count (timestamp ignored)", - line: `{"tokens": 1234, "timestamp": "2024-01-15T10:30:00Z"}`, - expectedTokens: 1234, - }, - { - name: "Cost information", - line: `{"cost": 0.045, "price": 0.01}`, - expectedCost: 0.045, // Should pick up the first one found - }, - { - name: "Usage object with cost", - line: `{"usage": {"total_tokens": 999}, "billing": {"cost": 0.123}}`, - expectedTokens: 999, - expectedCost: 0.123, - }, - { - name: "Claude result format with total_cost_usd", - line: `{"type": "result", "total_cost_usd": 0.8606770999999999, "usage": {"input_tokens": 126, "output_tokens": 7685}}`, - expectedTokens: 7811, // 126 + 7685 - expectedCost: 0.8606770999999999, - }, - { - name: "Claude result format with cache tokens", - line: `{"type": "result", "total_cost_usd": 0.86, "usage": {"input_tokens": 126, "cache_creation_input_tokens": 100034, "cache_read_input_tokens": 1232098, "output_tokens": 7685}}`, - expectedTokens: 1339943, // 126 + 100034 + 1232098 + 7685 - expectedCost: 0.86, - }, - { - name: "Not JSON", - line: "regular log line with tokens: 123", - expectedTokens: 0, // Should return zero since it's not JSON - }, - { - name: "Invalid JSON", - line: `{"invalid": json}`, - expectedTokens: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - metrics := extractJSONMetrics(tt.line, false) - - if metrics.TokenUsage != tt.expectedTokens { - t.Errorf("Expected tokens %d, got %d", tt.expectedTokens, metrics.TokenUsage) - } - - if metrics.EstimatedCost != tt.expectedCost { - t.Errorf("Expected cost %f, got %f", tt.expectedCost, metrics.EstimatedCost) - } - }) - } -} - -func TestParseLogFileWithJSON(t *testing.T) { - // Create a temporary log file with mixed JSON and text format - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-mixed.log") - - logContent := `2024-01-15T10:30:00Z Starting workflow execution -{"type": "message_start"} -{"type": "content_block_delta", "delta": {"type": "text", "text": "Hello"}} -{"type": "message_delta", "delta": {"usage": {"input_tokens": 150, "output_tokens": 200}}} -Regular log line: tokens: 1000 -{"cost": 0.035} -2024-01-15T10:31:30Z Workflow completed successfully` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test 
log file: %v", err) - } - - metrics, err := parseLogFileWithEngine(logFile, nil, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Without aw_info.json and specific engine, should return empty metrics - if metrics.TokenUsage != 0 { - t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage) - } - - // Should have no cost without engine-specific parsing - if metrics.EstimatedCost != 0 { - t.Errorf("Expected cost 0 (no aw_info.json), got %f", metrics.EstimatedCost) - } - - // Duration is no longer extracted from logs - using GitHub API timestamps instead -} - -func TestConvertToInt(t *testing.T) { - tests := []struct { - value any - expected int - }{ - {123, 123}, - {int64(456), 456}, - {789.0, 789}, - {"123", 123}, - {"invalid", 0}, - {nil, 0}, - } - - for _, tt := range tests { - result := workflow.ConvertToInt(tt.value) - if result != tt.expected { - t.Errorf("ConvertToInt(%v) = %d, expected %d", tt.value, result, tt.expected) - } - } -} - -func TestConvertToFloat(t *testing.T) { - tests := []struct { - value any - expected float64 - }{ - {123.45, 123.45}, - {123, 123.0}, - {int64(456), 456.0}, - {"123.45", 123.45}, - {"invalid", 0.0}, - {nil, 0.0}, - } - - for _, tt := range tests { - result := workflow.ConvertToFloat(tt.value) - if result != tt.expected { - t.Errorf("ConvertToFloat(%v) = %f, expected %f", tt.value, result, tt.expected) - } - } -} - -func TestDirExists(t *testing.T) { - tmpDir := testutil.TempDir(t, "test-*") - - // Test existing directory - if !fileutil.DirExists(tmpDir) { - t.Errorf("DirExists should return true for existing directory") - } - - // Test non-existing directory - nonExistentDir := filepath.Join(tmpDir, "does-not-exist") - if fileutil.DirExists(nonExistentDir) { - t.Errorf("DirExists should return false for non-existing directory") - } - - // Test file vs directory - testFile := filepath.Join(tmpDir, "testfile") - err := os.WriteFile(testFile, []byte("test"), 0644) - if err != nil { - t.Fatalf("Failed to create test file: %v", err) - } - - if fileutil.DirExists(testFile) { - t.Errorf("DirExists should return false for a file") - } -} - -func TestIsDirEmpty(t *testing.T) { - tmpDir := testutil.TempDir(t, "test-*") - - // Test empty directory - emptyDir := filepath.Join(tmpDir, "empty") - err := os.Mkdir(emptyDir, 0755) - if err != nil { - t.Fatalf("Failed to create empty directory: %v", err) - } - - if !fileutil.IsDirEmpty(emptyDir) { - t.Errorf("IsDirEmpty should return true for empty directory") - } - - // Test directory with files - nonEmptyDir := filepath.Join(tmpDir, "nonempty") - err = os.Mkdir(nonEmptyDir, 0755) - if err != nil { - t.Fatalf("Failed to create non-empty directory: %v", err) - } - - testFile := filepath.Join(nonEmptyDir, "testfile") - err = os.WriteFile(testFile, []byte("test"), 0644) - if err != nil { - t.Fatalf("Failed to create test file: %v", err) - } - - if fileutil.IsDirEmpty(nonEmptyDir) { - t.Errorf("IsDirEmpty should return false for directory with files") - } - - // Test non-existing directory - nonExistentDir := filepath.Join(tmpDir, "does-not-exist") - if !fileutil.IsDirEmpty(nonExistentDir) { - t.Errorf("IsDirEmpty should return true for non-existing directory") - } -} - -func TestErrNoArtifacts(t *testing.T) { - // Test that ErrNoArtifacts is properly defined and can be used with errors.Is - err := ErrNoArtifacts - if !errors.Is(err, ErrNoArtifacts) { - t.Errorf("errors.Is should return true for ErrNoArtifacts") - } - - // Test wrapping - wrappedErr := 
errors.New("wrapped: " + ErrNoArtifacts.Error()) - if errors.Is(wrappedErr, ErrNoArtifacts) { - t.Errorf("errors.Is should return false for wrapped error that doesn't use errors.Wrap") - } -} - -func TestListWorkflowRunsWithPagination(t *testing.T) { - // Test that listWorkflowRunsWithPagination properly adds beforeDate filter - // Since we can't easily mock the GitHub CLI, we'll test with known auth issues - - // This should fail with authentication error (if not authenticated) - // or succeed with empty results (if authenticated but no workflows match) - runs, _, err := listWorkflowRunsWithPagination(ListWorkflowRunsOptions{ - WorkflowName: "nonexistent-workflow", - Limit: 5, - BeforeDate: "2024-01-01T00:00:00Z", - ProcessedCount: 0, - TargetCount: 5, - Verbose: false, - }) - - if err != nil { - // If there's an error, it should be an authentication error or workflow not found - if !strings.Contains(err.Error(), "authentication required") && !strings.Contains(err.Error(), "failed to list workflow runs") { - t.Errorf("Expected authentication error or workflow error, got: %v", err) - } - } else { - // If no error, should return empty results for nonexistent workflow - if len(runs) > 0 { - t.Errorf("Expected empty results for nonexistent workflow, got %d runs", len(runs)) - } - } -} - -func TestIterativeAlgorithmConstants(t *testing.T) { - // Test that our constants are reasonable - if MaxIterations <= 0 { - t.Errorf("MaxIterations should be positive, got %d", MaxIterations) - } - if MaxIterations > 20 { - t.Errorf("MaxIterations seems too high (%d), could cause performance issues", MaxIterations) - } - - if BatchSize <= 0 { - t.Errorf("BatchSize should be positive, got %d", BatchSize) - } - if BatchSize > 100 { - t.Errorf("BatchSize seems too high (%d), might hit GitHub API limits", BatchSize) - } -} - -func TestExtractJSONCost(t *testing.T) { - tests := []struct { - name string - data map[string]any - expected float64 - }{ - { - name: "total_cost_usd field", - data: map[string]any{"total_cost_usd": 0.8606770999999999}, - expected: 0.8606770999999999, - }, - { - name: "traditional cost field", - data: map[string]any{"cost": 1.23}, - expected: 1.23, - }, - { - name: "nested billing cost", - data: map[string]any{"billing": map[string]any{"cost": 2.45}}, - expected: 2.45, - }, - { - name: "no cost fields", - data: map[string]any{"tokens": 1000}, - expected: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := workflow.ExtractJSONCost(tt.data) - if result != tt.expected { - t.Errorf("ExtractJSONCost() = %f, expected %f", result, tt.expected) - } - }) - } -} - -func TestParseLogFileWithClaudeResult(t *testing.T) { - // Create a temporary log file with the exact Claude result format from the issue - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-claude.log") - - // This is the exact JSON format provided in the issue (compacted to single line) - claudeResultJSON := `{"type": "result", "subtype": "success", "is_error": false, "duration_ms": 145056, "duration_api_ms": 142970, "num_turns": 66, "result": "**Integration test execution complete. 
All objectives achieved successfully.** 🎯", "session_id": "d0a2839f-3569-42e9-9ccb-70835de4e760", "total_cost_usd": 0.8606770999999999, "usage": {"input_tokens": 126, "cache_creation_input_tokens": 100034, "cache_read_input_tokens": 1232098, "output_tokens": 7685, "server_tool_use": {"web_search_requests": 0}, "service_tier": "standard"}}` - - logContent := `2024-01-15T10:30:00Z Starting Claude workflow execution -Claude processing request... -` + claudeResultJSON + ` -2024-01-15T10:32:30Z Workflow completed successfully` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test with Claude engine to parse Claude-specific logs - claudeEngine := workflow.NewClaudeEngine() - metrics, err := parseLogFileWithEngine(logFile, claudeEngine, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Check total token usage includes all token types from Claude - expectedTokens := 126 + 100034 + 1232098 + 7685 // input + cache_creation + cache_read + output - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage) - } - - // Check cost extraction from total_cost_usd - expectedCost := 0.8606770999999999 - if metrics.EstimatedCost != expectedCost { - t.Errorf("Expected cost %f, got %f", expectedCost, metrics.EstimatedCost) - } - - // Check turns extraction from num_turns - expectedTurns := 66 - if metrics.Turns != expectedTurns { - t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns) - } - - // Duration is no longer extracted from logs - using GitHub API timestamps instead -} - -func TestParseLogFileWithCodexFormat(t *testing.T) { - // Create a temporary log file with the Codex output format from the issue - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-codex.log") - - // This is the exact Codex format provided in the issue with thinking sections added - logContent := `[2025-08-13T00:24:45] Starting Codex workflow execution -[2025-08-13T00:24:50] thinking -I need to analyze the pull request details first. -[2025-08-13T00:24:50] codex - -I'm ready to generate a Codex PR summary, but I need the pull request number to fetch its details. Could you please share the PR number (and confirm the repo/owner if it isn't ` + "`githubnext/gh-aw`" + `)? -[2025-08-13T00:24:50] thinking -Now I need to wait for the user's response. 
-[2025-08-13T00:24:50] tokens used: 13934 -[2025-08-13T00:24:55] Workflow completed successfully` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test with Codex engine to parse Codex-specific logs - codexEngine := workflow.NewCodexEngine() - metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Check token usage extraction from Codex format - expectedTokens := 13934 - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage) - } - - // Check turns extraction from thinking sections - expectedTurns := 2 // Two thinking sections in the test data - if metrics.Turns != expectedTurns { - t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns) - } - - // Duration is no longer extracted from logs - using GitHub API timestamps instead -} - -func TestParseLogFileWithCodexTokenSumming(t *testing.T) { - // Create a temporary log file with multiple Codex token entries - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-codex-tokens.log") - - // Simulate the exact Codex format from the issue - logContent := ` ] -} -[2025-08-13T04:38:03] tokens used: 32169 -[2025-08-13T04:38:06] codex -I've posted the PR summary comment with analysis and recommendations. Let me know if you'd like to adjust any details or add further insights! -[2025-08-13T04:38:06] tokens used: 28828 -[2025-08-13T04:38:10] Processing complete -[2025-08-13T04:38:15] tokens used: 5000` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test with Codex engine - codexEngine := workflow.NewCodexEngine() - metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Check that all token entries are summed - expectedTokens := 32169 + 28828 + 5000 // 65997 - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected summed token usage %d, got %d", expectedTokens, metrics.TokenUsage) - } -} - -func TestParseLogFileWithCodexRustFormat(t *testing.T) { - // Create a temporary log file with the new Rust-based Codex format - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-codex-rust.log") - - // This simulates the new Rust format from the Codex engine - logContent := `2025-01-15T10:30:00.123456Z INFO codex: Starting codex execution -2025-01-15T10:30:00.234567Z DEBUG codex_core: Initializing MCP servers -2025-01-15T10:30:01.123456Z INFO codex: Session initialized -thinking -Let me fetch the list of pull requests first to see what we're working with. -2025-01-15T10:30:02.123456Z DEBUG codex_exec: Executing tool call -tool github.list_pull_requests({"state": "closed", "per_page": 5}) -2025-01-15T10:30:03.456789Z DEBUG codex_core: Tool execution started -2025-01-15T10:30:04.567890Z INFO codex: github.list_pull_requests(...) success in 2.1s -thinking -Now I need to get details for each PR to write a comprehensive summary. -2025-01-15T10:30:05.123456Z DEBUG codex_exec: Executing tool call -tool github.get_pull_request({"pull_number": 123}) -2025-01-15T10:30:06.234567Z INFO codex: github.get_pull_request(...) success in 0.8s -2025-01-15T10:30:07.345678Z DEBUG codex_core: Processing response -thinking -I have all the information I need. 
Let me create a summary issue. -2025-01-15T10:30:08.456789Z DEBUG codex_exec: Executing tool call -tool safe_outputs.create_issue({"title": "PR Summary", "body": "..."}) -2025-01-15T10:30:09.567890Z INFO codex: safe_outputs.create_issue(...) success in 1.2s -2025-01-15T10:30:10.123456Z DEBUG codex_core: Workflow completing -tokens used: 15234 -2025-01-15T10:30:10.234567Z INFO codex: Execution complete` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test with Codex engine to parse new Rust format - codexEngine := workflow.NewCodexEngine() - metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Check token usage extraction from Rust format - expectedTokens := 15234 - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected token usage %d, got %d", expectedTokens, metrics.TokenUsage) - } - - // Check turns extraction from thinking sections (new Rust format uses standalone "thinking" lines) - expectedTurns := 3 // Three thinking sections in the test data - if metrics.Turns != expectedTurns { - t.Errorf("Expected turns %d, got %d", expectedTurns, metrics.Turns) - } - - // Check tool calls extraction from new Rust format - expectedToolCount := 3 - if len(metrics.ToolCalls) != expectedToolCount { - t.Errorf("Expected %d tool calls, got %d", expectedToolCount, len(metrics.ToolCalls)) - } - - // Verify the specific tools were detected - toolNames := make(map[string]bool) - for _, tool := range metrics.ToolCalls { - toolNames[tool.Name] = true - } - - expectedTools := []string{"github_list_pull_requests", "github_get_pull_request", "safe_outputs_create_issue"} - for _, expectedTool := range expectedTools { - if !toolNames[expectedTool] { - t.Errorf("Expected tool %s not found in tool calls", expectedTool) - } - } -} - -func TestParseLogFileWithCodexMixedFormats(t *testing.T) { - // Create a temporary log file with mixed old TypeScript and new Rust formats - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-codex-mixed.log") - - // Mix both formats to test backward compatibility - logContent := `[2025-08-13T00:24:45] Starting Codex workflow execution -[2025-08-13T00:24:50] thinking -Old format thinking section -[2025-08-13T00:24:50] tool github.list_repos({"org": "test"}) -[2025-08-13T00:24:51] codex -Response from old format -2025-01-15T10:30:00.123456Z INFO codex: Starting execution -thinking -New Rust format thinking section -tool github.create_issue({"title": "Test", "body": "Body"}) -2025-01-15T10:30:05.567890Z INFO codex: github.create_issue(...) 
success in 1.2s -[2025-08-13T00:24:52] tokens used: 5000 -tokens used: 10000 -2025-01-15T10:30:10.234567Z INFO codex: Execution complete` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Test with Codex engine to parse mixed formats - codexEngine := workflow.NewCodexEngine() - metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Check token usage is summed from both formats - expectedTokens := 15000 // 5000 + 10000 - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected token usage %d (summed from both formats), got %d", expectedTokens, metrics.TokenUsage) - } - - // Check turns from both formats - expectedTurns := 2 // One from old format, one from new format - if metrics.Turns != expectedTurns { - t.Errorf("Expected turns %d (from both formats), got %d", expectedTurns, metrics.Turns) - } - - // Check tool calls from both formats - expectedToolCount := 2 - if len(metrics.ToolCalls) != expectedToolCount { - t.Errorf("Expected %d tool calls, got %d", expectedToolCount, len(metrics.ToolCalls)) - } - - // Verify the specific tools were detected from both formats - toolNames := make(map[string]bool) - for _, tool := range metrics.ToolCalls { - toolNames[tool.Name] = true - } - - expectedTools := []string{"github_list_repos", "github_create_issue"} - for _, expectedTool := range expectedTools { - if !toolNames[expectedTool] { - t.Errorf("Expected tool %s not found in tool calls", expectedTool) - } - } -} - -func TestParseLogFileWithMixedTokenFormats(t *testing.T) { - // Create a temporary log file with mixed token formats - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-mixed-tokens.log") - - // Mix of Codex and non-Codex formats - should prioritize Codex summing - logContent := `[2025-08-13T04:38:03] tokens used: 1000 -tokens: 5000 -[2025-08-13T04:38:06] tokens used: 2000 -token_count: 10000` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Get the Codex engine for testing - registry := workflow.NewEngineRegistry() - codexEngine, err := registry.GetEngine("codex") - if err != nil { - t.Fatalf("Failed to get Codex engine: %v", err) - } - - metrics, err := parseLogFileWithEngine(logFile, codexEngine, false, false) - if err != nil { - t.Fatalf("parseLogFile failed: %v", err) - } - - // Should sum Codex entries: 1000 + 2000 = 3000 (ignoring non-Codex formats) - expectedTokens := 1000 + 2000 - if metrics.TokenUsage != expectedTokens { - t.Errorf("Expected token usage %d (sum of Codex entries), got %d", expectedTokens, metrics.TokenUsage) - } -} - -func TestExtractEngineFromAwInfoNestedDirectory(t *testing.T) { - tmpDir := testutil.TempDir(t, "test-*") - - // Test Case 1: aw_info.json as a regular file - awInfoFile := filepath.Join(tmpDir, "aw_info.json") - awInfoContent := `{ - "engine_id": "claude", - "engine_name": "Claude", - "model": "claude-3-sonnet", - "version": "20240620", - "workflow_name": "Test Claude", - "experimental": false, - "supports_tools_allowlist": true, - "supports_http_transport": false, - "run_id": 123456789, - "run_number": 42, - "run_attempt": "1", - "repository": "githubnext/gh-aw", - "ref": "refs/heads/main", - "sha": "abc123", - "actor": "testuser", - "event_name": "workflow_dispatch", - "created_at": "2025-08-13T13:36:39.704Z" - }` - - err := 
os.WriteFile(awInfoFile, []byte(awInfoContent), 0644) - if err != nil { - t.Fatalf("Failed to create aw_info.json file: %v", err) - } - - // Test regular file extraction - engine := extractEngineFromAwInfo(awInfoFile, true) - if engine == nil { - t.Errorf("Expected to extract engine from regular aw_info.json file, got nil") - } else if engine.GetID() != "claude" { - t.Errorf("Expected engine ID 'claude', got '%s'", engine.GetID()) - } - - // Clean up for next test - os.Remove(awInfoFile) - - // Test Case 2: aw_info.json as a directory containing the actual file - awInfoDir := filepath.Join(tmpDir, "aw_info.json") - err = os.Mkdir(awInfoDir, 0755) - if err != nil { - t.Fatalf("Failed to create aw_info.json directory: %v", err) - } - - // Create the nested aw_info.json file inside the directory - nestedAwInfoFile := filepath.Join(awInfoDir, "aw_info.json") - awInfoContentCodex := `{ - "engine_id": "codex", - "engine_name": "Codex", - "model": "o4-mini", - "version": "", - "workflow_name": "Test Codex", - "experimental": true, - "supports_tools_allowlist": true, - "supports_http_transport": false, - "run_id": 987654321, - "run_number": 7, - "run_attempt": "1", - "repository": "githubnext/gh-aw", - "ref": "refs/heads/copilot/fix-24", - "sha": "def456", - "actor": "testuser2", - "event_name": "workflow_dispatch", - "created_at": "2025-08-13T13:36:39.704Z" - }` - - err = os.WriteFile(nestedAwInfoFile, []byte(awInfoContentCodex), 0644) - if err != nil { - t.Fatalf("Failed to create nested aw_info.json file: %v", err) - } - - // Test directory-based extraction (the main fix) - engine = extractEngineFromAwInfo(awInfoDir, true) - if engine == nil { - t.Errorf("Expected to extract engine from aw_info.json directory, got nil") - } else if engine.GetID() != "codex" { - t.Errorf("Expected engine ID 'codex', got '%s'", engine.GetID()) - } - - // Test Case 3: Non-existent aw_info.json should return nil - nonExistentPath := filepath.Join(tmpDir, "nonexistent", "aw_info.json") - engine = extractEngineFromAwInfo(nonExistentPath, false) - if engine != nil { - t.Errorf("Expected nil for non-existent aw_info.json, got engine: %s", engine.GetID()) - } - - // Test Case 4: Directory without nested aw_info.json should return nil - emptyDir := filepath.Join(tmpDir, "empty_aw_info.json") - err = os.Mkdir(emptyDir, 0755) - if err != nil { - t.Fatalf("Failed to create empty directory: %v", err) - } - - engine = extractEngineFromAwInfo(emptyDir, false) - if engine != nil { - t.Errorf("Expected nil for directory without nested aw_info.json, got engine: %s", engine.GetID()) - } - - // Test Case 5: Invalid JSON should return nil - invalidAwInfoFile := filepath.Join(tmpDir, "invalid_aw_info.json") - invalidContent := `{invalid json content` - err = os.WriteFile(invalidAwInfoFile, []byte(invalidContent), 0644) - if err != nil { - t.Fatalf("Failed to create invalid aw_info.json file: %v", err) - } - - engine = extractEngineFromAwInfo(invalidAwInfoFile, false) - if engine != nil { - t.Errorf("Expected nil for invalid JSON aw_info.json, got engine: %s", engine.GetID()) - } - - // Test Case 6: Missing engine_id should return nil - missingEngineIDFile := filepath.Join(tmpDir, "missing_engine_id_aw_info.json") - missingEngineIDContent := `{ - "workflow_name": "Test Workflow", - "run_id": 123456789 - }` - err = os.WriteFile(missingEngineIDFile, []byte(missingEngineIDContent), 0644) - if err != nil { - t.Fatalf("Failed to create aw_info.json file without engine_id: %v", err) - } - - engine = 
extractEngineFromAwInfo(missingEngineIDFile, false) - if engine != nil { - t.Errorf("Expected nil for aw_info.json without engine_id, got engine: %s", engine.GetID()) - } -} - -func TestParseLogFileWithNonCodexTokensOnly(t *testing.T) { - // Create a temporary log file with only non-Codex token formats - tmpDir := testutil.TempDir(t, "test-*") - logFile := filepath.Join(tmpDir, "test-generic-tokens.log") - - // Only non-Codex formats - should keep maximum behavior - logContent := `tokens: 5000 -token_count: 10000 -input_tokens: 2000` - - err := os.WriteFile(logFile, []byte(logContent), 0644) - if err != nil { - t.Fatalf("Failed to create test log file: %v", err) - } - - // Without aw_info.json and specific engine, should return empty metrics - metrics, err := parseLogFileWithEngine(logFile, nil, false, false) - if err != nil { - t.Fatalf("parseLogFileWithEngine failed: %v", err) - } - - // Without engine-specific parsing, should return 0 - if metrics.TokenUsage != 0 { - t.Errorf("Expected token usage 0 (no aw_info.json), got %d", metrics.TokenUsage) - } -} - -func TestDownloadWorkflowLogsWithEngineFilter(t *testing.T) { - t.Skip("Skipping slow network-dependent test") - - // Test that the engine filter parameter is properly validated and passed through - tests := []struct { - name string - engine string - expectError bool - errorText string - }{ - { - name: "valid claude engine", - engine: "claude", - expectError: false, - }, - { - name: "valid codex engine", - engine: "codex", - expectError: false, - }, - { - name: "valid copilot engine", - engine: "copilot", - expectError: false, - }, - { - name: "empty engine (no filter)", - engine: "", - expectError: false, - }, - { - name: "invalid engine", - engine: "gpt", - expectError: true, - errorText: "invalid engine value 'gpt'", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This function should validate the engine parameter - // If invalid, it would exit in the actual command but we can't test that easily - // So we just test that valid engines don't cause immediate errors - if !tt.expectError { - // For valid engines, test that the function can be called without panic - // It may still fail with auth errors, which is expected - err := DownloadWorkflowLogs(context.Background(), "", 1, "", "", "./test-logs", tt.engine, "", 0, 0, "", false, false, false, false, false, false, false, 0, false, "summary.json", "") - - // Clean up any created directories - os.RemoveAll("./test-logs") - - // If there's an error, it should be auth or workflow-related, not parameter validation - if err != nil { - errMsg := strings.ToLower(err.Error()) - if strings.Contains(errMsg, "invalid engine") { - t.Errorf("Got engine validation error for valid engine '%s': %v", tt.engine, err) - } - } - } - }) - } -} -func TestLogsCommandFlags(t *testing.T) { - // Test that the logs command has the expected flags including the new engine flag - cmd := NewLogsCommand() - - // Check that all expected flags are present - expectedFlags := []string{"count", "start-date", "end-date", "output", "engine", "ref", "before-run-id", "after-run-id"} - - for _, flagName := range expectedFlags { - flag := cmd.Flags().Lookup(flagName) - if flag == nil { - t.Errorf("Expected flag '%s' not found in logs command", flagName) - } - } - - // Test ref flag specifically - refFlag := cmd.Flags().Lookup("ref") - if refFlag == nil { - t.Fatal("Ref flag not found") - } - - if refFlag.Usage != "Filter runs by branch or tag name (e.g., main, v1.0.0)" { - 
t.Errorf("Unexpected ref flag usage text: %s", refFlag.Usage) - } - - if refFlag.DefValue != "" { - t.Errorf("Expected ref flag default value to be empty, got: %s", refFlag.DefValue) - } - - // Test before-run-id flag - beforeRunIDFlag := cmd.Flags().Lookup("before-run-id") - if beforeRunIDFlag == nil { - t.Fatal("Before-run-id flag not found") - } - - if beforeRunIDFlag.Usage != "Filter runs with database ID before this value (exclusive)" { - t.Errorf("Unexpected before-run-id flag usage text: %s", beforeRunIDFlag.Usage) - } - - // Test after-run-id flag - afterRunIDFlag := cmd.Flags().Lookup("after-run-id") - if afterRunIDFlag == nil { - t.Fatal("After-run-id flag not found") - } - - if afterRunIDFlag.Usage != "Filter runs with database ID after this value (exclusive)" { - t.Errorf("Unexpected after-run-id flag usage text: %s", afterRunIDFlag.Usage) - } - - // Test engine flag specifically - engineFlag := cmd.Flags().Lookup("engine") - if engineFlag == nil { - t.Fatal("Engine flag not found") - } - - if engineFlag.Usage != "Filter logs by AI engine (claude, codex, copilot, custom)" { - t.Errorf("Unexpected engine flag usage text: %s", engineFlag.Usage) - } - - if engineFlag.DefValue != "" { - t.Errorf("Expected engine flag default value to be empty, got: %s", engineFlag.DefValue) - } - - // Test that engine flag has the -e shorthand for consistency with other commands - if engineFlag.Shorthand != "e" { - t.Errorf("Expected engine flag shorthand to be 'e', got: %s", engineFlag.Shorthand) - } -} - -func TestFormatFileSize(t *testing.T) { - tests := []struct { - size int64 - expected string - }{ - {0, "0 B"}, - {100, "100 B"}, - {1024, "1.0 KB"}, - {1536, "1.5 KB"}, // 1.5 * 1024 - {1048576, "1.0 MB"}, // 1024 * 1024 - {2097152, "2.0 MB"}, // 2 * 1024 * 1024 - {1073741824, "1.0 GB"}, // 1024^3 - {1099511627776, "1.0 TB"}, // 1024^4 - } - - for _, tt := range tests { - result := console.FormatFileSize(tt.size) - if result != tt.expected { - t.Errorf("console.FormatFileSize(%d) = %q, expected %q", tt.size, result, tt.expected) - } - } -} - -func TestExtractLogMetricsWithAwOutputFile(t *testing.T) { - // Create a temporary directory with safe_output.jsonl - tmpDir := testutil.TempDir(t, "test-*") - - // Create safe_output.jsonl file - awOutputPath := filepath.Join(tmpDir, "safe_output.jsonl") - awOutputContent := "This is the agent's output content.\nIt contains multiple lines." 
- err := os.WriteFile(awOutputPath, []byte(awOutputContent), 0644) - if err != nil { - t.Fatalf("Failed to create safe_output.jsonl: %v", err) - } - - // Test that extractLogMetrics doesn't fail with safe_output.jsonl present - metrics, err := extractLogMetrics(tmpDir, false) - if err != nil { - t.Fatalf("extractLogMetrics failed: %v", err) - } - - // Without an engine, should return empty metrics but not error - if metrics.TokenUsage != 0 { - t.Errorf("Expected token usage 0 (no engine), got %d", metrics.TokenUsage) - } - - // Test verbose mode to ensure it detects the file - // We can't easily test the console output, but we can ensure it doesn't error - metrics, err = extractLogMetrics(tmpDir, true) - if err != nil { - t.Fatalf("extractLogMetrics in verbose mode failed: %v", err) - } -} - -func TestCustomEngineParseLogMetrics(t *testing.T) { - // Test that custom engine tries both Claude and Codex parsing approaches - customEngine := workflow.NewCustomEngine() - - // Test Case 1: Claude-style logs (properly formatted as JSON array) - claudeLogContent := `[{"type": "message", "content": "Starting workflow"}, {"type": "result", "subtype": "success", "is_error": false, "num_turns": 42, "total_cost_usd": 1.5, "usage": {"input_tokens": 1000, "output_tokens": 500}}]` - - metrics := customEngine.ParseLogMetrics(claudeLogContent, false) - - // Should extract turns, tokens, and cost from Claude format - if metrics.Turns != 42 { - t.Errorf("Expected turns 42 from Claude-style logs, got %d", metrics.Turns) - } - if metrics.TokenUsage != 1500 { - t.Errorf("Expected token usage 1500 from Claude-style logs, got %d", metrics.TokenUsage) - } - if metrics.EstimatedCost != 1.5 { - t.Errorf("Expected cost 1.5 from Claude-style logs, got %f", metrics.EstimatedCost) - } - - // Test Case 2: Codex-style logs - codexLogContent := `[2025-08-13T00:24:45] Starting workflow -[2025-08-13T00:24:50] thinking -I need to analyze the problem. -[2025-08-13T00:24:51] codex -Working on solution. -[2025-08-13T00:24:52] thinking -Now I'll implement the solution. 
-[2025-08-13T00:24:53] tokens used: 5000 -[2025-08-13T00:24:55] Workflow completed` - - metrics = customEngine.ParseLogMetrics(codexLogContent, false) - - // Should extract turns and tokens from Codex format - if metrics.Turns != 2 { - t.Errorf("Expected turns 2 from Codex-style logs, got %d", metrics.Turns) - } - if metrics.TokenUsage != 5000 { - t.Errorf("Expected token usage 5000 from Codex-style logs, got %d", metrics.TokenUsage) - } - - // Test Case 3: Generic logs (fallback - no error detection) - // Custom engine no longer has its own error detection patterns - // It relies on Claude/Codex parsers which don't match plain text logs - genericLogContent := `2025-08-13T00:24:45 Starting workflow -2025-08-13T00:24:50 Processing request -2025-08-13T00:24:52 Warning: Something happened -2025-08-13T00:24:53 Error: Failed to process -2025-08-13T00:24:55 Workflow completed` - - metrics = customEngine.ParseLogMetrics(genericLogContent, false) - - // Should fall back to basic parsing (no metrics extracted) - if metrics.Turns != 0 { - t.Errorf("Expected turns 0 from generic logs, got %d", metrics.Turns) - } - if metrics.TokenUsage != 0 { - t.Errorf("Expected token usage 0 from generic logs, got %d", metrics.TokenUsage) - } - // Error patterns have been removed - no error/warning counting -} - -func TestRunIDFilteringLogic(t *testing.T) { - // Test the run ID filtering logic in isolation - testRuns := []WorkflowRun{ - {DatabaseID: 1000, WorkflowName: "Test Workflow"}, - {DatabaseID: 1500, WorkflowName: "Test Workflow"}, - {DatabaseID: 2000, WorkflowName: "Test Workflow"}, - {DatabaseID: 2500, WorkflowName: "Test Workflow"}, - {DatabaseID: 3000, WorkflowName: "Test Workflow"}, - } - - // Test before-run-id filter (exclusive) - var filteredRuns []WorkflowRun - beforeRunID := int64(2000) - for _, run := range testRuns { - if beforeRunID > 0 && run.DatabaseID >= beforeRunID { - continue - } - filteredRuns = append(filteredRuns, run) - } - - if len(filteredRuns) != 2 { - t.Errorf("Expected 2 runs before ID 2000 (exclusive), got %d", len(filteredRuns)) - } - if filteredRuns[0].DatabaseID != 1000 || filteredRuns[1].DatabaseID != 1500 { - t.Errorf("Expected runs 1000 and 1500, got %d and %d", filteredRuns[0].DatabaseID, filteredRuns[1].DatabaseID) - } - - // Test after-run-id filter (exclusive) - filteredRuns = nil - afterRunID := int64(2000) - for _, run := range testRuns { - if afterRunID > 0 && run.DatabaseID <= afterRunID { - continue - } - filteredRuns = append(filteredRuns, run) - } - - if len(filteredRuns) != 2 { - t.Errorf("Expected 2 runs after ID 2000 (exclusive), got %d", len(filteredRuns)) - } - if filteredRuns[0].DatabaseID != 2500 || filteredRuns[1].DatabaseID != 3000 { - t.Errorf("Expected runs 2500 and 3000, got %d and %d", filteredRuns[0].DatabaseID, filteredRuns[1].DatabaseID) - } - - // Test range filter (both before and after) - filteredRuns = nil - beforeRunID = int64(2500) - afterRunID = int64(1000) - for _, run := range testRuns { - // Apply before-run-id filter (exclusive) - if beforeRunID > 0 && run.DatabaseID >= beforeRunID { - continue - } - // Apply after-run-id filter (exclusive) - if afterRunID > 0 && run.DatabaseID <= afterRunID { - continue - } - filteredRuns = append(filteredRuns, run) - } - - if len(filteredRuns) != 2 { - t.Errorf("Expected 2 runs in range (1000, 2500), got %d", len(filteredRuns)) - } - if filteredRuns[0].DatabaseID != 1500 || filteredRuns[1].DatabaseID != 2000 { - t.Errorf("Expected runs 1500 and 2000, got %d and %d", filteredRuns[0].DatabaseID, 
filteredRuns[1].DatabaseID) - } -} - -func TestRefFilteringWithGitHubCLI(t *testing.T) { - // Test that ref filtering is properly added to GitHub CLI args - // This is a unit test for the args construction, not a network test - - // Simulate args construction for ref filtering - args := []string{"run", "list", "--json", "databaseId,number,url,status,conclusion,workflowName,createdAt,startedAt,updatedAt,event,headBranch,headSha,displayTitle"} - - ref := "feature-branch" - if ref != "" { - args = append(args, "--branch", ref) - } - - // Verify that the ref filter was added correctly (uses --branch flag under the hood) - found := false - for i, arg := range args { - if arg == "--branch" && i+1 < len(args) && args[i+1] == "feature-branch" { - found = true - break - } - } - - if !found { - t.Errorf("Expected ref filter '--branch feature-branch' not found in args: %v", args) - } -} - -func TestFindAgentLogFile(t *testing.T) { - // Create a temporary directory structure for testing - tmpDir := testutil.TempDir(t, "test-*") - - // Test 1: Copilot engine with agent_output directory - t.Run("Copilot engine uses agent_output", func(t *testing.T) { - copilotEngine := workflow.NewCopilotEngine() - - // Create agent_output directory with a log file - agentOutputDir := filepath.Join(tmpDir, "copilot_test", "agent_output") - err := os.MkdirAll(agentOutputDir, 0755) - if err != nil { - t.Fatalf("Failed to create agent_output directory: %v", err) - } - - logFile := filepath.Join(agentOutputDir, "debug.log") - err = os.WriteFile(logFile, []byte("test log content"), 0644) - if err != nil { - t.Fatalf("Failed to create log file: %v", err) - } - - // Create agent-stdio.log as well (should be ignored for Copilot) - stdioLog := filepath.Join(tmpDir, "copilot_test", "agent-stdio.log") - err = os.WriteFile(stdioLog, []byte("stdio content"), 0644) - if err != nil { - t.Fatalf("Failed to create agent-stdio.log: %v", err) - } - - // Test findAgentLogFile - found, ok := findAgentLogFile(filepath.Join(tmpDir, "copilot_test"), copilotEngine) - if !ok { - t.Errorf("Expected to find agent log file for Copilot engine") - } - - // Should find the file in agent_output directory - if !strings.Contains(found, "agent_output") { - t.Errorf("Expected to find file in agent_output directory, got: %s", found) - } - }) - - // Test Copilot engine with flattened agent_outputs artifact - // After flattening, session logs are at sandbox/agent/logs/ in the root - t.Run("copilot_engine_flattened_location", func(t *testing.T) { - copilotDir := filepath.Join(tmpDir, "copilot_flattened_test") - err := os.MkdirAll(copilotDir, 0755) - if err != nil { - t.Fatalf("Failed to create test directory: %v", err) - } - - // Create flattened session logs directory (after flattenAgentOutputsArtifact) - sessionLogsDir := filepath.Join(copilotDir, "sandbox", "agent", "logs") - err = os.MkdirAll(sessionLogsDir, 0755) - if err != nil { - t.Fatalf("Failed to create flattened session logs directory: %v", err) - } - - // Create a test session log file - sessionLog := filepath.Join(sessionLogsDir, "session-test-123.log") - err = os.WriteFile(sessionLog, []byte("test session log content"), 0644) - if err != nil { - t.Fatalf("Failed to create session log file: %v", err) - } - - copilotEngine := workflow.NewCopilotEngine() - - // Test findAgentLogFile - should find the session log in flattened location - found, ok := findAgentLogFile(copilotDir, copilotEngine) - if !ok { - t.Errorf("Expected to find agent log file for Copilot engine in flattened location") - } - - // 
Should find the session log file - if !strings.HasSuffix(found, "session-test-123.log") { - t.Errorf("Expected to find session-test-123.log, but found %s", found) - } - - // Verify the path is correct - expectedPath := filepath.Join(copilotDir, "sandbox", "agent", "logs", "session-test-123.log") - if found != expectedPath { - t.Errorf("Expected path %s, got %s", expectedPath, found) - } - }) - - // Test Copilot engine with session log directly in run directory (recursive search) - // This handles cases where artifact structure differs from expected - t.Run("copilot_engine_recursive_search", func(t *testing.T) { - copilotDir := filepath.Join(tmpDir, "copilot_recursive_test") - err := os.MkdirAll(copilotDir, 0755) - if err != nil { - t.Fatalf("Failed to create test directory: %v", err) - } - - // Create session log directly in the run directory - // This simulates the case where the artifact was flattened differently - sessionLog := filepath.Join(copilotDir, "session-direct-456.log") - err = os.WriteFile(sessionLog, []byte("test session log content"), 0644) - if err != nil { - t.Fatalf("Failed to create session log file: %v", err) - } - - copilotEngine := workflow.NewCopilotEngine() - - // Test findAgentLogFile - should find via recursive search - found, ok := findAgentLogFile(copilotDir, copilotEngine) - if !ok { - t.Errorf("Expected to find agent log file via recursive search") - } - - // Should find the session log file - if !strings.HasSuffix(found, "session-direct-456.log") { - t.Errorf("Expected to find session-direct-456.log, but found %s", found) - } - - // Verify the path is correct - if found != sessionLog { - t.Errorf("Expected path %s, got %s", sessionLog, found) - } - }) - - // Test Copilot engine with process log (new naming convention) - // Copilot changed from session-*.log to process-*.log - t.Run("copilot_engine_process_log", func(t *testing.T) { - copilotDir := filepath.Join(tmpDir, "copilot_process_test") - err := os.MkdirAll(copilotDir, 0755) - if err != nil { - t.Fatalf("Failed to create test directory: %v", err) - } - - // Create process log directly in the run directory - // This simulates the new naming convention for Copilot logs - processLog := filepath.Join(copilotDir, "process-12345.log") - err = os.WriteFile(processLog, []byte("test process log content"), 0644) - if err != nil { - t.Fatalf("Failed to create process log file: %v", err) - } - - copilotEngine := workflow.NewCopilotEngine() - - // Test findAgentLogFile - should find via recursive search - found, ok := findAgentLogFile(copilotDir, copilotEngine) - if !ok { - t.Errorf("Expected to find agent log file via recursive search") - } - - // Should find the process log file - if !strings.HasSuffix(found, "process-12345.log") { - t.Errorf("Expected to find process-12345.log, but found %s", found) - } - - // Verify the path is correct - if found != processLog { - t.Errorf("Expected path %s, got %s", processLog, found) - } - }) - - // Test Copilot engine with process log in nested directory - t.Run("copilot_engine_process_log_nested", func(t *testing.T) { - copilotDir := filepath.Join(tmpDir, "copilot_process_nested_test") - err := os.MkdirAll(copilotDir, 0755) - if err != nil { - t.Fatalf("Failed to create test directory: %v", err) - } - - // Create nested directory structure - processLogsDir := filepath.Join(copilotDir, "sandbox", "agent", "logs") - err = os.MkdirAll(processLogsDir, 0755) - if err != nil { - t.Fatalf("Failed to create process logs directory: %v", err) - } - - // Create a test process log file 
- processLog := filepath.Join(processLogsDir, "process-test-789.log") - err = os.WriteFile(processLog, []byte("test process log content"), 0644) - if err != nil { - t.Fatalf("Failed to create process log file: %v", err) - } - - copilotEngine := workflow.NewCopilotEngine() - - // Test findAgentLogFile - should find the process log in nested location - found, ok := findAgentLogFile(copilotDir, copilotEngine) - if !ok { - t.Errorf("Expected to find agent log file for Copilot engine in nested location") - } - - // Should find the process log file - if !strings.HasSuffix(found, "process-test-789.log") { - t.Errorf("Expected to find process-test-789.log, but found %s", found) - } - - // Verify the path is correct - expectedPath := filepath.Join(copilotDir, "sandbox", "agent", "logs", "process-test-789.log") - if found != expectedPath { - t.Errorf("Expected path %s, got %s", expectedPath, found) - } - }) - - // Test 2: Claude engine with agent-stdio.log - t.Run("Claude engine uses agent-stdio.log", func(t *testing.T) { - claudeEngine := workflow.NewClaudeEngine() - - // Create only agent-stdio.log - claudeDir := filepath.Join(tmpDir, "claude_test") - err := os.MkdirAll(claudeDir, 0755) - if err != nil { - t.Fatalf("Failed to create claude test directory: %v", err) - } - - stdioLog := filepath.Join(claudeDir, "agent-stdio.log") - err = os.WriteFile(stdioLog, []byte("stdio content"), 0644) - if err != nil { - t.Fatalf("Failed to create agent-stdio.log: %v", err) - } - - // Test findAgentLogFile - found, ok := findAgentLogFile(claudeDir, claudeEngine) - if !ok { - t.Errorf("Expected to find agent log file for Claude engine") - } - - // Should find agent-stdio.log - if !strings.Contains(found, "agent-stdio.log") { - t.Errorf("Expected to find agent-stdio.log, got: %s", found) - } - }) - - // Test 3: No logs found - t.Run("No logs found returns false", func(t *testing.T) { - emptyDir := filepath.Join(tmpDir, "empty_test") - err := os.MkdirAll(emptyDir, 0755) - if err != nil { - t.Fatalf("Failed to create empty test directory: %v", err) - } - - claudeEngine := workflow.NewClaudeEngine() - _, ok := findAgentLogFile(emptyDir, claudeEngine) - if ok { - t.Errorf("Expected to not find agent log file in empty directory") - } - }) - - // Test 4: Codex engine with agent-stdio.log - t.Run("Codex engine uses agent-stdio.log", func(t *testing.T) { - codexEngine := workflow.NewCodexEngine() - - // Create only agent-stdio.log - codexDir := filepath.Join(tmpDir, "codex_test") - err := os.MkdirAll(codexDir, 0755) - if err != nil { - t.Fatalf("Failed to create codex test directory: %v", err) - } - - stdioLog := filepath.Join(codexDir, "agent-stdio.log") - err = os.WriteFile(stdioLog, []byte("stdio content"), 0644) - if err != nil { - t.Fatalf("Failed to create agent-stdio.log: %v", err) - } - - // Test findAgentLogFile - found, ok := findAgentLogFile(codexDir, codexEngine) - if !ok { - t.Errorf("Expected to find agent log file for Codex engine") - } - - // Should find agent-stdio.log - if !strings.Contains(found, "agent-stdio.log") { - t.Errorf("Expected to find agent-stdio.log, got: %s", found) - } - }) -} - -func TestUnzipFile(t *testing.T) { - // Create a temporary directory for the test - tmpDir := testutil.TempDir(t, "test-*") - - // Create a test zip file - zipPath := filepath.Join(tmpDir, "test.zip") - zipFile, err := os.Create(zipPath) - if err != nil { - t.Fatalf("Failed to create test zip file: %v", err) - } - - zipWriter := zip.NewWriter(zipFile) - - // Add a test file to the zip - testContent := "This is 
test content for workflow logs" - writer, err := zipWriter.Create("test-log.txt") - if err != nil { - zipFile.Close() - t.Fatalf("Failed to create file in zip: %v", err) - } - _, err = writer.Write([]byte(testContent)) - if err != nil { - zipFile.Close() - t.Fatalf("Failed to write content to zip: %v", err) - } - - // Add a subdirectory with a file - writer, err = zipWriter.Create("logs/job-1.txt") - if err != nil { - zipFile.Close() - t.Fatalf("Failed to create subdirectory file in zip: %v", err) - } - _, err = writer.Write([]byte("Job 1 logs")) - if err != nil { - zipFile.Close() - t.Fatalf("Failed to write subdirectory content to zip: %v", err) - } - - // Close the zip writer - err = zipWriter.Close() - if err != nil { - zipFile.Close() - t.Fatalf("Failed to close zip writer: %v", err) - } - zipFile.Close() - - // Create a destination directory - destDir := filepath.Join(tmpDir, "extracted") - err = os.MkdirAll(destDir, 0755) - if err != nil { - t.Fatalf("Failed to create destination directory: %v", err) - } - - // Test the unzipFile function - err = unzipFile(zipPath, destDir, false) - if err != nil { - t.Fatalf("unzipFile failed: %v", err) - } - - // Verify the extracted files - extractedFile := filepath.Join(destDir, "test-log.txt") - content, err := os.ReadFile(extractedFile) - if err != nil { - t.Fatalf("Failed to read extracted file: %v", err) - } - - if string(content) != testContent { - t.Errorf("Extracted content mismatch: got %q, want %q", string(content), testContent) - } - - // Verify subdirectory file - subdirFile := filepath.Join(destDir, "logs", "job-1.txt") - content, err = os.ReadFile(subdirFile) - if err != nil { - t.Fatalf("Failed to read extracted subdirectory file: %v", err) - } - - if string(content) != "Job 1 logs" { - t.Errorf("Extracted subdirectory content mismatch: got %q, want %q", string(content), "Job 1 logs") - } -} - -func TestUnzipFileZipSlipPrevention(t *testing.T) { - // Create a temporary directory for the test - tmpDir := testutil.TempDir(t, "test-*") - - // Create a test zip file with a malicious path - zipPath := filepath.Join(tmpDir, "malicious.zip") - zipFile, err := os.Create(zipPath) - if err != nil { - t.Fatalf("Failed to create test zip file: %v", err) - } - - zipWriter := zip.NewWriter(zipFile) - - // Try to create a file that escapes the destination directory - writer, err := zipWriter.Create("../../../etc/passwd") - if err != nil { - zipFile.Close() - t.Fatalf("Failed to create malicious file in zip: %v", err) - } - _, err = writer.Write([]byte("malicious content")) - if err != nil { - zipFile.Close() - t.Fatalf("Failed to write malicious content to zip: %v", err) - } - - err = zipWriter.Close() - if err != nil { - zipFile.Close() - t.Fatalf("Failed to close zip writer: %v", err) - } - zipFile.Close() - - // Create a destination directory - destDir := filepath.Join(tmpDir, "safe-extraction") - err = os.MkdirAll(destDir, 0755) - if err != nil { - t.Fatalf("Failed to create destination directory: %v", err) - } - - // Test that unzipFile rejects the malicious path - err = unzipFile(zipPath, destDir, false) - if err == nil { - t.Error("Expected unzipFile to reject malicious path, but it succeeded") - } - - if !strings.Contains(err.Error(), "invalid file path") { - t.Errorf("Expected error about invalid file path, got: %v", err) - } -} - -func TestDownloadWorkflowRunLogsStructure(t *testing.T) { - // This test verifies that workflow logs are extracted into a workflow-logs subdirectory - // Note: This test cannot fully test 
downloadWorkflowRunLogs without GitHub CLI authentication - // So we test the directory creation and unzipFile behavior that mimics the workflow - - tmpDir := testutil.TempDir(t, "test-*") - - // Create a mock workflow logs zip file similar to what GitHub API returns - zipPath := filepath.Join(tmpDir, "workflow-logs.zip") - zipFile, err := os.Create(zipPath) - if err != nil { - t.Fatalf("Failed to create test zip file: %v", err) - } - - zipWriter := zip.NewWriter(zipFile) - - // Add files that simulate GitHub Actions workflow logs structure - logFiles := map[string]string{ - "1_job1.txt": "Job 1 execution logs", - "2_job2.txt": "Job 2 execution logs", - "3_build/build.txt": "Build step logs", - "4_test/test-1.txt": "Test step 1 logs", - "4_test/test-2.txt": "Test step 2 logs", - } - - for filename, content := range logFiles { - writer, err := zipWriter.Create(filename) - if err != nil { - zipFile.Close() - t.Fatalf("Failed to create file %s in zip: %v", filename, err) - } - _, err = writer.Write([]byte(content)) - if err != nil { - zipFile.Close() - t.Fatalf("Failed to write content to %s: %v", filename, err) - } - } - - err = zipWriter.Close() - if err != nil { - zipFile.Close() - t.Fatalf("Failed to close zip writer: %v", err) - } - zipFile.Close() - - // Create a run directory (simulating logs/run-12345) - runDir := filepath.Join(tmpDir, "run-12345") - err = os.MkdirAll(runDir, 0755) - if err != nil { - t.Fatalf("Failed to create run directory: %v", err) - } - - // Create some other artifacts in the run directory (to verify they don't get mixed with logs) - err = os.WriteFile(filepath.Join(runDir, "aw_info.json"), []byte(`{"engine_id": "claude"}`), 0644) - if err != nil { - t.Fatalf("Failed to create aw_info.json: %v", err) - } - - // Create the workflow-logs subdirectory and extract logs there - workflowLogsDir := filepath.Join(runDir, "workflow-logs") - err = os.MkdirAll(workflowLogsDir, 0755) - if err != nil { - t.Fatalf("Failed to create workflow-logs directory: %v", err) - } - - // Extract logs into the workflow-logs subdirectory (mimics downloadWorkflowRunLogs behavior) - err = unzipFile(zipPath, workflowLogsDir, false) - if err != nil { - t.Fatalf("Failed to extract logs: %v", err) - } - - // Verify that workflow-logs directory exists - if !fileutil.DirExists(workflowLogsDir) { - t.Error("workflow-logs directory should exist") - } - - // Verify that log files are in the workflow-logs subdirectory, not in run root - for filename := range logFiles { - expectedPath := filepath.Join(workflowLogsDir, filename) - if !fileutil.FileExists(expectedPath) { - t.Errorf("Expected log file %s to be in workflow-logs subdirectory", filename) - } - - // Verify the file is NOT in the run directory root - wrongPath := filepath.Join(runDir, filename) - if fileutil.FileExists(wrongPath) { - t.Errorf("Log file %s should not be in run directory root", filename) - } - } - - // Verify that other artifacts remain in the run directory root - awInfoPath := filepath.Join(runDir, "aw_info.json") - if !fileutil.FileExists(awInfoPath) { - t.Error("aw_info.json should remain in run directory root") - } - - // Verify the content of one of the extracted log files - testLogPath := filepath.Join(workflowLogsDir, "1_job1.txt") - content, err := os.ReadFile(testLogPath) - if err != nil { - t.Fatalf("Failed to read extracted log file: %v", err) - } - - expectedContent := "Job 1 execution logs" - if string(content) != expectedContent { - t.Errorf("Log file content mismatch: got %q, want %q", string(content), 
expectedContent) - } - - // Verify nested directory structure is preserved - nestedLogPath := filepath.Join(workflowLogsDir, "3_build", "build.txt") - if !fileutil.FileExists(nestedLogPath) { - t.Error("Nested log directory structure should be preserved") - } -} - -// TestCountParameterBehavior verifies that the count parameter limits matching results -// not the number of runs fetched when date filters are specified -func TestCountParameterBehavior(t *testing.T) { - // This test documents the expected behavior: - // 1. When date filters (startDate/endDate) are specified, fetch ALL runs in that range - // 2. Apply post-download filters (engine, staged, etc.) - // 3. Limit final output to 'count' matching runs - // - // Without date filters: - // 1. Fetch runs iteratively until we have 'count' runs with artifacts - // 2. Apply filters during iteration (old behavior for backward compatibility) - - // Note: This is a documentation test - the actual logic is tested via integration tests - // that require GitHub CLI authentication and a real repository - - tests := []struct { - name string - startDate string - endDate string - count int - expectedFetchAll bool - }{ - { - name: "with startDate should fetch all in range", - startDate: "2024-01-01", - endDate: "", - count: 10, - expectedFetchAll: true, - }, - { - name: "with endDate should fetch all in range", - startDate: "", - endDate: "2024-12-31", - count: 10, - expectedFetchAll: true, - }, - { - name: "with both dates should fetch all in range", - startDate: "2024-01-01", - endDate: "2024-12-31", - count: 10, - expectedFetchAll: true, - }, - { - name: "without dates should use count as fetch limit", - startDate: "", - endDate: "", - count: 10, - expectedFetchAll: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This documents the logic: when startDate or endDate is set, we fetch all - fetchAllInRange := tt.startDate != "" || tt.endDate != "" - - if fetchAllInRange != tt.expectedFetchAll { - t.Errorf("Expected fetchAllInRange=%v for startDate=%q endDate=%q, got %v", - tt.expectedFetchAll, tt.startDate, tt.endDate, fetchAllInRange) - } - }) - } -} diff --git a/pkg/cli/mcp_server_test.go b/pkg/cli/mcp_server_compile_test.go similarity index 60% rename from pkg/cli/mcp_server_test.go rename to pkg/cli/mcp_server_compile_test.go index c81397d749..dc690da37f 100644 --- a/pkg/cli/mcp_server_test.go +++ b/pkg/cli/mcp_server_compile_test.go @@ -10,312 +10,12 @@ import ( "path/filepath" "strings" "testing" - "time" "github.com/githubnext/gh-aw/pkg/testutil" - "github.com/modelcontextprotocol/go-sdk/mcp" ) -// TestMCPServer_ListTools tests that the MCP server exposes the expected tools -func TestMCPServer_ListTools(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") - } - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(binaryPath, "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // List tools - result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) - if err != nil { - t.Fatalf("Failed to list tools: %v", err) - } - - // Verify expected tools are present - expectedTools := []string{"status", "compile", "logs", "audit", "mcp-inspect", "add", "update", "fix"} - toolNames := make(map[string]bool) - for _, tool := range result.Tools { - toolNames[tool.Name] = true - } - - for _, expected := range expectedTools { - if !toolNames[expected] { - t.Errorf("Expected tool '%s' not found in MCP server tools", expected) - } - } - - // Verify we have exactly the expected number of tools - if len(result.Tools) != len(expectedTools) { - t.Errorf("Expected %d tools, got %d", len(expectedTools), len(result.Tools)) - } -} - -// TestMCPServer_CustomCmd tests that the --cmd flag works correctly -func TestMCPServer_CustomCmd(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") - } - - // Create a temporary directory with a workflow file - tmpDir := testutil.TempDir(t, "test-*") - workflowsDir := filepath.Join(tmpDir, ".github", "workflows") - if err := os.MkdirAll(workflowsDir, 0755); err != nil { - t.Fatalf("Failed to create workflows directory: %v", err) - } - - // Create a test workflow file - workflowContent := `--- -on: push -engine: copilot ---- -# Test Workflow - -This is a test workflow. 
-` - workflowPath := filepath.Join(workflowsDir, "test.md") - if err := os.WriteFile(workflowPath, []byte(workflowContent), 0644); err != nil { - t.Fatalf("Failed to write workflow file: %v", err) - } - - // Change to the temporary directory - originalDir, _ := os.Getwd() - defer os.Chdir(originalDir) - os.Chdir(tmpDir) - - // Initialize git repository in the temp directory - initCmd := exec.Command("git", "init") - initCmd.Dir = tmpDir - if err := initCmd.Run(); err != nil { - t.Fatalf("Failed to initialize git repository: %v", err) - } - - // Get absolute path to binary - absBinaryPath, err := filepath.Abs(filepath.Join(originalDir, binaryPath)) - if err != nil { - t.Fatalf("Failed to get absolute path: %v", err) - } - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server with --cmd flag pointing to the binary - serverCmd := exec.Command(absBinaryPath, "mcp-server", "--cmd", absBinaryPath) - serverCmd.Dir = tmpDir - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // Call status tool - params := &mcp.CallToolParams{ - Name: "status", - Arguments: map[string]any{}, - } - result, err := session.CallTool(ctx, params) - if err != nil { - t.Fatalf("Failed to call status tool: %v", err) - } - - // Verify result is not empty - if len(result.Content) == 0 { - t.Error("Expected non-empty result from status tool") - } - - // Verify result contains text content - if textContent, ok := result.Content[0].(*mcp.TextContent); ok { - if textContent.Text == "" { - t.Error("Expected non-empty text content from status tool") - } - t.Logf("Status tool output with custom cmd: %s", textContent.Text) - } else { - t.Error("Expected text content from status tool") - } -} - -// TestMCPServer_StatusTool tests the status tool -func TestMCPServer_StatusTool(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") - } - - // Create a temporary directory with a workflow file - tmpDir := testutil.TempDir(t, "test-*") - workflowsDir := filepath.Join(tmpDir, ".github", "workflows") - if err := os.MkdirAll(workflowsDir, 0755); err != nil { - t.Fatalf("Failed to create workflows directory: %v", err) - } - - // Create a test workflow file - workflowContent := `--- -on: push -engine: copilot ---- -# Test Workflow - -This is a test workflow. 
-` - workflowPath := filepath.Join(workflowsDir, "test.md") - if err := os.WriteFile(workflowPath, []byte(workflowContent), 0644); err != nil { - t.Fatalf("Failed to write workflow file: %v", err) - } - - // Change to the temporary directory - originalDir, _ := os.Getwd() - defer os.Chdir(originalDir) - os.Chdir(tmpDir) - - // Initialize git repository in the temp directory - initCmd := exec.Command("git", "init") - initCmd.Dir = tmpDir - if err := initCmd.Run(); err != nil { - t.Fatalf("Failed to initialize git repository: %v", err) - } - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") - serverCmd.Dir = tmpDir - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // Call status tool - params := &mcp.CallToolParams{ - Name: "status", - Arguments: map[string]any{}, - } - result, err := session.CallTool(ctx, params) - if err != nil { - t.Fatalf("Failed to call status tool: %v", err) - } - - // Verify result is not empty - if len(result.Content) == 0 { - t.Error("Expected non-empty result from status tool") - } - - // Verify result contains text content - if textContent, ok := result.Content[0].(*mcp.TextContent); ok { - if textContent.Text == "" { - t.Error("Expected non-empty text content from status tool") - } - } else { - t.Error("Expected text content from status tool") - } -} - -// TestMCPServer_AuditTool tests the audit tool -func TestMCPServer_AuditTool(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") - } - - // Get the current directory for proper path resolution - originalDir, _ := os.Getwd() - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // Call audit tool with an invalid run ID - // The tool should return an MCP error for invalid run IDs - params := &mcp.CallToolParams{ - Name: "audit", - Arguments: map[string]any{ - "run_id_or_url": "1", - }, - } - result, err := session.CallTool(ctx, params) - if err != nil { - // Expected behavior: audit command fails with invalid run ID - t.Logf("Audit tool correctly returned error for invalid run ID: %v", err) - return - } - - // Verify result is not empty - if len(result.Content) == 0 { - t.Error("Expected non-empty result from audit tool") - } - - // Verify result contains text content - textContent, ok := result.Content[0].(*mcp.TextContent) - if !ok { - t.Fatal("Expected text content from audit tool") - } - - if textContent.Text == "" { - t.Error("Expected non-empty text content from audit tool") - } - - // The audit command should fail with an invalid run ID, but should return - // a proper error message rather than crashing - // We just verify that we got some output (either error or success) - t.Logf("Audit tool output: %s", textContent.Text) -} - -// TestMCPServer_CompileTool tests the compile tool func TestMCPServer_CompileTool(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -466,51 +166,6 @@ This is a test workflow for compilation. // t.Logf("Logs tool output: %s", textContent.Text) // } -// TestMCPServer_ServerInfo tests that server info is correctly returned -func TestMCPServer_ServerInfo(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") - } - - // Get the current directory for proper path resolution - originalDir, _ := os.Getwd() - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // List tools to verify server is working properly - result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) - if err != nil { - t.Fatalf("Failed to list tools: %v", err) - } - - // Verify we can get tools, which means server initialized correctly - if len(result.Tools) == 0 { - t.Error("Expected server to have tools available") - } - - t.Logf("Server initialized successfully with %d tools", len(result.Tools)) -} - -// TestMCPServer_CompileWithSpecificWorkflow tests compiling a specific workflow func TestMCPServer_CompileWithSpecificWorkflow(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -615,276 +270,7 @@ This is the second test workflow. } // TestMCPServer_UpdateToolSchema tests that the update tool has the correct schema -func TestMCPServer_UpdateToolSchema(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") - } - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(binaryPath, "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // List tools - result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) - if err != nil { - t.Fatalf("Failed to list tools: %v", err) - } - - // Find the update tool - var updateTool *mcp.Tool - for i := range result.Tools { - if result.Tools[i].Name == "update" { - updateTool = result.Tools[i] - break - } - } - - if updateTool == nil { - t.Fatal("Update tool not found in MCP server tools") - } - - // Verify the tool has a description - if updateTool.Description == "" { - t.Error("Update tool should have a description") - } - - // Verify description mentions key functionality - if !strings.Contains(updateTool.Description, "workflows") { - t.Error("Update tool description should mention workflows") - } - - // Verify the tool has input schema - if updateTool.InputSchema == nil { - t.Error("Update tool should have an input schema") - } - - t.Logf("Update tool description: %s", updateTool.Description) - t.Logf("Update tool schema: %+v", updateTool.InputSchema) -} - -// TestMCPServer_CapabilitiesConfiguration tests that server capabilities are correctly configured -func TestMCPServer_CapabilitiesConfiguration(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw 
binary not found. Run 'make build' first.") - } - - // Get the current directory for proper path resolution - originalDir, _ := os.Getwd() - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // Get server capabilities from the initialize result - initResult := session.InitializeResult() - if initResult == nil { - t.Fatal("Expected non-nil InitializeResult") - } - - serverCapabilities := initResult.Capabilities - - // Verify Tools capability is present - if serverCapabilities.Tools == nil { - t.Fatal("Expected server to advertise Tools capability") - } - - // Verify ListChanged is set to false - if serverCapabilities.Tools.ListChanged { - t.Error("Expected Tools.ListChanged to be false (tools are static)") - } - - t.Logf("Server capabilities configured correctly: Tools.ListChanged = %v", serverCapabilities.Tools.ListChanged) -} - -// TestMCPServer_ContextCancellation tests that tool handlers properly respond to context cancellation -func TestMCPServer_ContextCancellation(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") - } - - // Get the current directory for proper path resolution - originalDir, _ := os.Getwd() - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // Test context cancellation for different tools - tools := []string{"status", "audit"} - - for _, toolName := range tools { - t.Run(toolName, func(t *testing.T) { - // Create a context that's already cancelled - cancelledCtx, immediateCancel := context.WithCancel(context.Background()) - immediateCancel() // Cancel immediately - - var params *mcp.CallToolParams - switch toolName { - case "status": - params = &mcp.CallToolParams{ - Name: "status", - Arguments: map[string]any{}, - } - case "audit": - params = &mcp.CallToolParams{ - Name: "audit", - Arguments: map[string]any{ - "run_id_or_url": "1", - }, - } - } - - // Call the tool with a cancelled context - result, err := session.CallTool(cancelledCtx, params) - - // The tool should handle the cancellation gracefully - // It should either return an error OR return a result with error message - if err != nil { - // Check if it's a context cancellation error - if !strings.Contains(err.Error(), "context") && !strings.Contains(err.Error(), "cancel") { - t.Logf("Tool returned error (acceptable): %v", err) - } else { - t.Logf("Tool properly detected cancellation via error: %v", err) - } - } else if result != nil && 
len(result.Content) > 0 { - // Check if the result contains an error message about cancellation - if textContent, ok := result.Content[0].(*mcp.TextContent); ok { - text := textContent.Text - if strings.Contains(text, "context") || strings.Contains(text, "cancel") { - t.Logf("Tool properly detected cancellation via result: %s", text) - } else { - t.Logf("Tool returned result (may not have detected cancellation immediately): %s", text) - } - } - } - - // The important thing is that the tool doesn't hang or crash - // Either returning an error or a result with error message is acceptable - }) - } -} - -// TestMCPServer_ToolIcons tests that all tools have icons -func TestMCPServer_ToolIcons(t *testing.T) { - // Skip if the binary doesn't exist - binaryPath := "../../gh-aw" - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") - } - - // Create MCP client - client := mcp.NewClient(&mcp.Implementation{ - Name: "test-client", - Version: "1.0.0", - }, nil) - - // Start the MCP server as a subprocess - serverCmd := exec.Command(binaryPath, "mcp-server") - transport := &mcp.CommandTransport{Command: serverCmd} - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - session, err := client.Connect(ctx, transport, nil) - if err != nil { - t.Fatalf("Failed to connect to MCP server: %v", err) - } - defer session.Close() - - // List tools - result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) - if err != nil { - t.Fatalf("Failed to list tools: %v", err) - } - - // Expected icons for each tool - expectedIcons := map[string]string{ - "status": "📊", - "compile": "🔨", - "logs": "📜", - "audit": "🔍", - "mcp-inspect": "🔎", - "add": "➕", - "update": "🔄", - } - - // Verify each tool has an icon - for _, tool := range result.Tools { - if len(tool.Icons) == 0 { - t.Errorf("Tool '%s' is missing an icon", tool.Name) - continue - } - - // Check that the icon source matches expected emoji - if expectedIcon, ok := expectedIcons[tool.Name]; ok { - if tool.Icons[0].Source != expectedIcon { - t.Errorf("Tool '%s' has unexpected icon. Expected: %s, Got: %s", - tool.Name, expectedIcon, tool.Icons[0].Source) - } - t.Logf("Tool '%s' has correct icon: %s", tool.Name, tool.Icons[0].Source) - } else { - t.Logf("Tool '%s' has icon (not in expected list): %s", tool.Name, tool.Icons[0].Source) - } - } - - // Verify we checked all expected tools - if len(result.Tools) != len(expectedIcons) { - t.Errorf("Expected %d tools with icons, got %d tools", len(expectedIcons), len(result.Tools)) - } -} - -// TestMCPServer_CompileToolWithErrors tests that compile tool returns output even when compilation fails func TestMCPServer_CompileToolWithErrors(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -989,6 +375,7 @@ This workflow has a syntax error in the frontmatter. } // TestMCPServer_CompileToolWithMultipleWorkflows tests compiling multiple workflows with mixed results + func TestMCPServer_CompileToolWithMultipleWorkflows(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -1101,6 +488,7 @@ This workflow has an unknown field. } // TestMCPServer_CompileToolWithStrictMode tests compile with strict mode flag + func TestMCPServer_CompileToolWithStrictMode(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -1189,6 +577,7 @@ This workflow has strict mode disabled in frontmatter. 
} // TestMCPServer_CompileToolWithJqFilter tests compile with jq filter parameter + func TestMCPServer_CompileToolWithJqFilter(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -1283,6 +672,7 @@ Test workflow for jq filtering. } // TestMCPServer_CompileToolWithInvalidJqFilter tests compile with invalid jq filter + func TestMCPServer_CompileToolWithInvalidJqFilter(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -1371,6 +761,7 @@ Test workflow. } // TestMCPServer_CompileToolWithSpecificWorkflows tests compiling specific workflows by name + func TestMCPServer_CompileToolWithSpecificWorkflows(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" @@ -1493,6 +884,7 @@ Second test workflow. // TestMCPServer_CompileToolDescriptionMentionsRecompileRequirement tests that the compile tool // description clearly states that changes to .md files must be compiled + func TestMCPServer_CompileToolDescriptionMentionsRecompileRequirement(t *testing.T) { // Skip if the binary doesn't exist binaryPath := "../../gh-aw" diff --git a/pkg/cli/mcp_server_operations_test.go b/pkg/cli/mcp_server_operations_test.go new file mode 100644 index 0000000000..ad69d2574e --- /dev/null +++ b/pkg/cli/mcp_server_operations_test.go @@ -0,0 +1,344 @@ +//go:build integration + +package cli + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/githubnext/gh-aw/pkg/testutil" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +func TestMCPServer_CustomCmd(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") + } + + // Create a temporary directory with a workflow file + tmpDir := testutil.TempDir(t, "test-*") + workflowsDir := filepath.Join(tmpDir, ".github", "workflows") + if err := os.MkdirAll(workflowsDir, 0755); err != nil { + t.Fatalf("Failed to create workflows directory: %v", err) + } + + // Create a test workflow file + workflowContent := `--- +on: push +engine: copilot +--- +# Test Workflow + +This is a test workflow. 
+` + workflowPath := filepath.Join(workflowsDir, "test.md") + if err := os.WriteFile(workflowPath, []byte(workflowContent), 0644); err != nil { + t.Fatalf("Failed to write workflow file: %v", err) + } + + // Change to the temporary directory + originalDir, _ := os.Getwd() + defer os.Chdir(originalDir) + os.Chdir(tmpDir) + + // Initialize git repository in the temp directory + initCmd := exec.Command("git", "init") + initCmd.Dir = tmpDir + if err := initCmd.Run(); err != nil { + t.Fatalf("Failed to initialize git repository: %v", err) + } + + // Get absolute path to binary + absBinaryPath, err := filepath.Abs(filepath.Join(originalDir, binaryPath)) + if err != nil { + t.Fatalf("Failed to get absolute path: %v", err) + } + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server with --cmd flag pointing to the binary + serverCmd := exec.Command(absBinaryPath, "mcp-server", "--cmd", absBinaryPath) + serverCmd.Dir = tmpDir + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // Call status tool + params := &mcp.CallToolParams{ + Name: "status", + Arguments: map[string]any{}, + } + result, err := session.CallTool(ctx, params) + if err != nil { + t.Fatalf("Failed to call status tool: %v", err) + } + + // Verify result is not empty + if len(result.Content) == 0 { + t.Error("Expected non-empty result from status tool") + } + + // Verify result contains text content + if textContent, ok := result.Content[0].(*mcp.TextContent); ok { + if textContent.Text == "" { + t.Error("Expected non-empty text content from status tool") + } + t.Logf("Status tool output with custom cmd: %s", textContent.Text) + } else { + t.Error("Expected text content from status tool") + } +} + +func TestMCPServer_StatusTool(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") + } + + // Create a temporary directory with a workflow file + tmpDir := testutil.TempDir(t, "test-*") + workflowsDir := filepath.Join(tmpDir, ".github", "workflows") + if err := os.MkdirAll(workflowsDir, 0755); err != nil { + t.Fatalf("Failed to create workflows directory: %v", err) + } + + // Create a test workflow file + workflowContent := `--- +on: push +engine: copilot +--- +# Test Workflow + +This is a test workflow. 
+` + workflowPath := filepath.Join(workflowsDir, "test.md") + if err := os.WriteFile(workflowPath, []byte(workflowContent), 0644); err != nil { + t.Fatalf("Failed to write workflow file: %v", err) + } + + // Change to the temporary directory + originalDir, _ := os.Getwd() + defer os.Chdir(originalDir) + os.Chdir(tmpDir) + + // Initialize git repository in the temp directory + initCmd := exec.Command("git", "init") + initCmd.Dir = tmpDir + if err := initCmd.Run(); err != nil { + t.Fatalf("Failed to initialize git repository: %v", err) + } + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") + serverCmd.Dir = tmpDir + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // Call status tool + params := &mcp.CallToolParams{ + Name: "status", + Arguments: map[string]any{}, + } + result, err := session.CallTool(ctx, params) + if err != nil { + t.Fatalf("Failed to call status tool: %v", err) + } + + // Verify result is not empty + if len(result.Content) == 0 { + t.Error("Expected non-empty result from status tool") + } + + // Verify result contains text content + if textContent, ok := result.Content[0].(*mcp.TextContent); ok { + if textContent.Text == "" { + t.Error("Expected non-empty text content from status tool") + } + } else { + t.Error("Expected text content from status tool") + } +} + +func TestMCPServer_AuditTool(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. 
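+	// NOTE: the os.Chdir/defer pattern used in these tests mutates
+	// process-global state, so they must not call t.Parallel(). On Go 1.24+,
+	// t.Chdir(tmpDir) would restore the working directory automatically; that
+	// is an optional cleanup, assuming the module's toolchain version allows it.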
Run 'make build' first.") + } + + // Get the current directory for proper path resolution + originalDir, _ := os.Getwd() + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // Call audit tool with an invalid run ID + // The tool should return an MCP error for invalid run IDs + params := &mcp.CallToolParams{ + Name: "audit", + Arguments: map[string]any{ + "run_id_or_url": "1", + }, + } + result, err := session.CallTool(ctx, params) + if err != nil { + // Expected behavior: audit command fails with invalid run ID + t.Logf("Audit tool correctly returned error for invalid run ID: %v", err) + return + } + + // Verify result is not empty + if len(result.Content) == 0 { + t.Error("Expected non-empty result from audit tool") + } + + // Verify result contains text content + textContent, ok := result.Content[0].(*mcp.TextContent) + if !ok { + t.Fatal("Expected text content from audit tool") + } + + if textContent.Text == "" { + t.Error("Expected non-empty text content from audit tool") + } + + // The audit command should fail with an invalid run ID, but should return + // a proper error message rather than crashing + // We just verify that we got some output (either error or success) + t.Logf("Audit tool output: %s", textContent.Text) +} + +func TestMCPServer_ContextCancellation(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") + } + + // Get the current directory for proper path resolution + originalDir, _ := os.Getwd() + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // Test context cancellation for different tools + tools := []string{"status", "audit"} + + for _, toolName := range tools { + t.Run(toolName, func(t *testing.T) { + // Create a context that's already cancelled + cancelledCtx, immediateCancel := context.WithCancel(context.Background()) + immediateCancel() // Cancel immediately + + var params *mcp.CallToolParams + switch toolName { + case "status": + params = &mcp.CallToolParams{ + Name: "status", + Arguments: map[string]any{}, + } + case "audit": + params = &mcp.CallToolParams{ + Name: "audit", + Arguments: map[string]any{ + "run_id_or_url": "1", + }, + } + } + + // Call the tool with a cancelled context + result, err := session.CallTool(cancelledCtx, params) + + // The tool should handle the cancellation gracefully + // It should either return an error OR return a result with error message + if err != nil { + // Check if it's a context cancellation error + if !strings.Contains(err.Error(), "context") && !strings.Contains(err.Error(), "cancel") { + t.Logf("Tool returned error (acceptable): %v", err) + } else { + t.Logf("Tool properly detected cancellation via error: %v", err) + } + } else if result != nil && len(result.Content) > 0 { + // Check if the result contains an error message about cancellation + if textContent, ok := result.Content[0].(*mcp.TextContent); ok { + text := textContent.Text + if strings.Contains(text, "context") || strings.Contains(text, "cancel") { + t.Logf("Tool properly detected cancellation via result: %s", text) + } else { + t.Logf("Tool returned result (may not have detected cancellation immediately): %s", text) + } + } + } + + // The important thing is that the tool doesn't hang or crash + // Either returning an error or a result with error message is acceptable + }) + } +} + +// TestMCPServer_ToolIcons tests that all tools have icons diff --git a/pkg/cli/mcp_server_tools_test.go b/pkg/cli/mcp_server_tools_test.go new file mode 100644 index 0000000000..d75cbc22ec --- /dev/null +++ b/pkg/cli/mcp_server_tools_test.go @@ -0,0 +1,297 @@ +//go:build integration + +package cli + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +func TestMCPServer_ListTools(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. 
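+	// NOTE: this file is guarded by the `integration` build tag (see the
+	// //go:build integration line at the top), so these MCP server tests are
+	// skipped by a plain `go test`. A typical invocation (illustrative; adjust
+	// the package path to your checkout) is:
+	//
+	//	go test -tags integration ./pkg/cli/ -run TestMCPServer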
Run 'make build' first.") + } + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(binaryPath, "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // List tools + result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) + if err != nil { + t.Fatalf("Failed to list tools: %v", err) + } + + // Verify expected tools are present + expectedTools := []string{"status", "compile", "logs", "audit", "mcp-inspect", "add", "update", "fix"} + toolNames := make(map[string]bool) + for _, tool := range result.Tools { + toolNames[tool.Name] = true + } + + for _, expected := range expectedTools { + if !toolNames[expected] { + t.Errorf("Expected tool '%s' not found in MCP server tools", expected) + } + } + + // Verify we have exactly the expected number of tools + if len(result.Tools) != len(expectedTools) { + t.Errorf("Expected %d tools, got %d", len(expectedTools), len(result.Tools)) + } +} + +func TestMCPServer_ServerInfo(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") + } + + // Get the current directory for proper path resolution + originalDir, _ := os.Getwd() + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // List tools to verify server is working properly + result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) + if err != nil { + t.Fatalf("Failed to list tools: %v", err) + } + + // Verify we can get tools, which means server initialized correctly + if len(result.Tools) == 0 { + t.Error("Expected server to have tools available") + } + + t.Logf("Server initialized successfully with %d tools", len(result.Tools)) +} + +func TestMCPServer_UpdateToolSchema(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") + } + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(binaryPath, "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // List tools + result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) + if err != nil { + t.Fatalf("Failed to list tools: %v", err) + } + + // Find the update tool + var updateTool *mcp.Tool + for i := range result.Tools { + if result.Tools[i].Name == "update" { + updateTool = result.Tools[i] + break + } + } + + if updateTool == nil { + t.Fatal("Update tool not found in MCP server tools") + } + + // Verify the tool has a description + if updateTool.Description == "" { + t.Error("Update tool should have a description") + } + + // Verify description mentions key functionality + if !strings.Contains(updateTool.Description, "workflows") { + t.Error("Update tool description should mention workflows") + } + + // Verify the tool has input schema + if updateTool.InputSchema == nil { + t.Error("Update tool should have an input schema") + } + + t.Logf("Update tool description: %s", updateTool.Description) + t.Logf("Update tool schema: %+v", updateTool.InputSchema) +} + +// TestMCPServer_CapabilitiesConfiguration tests that server capabilities are correctly configured + +func TestMCPServer_CapabilitiesConfiguration(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. Run 'make build' first.") + } + + // Get the current directory for proper path resolution + originalDir, _ := os.Getwd() + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(filepath.Join(originalDir, binaryPath), "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // Get server capabilities from the initialize result + initResult := session.InitializeResult() + if initResult == nil { + t.Fatal("Expected non-nil InitializeResult") + } + + serverCapabilities := initResult.Capabilities + + // Verify Tools capability is present + if serverCapabilities.Tools == nil { + t.Fatal("Expected server to advertise Tools capability") + } + + // Verify ListChanged is set to false + if serverCapabilities.Tools.ListChanged { + t.Error("Expected Tools.ListChanged to be false (tools are static)") + } + + t.Logf("Server capabilities configured correctly: Tools.ListChanged = %v", serverCapabilities.Tools.ListChanged) +} + +// TestMCPServer_ContextCancellation tests that tool handlers properly respond to context cancellation + +func TestMCPServer_ToolIcons(t *testing.T) { + // Skip if the binary doesn't exist + binaryPath := "../../gh-aw" + if _, err := os.Stat(binaryPath); os.IsNotExist(err) { + t.Skip("Skipping test: gh-aw binary not found. 
Run 'make build' first.") + } + + // Create MCP client + client := mcp.NewClient(&mcp.Implementation{ + Name: "test-client", + Version: "1.0.0", + }, nil) + + // Start the MCP server as a subprocess + serverCmd := exec.Command(binaryPath, "mcp-server") + transport := &mcp.CommandTransport{Command: serverCmd} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + session, err := client.Connect(ctx, transport, nil) + if err != nil { + t.Fatalf("Failed to connect to MCP server: %v", err) + } + defer session.Close() + + // List tools + result, err := session.ListTools(ctx, &mcp.ListToolsParams{}) + if err != nil { + t.Fatalf("Failed to list tools: %v", err) + } + + // Expected icons for each tool + expectedIcons := map[string]string{ + "status": "📊", + "compile": "🔨", + "logs": "📜", + "audit": "🔍", + "mcp-inspect": "🔎", + "add": "➕", + "update": "🔄", + } + + // Verify each tool has an icon + for _, tool := range result.Tools { + if len(tool.Icons) == 0 { + t.Errorf("Tool '%s' is missing an icon", tool.Name) + continue + } + + // Check that the icon source matches expected emoji + if expectedIcon, ok := expectedIcons[tool.Name]; ok { + if tool.Icons[0].Source != expectedIcon { + t.Errorf("Tool '%s' has unexpected icon. Expected: %s, Got: %s", + tool.Name, expectedIcon, tool.Icons[0].Source) + } + t.Logf("Tool '%s' has correct icon: %s", tool.Name, tool.Icons[0].Source) + } else { + t.Logf("Tool '%s' has icon (not in expected list): %s", tool.Name, tool.Icons[0].Source) + } + } + + // Verify we checked all expected tools + if len(result.Tools) != len(expectedIcons) { + t.Errorf("Expected %d tools with icons, got %d tools", len(expectedIcons), len(result.Tools)) + } +} + +// TestMCPServer_CompileToolWithErrors tests that compile tool returns output even when compilation fails diff --git a/pkg/workflow/permissions_operations_test.go b/pkg/workflow/permissions_operations_test.go new file mode 100644 index 0000000000..ba397d4feb --- /dev/null +++ b/pkg/workflow/permissions_operations_test.go @@ -0,0 +1,599 @@ +package workflow + +import ( + "testing" +) + +func TestNewPermissions(t *testing.T) { + p := NewPermissions() + if p == nil { + t.Fatal("NewPermissions() returned nil") + } + if p.shorthand != "" { + t.Errorf("expected empty shorthand, got %q", p.shorthand) + } + if p.permissions == nil { + t.Error("expected permissions map to be initialized") + } + if len(p.permissions) != 0 { + t.Errorf("expected empty permissions map, got %d entries", len(p.permissions)) + } +} + +func TestNewPermissionsShorthand(t *testing.T) { + tests := []struct { + name string + fn func() *Permissions + shorthand string + }{ + {"read-all", NewPermissionsReadAll, "read-all"}, + {"write-all", NewPermissionsWriteAll, "write-all"}, + {"none", NewPermissionsNone, "none"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := tt.fn() + if p.shorthand != tt.shorthand { + t.Errorf("expected shorthand %q, got %q", tt.shorthand, p.shorthand) + } + }) + } +} + +func TestNewPermissionsFromMap(t *testing.T) { + perms := map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionRead, + PermissionIssues: PermissionWrite, + } + + p := NewPermissionsFromMap(perms) + if p.shorthand != "" { + t.Errorf("expected empty shorthand, got %q", p.shorthand) + } + if len(p.permissions) != 2 { + t.Errorf("expected 2 permissions, got %d", len(p.permissions)) + } + + level, exists := p.Get(PermissionContents) + if !exists || level != PermissionRead { + t.Errorf("expected 
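+	// The constructors under test come in two flavors (per the expectations in
+	// this file): shorthand constructors (NewPermissionsReadAll, WriteAll, None)
+	// set an internal shorthand that resolves uniformly for every scope, while
+	// NewPermissionsFromMap tracks levels per scope. A minimal usage sketch,
+	// limited to APIs these tests exercise:
+	//
+	//	p := NewPermissionsReadAll()
+	//	level, ok := p.Get(PermissionContents)   // PermissionRead, true
+	//	p.Set(PermissionIssues, PermissionWrite) // Set clears the shorthand
+	//	level, ok = p.Get(PermissionIssues)      // PermissionWrite, true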
contents: read, got %v (exists: %v)", level, exists) + } + + level, exists = p.Get(PermissionIssues) + if !exists || level != PermissionWrite { + t.Errorf("expected issues: write, got %v (exists: %v)", level, exists) + } +} + +func TestPermissionsSet(t *testing.T) { + p := NewPermissions() + p.Set(PermissionContents, PermissionRead) + + level, exists := p.Get(PermissionContents) + if !exists || level != PermissionRead { + t.Errorf("expected contents: read, got %v (exists: %v)", level, exists) + } + + // Test setting on shorthand converts to map + p2 := NewPermissionsReadAll() + p2.Set(PermissionIssues, PermissionWrite) + if p2.shorthand != "" { + t.Error("expected shorthand to be cleared after Set") + } + level, exists = p2.Get(PermissionIssues) + if !exists || level != PermissionWrite { + t.Errorf("expected issues: write, got %v (exists: %v)", level, exists) + } +} + +func TestPermissionsGet(t *testing.T) { + tests := []struct { + name string + permissions *Permissions + scope PermissionScope + wantLevel PermissionLevel + wantExists bool + }{ + { + name: "read-all shorthand", + permissions: NewPermissionsReadAll(), + scope: PermissionContents, + wantLevel: PermissionRead, + wantExists: true, + }, + { + name: "write-all shorthand", + permissions: NewPermissionsWriteAll(), + scope: PermissionIssues, + wantLevel: PermissionWrite, + wantExists: true, + }, + { + name: "none shorthand", + permissions: NewPermissionsNone(), + scope: PermissionContents, + wantLevel: PermissionNone, + wantExists: true, + }, + { + name: "specific permission", + permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionRead, + }), + scope: PermissionContents, + wantLevel: PermissionRead, + wantExists: true, + }, + { + name: "non-existent permission", + permissions: NewPermissions(), + scope: PermissionContents, + wantLevel: "", + wantExists: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + level, exists := tt.permissions.Get(tt.scope) + if exists != tt.wantExists { + t.Errorf("Get() exists = %v, want %v", exists, tt.wantExists) + } + if level != tt.wantLevel { + t.Errorf("Get() level = %v, want %v", level, tt.wantLevel) + } + }) + } +} + +func TestPermissionsMerge(t *testing.T) { + tests := []struct { + name string + base *Permissions + merge *Permissions + want map[PermissionScope]PermissionLevel + wantSH string + }{ + // Map-to-Map merges + { + name: "merge two maps - write overrides read", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}), + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}, + }, + { + name: "merge two maps - read doesn't override write", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}, + }, + { + name: "merge two maps - different scopes", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}), + want: map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionRead, + PermissionIssues: PermissionWrite, + }, + }, + { + name: "merge 
two maps - multiple scopes with conflicts",
+			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionContents:     PermissionRead,
+				PermissionIssues:       PermissionWrite,
+				PermissionPullRequests: PermissionRead,
+			}),
+			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionContents:    PermissionWrite,
+				PermissionIssues:      PermissionRead,
+				PermissionDiscussions: PermissionWrite,
+			}),
+			want: map[PermissionScope]PermissionLevel{
+				PermissionContents:     PermissionWrite, // write wins
+				PermissionIssues:       PermissionWrite, // write preserved
+				PermissionPullRequests: PermissionRead,  // kept from base
+				PermissionDiscussions:  PermissionWrite, // added from merge
+			},
+		},
+		{
+			name:  "merge two maps - none does not override read",
+			base:  NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
+			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
+			want:  map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
+		},
+		{
+			name:  "merge two maps - none merged into none stays none",
+			base:  NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
+			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
+			want:  map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone},
+		},
+		{
+			name:  "merge two maps - write overrides none",
+			base:  NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
+			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
+			want:  map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite},
+		},
+		{
+			name: "merge two maps - all permission scopes",
+			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionActions:     PermissionRead,
+				PermissionChecks:      PermissionRead,
+				PermissionContents:    PermissionRead,
+				PermissionDeployments: PermissionRead,
+				PermissionDiscussions: PermissionRead,
+				PermissionIssues:      PermissionRead,
+				PermissionPackages:    PermissionRead,
+			}),
+			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionPages:          PermissionWrite,
+				PermissionPullRequests:   PermissionWrite,
+				PermissionRepositoryProj: PermissionWrite,
+				PermissionSecurityEvents: PermissionWrite,
+				PermissionStatuses:       PermissionWrite,
+				PermissionModels:         PermissionWrite,
+			}),
+			want: map[PermissionScope]PermissionLevel{
+				PermissionActions:        PermissionRead,
+				PermissionChecks:         PermissionRead,
+				PermissionContents:       PermissionRead,
+				PermissionDeployments:    PermissionRead,
+				PermissionDiscussions:    PermissionRead,
+				PermissionIssues:         PermissionRead,
+				PermissionPackages:       PermissionRead,
+				PermissionPages:          PermissionWrite,
+				PermissionPullRequests:   PermissionWrite,
+				PermissionRepositoryProj: PermissionWrite,
+				PermissionSecurityEvents: PermissionWrite,
+				PermissionStatuses:       PermissionWrite,
+				PermissionModels:         PermissionWrite,
+			},
+		},
+
+		// Shorthand-to-Shorthand merges
+		{
+			name:   "merge shorthand - write-all wins over read-all",
+			base:   NewPermissionsReadAll(),
+			merge:  NewPermissionsWriteAll(),
+			wantSH: "write-all",
+		},
+		{
+			name:   "merge shorthand - write-all wins over read-all (repeat)",
+			base:   NewPermissionsReadAll(),
+			merge:  NewPermissionsWriteAll(),
+			wantSH: "write-all",
+		},
+		{
+			name:   "merge shorthand - write-all merged into write-all",
+			base:   NewPermissionsWriteAll(),
+			merge:  NewPermissionsWriteAll(),
+			wantSH: "write-all",
+		},
+		{
+			name:
"merge shorthand - write-all wins over none", + base: NewPermissionsNone(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - write-all wins over read-all", + base: NewPermissionsReadAll(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - write-all wins over read-all (duplicate for coverage)", + base: NewPermissionsReadAll(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - write-all wins over none", + base: NewPermissionsNone(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - read-all wins over read-all", + base: NewPermissionsReadAll(), + merge: NewPermissionsReadAll(), + wantSH: "read-all", + }, + { + name: "merge shorthand - read-all wins over none", + base: NewPermissionsNone(), + merge: NewPermissionsReadAll(), + wantSH: "read-all", + }, + { + name: "merge shorthand - read-all wins over none (duplicate for coverage)", + base: NewPermissionsNone(), + merge: NewPermissionsReadAll(), + wantSH: "read-all", + }, + { + name: "merge shorthand - read-all preserved when merging read", + base: NewPermissionsReadAll(), + merge: NewPermissionsReadAll(), + wantSH: "read-all", + }, + { + name: "merge shorthand - write-all preserved when merging write", + base: NewPermissionsWriteAll(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - same shorthand preserved (read-all)", + base: NewPermissionsReadAll(), + merge: NewPermissionsReadAll(), + wantSH: "read-all", + }, + { + name: "merge shorthand - same shorthand preserved (write-all)", + base: NewPermissionsWriteAll(), + merge: NewPermissionsWriteAll(), + wantSH: "write-all", + }, + { + name: "merge shorthand - same shorthand preserved (none)", + base: NewPermissionsNone(), + merge: NewPermissionsNone(), + wantSH: "none", + }, + + // Shorthand-to-Map merges + { + name: "merge read-all shorthand into map - adds all missing scopes as read", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}), + merge: NewPermissionsReadAll(), + want: map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionWrite, // preserved + PermissionActions: PermissionRead, // added + PermissionAttestations: PermissionRead, + PermissionChecks: PermissionRead, + PermissionDeployments: PermissionRead, + PermissionDiscussions: PermissionRead, + PermissionIssues: PermissionRead, + PermissionPackages: PermissionRead, + PermissionPages: PermissionRead, + PermissionPullRequests: PermissionRead, + PermissionRepositoryProj: PermissionRead, + PermissionOrganizationProj: PermissionRead, + PermissionSecurityEvents: PermissionRead, + PermissionStatuses: PermissionRead, + PermissionModels: PermissionRead, + // Note: id-token is NOT included because it doesn't support read level + }, + }, + { + name: "merge write-all shorthand into map - adds all missing scopes as write", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: NewPermissionsWriteAll(), + want: map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionRead, // preserved (not overwritten) + PermissionActions: PermissionWrite, + PermissionAttestations: PermissionWrite, + PermissionChecks: PermissionWrite, + PermissionDeployments: PermissionWrite, + PermissionDiscussions: PermissionWrite, + PermissionIdToken: PermissionWrite, // id-token supports write + PermissionIssues: 
PermissionWrite, + PermissionPackages: PermissionWrite, + PermissionPages: PermissionWrite, + PermissionPullRequests: PermissionWrite, + PermissionRepositoryProj: PermissionWrite, + PermissionOrganizationProj: PermissionWrite, + PermissionSecurityEvents: PermissionWrite, + PermissionStatuses: PermissionWrite, + PermissionModels: PermissionWrite, + }, + }, + { + name: "merge read shorthand into map - adds all missing scopes as read", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}), + merge: NewPermissionsReadAll(), + want: map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionWrite, + PermissionActions: PermissionRead, + PermissionAttestations: PermissionRead, + PermissionChecks: PermissionRead, + PermissionDeployments: PermissionRead, + PermissionDiscussions: PermissionRead, + PermissionIssues: PermissionRead, + PermissionPackages: PermissionRead, + PermissionPages: PermissionRead, + PermissionPullRequests: PermissionRead, + PermissionRepositoryProj: PermissionRead, + PermissionOrganizationProj: PermissionRead, + PermissionSecurityEvents: PermissionRead, + PermissionStatuses: PermissionRead, + PermissionModels: PermissionRead, + // Note: id-token is NOT included because it doesn't support read level + }, + }, + { + name: "merge write shorthand into map - adds all missing scopes as write", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionRead}), + merge: NewPermissionsWriteAll(), + want: map[PermissionScope]PermissionLevel{ + PermissionIssues: PermissionRead, + PermissionActions: PermissionWrite, + PermissionAttestations: PermissionWrite, + PermissionChecks: PermissionWrite, + PermissionContents: PermissionWrite, + PermissionDeployments: PermissionWrite, + PermissionDiscussions: PermissionWrite, + PermissionIdToken: PermissionWrite, // id-token supports write + PermissionPackages: PermissionWrite, + PermissionPages: PermissionWrite, + PermissionPullRequests: PermissionWrite, + PermissionRepositoryProj: PermissionWrite, + PermissionOrganizationProj: PermissionWrite, + PermissionSecurityEvents: PermissionWrite, + PermissionStatuses: PermissionWrite, + PermissionModels: PermissionWrite, + }, + }, + { + name: "merge none shorthand into map - no change", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: NewPermissionsNone(), + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}, + }, + + // Map-to-Shorthand merges (shorthand converts to map) + { + name: "merge map into read-all shorthand - shorthand cleared, map created", + base: NewPermissionsReadAll(), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}), + want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}, + }, + { + name: "merge map into write-all shorthand - shorthand cleared, map created", + base: NewPermissionsWriteAll(), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}, + }, + { + name: "merge map into none shorthand - shorthand cleared, map created", + base: NewPermissionsNone(), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}), + want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}, + }, + { + name: "merge complex map into read shorthand", + base: 
NewPermissionsReadAll(), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionWrite, + PermissionIssues: PermissionRead, + PermissionPullRequests: PermissionWrite, + }), + want: map[PermissionScope]PermissionLevel{ + PermissionContents: PermissionWrite, + PermissionIssues: PermissionRead, + PermissionPullRequests: PermissionWrite, + }, + }, + + // Nil and edge cases + { + name: "merge nil into map - no change", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: nil, + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}, + }, + { + name: "merge nil into shorthand - no change", + base: NewPermissionsReadAll(), + merge: nil, + wantSH: "read-all", + }, + { + name: "merge empty map into map - no change", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{}), + want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}, + }, + { + name: "merge map into empty map - scopes added", + base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{}), + merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}), + want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.base.Merge(tt.merge) + + if tt.wantSH != "" { + if tt.base.shorthand != tt.wantSH { + t.Errorf("after merge, shorthand = %q, want %q", tt.base.shorthand, tt.wantSH) + } + return + } + + if len(tt.want) != len(tt.base.permissions) { + t.Errorf("after merge, got %d permissions, want %d", len(tt.base.permissions), len(tt.want)) + } + + for scope, wantLevel := range tt.want { + gotLevel, exists := tt.base.Get(scope) + if !exists { + t.Errorf("after merge, scope %s not found", scope) + continue + } + if gotLevel != wantLevel { + t.Errorf("after merge, scope %s = %v, want %v", scope, gotLevel, wantLevel) + } + } + }) + } +} + +func TestPermissions_AllRead(t *testing.T) { + tests := []struct { + name string + perms *Permissions + scope PermissionScope + expected PermissionLevel + exists bool + }{ + { + name: "all: read returns read for contents", + perms: NewPermissionsAllRead(), + scope: PermissionContents, + expected: PermissionRead, + exists: true, + }, + { + name: "all: read returns read for issues", + perms: NewPermissionsAllRead(), + scope: PermissionIssues, + expected: PermissionRead, + exists: true, + }, + { + name: "all: read with explicit override", + perms: func() *Permissions { + p := NewPermissionsAllRead() + p.Set(PermissionContents, PermissionWrite) + return p + }(), + scope: PermissionContents, + expected: PermissionWrite, + exists: true, + }, + { + name: "all: read does not include id-token (not supported at read level)", + perms: NewPermissionsAllRead(), + scope: PermissionIdToken, + expected: "", // Should be empty since the permission doesn't exist + exists: false, // Should not exist because id-token doesn't support read + }, + { + name: "all: read with explicit id-token: write override", + perms: func() *Permissions { + p := NewPermissionsAllRead() + p.Set(PermissionIdToken, PermissionWrite) + return p + }(), + scope: PermissionIdToken, + expected: PermissionWrite, + exists: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + level, exists := tt.perms.Get(tt.scope) + if 
+
+func TestPermissions_AllRead(t *testing.T) {
+	tests := []struct {
+		name     string
+		perms    *Permissions
+		scope    PermissionScope
+		expected PermissionLevel
+		exists   bool
+	}{
+		{
+			name: "all: read returns read for contents",
+			perms: NewPermissionsAllRead(),
+			scope: PermissionContents,
+			expected: PermissionRead,
+			exists: true,
+		},
+		{
+			name: "all: read returns read for issues",
+			perms: NewPermissionsAllRead(),
+			scope: PermissionIssues,
+			expected: PermissionRead,
+			exists: true,
+		},
+		{
+			name: "all: read with explicit override",
+			perms: func() *Permissions {
+				p := NewPermissionsAllRead()
+				p.Set(PermissionContents, PermissionWrite)
+				return p
+			}(),
+			scope: PermissionContents,
+			expected: PermissionWrite,
+			exists: true,
+		},
+		{
+			name: "all: read does not include id-token (not supported at read level)",
+			perms: NewPermissionsAllRead(),
+			scope: PermissionIdToken,
+			expected: "", // Should be empty since the permission doesn't exist
+			exists: false, // Should not exist because id-token doesn't support read
+		},
+		{
+			name: "all: read with explicit id-token: write override",
+			perms: func() *Permissions {
+				p := NewPermissionsAllRead()
+				p.Set(PermissionIdToken, PermissionWrite)
+				return p
+			}(),
+			scope: PermissionIdToken,
+			expected: PermissionWrite,
+			exists: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			level, exists := tt.perms.Get(tt.scope)
+			if exists != tt.exists {
+				t.Errorf("Get(%s) exists = %v, want %v", tt.scope, exists, tt.exists)
+			}
+			if level != tt.expected {
+				t.Errorf("Get(%s) = %v, want %v", tt.scope, level, tt.expected)
+			}
+		})
+	}
+}
diff --git a/pkg/workflow/permissions_parser_test.go b/pkg/workflow/permissions_parser_test.go
new file mode 100644
index 0000000000..88d786473d
--- /dev/null
+++ b/pkg/workflow/permissions_parser_test.go
@@ -0,0 +1,703 @@
+package workflow
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestPermissionsParser_HasContentsReadAccess(t *testing.T) {
+	tests := []struct {
+		name        string
+		permissions string
+		expected    bool
+	}{
+		{
+			name: "shorthand read-all grants contents access",
+			permissions: "permissions: read-all",
+			expected: true,
+		},
+		{
+			name: "shorthand write-all grants contents access",
+			permissions: "permissions: write-all",
+			expected: true,
+		},
+		{
+			name: "invalid shorthand read does not grant contents access",
+			permissions: "permissions: read",
+			expected: false, // "read" is no longer a valid shorthand
+		},
+		{
+			name: "invalid shorthand write does not grant contents access",
+			permissions: "permissions: write",
+			expected: false, // "write" is no longer a valid shorthand
+		},
+		{
+			name: "shorthand none denies contents access",
+			permissions: "permissions: none",
+			expected: false,
+		},
+		{
+			name: "explicit contents read grants access",
+			permissions: `permissions:
+  contents: read
+  issues: write`,
+			expected: true,
+		},
+		{
+			name: "explicit contents write grants access",
+			permissions: `permissions:
+  contents: write
+  issues: read`,
+			expected: true,
+		},
+		{
+			name: "no contents permission denies access",
+			permissions: `permissions:
+  issues: write
+  pull-requests: read`,
+			expected: false,
+		},
+		{
+			name: "explicit contents none denies access",
+			permissions: `permissions:
+  contents: none
+  issues: write`,
+			expected: false,
+		},
+		{
+			name: "empty permissions denies access",
+			permissions: "",
+			expected: false,
+		},
+		{
+			name: "just permissions label denies access",
+			permissions: "permissions:",
+			expected: false,
+		},
+		// Additional extensive edge case tests
+		{
+			name: "whitespace only permissions denies access",
+			permissions: "permissions: \n \t",
+			expected: false,
+		},
+		{
+			name: "permissions with extra whitespace",
+			permissions: `permissions:
+    contents: read
+    issues: write`,
+			expected: true,
+		},
+		{
+			name: "invalid shorthand permission denies access",
+			permissions: "permissions: invalid-permission",
+			expected: false,
+		},
+		{
+			name: "mixed case contents permission",
+			permissions: `permissions:
+  CONTENTS: read`,
+			expected: false, // YAML is case-sensitive
+		},
+		{
+			name: "contents with mixed case value",
+			permissions: `permissions:
+  contents: READ`,
+			expected: false, // Values are case-sensitive
+		},
+		{
+			name: "permissions with numeric contents value",
+			permissions: `permissions:
+  contents: 123`,
+			expected: false,
+		},
+		{
+			name: "permissions with boolean contents value",
+			permissions: `permissions:
+  contents: true`,
+			expected: false,
+		},
+		{
+			name: "deeply nested permissions structure",
+			permissions: `permissions:
+  security:
+    contents: read
+  contents: write`,
+			expected: true, // Should parse the top-level contents
+		},
+		{
+			name: "permissions with comments",
+			permissions: `permissions:
+  contents: read # This grants read access
+  issues: write`,
+			expected: true,
+		},
+		{
+			name: "permissions with array syntax",
+			permissions: `permissions:
+  contents: [read, write]`,
+			expected: false, // Array values not supported
+		},
+		{
+			name: "permissions with quoted values",
+			permissions: `permissions:
+  contents: "read"
+  issues: write`,
+			expected: true,
+		},
+		{
+			name: "permissions with single quotes",
+			permissions: `permissions:
+  contents: 'write'
+  issues: read`,
+			expected: true,
+		},
+		{
+			name: "malformed YAML permissions",
+			permissions: `permissions:
+contents: read
+   issues: write`, // Bad indentation
+			expected: false,
+		},
+		{
+			name: "permissions without colon separator",
+			permissions: `permissions
+  contents read`,
+			expected: false,
+		},
+		{
+			name: "extremely long permission value",
+			permissions: "permissions: " + strings.Repeat("a", 1000),
+			expected: false,
+		},
+		{
+			name: "special characters in permission values",
+			permissions: `permissions:
+  contents: read@#$%
+  issues: write`,
+			expected: false,
+		},
+		{
+			name: "unicode characters in permissions",
+			permissions: `permissions:
+  contents: 读取
+  issues: write`,
+			expected: false,
+		},
+		{
+			name: "null value for contents",
+			permissions: `permissions:
+  contents: null
+  issues: write`,
+			expected: false,
+		},
+		{
+			name: "empty string for contents",
+			permissions: `permissions:
+  contents: ""
+  issues: write`,
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parser := NewPermissionsParser(tt.permissions)
+			result := parser.HasContentsReadAccess()
+			if result != tt.expected {
+				t.Errorf("HasContentsReadAccess() = %v, expected %v", result, tt.expected)
+			}
+		})
+	}
+}
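Editor's note: taken together, these cases constrain HasContentsReadAccess to an exact, lowercase, case-sensitive match: only a read-all or write-all shorthand, or a literal contents: read / contents: write entry, grants access. A hypothetical restatement of that rule (an illustration, not the parser's actual code):

// hasContentsReadSketch restates the behavior the table above pins down.
// Only the two valid shorthands or an exact lowercase "contents" entry
// with value "read" or "write" grant access; everything else is denied.
func hasContentsReadSketch(shorthand string, parsed map[string]string) bool {
	if shorthand == "read-all" || shorthand == "write-all" {
		return true
	}
	v := parsed["contents"]
	return v == "read" || v == "write"
}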
+
+func TestContainsCheckout(t *testing.T) {
+	tests := []struct {
+		name        string
+		customSteps string
+		expected    bool
+	}{
+		{
+			name: "empty steps",
+			customSteps: "",
+			expected: false,
+		},
+		{
+			name: "contains actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd",
+			customSteps: `steps:
+  - name: Checkout
+    uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true,
+		},
+		{
+			name: "contains actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd (with token input)",
+			customSteps: `steps:
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+    with:
+      token: ${{ secrets.GITHUB_TOKEN }}`,
+			expected: true,
+		},
+		{
+			name: "contains different action",
+			customSteps: `steps:
+  - name: Setup Node
+    uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f
+    with:
+      node-version: '18'`,
+			expected: false,
+		},
+		{
+			name: "mixed steps with checkout",
+			customSteps: `steps:
+  - name: Checkout repository
+    uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+  - name: Setup Node
+    uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f`,
+			expected: true,
+		},
+		{
+			name: "case insensitive detection",
+			customSteps: `steps:
+  - name: Checkout
+    uses: Actions/Checkout@v5`,
+			expected: true,
+		},
+		{
+			name: "checkout in middle of other text",
+			customSteps: `steps:
+  - name: Custom step
+    run: echo "before checkout"
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+  - name: After checkout
+    run: echo "done"`,
+			expected: true,
+		},
+		// Additional extensive edge case tests for ContainsCheckout
+		{
+			name: "checkout with no version",
+			customSteps: `steps:
+  - uses: actions/checkout`,
+			expected: true,
+		},
+		{
+			name: "checkout with specific commit",
+			customSteps: `steps:
+  - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3`,
+			expected: true,
+		},
+		{
+			name: "checkout with branch reference",
+			customSteps: `steps:
+  - uses: actions/checkout@main`,
+			expected: true,
+		},
+		{
+			name: "checkout action in quotes",
+			customSteps: `steps:
+  - name: Checkout
+    uses: "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd"`,
+			expected: true,
+		},
+		{
+			name: "checkout action in single quotes",
+			customSteps: `steps:
+  - uses: 'actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd'`,
+			expected: true,
+		},
+		{
+			name: "checkout with extra whitespace",
+			customSteps: `steps:
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd  `,
+			expected: true,
+		},
+		{
+			name: "checkout in multiline YAML",
+			customSteps: `steps:
+  - name: Checkout
+    uses: >
+      actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true,
+		},
+		{
+			name: "checkout in run command (matches via simple substring search)",
+			customSteps: `steps:
+  - name: Echo checkout
+    run: echo "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd"`,
+			expected: true, // Current implementation does simple string match
+		},
+		{
+			name: "checkout in comment (matches via simple substring search)",
+			customSteps: `steps:
+  - name: Setup
+    uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f
+    # TODO: add actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true, // Current implementation does simple string match
+		},
+		{
+			name: "similar but not checkout action",
+			customSteps: `steps:
+  - uses: actions/cache@v3
+  - uses: my-actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true, // Current implementation matches substring
+		},
+		{
+			name: "checkout in different format",
+			customSteps: `steps:
+  - name: Checkout code
+    uses: |
+      actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true,
+		},
+		{
+			name: "malformed YAML with checkout",
+			customSteps: `steps
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true, // Still detects the string
+		},
+		{
+			name: "checkout with complex parameters",
+			customSteps: `steps:
+  - name: Checkout repository
+    uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+    with:
+      fetch-depth: 0
+      token: ${{ secrets.GITHUB_TOKEN }}
+      submodules: recursive`,
+			expected: true,
+		},
+		{
+			name: "multiple checkouts",
+			customSteps: `steps:
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+  - name: Setup
+    run: echo "setup"
+  - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+    with:
+      path: subdirectory`,
+			expected: true,
+		},
+		{
+			name: "checkout with unusual casing",
+			customSteps: `steps:
+  - uses: ACTIONS/CHECKOUT@V5`,
+			expected: true,
+		},
+		{
+			name: "checkout in conditional step",
+			customSteps: `steps:
+  - if: github.event_name == 'push'
+    uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`,
+			expected: true,
+		},
+		{
+			name: "very long steps with checkout buried inside",
+			customSteps: `steps:
+  - name: Step 1
+    run: echo "first"
+  - name: Step 2
+    run: echo "second"
+  - name: Step 3
+    run: echo "third"
+  - name: Checkout buried
+    uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd
+  - name: Step 5
+    run: echo "fifth"`,
+			expected: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := ContainsCheckout(tt.customSteps)
+			if result != tt.expected {
+				t.Errorf("ContainsCheckout() = %v, expected %v", result, tt.expected)
+			}
+		})
+	}
+}
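Editor's note: several case comments above point out that the current implementation does a simple, case-insensitive substring match, which is why checkout references inside run commands, comments, and forked actions such as my-actions/checkout still count as matches. A sketch consistent with that documented behavior (an assumption, not the package source; it relies on the strings import already in this file):

// containsCheckoutSketch mirrors the matching behavior the tests
// document: lowercase the input and look for the substring, so any
// occurrence of "actions/checkout" matches regardless of casing or
// surrounding YAML structure.
func containsCheckoutSketch(customSteps string) bool {
	return strings.Contains(strings.ToLower(customSteps), "actions/checkout")
}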
+
+func TestPermissionsParser_Parse(t *testing.T) {
+	tests := []struct {
+		name          string
+		permissions   string
+		expectedMap   map[string]string
+		expectedShort bool
+		expectedValue string
+	}{
+		{
+			name: "shorthand read-all",
+			permissions: "permissions: read-all",
+			expectedMap: map[string]string{},
+			expectedShort: true,
+			expectedValue: "read-all",
+		},
+		{
+			name: "explicit map permissions",
+			permissions: `permissions:
+  contents: read
+  issues: write`,
+			expectedMap: map[string]string{
+				"contents": "read",
+				"issues": "write",
+			},
+			expectedShort: false,
+			expectedValue: "",
+		},
+		{
+			name: "multiline without permissions prefix",
+			permissions: `contents: read
+issues: write
+pull-requests: read`,
+			expectedMap: map[string]string{
+				"contents": "read",
+				"issues": "write",
+				"pull-requests": "read",
+			},
+			expectedShort: false,
+			expectedValue: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parser := NewPermissionsParser(tt.permissions)
+
+			if parser.isShorthand != tt.expectedShort {
+				t.Errorf("isShorthand = %v, expected %v", parser.isShorthand, tt.expectedShort)
+			}
+
+			if parser.shorthandValue != tt.expectedValue {
+				t.Errorf("shorthandValue = %v, expected %v", parser.shorthandValue, tt.expectedValue)
+			}
+
+			if !tt.expectedShort {
+				for key, expectedValue := range tt.expectedMap {
+					if actualValue, exists := parser.parsedPerms[key]; !exists || actualValue != expectedValue {
+						t.Errorf("parsedPerms[%s] = %v, expected %v", key, actualValue, expectedValue)
+					}
+				}
+			}
+		})
+	}
+}
+
+func TestPermissionsParser_AllRead(t *testing.T) {
+	tests := []struct {
+		name        string
+		permissions string
+		expected    bool
+		scope       string
+		level       string
+	}{
+		{
+			name: "all: read grants contents read access",
+			permissions: `permissions:
+  all: read
+  contents: write`,
+			expected: true,
+			scope: "contents",
+			level: "read",
+		},
+		{
+			name: "all: read grants contents write access when overridden",
+			permissions: `permissions:
+  all: read
+  contents: write`,
+			expected: true,
+			scope: "contents",
+			level: "write",
+		},
+		{
+			name: "all: read grants issues read access",
+			permissions: `permissions:
+  all: read
+  contents: write`,
+			expected: true,
+			scope: "issues",
+			level: "read",
+		},
+		{
+			name: "all: read denies issues write access by default",
+			permissions: `permissions:
+  all: read`,
+			expected: false,
+			scope: "issues",
+			level: "write",
+		},
+		{
+			name: "all: read with explicit write override",
+			permissions: `permissions:
+  all: read
+  issues: write`,
+			expected: true,
+			scope: "issues",
+			level: "write",
+		},
+		{
+			name: "all: write is not allowed - should fail parsing",
+			permissions: `permissions:
+  all: write`,
+			expected: false,
+			scope: "contents",
+			level: "read",
+		},
+		{
+			name: "all: read with none is not allowed - should fail parsing",
+			permissions: `permissions:
+  all: read
+  contents: none`,
+			expected: false,
+			scope: "contents",
+			level: "read",
+		},
+		{
+			name: "all: read grants id-token write access when overridden",
+			permissions: `permissions:
+  all: read
+  id-token: write`,
+			expected: true,
+			scope: "id-token",
+			level: "write",
+		},
+		{
+			name: "all: read does not grant id-token read access (not supported)",
+			permissions: `permissions:
+  all: read`,
+			expected: false,
+			scope: "id-token",
+			level: "read",
+		},
+		{
+			name: "all: read denies id-token write access by default (not included in expansion)",
+			permissions: `permissions:
+  all: read`,
+			expected: false,
+			scope: "id-token",
+			level: "write",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parser := NewPermissionsParser(tt.permissions)
+			result := parser.IsAllowed(tt.scope, tt.level)
+			if result != tt.expected {
+				t.Errorf("IsAllowed(%s, %s) = %v, want %v", tt.scope, tt.level, result, tt.expected)
+			}
+		})
+	}
+}
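Editor's note: the all: read cases encode an expansion rule: every scope that supports the read level is granted read, id-token is skipped because it only supports write and none, and explicit entries override the expansion (while all: write, or an explicit none alongside all: read, fails parsing outright). A hypothetical helper capturing just the expansion step, not part of this patch:

// expandAllRead illustrates the expansion the tests above encode:
// every read-capable scope gets read, id-token is deliberately absent,
// and explicit overrides win.
func expandAllRead(overrides map[PermissionScope]PermissionLevel) map[PermissionScope]PermissionLevel {
	out := make(map[PermissionScope]PermissionLevel)
	for _, scope := range []PermissionScope{
		PermissionActions, PermissionAttestations, PermissionChecks,
		PermissionContents, PermissionDeployments, PermissionDiscussions,
		PermissionIssues, PermissionModels, PermissionPackages,
		PermissionPages, PermissionPullRequests, PermissionRepositoryProj,
		PermissionSecurityEvents, PermissionStatuses,
	} {
		out[scope] = PermissionRead // note: no PermissionIdToken here
	}
	for scope, level := range overrides {
		out[scope] = level // e.g. id-token: write re-enters via an override
	}
	return out
}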
+
+func TestPermissionsParser_ToPermissions(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       any
+		wantYAML    string
+		contains    []string
+		notContains []string
+	}{
+		{
+			name: "shorthand read-all",
+			input: "read-all",
+			wantYAML: "permissions: read-all",
+		},
+		{
+			name: "shorthand write-all",
+			input: "write-all",
+			wantYAML: "permissions: write-all",
+		},
+		{
+			name: "all: read without overrides",
+			input: map[string]any{
+				"all": "read",
+			},
+			contains: []string{
+				"permissions:",
+				"  actions: read",
+				"  contents: read",
+				"  issues: read",
+			},
+			notContains: []string{
+				"  id-token: read", // id-token doesn't support read
+			},
+		},
+		{
+			name: "all: read with contents: write override",
+			input: map[string]any{
+				"all": "read",
+				"contents": "write",
+			},
+			contains: []string{
+				"permissions:",
+				"  actions: read",
+				"  contents: write", // Override
+				"  issues: read",
+			},
+			notContains: []string{
+				"  contents: read",
+			},
+		},
+		{
+			name: "all: read with id-token: write override",
+			input: map[string]any{
+				"all": "read",
+				"id-token": "write",
+			},
+			contains: []string{
+				"permissions:",
+				"  actions: read",
+				"  contents: read",
+				"  id-token: write", // Explicitly set
+			},
+			notContains: []string{
+				"  id-token: read",
+			},
+		},
+		{
+			name: "explicit permissions without all",
+			input: map[string]any{
+				"contents": "read",
+				"issues": "write",
+			},
+			wantYAML: "permissions:\n  contents: read\n  issues: write",
+		},
+		{
+			name: "all: write is not allowed",
+			input: map[string]any{
+				"all": "write",
+			},
+			wantYAML: "",
+		},
+		{
+			name: "all: read with none is not allowed",
+			input: map[string]any{
+				"all": "read",
+				"contents": "none",
+			},
+			wantYAML: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parser := NewPermissionsParserFromValue(tt.input)
+			permissions := parser.ToPermissions()
+			yaml := permissions.RenderToYAML()
+
+			if tt.wantYAML != "" && yaml != tt.wantYAML {
+				t.Errorf("ToPermissions().RenderToYAML() = %q, want %q", yaml, tt.wantYAML)
+			}
+
+			for _, expected := range tt.contains {
+				if !strings.Contains(yaml, expected) {
+					t.Errorf("ToPermissions().RenderToYAML() should contain %q, but got:\n%s", expected, yaml)
+				}
+			}
+
+			for _, notExpected := range tt.notContains {
+				if strings.Contains(yaml, notExpected) {
+					t.Errorf("ToPermissions().RenderToYAML() should NOT contain %q, but got:\n%s", notExpected, yaml)
+				}
+			}
+		})
+	}
+}
diff --git a/pkg/workflow/permissions_rendering_test.go b/pkg/workflow/permissions_rendering_test.go
new file mode 100644
index 0000000000..3a349240b1
--- /dev/null
+++ b/pkg/workflow/permissions_rendering_test.go
@@ -0,0 +1,260 @@
+package workflow
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestPermissionsRenderToYAML(t *testing.T) {
+	tests := []struct {
+		name        string
+		permissions *Permissions
+		want        string
+	}{
+		{
+			name: "nil permissions",
+			permissions: nil,
+			want: "",
+		},
+		{
+			name: "read-all shorthand",
+			permissions: NewPermissionsReadAll(),
+			want: "permissions: read-all",
+		},
+		{
+			name: "write-all shorthand",
+			permissions: NewPermissionsWriteAll(),
+			want: "permissions: write-all",
+		},
+		{
+			name: "empty permissions",
+			permissions: NewPermissions(),
+			want: "",
+		},
+		{
+			name: "single permission",
+			permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionContents: PermissionRead,
+			}),
+			want: "permissions:\n  contents: read",
+		},
+		{
+			name: "multiple permissions - sorted",
+			permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
+				PermissionIssues: PermissionWrite,
+				PermissionContents: PermissionRead,
+				PermissionPullRequests: PermissionWrite,
+			}),
+			want: "permissions:\n  contents: read\n  issues: write\n  pull-requests: write",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.permissions.RenderToYAML()
+			if got != tt.want {
+				t.Errorf("RenderToYAML() = %q, want %q", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestPermissions_AllReadWithIdTokenWrite(t *testing.T) {
+	// Test that "all: read" with "id-token: write" works as expected
+	// id-token is special because it only supports write and none, not read
+
+	// Create permissions with all: read and id-token: write
+	perms := &Permissions{
+		hasAll: true,
+		allLevel: PermissionRead,
+		permissions: map[PermissionScope]PermissionLevel{
+			PermissionIdToken: PermissionWrite,
+		},
+	}
+
+	// Test that all normal scopes have read access
+	normalScopes := []PermissionScope{
+		PermissionActions, PermissionAttestations, PermissionChecks, PermissionContents,
+		PermissionDeployments, PermissionDiscussions, PermissionIssues, PermissionPackages,
+		PermissionPages, PermissionPullRequests, PermissionRepositoryProj,
+		PermissionSecurityEvents, PermissionStatuses, PermissionModels,
+	}
+
+	for _, scope := range normalScopes {
+		level, allowed := perms.Get(scope)
+		if !allowed || level != PermissionRead {
+			t.Errorf("scope %s should have read access, got allowed=%v, level=%s", scope, allowed, level)
+		}
+	}
+
+	// Test that id-token has write access (explicit override)
+	level, allowed := perms.Get(PermissionIdToken)
+	if !allowed || level != PermissionWrite {
+		t.Errorf("id-token should have write access, got allowed=%v, level=%s", allowed, level)
+	}
+
+	// Test that id-token does NOT get read access from all: read
+	// This should return false because id-token doesn't support read
+	if level, allowed := perms.Get(PermissionIdToken); allowed && level == PermissionRead {
+		t.Errorf("id-token should NOT have read access from all: read")
+	}
+
+	// Test YAML rendering excludes id-token: read but includes id-token: write
+	yaml := perms.RenderToYAML()
+
+	// Should contain all normal scopes with read access
+	expectedLines := []string{
+		"  actions: read",
+		"  attestations: read",
+		"  checks: read",
+		"  contents: read",
+		"  deployments: read",
+		"  discussions: read",
+		"  issues: read",
+		"  packages: read",
+		"  pages: read",
+		"  pull-requests: read",
+		"  repository-projects: read",
+		"  security-events: read",
+		"  statuses: read",
+		"  models: read",
+		"  id-token: write", // explicit override
+	}
+
+	for _, expected := range expectedLines {
+		if !strings.Contains(yaml, expected) {
+			t.Errorf("YAML should contain %q, but got:\n%s", expected, yaml)
+		}
+	}
+
+	// Should NOT contain id-token: read
+	if strings.Contains(yaml, "id-token: read") {
+		t.Errorf("YAML should NOT contain 'id-token: read', but got:\n%s", yaml)
+	}
+}
repository-projects: read", + " security-events: read", + " statuses: read", + }, + }, + { + name: "all: read with explicit override - write overrides read", + perms: func() *Permissions { + p := NewPermissionsAllRead() + p.Set(PermissionContents, PermissionWrite) + return p + }(), + contains: []string{ + "permissions:", + " actions: read", + " contents: write", // Overridden to write + " issues: read", + }, + notContains: []string{ + " contents: read", // Should NOT contain contents: read when explicitly set to write + }, + }, + { + name: "all: read with multiple explicit overrides", + perms: func() *Permissions { + p := NewPermissionsAllRead() + p.Set(PermissionContents, PermissionWrite) + p.Set(PermissionIssues, PermissionWrite) + return p + }(), + contains: []string{ + "permissions:", + " actions: read", + " contents: write", + " issues: write", + " packages: read", + }, + notContains: []string{ + " contents: read", // Should NOT contain contents: read + " issues: read", // Should NOT contain issues: read + }, + }, + { + name: "all: read with id-token: write - id-token should be excluded from all: read expansion but included when explicitly set to write", + perms: func() *Permissions { + p := NewPermissionsAllRead() + p.Set(PermissionIdToken, PermissionWrite) + return p + }(), + contains: []string{ + "permissions:", + " actions: read", + " contents: read", + " id-token: write", // Explicitly set to write + " issues: read", + }, + notContains: []string{ + " id-token: read", // Should NOT contain id-token: read (not supported) + }, + }, + { + name: "all: read excludes id-token since it doesn't support read level", + perms: NewPermissionsAllRead(), + contains: []string{ + "permissions:", + " actions: read", + " attestations: read", + " checks: read", + " contents: read", + " deployments: read", + " discussions: read", + " issues: read", + " models: read", + " packages: read", + " pages: read", + " pull-requests: read", + " repository-projects: read", + " security-events: read", + " statuses: read", + }, + notContains: []string{ + " id-token: read", // Should NOT be included since id-token doesn't support read + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.perms.RenderToYAML() + for _, expected := range tt.contains { + if !strings.Contains(result, expected) { + t.Errorf("RenderToYAML() should contain %q, but got:\n%s", expected, result) + } + } + for _, notExpected := range tt.notContains { + if strings.Contains(result, notExpected) { + t.Errorf("RenderToYAML() should NOT contain %q, but got:\n%s", notExpected, result) + } + } + }) + } +} diff --git a/pkg/workflow/permissions_test.go b/pkg/workflow/permissions_test.go deleted file mode 100644 index caf4b7bf64..0000000000 --- a/pkg/workflow/permissions_test.go +++ /dev/null @@ -1,1551 +0,0 @@ -package workflow - -import ( - "strings" - "testing" -) - -func TestPermissionsParser_HasContentsReadAccess(t *testing.T) { - tests := []struct { - name string - permissions string - expected bool - }{ - { - name: "shorthand read-all grants contents access", - permissions: "permissions: read-all", - expected: true, - }, - { - name: "shorthand write-all grants contents access", - permissions: "permissions: write-all", - expected: true, - }, - { - name: "invalid shorthand read does not grant contents access", - permissions: "permissions: read", - expected: false, // "read" is no longer a valid shorthand - }, - { - name: "invalid shorthand write does not grant contents access", - permissions: 
"permissions: write", - expected: false, // "write" is no longer a valid shorthand - }, - { - name: "shorthand none denies contents access", - permissions: "permissions: none", - expected: false, - }, - { - name: "explicit contents read grants access", - permissions: `permissions: - contents: read - issues: write`, - expected: true, - }, - { - name: "explicit contents write grants access", - permissions: `permissions: - contents: write - issues: read`, - expected: true, - }, - { - name: "no contents permission denies access", - permissions: `permissions: - issues: write - pull-requests: read`, - expected: false, - }, - { - name: "explicit contents none denies access", - permissions: `permissions: - contents: none - issues: write`, - expected: false, - }, - { - name: "empty permissions denies access", - permissions: "", - expected: false, - }, - { - name: "just permissions label denies access", - permissions: "permissions:", - expected: false, - }, - // Additional extensive edge case tests - { - name: "whitespace only permissions denies access", - permissions: "permissions: \n \t", - expected: false, - }, - { - name: "permissions with extra whitespace", - permissions: `permissions: - contents: read - issues: write`, - expected: true, - }, - { - name: "invalid shorthand permission denies access", - permissions: "permissions: invalid-permission", - expected: false, - }, - { - name: "mixed case contents permission", - permissions: `permissions: - CONTENTS: read`, - expected: false, // YAML is case-sensitive - }, - { - name: "contents with mixed case value", - permissions: `permissions: - contents: READ`, - expected: false, // Values are case-sensitive - }, - { - name: "permissions with numeric contents value", - permissions: `permissions: - contents: 123`, - expected: false, - }, - { - name: "permissions with boolean contents value", - permissions: `permissions: - contents: true`, - expected: false, - }, - { - name: "deeply nested permissions structure", - permissions: `permissions: - security: - contents: read - contents: write`, - expected: true, // Should parse the top-level contents - }, - { - name: "permissions with comments", - permissions: `permissions: - contents: read # This grants read access - issues: write`, - expected: true, - }, - { - name: "permissions with array syntax", - permissions: `permissions: - contents: [read, write]`, - expected: false, // Array values not supported - }, - { - name: "permissions with quoted values", - permissions: `permissions: - contents: "read" - issues: write`, - expected: true, - }, - { - name: "permissions with single quotes", - permissions: `permissions: - contents: 'write' - issues: read`, - expected: true, - }, - { - name: "malformed YAML permissions", - permissions: `permissions: - contents: read - issues: write`, // Bad indentation - expected: false, - }, - { - name: "permissions without colon separator", - permissions: `permissions - contents read`, - expected: false, - }, - { - name: "extremely long permission value", - permissions: "permissions: " + strings.Repeat("a", 1000), - expected: false, - }, - { - name: "special characters in permission values", - permissions: `permissions: - contents: read@#$% - issues: write`, - expected: false, - }, - { - name: "unicode characters in permissions", - permissions: `permissions: - contents: 读取 - issues: write`, - expected: false, - }, - { - name: "null value for contents", - permissions: `permissions: - contents: null - issues: write`, - expected: false, - }, - { - name: "empty string for 
contents", - permissions: `permissions: - contents: "" - issues: write`, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := NewPermissionsParser(tt.permissions) - result := parser.HasContentsReadAccess() - if result != tt.expected { - t.Errorf("HasContentsReadAccess() = %v, expected %v", result, tt.expected) - } - }) - } -} - -func TestContainsCheckout(t *testing.T) { - tests := []struct { - name string - customSteps string - expected bool - }{ - { - name: "empty steps", - customSteps: "", - expected: false, - }, - { - name: "contains actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd", - customSteps: `steps: - - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, - }, - { - name: "contains actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd", - customSteps: `steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - with: - token: ${{ secrets.GITHUB_TOKEN }}`, - expected: true, - }, - { - name: "contains different action", - customSteps: `steps: - - name: Setup Node - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f - with: - node-version: '18'`, - expected: false, - }, - { - name: "mixed steps with checkout", - customSteps: `steps: - - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - - name: Setup Node - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f`, - expected: true, - }, - { - name: "case insensitive detection", - customSteps: `steps: - - name: Checkout - uses: Actions/Checkout@v5`, - expected: true, - }, - { - name: "checkout in middle of other text", - customSteps: `steps: - - name: Custom step - run: echo "before checkout" - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - - name: After checkout - run: echo "done"`, - expected: true, - }, - // Additional extensive edge case tests for ContainsCheckout - { - name: "checkout with no version", - customSteps: `steps: - - uses: actions/checkout`, - expected: true, - }, - { - name: "checkout with specific commit", - customSteps: `steps: - - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3`, - expected: true, - }, - { - name: "checkout with branch reference", - customSteps: `steps: - - uses: actions/checkout@main`, - expected: true, - }, - { - name: "checkout action in quotes", - customSteps: `steps: - - name: Checkout - uses: "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd"`, - expected: true, - }, - { - name: "checkout action in single quotes", - customSteps: `steps: - - uses: 'actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd'`, - expected: true, - }, - { - name: "checkout with extra whitespace", - customSteps: `steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd `, - expected: true, - }, - { - name: "checkout in multiline YAML", - customSteps: `steps: - - name: Checkout - uses: > - actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, - }, - { - name: "checkout in run command (should not match)", - customSteps: `steps: - - name: Echo checkout - run: echo "actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd"`, - expected: true, // Current implementation does simple string match - }, - { - name: "checkout in comment (should not match)", - customSteps: `steps: - - name: Setup - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f - # TODO: add actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - 
expected: true, // Current implementation does simple string match - }, - { - name: "similar but not checkout action", - customSteps: `steps: - - uses: actions/cache@v3 - - uses: my-actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, // Current implementation matches substring - }, - { - name: "checkout in different format", - customSteps: `steps: - - name: Checkout code - uses: | - actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, - }, - { - name: "malformed YAML with checkout", - customSteps: `steps - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, // Still detects the string - }, - { - name: "checkout with complex parameters", - customSteps: `steps: - - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - with: - fetch-depth: 0 - token: ${{ secrets.GITHUB_TOKEN }} - submodules: recursive`, - expected: true, - }, - { - name: "multiple checkouts", - customSteps: `steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - - name: Setup - run: echo "setup" - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - with: - path: subdirectory`, - expected: true, - }, - { - name: "checkout with unusual casing", - customSteps: `steps: - - uses: ACTIONS/CHECKOUT@V5`, - expected: true, - }, - { - name: "checkout in conditional step", - customSteps: `steps: - - if: github.event_name == 'push' - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd`, - expected: true, - }, - { - name: "very long steps with checkout buried inside", - customSteps: `steps: - - name: Step 1 - run: echo "first" - - name: Step 2 - run: echo "second" - - name: Step 3 - run: echo "third" - - name: Checkout buried - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd - - name: Step 5 - run: echo "fifth"`, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := ContainsCheckout(tt.customSteps) - if result != tt.expected { - t.Errorf("ContainsCheckout() = %v, expected %v", result, tt.expected) - } - }) - } -} - -func TestPermissionsParser_Parse(t *testing.T) { - tests := []struct { - name string - permissions string - expectedMap map[string]string - expectedShort bool - expectedValue string - }{ - { - name: "shorthand read-all", - permissions: "permissions: read-all", - expectedMap: map[string]string{}, - expectedShort: true, - expectedValue: "read-all", - }, - { - name: "explicit map permissions", - permissions: `permissions: - contents: read - issues: write`, - expectedMap: map[string]string{ - "contents": "read", - "issues": "write", - }, - expectedShort: false, - expectedValue: "", - }, - { - name: "multiline without permissions prefix", - permissions: `contents: read -issues: write -pull-requests: read`, - expectedMap: map[string]string{ - "contents": "read", - "issues": "write", - "pull-requests": "read", - }, - expectedShort: false, - expectedValue: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := NewPermissionsParser(tt.permissions) - - if parser.isShorthand != tt.expectedShort { - t.Errorf("isShorthand = %v, expected %v", parser.isShorthand, tt.expectedShort) - } - - if parser.shorthandValue != tt.expectedValue { - t.Errorf("shorthandValue = %v, expected %v", parser.shorthandValue, tt.expectedValue) - } - - if !tt.expectedShort { - for key, expectedValue := range tt.expectedMap { - if actualValue, exists := parser.parsedPerms[key]; !exists 
-
-func TestNewPermissions(t *testing.T) {
-	p := NewPermissions()
-	if p == nil {
-		t.Fatal("NewPermissions() returned nil")
-	}
-	if p.shorthand != "" {
-		t.Errorf("expected empty shorthand, got %q", p.shorthand)
-	}
-	if p.permissions == nil {
-		t.Error("expected permissions map to be initialized")
-	}
-	if len(p.permissions) != 0 {
-		t.Errorf("expected empty permissions map, got %d entries", len(p.permissions))
-	}
-}
-
-func TestNewPermissionsShorthand(t *testing.T) {
-	tests := []struct {
-		name      string
-		fn        func() *Permissions
-		shorthand string
-	}{
-		{"read-all", NewPermissionsReadAll, "read-all"},
-		{"write-all", NewPermissionsWriteAll, "write-all"},
-		{"none", NewPermissionsNone, "none"},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			p := tt.fn()
-			if p.shorthand != tt.shorthand {
-				t.Errorf("expected shorthand %q, got %q", tt.shorthand, p.shorthand)
-			}
-		})
-	}
-}
-
-func TestNewPermissionsFromMap(t *testing.T) {
-	perms := map[PermissionScope]PermissionLevel{
-		PermissionContents: PermissionRead,
-		PermissionIssues: PermissionWrite,
-	}
-
-	p := NewPermissionsFromMap(perms)
-	if p.shorthand != "" {
-		t.Errorf("expected empty shorthand, got %q", p.shorthand)
-	}
-	if len(p.permissions) != 2 {
-		t.Errorf("expected 2 permissions, got %d", len(p.permissions))
-	}
-
-	level, exists := p.Get(PermissionContents)
-	if !exists || level != PermissionRead {
-		t.Errorf("expected contents: read, got %v (exists: %v)", level, exists)
-	}
-
-	level, exists = p.Get(PermissionIssues)
-	if !exists || level != PermissionWrite {
-		t.Errorf("expected issues: write, got %v (exists: %v)", level, exists)
-	}
-}
-
-func TestPermissionsSet(t *testing.T) {
-	p := NewPermissions()
-	p.Set(PermissionContents, PermissionRead)
-
-	level, exists := p.Get(PermissionContents)
-	if !exists || level != PermissionRead {
-		t.Errorf("expected contents: read, got %v (exists: %v)", level, exists)
-	}
-
-	// Test setting on shorthand converts to map
-	p2 := NewPermissionsReadAll()
-	p2.Set(PermissionIssues, PermissionWrite)
-	if p2.shorthand != "" {
-		t.Error("expected shorthand to be cleared after Set")
-	}
-	level, exists = p2.Get(PermissionIssues)
-	if !exists || level != PermissionWrite {
-		t.Errorf("expected issues: write, got %v (exists: %v)", level, exists)
-	}
-}
-
-func TestPermissionsGet(t *testing.T) {
-	tests := []struct {
-		name        string
-		permissions *Permissions
-		scope       PermissionScope
-		wantLevel   PermissionLevel
-		wantExists  bool
-	}{
-		{
-			name: "read-all shorthand",
-			permissions: NewPermissionsReadAll(),
-			scope: PermissionContents,
-			wantLevel: PermissionRead,
-			wantExists: true,
-		},
-		{
-			name: "write-all shorthand",
-			permissions: NewPermissionsWriteAll(),
-			scope: PermissionIssues,
-			wantLevel: PermissionWrite,
-			wantExists: true,
-		},
-		{
-			name: "none shorthand",
-			permissions: NewPermissionsNone(),
-			scope: PermissionContents,
-			wantLevel: PermissionNone,
-			wantExists: true,
-		},
-		{
-			name: "specific permission",
-			permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionRead,
-			}),
-			scope: PermissionContents,
-			wantLevel: PermissionRead,
-			wantExists: true,
-		},
-		{
-			name: "non-existent permission",
-			permissions: NewPermissions(),
-			scope: PermissionContents,
-			wantLevel: "",
-			wantExists: false,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			level, exists := tt.permissions.Get(tt.scope)
-			if exists != tt.wantExists {
-				t.Errorf("Get() exists = %v, want %v", exists, tt.wantExists)
-			}
-			if level != tt.wantLevel {
-				t.Errorf("Get() level = %v, want %v", level, tt.wantLevel)
-			}
-		})
-	}
-}
-
-func TestPermissionsMerge(t *testing.T) {
-	tests := []struct {
-		name   string
-		base   *Permissions
-		merge  *Permissions
-		want   map[PermissionScope]PermissionLevel
-		wantSH string
-	}{
-		// Map-to-Map merges
-		{
-			name: "merge two maps - write overrides read",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite},
-		},
-		{
-			name: "merge two maps - read doesn't override write",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite},
-		},
-		{
-			name: "merge two maps - different scopes",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionRead,
-				PermissionIssues: PermissionWrite,
-			},
-		},
-		{
-			name: "merge two maps - multiple scopes with conflicts",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionRead,
-				PermissionIssues: PermissionWrite,
-				PermissionPullRequests: PermissionRead,
-			}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite,
-				PermissionIssues: PermissionRead,
-				PermissionDiscussions: PermissionWrite,
-			}),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite, // write wins
-				PermissionIssues: PermissionWrite, // write preserved
-				PermissionPullRequests: PermissionRead, // kept from base
-				PermissionDiscussions: PermissionWrite, // added from merge
-			},
-		},
-		{
-			name: "merge two maps - none overrides read",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
-		},
-		{
-			name: "merge two maps - none overrides none",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone},
-		},
-		{
-			name: "merge two maps - write overrides none",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionNone}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite},
-		},
-		{
-			name: "merge two maps - all permission scopes",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionActions: PermissionRead,
-				PermissionChecks: PermissionRead,
-				PermissionContents: PermissionRead,
-				PermissionDeployments: PermissionRead,
-				PermissionDiscussions: PermissionRead,
-				PermissionIssues: PermissionRead,
-				PermissionPackages: PermissionRead,
-			}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionPages: PermissionWrite,
-				PermissionPullRequests: PermissionWrite,
-				PermissionRepositoryProj: PermissionWrite,
-				PermissionSecurityEvents: PermissionWrite,
-				PermissionStatuses: PermissionWrite,
-				PermissionModels: PermissionWrite,
-			}),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionActions: PermissionRead,
-				PermissionChecks: PermissionRead,
-				PermissionContents: PermissionRead,
-				PermissionDeployments: PermissionRead,
-				PermissionDiscussions: PermissionRead,
-				PermissionIssues: PermissionRead,
-				PermissionPackages: PermissionRead,
-				PermissionPages: PermissionWrite,
-				PermissionPullRequests: PermissionWrite,
-				PermissionRepositoryProj: PermissionWrite,
-				PermissionSecurityEvents: PermissionWrite,
-				PermissionStatuses: PermissionWrite,
-				PermissionModels: PermissionWrite,
-			},
-		},
-
-		// Shorthand-to-Shorthand merges
-		{
-			name: "merge shorthand - write-all wins over read-all",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over read",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over write",
-			base: NewPermissionsWriteAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over none",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over read-all",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over read-all (duplicate for coverage)",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - write-all wins over none",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - read-all wins over read-all",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsReadAll(),
-			wantSH: "read-all",
-		},
-		{
-			name: "merge shorthand - read-all wins over none",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsReadAll(),
-			wantSH: "read-all",
-		},
-		{
-			name: "merge shorthand - read-all wins over none (duplicate for coverage)",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsReadAll(),
-			wantSH: "read-all",
-		},
-		{
-			name: "merge shorthand - read-all preserved when merging read",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsReadAll(),
-			wantSH: "read-all",
-		},
-		{
-			name: "merge shorthand - write-all preserved when merging write",
-			base: NewPermissionsWriteAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - same shorthand preserved (read-all)",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsReadAll(),
-			wantSH: "read-all",
-		},
-		{
-			name: "merge shorthand - same shorthand preserved (write-all)",
-			base: NewPermissionsWriteAll(),
-			merge: NewPermissionsWriteAll(),
-			wantSH: "write-all",
-		},
-		{
-			name: "merge shorthand - same shorthand preserved (none)",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsNone(),
-			wantSH: "none",
-		},
-
-		// Shorthand-to-Map merges
-		{
-			name: "merge read-all shorthand into map - adds all missing scopes as read",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
-			merge: NewPermissionsReadAll(),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite, // preserved
-				PermissionActions: PermissionRead, // added
-				PermissionAttestations: PermissionRead,
-				PermissionChecks: PermissionRead,
-				PermissionDeployments: PermissionRead,
-				PermissionDiscussions: PermissionRead,
-				PermissionIssues: PermissionRead,
-				PermissionPackages: PermissionRead,
-				PermissionPages: PermissionRead,
-				PermissionPullRequests: PermissionRead,
-				PermissionRepositoryProj: PermissionRead,
-				PermissionOrganizationProj: PermissionRead,
-				PermissionSecurityEvents: PermissionRead,
-				PermissionStatuses: PermissionRead,
-				PermissionModels: PermissionRead,
-				// Note: id-token is NOT included because it doesn't support read level
-			},
-		},
-		{
-			name: "merge write-all shorthand into map - adds all missing scopes as write",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsWriteAll(),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionRead, // preserved (not overwritten)
-				PermissionActions: PermissionWrite,
-				PermissionAttestations: PermissionWrite,
-				PermissionChecks: PermissionWrite,
-				PermissionDeployments: PermissionWrite,
-				PermissionDiscussions: PermissionWrite,
-				PermissionIdToken: PermissionWrite, // id-token supports write
-				PermissionIssues: PermissionWrite,
-				PermissionPackages: PermissionWrite,
-				PermissionPages: PermissionWrite,
-				PermissionPullRequests: PermissionWrite,
-				PermissionRepositoryProj: PermissionWrite,
-				PermissionOrganizationProj: PermissionWrite,
-				PermissionSecurityEvents: PermissionWrite,
-				PermissionStatuses: PermissionWrite,
-				PermissionModels: PermissionWrite,
-			},
-		},
-		{
-			name: "merge read shorthand into map - adds all missing scopes as read",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionWrite}),
-			merge: NewPermissionsReadAll(),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite,
-				PermissionActions: PermissionRead,
-				PermissionAttestations: PermissionRead,
-				PermissionChecks: PermissionRead,
-				PermissionDeployments: PermissionRead,
-				PermissionDiscussions: PermissionRead,
-				PermissionIssues: PermissionRead,
-				PermissionPackages: PermissionRead,
-				PermissionPages: PermissionRead,
-				PermissionPullRequests: PermissionRead,
-				PermissionRepositoryProj: PermissionRead,
-				PermissionOrganizationProj: PermissionRead,
-				PermissionSecurityEvents: PermissionRead,
-				PermissionStatuses: PermissionRead,
-				PermissionModels: PermissionRead,
-				// Note: id-token is NOT included because it doesn't support read level
-			},
-		},
-		{
-			name: "merge write shorthand into map - adds all missing scopes as write",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionRead}),
-			merge: NewPermissionsWriteAll(),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionIssues: PermissionRead,
-				PermissionActions: PermissionWrite,
-				PermissionAttestations: PermissionWrite,
-				PermissionChecks: PermissionWrite,
-				PermissionContents: PermissionWrite,
-				PermissionDeployments: PermissionWrite,
-				PermissionDiscussions: PermissionWrite,
-				PermissionIdToken: PermissionWrite, // id-token supports write
-				PermissionPackages: PermissionWrite,
-				PermissionPages: PermissionWrite,
-				PermissionPullRequests: PermissionWrite,
-				PermissionRepositoryProj: PermissionWrite,
-				PermissionOrganizationProj: PermissionWrite,
-				PermissionSecurityEvents: PermissionWrite,
-				PermissionStatuses: PermissionWrite,
-				PermissionModels: PermissionWrite,
-			},
-		},
-		{
-			name: "merge none shorthand into map - no change",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsNone(),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
-		},
-
-		// Map-to-Shorthand merges (shorthand converts to map)
-		{
-			name: "merge map into read-all shorthand - shorthand cleared, map created",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite},
-		},
-		{
-			name: "merge map into write-all shorthand - shorthand cleared, map created",
-			base: NewPermissionsWriteAll(),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
-		},
-		{
-			name: "merge map into none shorthand - shorthand cleared, map created",
-			base: NewPermissionsNone(),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite},
-		},
-		{
-			name: "merge complex map into read shorthand",
-			base: NewPermissionsReadAll(),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite,
-				PermissionIssues: PermissionRead,
-				PermissionPullRequests: PermissionWrite,
-			}),
-			want: map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionWrite,
-				PermissionIssues: PermissionRead,
-				PermissionPullRequests: PermissionWrite,
-			},
-		},
-
-		// Nil and edge cases
-		{
-			name: "merge nil into map - no change",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: nil,
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
-		},
-		{
-			name: "merge nil into shorthand - no change",
-			base: NewPermissionsReadAll(),
-			merge: nil,
-			wantSH: "read-all",
-		},
-		{
-			name: "merge empty map into map - no change",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{}),
-			want: map[PermissionScope]PermissionLevel{PermissionContents: PermissionRead},
-		},
-		{
-			name: "merge map into empty map - scopes added",
-			base: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{}),
-			merge: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite}),
-			want: map[PermissionScope]PermissionLevel{PermissionIssues: PermissionWrite},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			tt.base.Merge(tt.merge)
-
-			if tt.wantSH != "" {
-				if tt.base.shorthand != tt.wantSH {
-					t.Errorf("after merge, shorthand = %q, want %q", tt.base.shorthand, tt.wantSH)
-				}
-				return
-			}
-
-			if len(tt.want) != len(tt.base.permissions) {
-				t.Errorf("after merge, got %d permissions, want %d", len(tt.base.permissions), len(tt.want))
-			}
-
-			for scope, wantLevel := range tt.want {
-				gotLevel, exists := tt.base.Get(scope)
-				if !exists {
-					t.Errorf("after merge, scope %s not found", scope)
-					continue
-				}
-				if gotLevel != wantLevel {
-					t.Errorf("after merge, scope %s = %v, want %v", scope, gotLevel, wantLevel)
-				}
-			}
-		})
-	}
-}
-
-func TestPermissionsRenderToYAML(t *testing.T) {
-	tests := []struct {
-		name        string
-		permissions *Permissions
-		want        string
-	}{
-		{
-			name: "nil permissions",
-			permissions: nil,
-			want: "",
-		},
-		{
-			name: "read-all shorthand",
-			permissions: NewPermissionsReadAll(),
-			want: "permissions: read-all",
-		},
-		{
-			name: "write-all shorthand",
-			permissions: NewPermissionsWriteAll(),
-			want: "permissions: write-all",
-		},
-		{
-			name: "empty permissions",
-			permissions: NewPermissions(),
-			want: "",
-		},
-		{
-			name: "single permission",
-			permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionContents: PermissionRead,
-			}),
-			want: "permissions:\n  contents: read",
-		},
-		{
-			name: "multiple permissions - sorted",
-			permissions: NewPermissionsFromMap(map[PermissionScope]PermissionLevel{
-				PermissionIssues: PermissionWrite,
-				PermissionContents: PermissionRead,
-				PermissionPullRequests: PermissionWrite,
-			}),
-			want: "permissions:\n  contents: read\n  issues: write\n  pull-requests: write",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			got := tt.permissions.RenderToYAML()
-			if got != tt.want {
-				t.Errorf("RenderToYAML() = %q, want %q", got, tt.want)
-			}
-		})
-	}
-}
-
-func TestPermissions_AllReadWithIdTokenWrite(t *testing.T) {
-	// Test that "all: read" with "id-token: write" works as expected
-	// id-token is special because it only supports write and none, not read
-
-	// Create permissions with all: read and id-token: write
-	perms := &Permissions{
-		hasAll: true,
-		allLevel: PermissionRead,
-		permissions: map[PermissionScope]PermissionLevel{
-			PermissionIdToken: PermissionWrite,
-		},
-	}
-
-	// Test that all normal scopes have read access
-	normalScopes := []PermissionScope{
-		PermissionActions, PermissionAttestations, PermissionChecks, PermissionContents,
-		PermissionDeployments, PermissionDiscussions, PermissionIssues, PermissionPackages,
-		PermissionPages, PermissionPullRequests, PermissionRepositoryProj,
-		PermissionSecurityEvents, PermissionStatuses, PermissionModels,
-	}
-
-	for _, scope := range normalScopes {
-		level, allowed := perms.Get(scope)
-		if !allowed || level != PermissionRead {
-			t.Errorf("scope %s should have read access, got allowed=%v, level=%s", scope, allowed, level)
-		}
-	}
-
-	// Test that id-token has write access (explicit override)
-	level, allowed := perms.Get(PermissionIdToken)
-	if !allowed || level != PermissionWrite {
-		t.Errorf("id-token should have write access, got allowed=%v, level=%s", allowed, level)
-	}
-
-	// Test that id-token does NOT get read access from all: read
-	// This should return false because id-token doesn't support read
-	if level, allowed := perms.Get(PermissionIdToken); allowed && level == PermissionRead {
-		t.Errorf("id-token should NOT have read access from all: read")
-	}
-
-	// Test YAML rendering excludes id-token: read but includes id-token: write
-	yaml := perms.RenderToYAML()
-
-	// Should contain all normal scopes with read access
-	expectedLines := []string{
-		"  actions: read",
-		"  attestations: read",
-		"  checks: read",
-		"  contents: read",
-		"  deployments: read",
-		"  discussions: read",
-		"  issues: read",
-		"  packages: read",
-		"  pages: read",
-		"  pull-requests: read",
-		"  repository-projects: read",
-		"  security-events: read",
-		"  statuses: read",
-		"  models: read",
-		"  id-token: write", // explicit override
-	}
-
-
-func TestPermissions_AllReadWithIdTokenWrite(t *testing.T) {
-	// Test that "all: read" with "id-token: write" works as expected.
-	// id-token is special because it only supports write and none, not read.
-
-	// Create permissions with all: read and id-token: write
-	perms := &Permissions{
-		hasAll:   true,
-		allLevel: PermissionRead,
-		permissions: map[PermissionScope]PermissionLevel{
-			PermissionIdToken: PermissionWrite,
-		},
-	}
-
-	// Every normal scope should report read access
-	normalScopes := []PermissionScope{
-		PermissionActions, PermissionAttestations, PermissionChecks, PermissionContents,
-		PermissionDeployments, PermissionDiscussions, PermissionIssues, PermissionPackages,
-		PermissionPages, PermissionPullRequests, PermissionRepositoryProj,
-		PermissionSecurityEvents, PermissionStatuses, PermissionModels,
-	}
-
-	for _, scope := range normalScopes {
-		level, allowed := perms.Get(scope)
-		if !allowed || level != PermissionRead {
-			t.Errorf("scope %s should have read access, got allowed=%v, level=%s", scope, allowed, level)
-		}
-	}
-
-	// id-token should report write access (explicit override)
-	level, allowed := perms.Get(PermissionIdToken)
-	if !allowed || level != PermissionWrite {
-		t.Errorf("id-token should have write access, got allowed=%v, level=%s", allowed, level)
-	}
-
-	// id-token must never surface at read level: its visibility comes from the
-	// explicit write override, not from the all: read expansion
-	if level, allowed := perms.Get(PermissionIdToken); allowed && level == PermissionRead {
-		t.Errorf("id-token should NOT have read access from all: read")
-	}
-
-	// YAML rendering must exclude id-token: read but include id-token: write
-	yaml := perms.RenderToYAML()
-
-	// Should contain all normal scopes with read access
-	expectedLines := []string{
-		"  actions: read",
-		"  attestations: read",
-		"  checks: read",
-		"  contents: read",
-		"  deployments: read",
-		"  discussions: read",
-		"  issues: read",
-		"  packages: read",
-		"  pages: read",
-		"  pull-requests: read",
-		"  repository-projects: read",
-		"  security-events: read",
-		"  statuses: read",
-		"  models: read",
-		"  id-token: write", // explicit override
-	}
-
-	for _, expected := range expectedLines {
-		if !strings.Contains(yaml, expected) {
-			t.Errorf("YAML should contain %q, but got:\n%s", expected, yaml)
-		}
-	}
-
-	// Should NOT contain id-token: read
-	if strings.Contains(yaml, "id-token: read") {
-		t.Errorf("YAML should NOT contain 'id-token: read', but got:\n%s", yaml)
-	}
-}
-
-func TestPermissionsParser_AllRead(t *testing.T) {
-	tests := []struct {
-		name        string
-		permissions string
-		expected    bool
-		scope       string
-		level       string
-	}{
-		{
-			name: "all: read grants contents read access",
-			permissions: `permissions:
-  all: read
-  contents: write`,
-			expected: true,
-			scope:    "contents",
-			level:    "read",
-		},
-		{
-			name: "all: read grants contents write access when overridden",
-			permissions: `permissions:
-  all: read
-  contents: write`,
-			expected: true,
-			scope:    "contents",
-			level:    "write",
-		},
-		{
-			name: "all: read grants issues read access",
-			permissions: `permissions:
-  all: read
-  contents: write`,
-			expected: true,
-			scope:    "issues",
-			level:    "read",
-		},
-		{
-			name: "all: read denies issues write access by default",
-			permissions: `permissions:
-  all: read`,
-			expected: false,
-			scope:    "issues",
-			level:    "write",
-		},
-		{
-			name: "all: read with explicit write override",
-			permissions: `permissions:
-  all: read
-  issues: write`,
-			expected: true,
-			scope:    "issues",
-			level:    "write",
-		},
-		{
-			name: "all: write is not allowed - should fail parsing",
-			permissions: `permissions:
-  all: write`,
-			expected: false,
-			scope:    "contents",
-			level:    "read",
-		},
-		{
-			name: "all: read with none is not allowed - should fail parsing",
-			permissions: `permissions:
-  all: read
-  contents: none`,
-			expected: false,
-			scope:    "contents",
-			level:    "read",
-		},
-		{
-			name: "all: read grants id-token write access when overridden",
-			permissions: `permissions:
-  all: read
-  id-token: write`,
-			expected: true,
-			scope:    "id-token",
-			level:    "write",
-		},
-		{
-			name: "all: read does not grant id-token read access (not supported)",
-			permissions: `permissions:
-  all: read`,
-			expected: false,
-			scope:    "id-token",
-			level:    "read",
-		},
-		{
-			name: "all: read denies id-token write access by default (not included in expansion)",
-			permissions: `permissions:
-  all: read`,
-			expected: false,
-			scope:    "id-token",
-			level:    "write",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			parser := NewPermissionsParser(tt.permissions)
-			result := parser.IsAllowed(tt.scope, tt.level)
-			if result != tt.expected {
-				t.Errorf("IsAllowed(%s, %s) = %v, want %v", tt.scope, tt.level, result, tt.expected)
-			}
-		})
-	}
-}
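// Both suites above encode the same expansion rule: "all: read" fans out to
// every normal scope at read level, id-token is skipped because it has no
// read level, and explicit entries always win. A sketch of that expansion;
// allReadScopes is a hypothetical slice of the read-capable Permission*
// constants, not an identifier from this package:
func expandAllRead(overrides map[PermissionScope]PermissionLevel) map[PermissionScope]PermissionLevel {
	expanded := make(map[PermissionScope]PermissionLevel, len(allReadScopes)+len(overrides))
	for _, scope := range allReadScopes {
		expanded[scope] = PermissionRead // id-token is deliberately absent from this list
	}
	for scope, level := range overrides {
		expanded[scope] = level // explicit overrides win, including id-token: write
	}
	return expanded
}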
-
-func TestPermissions_AllRead(t *testing.T) {
-	tests := []struct {
-		name     string
-		perms    *Permissions
-		scope    PermissionScope
-		expected PermissionLevel
-		exists   bool
-	}{
-		{
-			name:     "all: read returns read for contents",
-			perms:    NewPermissionsAllRead(),
-			scope:    PermissionContents,
-			expected: PermissionRead,
-			exists:   true,
-		},
-		{
-			name:     "all: read returns read for issues",
-			perms:    NewPermissionsAllRead(),
-			scope:    PermissionIssues,
-			expected: PermissionRead,
-			exists:   true,
-		},
-		{
-			name: "all: read with explicit override",
-			perms: func() *Permissions {
-				p := NewPermissionsAllRead()
-				p.Set(PermissionContents, PermissionWrite)
-				return p
-			}(),
-			scope:    PermissionContents,
-			expected: PermissionWrite,
-			exists:   true,
-		},
-		{
-			name:     "all: read does not include id-token (not supported at read level)",
-			perms:    NewPermissionsAllRead(),
-			scope:    PermissionIdToken,
-			expected: "",    // Should be empty since the permission doesn't exist
-			exists:   false, // Should not exist because id-token doesn't support read
-		},
-		{
-			name: "all: read with explicit id-token: write override",
-			perms: func() *Permissions {
-				p := NewPermissionsAllRead()
-				p.Set(PermissionIdToken, PermissionWrite)
-				return p
-			}(),
-			scope:    PermissionIdToken,
-			expected: PermissionWrite,
-			exists:   true,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			level, exists := tt.perms.Get(tt.scope)
-			if exists != tt.exists {
-				t.Errorf("Get(%s) exists = %v, want %v", tt.scope, exists, tt.exists)
-			}
-			if level != tt.expected {
-				t.Errorf("Get(%s) = %v, want %v", tt.scope, level, tt.expected)
-			}
-		})
-	}
-}
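// A Get consistent with this table and with the hasAll/allLevel fields used
// in TestPermissions_AllReadWithIdTokenWrite might look like the sketch below
// (a hypothetical reimplementation for illustration, not the method itself):
func getWithAllRead(p *Permissions, scope PermissionScope) (PermissionLevel, bool) {
	if level, ok := p.permissions[scope]; ok {
		return level, true // explicit entries always win, including id-token: write
	}
	if p.hasAll && scope != PermissionIdToken {
		return p.allLevel, true // all: read covers every normal scope
	}
	return "", false // id-token never inherits from all: read
}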
-
-func TestPermissions_AllReadRenderToYAML(t *testing.T) {
-	tests := []struct {
-		name        string
-		perms       *Permissions
-		contains    []string // lines the output must contain
-		notContains []string // lines the output must NOT contain
-	}{
-		{
-			name:  "all: read expands to individual permissions",
-			perms: NewPermissionsAllRead(),
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  attestations: read",
-				"  checks: read",
-				"  contents: read",
-				"  deployments: read",
-				"  discussions: read",
-				"  issues: read",
-				"  models: read",
-				"  packages: read",
-				"  pages: read",
-				"  pull-requests: read",
-				"  repository-projects: read",
-				"  security-events: read",
-				"  statuses: read",
-			},
-		},
-		{
-			name: "all: read with explicit override - write overrides read",
-			perms: func() *Permissions {
-				p := NewPermissionsAllRead()
-				p.Set(PermissionContents, PermissionWrite)
-				return p
-			}(),
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: write", // overridden to write
-				"  issues: read",
-			},
-			notContains: []string{
-				"  contents: read", // must not appear once contents is explicitly write
-			},
-		},
-		{
-			name: "all: read with multiple explicit overrides",
-			perms: func() *Permissions {
-				p := NewPermissionsAllRead()
-				p.Set(PermissionContents, PermissionWrite)
-				p.Set(PermissionIssues, PermissionWrite)
-				return p
-			}(),
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: write",
-				"  issues: write",
-				"  packages: read",
-			},
-			notContains: []string{
-				"  contents: read",
-				"  issues: read",
-			},
-		},
-		{
-			name: "all: read with id-token: write - excluded from expansion, included as explicit override",
-			perms: func() *Permissions {
-				p := NewPermissionsAllRead()
-				p.Set(PermissionIdToken, PermissionWrite)
-				return p
-			}(),
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: read",
-				"  id-token: write", // explicitly set to write
-				"  issues: read",
-			},
-			notContains: []string{
-				"  id-token: read", // read is not supported for id-token
-			},
-		},
-		{
-			name:  "all: read excludes id-token since it doesn't support read level",
-			perms: NewPermissionsAllRead(),
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  attestations: read",
-				"  checks: read",
-				"  contents: read",
-				"  deployments: read",
-				"  discussions: read",
-				"  issues: read",
-				"  models: read",
-				"  packages: read",
-				"  pages: read",
-				"  pull-requests: read",
-				"  repository-projects: read",
-				"  security-events: read",
-				"  statuses: read",
-			},
-			notContains: []string{
-				"  id-token: read", // never included, since id-token doesn't support read
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			result := tt.perms.RenderToYAML()
-			for _, expected := range tt.contains {
-				if !strings.Contains(result, expected) {
-					t.Errorf("RenderToYAML() should contain %q, but got:\n%s", expected, result)
-				}
-			}
-			for _, notExpected := range tt.notContains {
-				if strings.Contains(result, notExpected) {
-					t.Errorf("RenderToYAML() should NOT contain %q, but got:\n%s", notExpected, result)
-				}
-			}
-		})
-	}
-}
-
-func TestPermissionsParser_ToPermissions(t *testing.T) {
-	tests := []struct {
-		name        string
-		input       any
-		wantYAML    string
-		wantEmpty   bool // expect empty YAML because the input should be rejected
-		contains    []string
-		notContains []string
-	}{
-		{
-			name:     "shorthand read-all",
-			input:    "read-all",
-			wantYAML: "permissions: read-all",
-		},
-		{
-			name:     "shorthand write-all",
-			input:    "write-all",
-			wantYAML: "permissions: write-all",
-		},
-		{
-			name: "all: read without overrides",
-			input: map[string]any{
-				"all": "read",
-			},
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: read",
-				"  issues: read",
-			},
-			notContains: []string{
-				"  id-token: read", // id-token doesn't support read
-			},
-		},
-		{
-			name: "all: read with contents: write override",
-			input: map[string]any{
-				"all":      "read",
-				"contents": "write",
-			},
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: write", // override
-				"  issues: read",
-			},
-			notContains: []string{
-				"  contents: read",
-			},
-		},
-		{
-			name: "all: read with id-token: write override",
-			input: map[string]any{
-				"all":      "read",
-				"id-token": "write",
-			},
-			contains: []string{
-				"permissions:",
-				"  actions: read",
-				"  contents: read",
-				"  id-token: write", // explicitly set
-			},
-			notContains: []string{
-				"  id-token: read",
-			},
-		},
-		{
-			name: "explicit permissions without all",
-			input: map[string]any{
-				"contents": "read",
-				"issues":   "write",
-			},
-			wantYAML: "permissions:\n  contents: read\n  issues: write",
-		},
-		{
-			name: "all: write is not allowed",
-			input: map[string]any{
-				"all": "write",
-			},
-			wantEmpty: true,
-		},
-		{
-			name: "all: read with none is not allowed",
-			input: map[string]any{
-				"all":      "read",
-				"contents": "none",
-			},
-			wantEmpty: true,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			parser := NewPermissionsParserFromValue(tt.input)
-			permissions := parser.ToPermissions()
-			yaml := permissions.RenderToYAML()
-
-			if tt.wantEmpty && yaml != "" {
-				t.Errorf("ToPermissions().RenderToYAML() = %q, want empty output for rejected input", yaml)
-			}
-
-			if tt.wantYAML != "" && yaml != tt.wantYAML {
-				t.Errorf("ToPermissions().RenderToYAML() = %q, want %q", yaml, tt.wantYAML)
-			}
-
-			for _, expected := range tt.contains {
-				if !strings.Contains(yaml, expected) {
-					t.Errorf("ToPermissions().RenderToYAML() should contain %q, but got:\n%s", expected, yaml)
-				}
-			}
-
-			for _, notExpected := range tt.notContains {
-				if strings.Contains(yaml, notExpected) {
-					t.Errorf("ToPermissions().RenderToYAML() should NOT contain %q, but got:\n%s", notExpected, yaml)
-				}
-			}
-		})
-	}
-}
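// Seen from a call site, the parser path these cases exercise is a simple
// pipeline: raw frontmatter value in, canonical YAML out. A usage sketch
// using only the constructors and methods exercised above:
func examplePermissionsRoundTrip() string {
	parser := NewPermissionsParserFromValue(map[string]any{
		"all":      "read",
		"id-token": "write",
	})
	perms := parser.ToPermissions()
	// Expands all: read over the normal scopes, keeps the explicit
	// id-token: write, and renders scopes in sorted order. Rejected inputs
	// such as "all": "write" come back as an empty string instead.
	return perms.RenderToYAML()
}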