Merge branch 'main' into mehulkar/turbo-873-log-run-summary-file-when…
mehulkar authored Mar 29, 2023
2 parents 50ad8ef + 0d20a5e commit d2214b4
Showing 96 changed files with 15,164 additions and 421,761 deletions.
80 changes: 65 additions & 15 deletions .github/actions/next-integration-stat/index.js
@@ -16356,21 +16356,71 @@
nextjsVersion,
ref: sha,
};
const failedJobResults = fullJobLogsFromWorkflow.reduce(
(acc, { logs, job }) => {
// Split logs per test suite; exclude arbitrary chunks that do not contain test data
const splittedLogs = logs
.split("NEXT_INTEGRATION_TEST: true")
.filter((log) => log.includes("--test output start--"));
// Iterate over each chunk of logs to find the test name and corresponding test data
const failedTestResultsData = collectFailedTestResults(
splittedLogs,
job
);
return acc.concat(failedTestResultsData);
},
[]
);
const [failedJobResults, flakyMonitorJobResults] =
fullJobLogsFromWorkflow.reduce(
(acc, { logs, job }) => {
var _a, _b, _c;
// Split logs per test suite; exclude arbitrary chunks that do not contain test data
const splittedLogs = logs
.split("NEXT_INTEGRATION_TEST: true")
.filter((log) => log.includes("--test output start--"));
// There is a job named `Next.js integration test (FLAKY_SUBSET)` that runs a known subset of
// tests that are flaky. If the given job is the flaky-subset monitor, we only want to grab its test results.
// [NOTE]: this is similar to `collectFailedTestResults`, but not identical: collectFailedTestResults intentionally
// skips successful tests, while here we want to collect all the test results.
if (job.name.includes("FLAKY_SUBSET")) {
const splittedLogs = logs.split("--test output start--");
const ret = [];
let logLine = splittedLogs.shift();
while (logLine) {
try {
const testData =
(_c =
(_b =
(_a =
logLine === null || logLine === void 0
? void 0
: logLine
.split("--test output start--")
.pop()) === null || _a === void 0
? void 0
: _a.split("--test output end--")) === null ||
_b === void 0
? void 0
: _b.shift()) === null || _c === void 0
? void 0
: _c.trim();
ret.push({
job: job.name,
// We may be able to parse the test suite name, but skip that for now
name: "empty",
data: JSON.parse(testData),
});
} catch (_) {
console.log("Failed to parse flaky subset test results", {
logs,
});
} finally {
logLine = splittedLogs.shift();
}
}
acc[1] = acc[1].concat(ret);
} else {
// Iterate over each chunk of logs to find the test name and corresponding test data
const failedTestResultsData = collectFailedTestResults(
splittedLogs,
job
);
acc[0] = acc[0].concat(failedTestResultsData);
}
return acc;
},
[[], []]
);
console.log(`Flakiness test subset results`, {
flakyMonitorJobResults,
});
testResultManifest.flakyMonitorJobResults = flakyMonitorJobResults;
testResultManifest.result = failedJobResults;
// Collect all test results into a single manifest to store in a file. This allows uploading / comparing test results
// across different runs.
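The `_a`/`_b`/`_c` temporaries in the compiled index.js above are simply tsc's downlevel emit for the optional chaining written in src/index.ts (shown next). A minimal sketch of the equivalence, assuming a pre-ES2020 compile target; the variable and sample string here are illustrative, not from the repository:

// Source form, as written with optional chaining in the TypeScript:
const log: string | undefined = "result --test output end-- tail";
const first = log?.split("--test output end--")?.shift()?.trim();

// Roughly what tsc emits when targeting JavaScript older than ES2020:
var _a, _b;
const firstCompiled =
  (_b =
    (_a =
      log === null || log === void 0
        ? void 0
        : log.split("--test output end--")) === null || _a === void 0
      ? void 0
      : _a.shift()) === null || _b === void 0
    ? void 0
    : _b.trim();

console.log(first === firstCompiled); // true
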
67 changes: 55 additions & 12 deletions .github/actions/next-integration-stat/src/index.ts
@@ -65,6 +65,7 @@ interface TestResultManifest {
nextjsVersion: string;
ref: string;
result: Array<FailedJobResult>;
flakyMonitorJobResults: Array<FailedJobResult>;
}

// A comment marker to identify the comment created by this action.
@@ -470,21 +471,63 @@ async function getFailedJobResults(
ref: sha,
} as any;

  const failedJobResults = fullJobLogsFromWorkflow.reduce(
    (acc, { logs, job }) => {
      // Split logs per test suite; exclude arbitrary chunks that do not contain test data
      const splittedLogs = logs
        .split("NEXT_INTEGRATION_TEST: true")
        .filter((log) => log.includes("--test output start--"));

      // Iterate over each chunk of logs to find the test name and corresponding test data
      const failedTestResultsData = collectFailedTestResults(splittedLogs, job);

      return acc.concat(failedTestResultsData);
    },
    [] as Array<FailedJobResult>
  );
const [failedJobResults, flakyMonitorJobResults] =
fullJobLogsFromWorkflow.reduce(
(acc, { logs, job }) => {
// Split logs per test suite; exclude arbitrary chunks that do not contain test data
const splittedLogs = logs
.split("NEXT_INTEGRATION_TEST: true")
.filter((log) => log.includes("--test output start--"));

// There is a job named `Next.js integration test (FLAKY_SUBSET)` that runs a known subset of
// tests that are flaky. If the given job is the flaky-subset monitor, we only want to grab its test results.
// [NOTE]: this is similar to `collectFailedTestResults`, but not identical: collectFailedTestResults intentionally
// skips successful tests, while here we want to collect all the test results.
if (job.name.includes("FLAKY_SUBSET")) {
const splittedLogs = logs.split("--test output start--");
const ret = [];
let logLine = splittedLogs.shift();
while (logLine) {
try {
const testData = logLine
?.split("--test output start--")
.pop()
?.split("--test output end--")
?.shift()
?.trim()!;

ret.push({
job: job.name,
// We may be able to parse the test suite name, but skip that for now
name: "empty",
data: JSON.parse(testData),
});
} catch (_) {
console.log("Failed to parse flaky subset test results", {
logs,
});
} finally {
logLine = splittedLogs.shift();
}
}
acc[1] = acc[1].concat(ret);
} else {
// Iterate over each chunk of logs to find the test name and corresponding test data
const failedTestResultsData = collectFailedTestResults(
splittedLogs,
job
);
acc[0] = acc[0].concat(failedTestResultsData);
}

return acc;
},
[[], []] as [Array<FailedJobResult>, Array<FailedJobResult>]
);

console.log(`Flakiness test subset results`, { flakyMonitorJobResults });

testResultManifest.flakyMonitorJobResults = flakyMonitorJobResults;
testResultManifest.result = failedJobResults;

// Collect all test results into a single manifest to store in a file. This allows uploading / comparing test results
// across different runs.
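Both versions of the action depend on the same sentinel protocol: each test run prints its JSON results between `--test output start--` and `--test output end--` markers, and `NEXT_INTEGRATION_TEST: true` marks log chunks that contain test data. A minimal, self-contained sketch of that extraction; the sample log and the payload field are hypothetical, chosen only to illustrate the shape:

// Hypothetical job log; real logs are produced by run-tests.js in the workflow below.
const sampleJobLog = [
  "NEXT_INTEGRATION_TEST: true",
  "some unrelated output",
  "--test output start--",
  '{"numFailedTests": 1}',
  "--test output end--",
].join("\n");

// Mirrors the extraction in getFailedJobResults: take everything between the
// start and end markers, then JSON-parse it.
const payload = sampleJobLog
  .split("--test output start--")
  .pop()
  ?.split("--test output end--")
  ?.shift()
  ?.trim();

const testData = payload ? JSON.parse(payload) : null;
console.log(testData?.numFailedTests); // 1
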
48 changes: 47 additions & 1 deletion .github/workflows/nextjs-integration-test.yml
@@ -225,10 +225,56 @@ jobs:
RECORD_REPLAY_METADATA_TEST_RUN_TITLE: testIntegration / Group ${{ matrix.group }}
NEXT_INTEGRATION_TEST: true
test_flaky_subset:
# This job name is used by the GitHub action that collects test results. Do not change it
# without updating ./.github/actions/next-integration-stat to match the new name.
name: Next.js integration test (FLAKY_SUBSET)
runs-on: ubuntu-latest
needs: [setup_nextjs]
env:
# Enabling backtrace makes snapshot tests fail
RUST_BACKTRACE: 0
NEXT_TELEMETRY_DISABLED: 1
# Path to the next-dev binary located in the **docker container** image.
NEXT_DEV_BIN: /work/next-dev
FAILED_TEST_LIST_PATH: /work/integration-test-data/test-results/main/failed-test-path-list.json
# Glob pattern to run specific tests with --turbo.
NEXT_DEV_TEST_GLOB: "*"
# pnpm version should match what upstream next.js uses
PNPM_VERSION: 7.24.3
# List of test files to run that are expected to always pass. The whole test suite should pass.
TEST_FILES_LIST: |
test/development/acceptance-app/dynamic-error.test.ts \
test/development/acceptance-app/unsupported-app-features.test.ts \
test/development/acceptance-app/ReactRefresh.test.ts
strategy:
matrix:
node: [16]

steps:
- uses: actions/cache/restore@v3
id: restore-build
with:
path: ./*
key: ${{ inputs.version }}-${{ github.sha }}
fail-on-cache-miss: true

- run: |
docker run -i -v nextjs-test-volume:/volume --rm loomchild/volume-backup restore < volume.tar.bz2
docker run --rm --mount src=nextjs-test-volume,dst=/work mcr.microsoft.com/playwright:v1.28.1-focal /bin/bash -c "cd /work && ls && curl https://install-node.vercel.app/v${{ matrix.node }} | FORCE=1 bash && node -v && npm i -g pnpm@${PNPM_VERSION} && __INTERNAL_CUSTOM_TURBOPACK_BINARY=${NEXT_DEV_BIN} __INTERNAL_NEXT_DEV_TEST_TURBO_GLOB_MATCH=${NEXT_DEV_TEST_GLOB} NEXT_TEST_CONTINUE_ON_ERROR=TRUE NEXT_E2E_TEST_TIMEOUT=240000 NEXT_TEST_JOB=1 NEXT_TEST_MODE=dev xvfb-run node run-tests.js --type development -c 1 $TEST_FILES_LIST >> /proc/1/fd/1"
name: Run test/development
# This should not fail, but if it does, collect the results to update the dashboard.
continue-on-error: true
env:
# Marker used to parse log output; do not delete or change it.
NEXT_INTEGRATION_TEST: true
# Collect integration test results from execute_tests and
# store them as a GitHub artifact for the next step to consume.
collect_nextjs_integration_stat:
needs: [test_dev, test_dev_e2e, test_cna, test_integration]
needs:
[test_dev, test_dev_e2e, test_cna, test_integration, test_flaky_subset]
name: Next.js integration test status report
runs-on: ubuntu-latest
permissions:
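The warning comment on test_flaky_subset exists because the stat action routes results by substring-matching the job's display name. A tiny sketch of that coupling; the simplified Job type is illustrative:

// Simplified sketch of how the collector keys off the workflow job's display name.
interface Job {
  name: string;
}

const job: Job = { name: "Next.js integration test (FLAKY_SUBSET)" };

// Mirrors the branch in getFailedJobResults: flaky-subset jobs go into the
// flakyMonitorJobResults bucket; everything else goes through collectFailedTestResults.
const isFlakySubsetMonitor = job.name.includes("FLAKY_SUBSET");
console.log(isFlakySubsetMonitor); // true; renaming the job silently breaks this routing
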
2 changes: 1 addition & 1 deletion cli/cmd/turbo/version.go
@@ -1,3 +1,3 @@
package main

const turboVersion = "1.8.7-canary.1"
const turboVersion = "1.8.7"
8 changes: 2 additions & 6 deletions crates/node-file-trace/src/lib.rs
@@ -42,7 +42,7 @@ use turbopack::{
use turbopack_cli_utils::issue::{ConsoleUiVc, IssueSeverityCliOption, LogOptions};
use turbopack_core::{
asset::{Asset, AssetVc, AssetsVc},
compile_time_info::{CompileTimeDefinesVc, CompileTimeInfo},
compile_time_info::CompileTimeInfo,
context::{AssetContext, AssetContextVc},
environment::{EnvironmentIntention, EnvironmentVc, ExecutionEnvironment, NodeJsEnvironment},
issue::{IssueContextExt, IssueReporter, IssueSeverity, IssueVc},
@@ -646,11 +646,7 @@ async fn create_module_asset(
)),
Value::new(EnvironmentIntention::Api),
);
let compile_time_info = CompileTimeInfo {
environment: env,
defines: CompileTimeDefinesVc::empty(),
}
.cell();
let compile_time_info = CompileTimeInfo::builder(env).cell();
let glob_mappings = vec![
(
root,
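The lib.rs change swaps an explicit struct literal, which had to spell out an empty defines field, for CompileTimeInfo::builder(env), letting the builder supply defaults. For illustration only, here is the same builder-with-defaults pattern sketched in TypeScript; the names mirror the Rust change but are not turbopack's actual API:

// Illustrative builder that defaults optional fields, mirroring the shape of
// the CompileTimeInfo change above (not turbopack's real API).
interface CompileTimeInfo {
  environment: string;
  defines: Record<string, string>;
}

class CompileTimeInfoBuilder {
  // Empty default, playing the role of CompileTimeDefinesVc::empty().
  private defines: Record<string, string> = {};

  constructor(private environment: string) {}

  define(key: string, value: string): this {
    this.defines[key] = value;
    return this;
  }

  build(): CompileTimeInfo {
    return { environment: this.environment, defines: this.defines };
  }
}

// Before: every field spelled out at the call site.
const explicit: CompileTimeInfo = { environment: "node", defines: {} };

// After: the builder supplies the empty-defines default.
const viaBuilder = new CompileTimeInfoBuilder("node").build();
console.log(JSON.stringify(explicit) === JSON.stringify(viaBuilder)); // true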