From 0dcccf6aa828810b179e06dfff2e56df142ed7ed Mon Sep 17 00:00:00 2001 From: Inphi Date: Wed, 28 Aug 2024 22:26:43 -0400 Subject: [PATCH 01/19] cannon: Fix stack patching (#11632) * cannon: Fix stack patching And add `memprofilerate=0` to envp * Update cannon/mipsevm/program/patch.go Co-authored-by: protolambda * cleanup argv/envp string ptrs * nit * fix envar name * Update cannon/mipsevm/program/patch.go Co-authored-by: mbaxter * align op-program arg0 --------- Co-authored-by: protolambda Co-authored-by: mbaxter --- cannon/mipsevm/program/patch.go | 34 +++++++++++-------- cannon/mipsevm/tests/evm_common_test.go | 44 +++++++++++++++++++++++++ cannon/testdata/example/entry/go.mod | 3 ++ cannon/testdata/example/entry/main.go | 30 +++++++++++++++++ 4 files changed, 97 insertions(+), 14 deletions(-) create mode 100644 cannon/testdata/example/entry/go.mod create mode 100644 cannon/testdata/example/entry/main.go diff --git a/cannon/mipsevm/program/patch.go b/cannon/mipsevm/program/patch.go index 52a262fee585..46b75a69ff1c 100644 --- a/cannon/mipsevm/program/patch.go +++ b/cannon/mipsevm/program/patch.go @@ -47,16 +47,11 @@ func PatchGo(f *elf.File, st mipsevm.FPVMState) error { })); err != nil { return fmt.Errorf("failed to patch Go runtime.gcenable: %w", err) } - case "runtime.MemProfileRate": - if err := st.GetMemory().SetMemoryRange(uint32(s.Value), bytes.NewReader(make([]byte, 4))); err != nil { // disable mem profiling, to avoid a lot of unnecessary floating point ops - return err - } } } return nil } -// TODO(cp-903) Consider setting envar "GODEBUG=memprofilerate=0" for go programs to disable memprofiling, instead of patching it out in PatchGo() func PatchStack(st mipsevm.FPVMState) error { // setup stack pointer sp := uint32(0x7f_ff_d0_00) @@ -73,16 +68,27 @@ func PatchStack(st mipsevm.FPVMState) error { } // init argc, argv, aux on stack - storeMem(sp+4*1, 0x42) // argc = 0 (argument count) - storeMem(sp+4*2, 0x35) // argv[n] = 0 (terminating argv) - 
storeMem(sp+4*3, 0) // envp[term] = 0 (no env vars) - storeMem(sp+4*4, 6) // auxv[0] = _AT_PAGESZ = 6 (key) - storeMem(sp+4*5, 4096) // auxv[1] = page size of 4 KiB (value) - (== minPhysPageSize) - storeMem(sp+4*6, 25) // auxv[2] = AT_RANDOM - storeMem(sp+4*7, sp+4*9) // auxv[3] = address of 16 bytes containing random value - storeMem(sp+4*8, 0) // auxv[term] = 0 + storeMem(sp+4*0, 1) // argc = 1 (argument count) + storeMem(sp+4*1, sp+4*21) // argv[0] + storeMem(sp+4*2, 0) // argv[1] = terminating + storeMem(sp+4*3, sp+4*14) // envp[0] = x (offset to first env var) + storeMem(sp+4*4, 0) // envp[1] = terminating + storeMem(sp+4*5, 6) // auxv[0] = _AT_PAGESZ = 6 (key) + storeMem(sp+4*6, 4096) // auxv[1] = page size of 4 KiB (value) - (== minPhysPageSize) + storeMem(sp+4*7, 25) // auxv[2] = AT_RANDOM + storeMem(sp+4*8, sp+4*10) // auxv[3] = address of 16 bytes containing random value + storeMem(sp+4*9, 0) // auxv[term] = 0 + + _ = st.GetMemory().SetMemoryRange(sp+4*10, bytes.NewReader([]byte("4;byfairdiceroll"))) // 16 bytes of "randomness" + + // append 4 extra zero bytes to end at 4-byte alignment + envar := append([]byte("GODEBUG=memprofilerate=0"), 0x0, 0x0, 0x0, 0x0) + _ = st.GetMemory().SetMemoryRange(sp+4*14, bytes.NewReader(envar)) - _ = st.GetMemory().SetMemoryRange(sp+4*9, bytes.NewReader([]byte("4;byfairdiceroll"))) // 16 bytes of "randomness" + // 24 bytes for GODEBUG=memprofilerate=0 + 4 null bytes + // Then append program name + 2 null bytes for 4-byte alignment + programName := append([]byte("op-program"), 0x0, 0x0) + _ = st.GetMemory().SetMemoryRange(sp+4*21, bytes.NewReader(programName)) return nil } diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 8294cb12c3df..445ed5031d2d 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -591,3 +591,47 @@ func TestClaimEVM(t *testing.T) { }) } } + +func TestEntryEVM(t *testing.T) { + var tracer *tracing.Hooks // 
no-tracer by default, but see test_util.MarkdownTracer + versions := GetMipsVersionTestCases(t) + + for _, v := range versions { + t.Run(v.Name, func(t *testing.T) { + evm := testutil.NewMIPSEVM(v.Contracts) + evm.SetTracer(tracer) + testutil.LogStepFailureAtCleanup(t, evm) + + var stdOutBuf, stdErrBuf bytes.Buffer + elfFile := "../../testdata/example/bin/entry.elf" + goVm := v.ElfVMFactory(t, elfFile, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger()) + state := goVm.GetState() + + start := time.Now() + for i := 0; i < 400_000; i++ { + curStep := goVm.GetState().GetStep() + if goVm.GetState().GetExited() { + break + } + insn := state.GetMemory().GetMemory(state.GetPC()) + if i%10_000 == 0 { // avoid spamming test logs, we are executing many steps + t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.GetStep(), state.GetPC(), insn) + } + + stepWitness, err := goVm.Step(true) + require.NoError(t, err) + evmPost := evm.Step(t, stepWitness, curStep, v.StateHashFn) + // verify the post-state matches. 
+ goPost, _ := goVm.GetState().EncodeWitness() + require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), + "mipsevm produced different state than EVM") + } + end := time.Now() + delta := end.Sub(start) + t.Logf("test took %s, %d instructions, %s per instruction", delta, state.GetStep(), delta/time.Duration(state.GetStep())) + + require.True(t, state.GetExited(), "must complete program") + require.Equal(t, uint8(0), state.GetExitCode(), "exit with 0") + }) + } +} diff --git a/cannon/testdata/example/entry/go.mod b/cannon/testdata/example/entry/go.mod new file mode 100644 index 000000000000..2e4d29124f54 --- /dev/null +++ b/cannon/testdata/example/entry/go.mod @@ -0,0 +1,3 @@ +module entry + +go 1.21 diff --git a/cannon/testdata/example/entry/main.go b/cannon/testdata/example/entry/main.go new file mode 100644 index 000000000000..78866f88abeb --- /dev/null +++ b/cannon/testdata/example/entry/main.go @@ -0,0 +1,30 @@ +package main + +import ( + "os" + "runtime" +) + +func main() { + if len(os.Args) != 1 { + panic("expected 1 arg") + } + if os.Args[0] != "op-program" { + panic("unexpected arg0") + } + + var memProfileRate bool + env := os.Environ() + for _, env := range env { + if env != "GODEBUG=memprofilerate=0" { + panic("invalid envar") + } + memProfileRate = true + } + if !memProfileRate { + panic("memProfileRate env is not set") + } + if runtime.MemProfileRate != 0 { + panic("runtime.MemProfileRate is non-zero") + } +} From aebf669c108240298e14bd0e29068a18e0b0a52e Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 28 Aug 2024 23:41:10 -0600 Subject: [PATCH 02/19] Archive contract artifacts (#11626) To demonstrate how we can make our deployments more modular, this PR proposes archiving smart contract artifacts as tarballs that get uploaded to GCS. This allows deployment tools to use precompiled artifacts rather than generating them on-the fly. 
The archives are named after the hash of all Solidity files in the contracts-bedrock folder, including those in `lib/`, plus some additional metadata files like `foundry.toml` and `semver-lock.json`. See `calculate-checksum.sh` for details on how the algorithm works. I'm open to feedback around what should make up the checksum. Since the artifacts are content-addressable, this PR also updates the CI pipeline to download the artifacts from GCS prior to running `pnpm monorepo`. When the Solidity codebase doesn't change, this allows the `pnpm monorepo` job to skip compiling Solidity altogether. While this won't work as well when we're actively modifying the Solidity codebase, since the hash will change, it does provide a modest speedup in CI. --- .circleci/config.yml | 27 ++++++++++ .../scripts/ops/calculate-checksum.sh | 25 +++++++++ .../scripts/ops/publish-artifacts.sh | 54 +++++++++++++++++++ .../scripts/ops/pull-artifacts.sh | 41 ++++++++++++++ 4 files changed, 147 insertions(+) create mode 100644 packages/contracts-bedrock/scripts/ops/calculate-checksum.sh create mode 100644 packages/contracts-bedrock/scripts/ops/publish-artifacts.sh create mode 100644 packages/contracts-bedrock/scripts/ops/pull-artifacts.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 5917b7f91d01..82b3119acc8b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -194,6 +194,10 @@ jobs: - run: name: print forge version command: forge --version + - run: + name: Pull artifacts + command: bash scripts/ops/pull-artifacts.sh + working_directory: packages/contracts-bedrock - run: name: Build contracts environment: @@ -1461,6 +1465,24 @@ jobs: working_directory: ./packages/contracts-bedrock - notify-failures-on-develop + publish-contract-artifacts: + docker: + - image: <> + resource_class: medium + steps: + - gcp-cli/install + - gcp-oidc-authenticate: + gcp_cred_config_file_path: /root/gcp_cred_config.json + oidc_token_file_path: /root/oidc_token.json + 
service_account_email: GCP_SERVICE_CONTRACTS_ACCOUNT_EMAIL + - checkout + - attach_workspace: { at: "." } + - install-contracts-dependencies + - run: + name: Publish artifacts + command: bash scripts/ops/publish-artifacts.sh + working_directory: packages/contracts-bedrock + workflows: main: when: @@ -1475,6 +1497,11 @@ workflows: jobs: - pnpm-monorepo: name: pnpm-monorepo + - publish-contract-artifacts: + requires: + - pnpm-monorepo + context: + - oplabs-gcr-release - contracts-bedrock-tests - contracts-bedrock-coverage - contracts-bedrock-checks: diff --git a/packages/contracts-bedrock/scripts/ops/calculate-checksum.sh b/packages/contracts-bedrock/scripts/ops/calculate-checksum.sh new file mode 100644 index 000000000000..6278e37ffa74 --- /dev/null +++ b/packages/contracts-bedrock/scripts/ops/calculate-checksum.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -euo pipefail + +echoerr() { + echo "$@" 1>&2 +} + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CONTRACTS_DIR="$SCRIPT_DIR/../.." + +cd "$CONTRACTS_DIR" + +echoerr "> Calculating contracts checksum..." + +find . -type f -name '*.sol' -exec sha256sum {} + > manifest.txt +sha256sum semver-lock.json >> manifest.txt +sha256sum foundry.toml >> manifest.txt +# need to specify the locale to ensure consistent sorting across platforms +LC_ALL=C sort -o manifest.txt manifest.txt +checksum=$(sha256sum manifest.txt | awk '{print $1}') +rm manifest.txt +echoerr "> Done." 
+ +echo -n "$checksum" \ No newline at end of file diff --git a/packages/contracts-bedrock/scripts/ops/publish-artifacts.sh b/packages/contracts-bedrock/scripts/ops/publish-artifacts.sh new file mode 100644 index 000000000000..309b2d818eda --- /dev/null +++ b/packages/contracts-bedrock/scripts/ops/publish-artifacts.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -euo pipefail + +echoerr() { + echo "$@" 1>&2 +} + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CONTRACTS_DIR="$SCRIPT_DIR/../.." +DEPLOY_BUCKET="oplabs-contract-artifacts" + +cd "$CONTRACTS_DIR" + +# ensure that artifacts exists and is non-empty +if [ ! -d "forge-artifacts" ] || [ -z "$(ls -A forge-artifacts)" ]; then + echoerr "> No forge-artifacts directory found." + exit 1 +fi + +if [ ! -d "artifacts" ] || [ -z "$(ls -A artifacts)" ]; then + echoerr "> No artifacts directory found." + exit 1 +fi + +checksum=$(bash scripts/ops/calculate-checksum.sh) + +echoerr "> Checking for existing artifacts..." +exists=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/$DEPLOY_BUCKET/artifacts-v1-$checksum.tar.gz" || echo "fail") + +if [ "$exists" != "fail" ]; then + echoerr "> Existing artifacts found, nothing to do." + exit 0 +fi + +echoerr "> Archiving artifacts..." +archive_name="artifacts-v1-$checksum.tar.gz" + +# use gtar on darwin +if [[ "$OSTYPE" == "darwin"* ]]; then + tar="gtar" +else + tar="tar" +fi + +"$tar" -czf "$archive_name" artifacts forge-artifacts cache +du -sh "$archive_name" | awk '{$1=$1};1' # trim leading whitespace +echoerr "> Done." + +echoerr "> Uploading artifacts to GCS..." +gcloud storage cp "$archive_name" "gs://$DEPLOY_BUCKET/$archive_name" +echoerr "> Done." 
+ +rm "$archive_name" \ No newline at end of file diff --git a/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh b/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh new file mode 100644 index 000000000000..f119b087f38c --- /dev/null +++ b/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -euo pipefail + +echoerr() { + echo "$@" 1>&2 +} + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CONTRACTS_DIR="$SCRIPT_DIR/../.." + +cd "$CONTRACTS_DIR" + +checksum=$(bash scripts/ops/calculate-checksum.sh) +archive_name="artifacts-v1-$checksum.tar.gz" + +echoerr "> Checking for existing artifacts..." +exists=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name" || echo "fail") + +if [ "$exists" == "fail" ]; then + echoerr "> No existing artifacts found, exiting." + exit 0 +fi + +echoerr "> Cleaning up existing artifacts..." +rm -rf artifacts +rm -rf forge-artifacts +rm -rf cache +echoerr "> Done." + +echoerr "> Found existing artifacts. Downloading..." +curl -o "$archive_name" "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name" +echoerr "> Done." + +echoerr "> Extracting existing artifacts..." +tar -xzvf "$archive_name" +echoerr "> Done." + +echoerr "> Cleaning up." +rm "$archive_name" +echoerr "> Done." 
\ No newline at end of file From e53a86aca813aad18914a3fa4b3ad9c4f2599c3f Mon Sep 17 00:00:00 2001 From: mbaxter Date: Thu, 29 Aug 2024 10:25:42 -0400 Subject: [PATCH 03/19] cannon: Add MTCannon-specific differential tests (#11605) * cannon: Implement multithreaded clone fuzz test * cannon: Add more clone evm tests * cannon: Add evm test for GetTID syscall * cannon: Add evm test for SysExit * cannon: Add evm test for popping exited threads from the stack * cannon: Fix futex wait handling, add evm test * cannon: Add evm test for handling waiting thread * cannon: Add test utils for defining / validating MTState expectations * cannon: Add tests for futex wake, wake traversal * cannon: Add test for SysYield * cannon: Add SysOpen test, todos * cannon: Add test for SchedQuantum preemption, fix inconsistency * cannon: Add tests for noop, unsupported syscalls * cannon: Remove duplicate constants * cannon: Add tests for unsupported futex ops * cannon: Group traversal tests, fix TestEVM_WakeupTraversalStep * cannon: Add tests for nanosleep * cannon: Add additional testcase for wakeup traversal * cannon: Tweak futex wake tests * cannon: Update mt fuzz test to use new test utils * cannon: Rename contructor method for consistency * cannon: Add some simple tests for ExpectedMTState util * cannon: Add another validation test * cannon: Move syscall lists to tests where they're used * cannon: Add comment * cannon: Extract some evm test helpers * cannon: Cleanup - use require.Equalf for formatting * cannon: Rename test util to AssertEVMReverts * cannon: Add GetThreadStacks helper * cannon: Add a few more traversal tests --- cannon/mipsevm/exec/mips_syscalls.go | 2 +- cannon/mipsevm/multithreaded/mips.go | 8 +- cannon/mipsevm/multithreaded/state.go | 2 +- .../multithreaded/testutil/expectations.go | 203 ++++ .../testutil/expectations_test.go | 179 ++++ .../multithreaded/testutil/mutators.go | 87 ++ .../mipsevm/multithreaded/testutil/state.go | 73 +- 
.../mipsevm/multithreaded/testutil/thread.go | 166 ++++ .../mipsevm/singlethreaded/testutil/state.go | 24 +- cannon/mipsevm/tests/evm_common_test.go | 66 +- .../mipsevm/tests/evm_multithreaded_test.go | 906 +++++++++++++++++- cannon/mipsevm/tests/fuzz_evm_common_test.go | 73 +- .../tests/fuzz_evm_multithreaded_test.go | 59 +- .../tests/fuzz_evm_singlethreaded_test.go | 2 +- cannon/mipsevm/testutil/mips.go | 33 + cannon/mipsevm/testutil/rand.go | 53 + cannon/mipsevm/testutil/state.go | 68 +- packages/contracts-bedrock/semver-lock.json | 4 +- .../contracts-bedrock/src/cannon/MIPS2.sol | 14 +- .../src/cannon/libraries/MIPSSyscalls.sol | 5 +- .../contracts-bedrock/test/cannon/MIPS2.t.sol | 2 +- 21 files changed, 1748 insertions(+), 281 deletions(-) create mode 100644 cannon/mipsevm/multithreaded/testutil/expectations.go create mode 100644 cannon/mipsevm/multithreaded/testutil/expectations_test.go create mode 100644 cannon/mipsevm/multithreaded/testutil/mutators.go create mode 100644 cannon/mipsevm/multithreaded/testutil/thread.go create mode 100644 cannon/mipsevm/testutil/rand.go diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index af205ab5f46a..d2f49f279573 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -15,7 +15,6 @@ import ( // Syscall codes const ( SysMmap = 4090 - SysMunmap = 4091 SysBrk = 4045 SysClone = 4120 SysExitGroup = 4246 @@ -32,6 +31,7 @@ const ( // Noop Syscall codes const ( + SysMunmap = 4091 SysGetAffinity = 4240 SysMadvise = 4218 SysRtSigprocmask = 4195 diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index 7949bbe6c771..8f7b594fcab8 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -102,13 +102,13 @@ func (m *InstrumentedState) handleSyscall() error { // args: a0 = addr, a1 = op, a2 = val, a3 = timeout switch a1 { case exec.FutexWaitPrivate: - thread.FutexAddr = a0 
m.memoryTracker.TrackMemAccess(a0) mem := m.state.Memory.GetMemory(a0) if mem != a2 { v0 = exec.SysErrorSignal v1 = exec.MipsEAGAIN } else { + thread.FutexAddr = a0 thread.FutexVal = a2 if a3 == 0 { thread.FutexTimeoutStep = exec.FutexNoTimeout @@ -242,11 +242,11 @@ func (m *InstrumentedState) mipsStep() error { if m.state.StepsSinceLastContextSwitch >= exec.SchedQuantum { // Force a context switch as this thread has been active too long - if m.state.threadCount() > 1 { + if m.state.ThreadCount() > 1 { // Log if we're hitting our context switch limit - only matters if we have > 1 thread if m.log.Enabled(context.Background(), log.LevelTrace) { msg := fmt.Sprintf("Thread has reached maximum execution steps (%v) - preempting.", exec.SchedQuantum) - m.log.Trace(msg, "threadId", thread.ThreadId, "threadCount", m.state.threadCount(), "pc", thread.Cpu.PC) + m.log.Trace(msg, "threadId", thread.ThreadId, "threadCount", m.state.ThreadCount(), "pc", thread.Cpu.PC) } } m.preemptThread(thread) @@ -339,5 +339,5 @@ func (m *InstrumentedState) popThread() { } func (m *InstrumentedState) lastThreadRemaining() bool { - return m.state.threadCount() == 1 + return m.state.ThreadCount() == 1 } diff --git a/cannon/mipsevm/multithreaded/state.go b/cannon/mipsevm/multithreaded/state.go index 6602357747bf..17d54c241a5e 100644 --- a/cannon/mipsevm/multithreaded/state.go +++ b/cannon/mipsevm/multithreaded/state.go @@ -215,7 +215,7 @@ func (s *State) EncodeThreadProof() []byte { return out } -func (s *State) threadCount() int { +func (s *State) ThreadCount() int { return len(s.LeftThreadStack) + len(s.RightThreadStack) } diff --git a/cannon/mipsevm/multithreaded/testutil/expectations.go b/cannon/mipsevm/multithreaded/testutil/expectations.go new file mode 100644 index 000000000000..f2e1bbb200f0 --- /dev/null +++ b/cannon/mipsevm/multithreaded/testutil/expectations.go @@ -0,0 +1,203 @@ +package testutil + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" +) + +// ExpectedMTState is a test utility that basically stores a copy of a state that can be explicitly mutated +// to define an expected post-state. The post-state is then validated with ExpectedMTState.Validate(t, postState) +type ExpectedMTState struct { + PreimageKey common.Hash + PreimageOffset uint32 + Heap uint32 + ExitCode uint8 + Exited bool + Step uint64 + LastHint hexutil.Bytes + MemoryRoot common.Hash + // Threading-related expectations + StepsSinceLastContextSwitch uint64 + Wakeup uint32 + TraverseRight bool + NextThreadId uint32 + ThreadCount int + RightStackSize int + LeftStackSize int + prestateActiveThreadId uint32 + prestateActiveThreadOrig ExpectedThreadState // Cached for internal use + ActiveThreadId uint32 + threadExpectations map[uint32]*ExpectedThreadState +} + +type ExpectedThreadState struct { + ThreadId uint32 + ExitCode uint8 + Exited bool + FutexAddr uint32 + FutexVal uint32 + FutexTimeoutStep uint64 + PC uint32 + NextPC uint32 + HI uint32 + LO uint32 + Registers [32]uint32 + Dropped bool +} + +func NewExpectedMTState(fromState *multithreaded.State) *ExpectedMTState { + currentThread := fromState.GetCurrentThread() + + expectedThreads := make(map[uint32]*ExpectedThreadState) + for _, t := range GetAllThreads(fromState) { + expectedThreads[t.ThreadId] = newExpectedThreadState(t) + } + + return &ExpectedMTState{ + // General Fields + PreimageKey: fromState.GetPreimageKey(), + PreimageOffset: fromState.GetPreimageOffset(), + Heap: fromState.GetHeap(), + ExitCode: fromState.GetExitCode(), + Exited: fromState.GetExited(), + Step: fromState.GetStep(), + LastHint: fromState.GetLastHint(), + MemoryRoot: fromState.GetMemory().MerkleRoot(), + // Thread-related global fields + StepsSinceLastContextSwitch: fromState.StepsSinceLastContextSwitch, + Wakeup: fromState.Wakeup, + 
TraverseRight: fromState.TraverseRight, + NextThreadId: fromState.NextThreadId, + ThreadCount: fromState.ThreadCount(), + RightStackSize: len(fromState.RightThreadStack), + LeftStackSize: len(fromState.LeftThreadStack), + // ThreadState expectations + prestateActiveThreadId: currentThread.ThreadId, + prestateActiveThreadOrig: *newExpectedThreadState(currentThread), // Cache prestate thread for internal use + ActiveThreadId: currentThread.ThreadId, + threadExpectations: expectedThreads, + } +} + +func newExpectedThreadState(fromThread *multithreaded.ThreadState) *ExpectedThreadState { + return &ExpectedThreadState{ + ThreadId: fromThread.ThreadId, + ExitCode: fromThread.ExitCode, + Exited: fromThread.Exited, + FutexAddr: fromThread.FutexAddr, + FutexVal: fromThread.FutexVal, + FutexTimeoutStep: fromThread.FutexTimeoutStep, + PC: fromThread.Cpu.PC, + NextPC: fromThread.Cpu.NextPC, + HI: fromThread.Cpu.HI, + LO: fromThread.Cpu.LO, + Registers: fromThread.Registers, + Dropped: false, + } +} + +func (e *ExpectedMTState) ExpectStep() { + // Set some standard expectations for a normal step + e.Step += 1 + e.PrestateActiveThread().PC += 4 + e.PrestateActiveThread().NextPC += 4 + e.StepsSinceLastContextSwitch += 1 +} + +func (e *ExpectedMTState) ExpectPreemption(preState *multithreaded.State) { + e.ActiveThreadId = FindNextThread(preState).ThreadId + e.StepsSinceLastContextSwitch = 0 + if preState.TraverseRight { + e.TraverseRight = e.RightStackSize > 1 + e.RightStackSize -= 1 + e.LeftStackSize += 1 + } else { + e.TraverseRight = e.LeftStackSize == 1 + e.LeftStackSize -= 1 + e.RightStackSize += 1 + } +} + +func (e *ExpectedMTState) ExpectNewThread() *ExpectedThreadState { + newThreadId := e.NextThreadId + e.NextThreadId += 1 + e.ThreadCount += 1 + + // Clone expectations from prestate active thread's original state (bf changing any expectations) + newThread := &ExpectedThreadState{} + *newThread = e.prestateActiveThreadOrig + + newThread.ThreadId = newThreadId + 
e.threadExpectations[newThreadId] = newThread + + return newThread +} + +func (e *ExpectedMTState) ActiveThread() *ExpectedThreadState { + return e.threadExpectations[e.ActiveThreadId] +} + +func (e *ExpectedMTState) PrestateActiveThread() *ExpectedThreadState { + return e.threadExpectations[e.prestateActiveThreadId] +} + +func (e *ExpectedMTState) Thread(threadId uint32) *ExpectedThreadState { + return e.threadExpectations[threadId] +} + +func (e *ExpectedMTState) Validate(t require.TestingT, actualState *multithreaded.State) { + require.Equalf(t, e.PreimageKey, actualState.GetPreimageKey(), "Expect preimageKey = %v", e.PreimageKey) + require.Equalf(t, e.PreimageOffset, actualState.GetPreimageOffset(), "Expect preimageOffset = %v", e.PreimageOffset) + require.Equalf(t, e.Heap, actualState.GetHeap(), "Expect heap = 0x%x", e.Heap) + require.Equalf(t, e.ExitCode, actualState.GetExitCode(), "Expect exitCode = 0x%x", e.ExitCode) + require.Equalf(t, e.Exited, actualState.GetExited(), "Expect exited = %v", e.Exited) + require.Equalf(t, e.Step, actualState.GetStep(), "Expect step = %d", e.Step) + require.Equalf(t, e.LastHint, actualState.GetLastHint(), "Expect lastHint = %v", e.LastHint) + require.Equalf(t, e.MemoryRoot, common.Hash(actualState.GetMemory().MerkleRoot()), "Expect memory root = %v", e.MemoryRoot) + // Thread-related global fields + require.Equalf(t, e.StepsSinceLastContextSwitch, actualState.StepsSinceLastContextSwitch, "Expect StepsSinceLastContextSwitch = %v", e.StepsSinceLastContextSwitch) + require.Equalf(t, e.Wakeup, actualState.Wakeup, "Expect Wakeup = %v", e.Wakeup) + require.Equalf(t, e.TraverseRight, actualState.TraverseRight, "Expect TraverseRight = %v", e.TraverseRight) + require.Equalf(t, e.NextThreadId, actualState.NextThreadId, "Expect NextThreadId = %v", e.NextThreadId) + require.Equalf(t, e.ThreadCount, actualState.ThreadCount(), "Expect thread count = %v", e.ThreadCount) + require.Equalf(t, e.RightStackSize, 
len(actualState.RightThreadStack), "Expect right stack size = %v", e.RightStackSize) + require.Equalf(t, e.LeftStackSize, len(actualState.LeftThreadStack), "Expect right stack size = %v", e.LeftStackSize) + + // Check active thread + activeThread := actualState.GetCurrentThread() + require.Equal(t, e.ActiveThreadId, activeThread.ThreadId) + // Check all threads + expectedThreadCount := 0 + for tid, exp := range e.threadExpectations { + actualThread := FindThread(actualState, tid) + isActive := tid == activeThread.ThreadId + if exp.Dropped { + require.Nil(t, actualThread, "Thread %v should have been dropped", tid) + } else { + require.NotNil(t, actualThread, "Could not find thread matching expected thread with id %v", tid) + e.validateThread(t, exp, actualThread, isActive) + expectedThreadCount++ + } + } + require.Equal(t, expectedThreadCount, actualState.ThreadCount(), "Thread expectations do not match thread count") +} + +func (e *ExpectedMTState) validateThread(t require.TestingT, et *ExpectedThreadState, actual *multithreaded.ThreadState, isActive bool) { + threadInfo := fmt.Sprintf("tid = %v, active = %v", actual.ThreadId, isActive) + require.Equalf(t, et.ThreadId, actual.ThreadId, "Expect ThreadId = 0x%x (%v)", et.ThreadId, threadInfo) + require.Equalf(t, et.PC, actual.Cpu.PC, "Expect PC = 0x%x (%v)", et.PC, threadInfo) + require.Equalf(t, et.NextPC, actual.Cpu.NextPC, "Expect nextPC = 0x%x (%v)", et.NextPC, threadInfo) + require.Equalf(t, et.HI, actual.Cpu.HI, "Expect HI = 0x%x (%v)", et.HI, threadInfo) + require.Equalf(t, et.LO, actual.Cpu.LO, "Expect LO = 0x%x (%v)", et.LO, threadInfo) + require.Equalf(t, et.Registers, actual.Registers, "Expect registers to match (%v)", threadInfo) + require.Equalf(t, et.ExitCode, actual.ExitCode, "Expect exitCode = %v (%v)", et.ExitCode, threadInfo) + require.Equalf(t, et.Exited, actual.Exited, "Expect exited = %v (%v)", et.Exited, threadInfo) + require.Equalf(t, et.FutexAddr, actual.FutexAddr, "Expect futexAddr = %v 
(%v)", et.FutexAddr, threadInfo) + require.Equalf(t, et.FutexVal, actual.FutexVal, "Expect futexVal = %v (%v)", et.FutexVal, threadInfo) + require.Equalf(t, et.FutexTimeoutStep, actual.FutexTimeoutStep, "Expect futexTimeoutStep = %v (%v)", et.FutexTimeoutStep, threadInfo) +} diff --git a/cannon/mipsevm/multithreaded/testutil/expectations_test.go b/cannon/mipsevm/multithreaded/testutil/expectations_test.go new file mode 100644 index 000000000000..15cba8b00469 --- /dev/null +++ b/cannon/mipsevm/multithreaded/testutil/expectations_test.go @@ -0,0 +1,179 @@ +package testutil + +import ( + "fmt" + "testing" + + //"github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" +) + +type ExpectationMutator func(e *ExpectedMTState, st *multithreaded.State) + +func TestValidate_shouldCatchMutations(t *testing.T) { + states := []*multithreaded.State{ + RandomState(0), + RandomState(1), + RandomState(2), + } + var emptyHash [32]byte + someThread := RandomThread(123) + + cases := []struct { + name string + mut ExpectationMutator + }{ + {name: "PreimageKey", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.PreimageKey = emptyHash }}, + {name: "PreimageOffset", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.PreimageOffset += 1 }}, + {name: "Heap", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.Heap += 1 }}, + {name: "ExitCode", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.ExitCode += 1 }}, + {name: "Exited", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.Exited = !e.Exited }}, + {name: "Step", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.Step += 1 }}, + {name: "LastHint", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.LastHint = []byte{7, 8, 9, 10} }}, + {name: "MemoryRoot", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.MemoryRoot = emptyHash }}, + {name: "StepsSinceLastContextSwitch", mut: 
func(e *ExpectedMTState, st *multithreaded.State) { e.StepsSinceLastContextSwitch += 1 }}, + {name: "Wakeup", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.Wakeup += 1 }}, + {name: "TraverseRight", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.TraverseRight = !e.TraverseRight }}, + {name: "NextThreadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.NextThreadId += 1 }}, + {name: "ThreadCount", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.ThreadCount += 1 }}, + {name: "RightStackSize", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.RightStackSize += 1 }}, + {name: "LeftStackSize", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.LeftStackSize += 1 }}, + {name: "ActiveThreadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.ActiveThreadId += 1 }}, + {name: "Empty thread expectations", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations = map[uint32]*ExpectedThreadState{} + }}, + {name: "Mismatched thread expectations", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations = map[uint32]*ExpectedThreadState{someThread.ThreadId: newExpectedThreadState(someThread)} + }}, + {name: "Active threadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].ThreadId += 1 + }}, + {name: "Active thread exitCode", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].ExitCode += 1 + }}, + {name: "Active thread exited", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].Exited = !st.GetCurrentThread().Exited + }}, + {name: "Active thread futexAddr", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].FutexAddr += 1 + }}, + {name: "Active thread futexVal", mut: func(e *ExpectedMTState, st 
*multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].FutexVal += 1 + }}, + {name: "Active thread FutexTimeoutStep", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].FutexTimeoutStep += 1 + }}, + {name: "Active thread PC", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].PC += 1 + }}, + {name: "Active thread NextPC", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].NextPC += 1 + }}, + {name: "Active thread HI", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].HI += 1 + }}, + {name: "Active thread LO", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].LO += 1 + }}, + {name: "Active thread Registers", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].Registers[0] += 1 + }}, + {name: "Active thread dropped", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[st.GetCurrentThread().ThreadId].Dropped = true + }}, + {name: "Inactive threadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].ThreadId += 1 + }}, + {name: "Inactive thread exitCode", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].ExitCode += 1 + }}, + {name: "Inactive thread exited", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].Exited = !FindNextThread(st).Exited + }}, + {name: "Inactive thread futexAddr", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].FutexAddr += 1 + }}, + {name: "Inactive thread futexVal", mut: func(e *ExpectedMTState, st 
*multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].FutexVal += 1 + }}, + {name: "Inactive thread FutexTimeoutStep", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].FutexTimeoutStep += 1 + }}, + {name: "Inactive thread PC", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].PC += 1 + }}, + {name: "Inactive thread NextPC", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].NextPC += 1 + }}, + {name: "Inactive thread HI", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].HI += 1 + }}, + {name: "Inactive thread LO", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].LO += 1 + }}, + {name: "Inactive thread Registers", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].Registers[0] += 1 + }}, + {name: "Inactive thread dropped", mut: func(e *ExpectedMTState, st *multithreaded.State) { + e.threadExpectations[FindNextThread(st).ThreadId].Dropped = true + }}, + } + for _, c := range cases { + for i, state := range states { + testName := fmt.Sprintf("%v (state #%v)", c.name, i) + t.Run(testName, func(t *testing.T) { + expected := NewExpectedMTState(state) + c.mut(expected, state) + + // We should detect the change and fail + mockT := &MockTestingT{} + expected.Validate(mockT, state) + mockT.RequireFailed(t) + }) + } + + } +} + +func TestValidate_shouldPassUnchangedExpectations(t *testing.T) { + states := []*multithreaded.State{ + RandomState(0), + RandomState(1), + RandomState(2), + } + + for i, state := range states { + testName := fmt.Sprintf("State #%v", i) + t.Run(testName, func(t *testing.T) { + expected := NewExpectedMTState(state) + + mockT := &MockTestingT{} + expected.Validate(mockT, state) + 
mockT.RequireNoFailure(t) + }) + } +} + +type MockTestingT struct { + errCount int +} + +var _ require.TestingT = (*MockTestingT)(nil) + +func (m *MockTestingT) Errorf(format string, args ...interface{}) { + m.errCount += 1 +} + +func (m *MockTestingT) FailNow() { + m.errCount += 1 +} + +func (m *MockTestingT) RequireFailed(t require.TestingT) { + require.Greater(t, m.errCount, 0, "Should have tracked a failure") +} + +func (m *MockTestingT) RequireNoFailure(t require.TestingT) { + require.Equal(t, m.errCount, 0, "Should not have tracked a failure") +} diff --git a/cannon/mipsevm/multithreaded/testutil/mutators.go b/cannon/mipsevm/multithreaded/testutil/mutators.go new file mode 100644 index 000000000000..30a97691609e --- /dev/null +++ b/cannon/mipsevm/multithreaded/testutil/mutators.go @@ -0,0 +1,87 @@ +package testutil + +import ( + "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" +) + +type StateMutatorMultiThreaded struct { + state *multithreaded.State +} + +var _ testutil.StateMutator = (*StateMutatorMultiThreaded)(nil) + +func NewStateMutatorMultiThreaded(state *multithreaded.State) testutil.StateMutator { + return &StateMutatorMultiThreaded{state: state} +} + +func (m *StateMutatorMultiThreaded) Randomize(randSeed int64) { + r := rand.New(rand.NewSource(randSeed)) + + step := testutil.RandStep(r) + + m.state.PreimageKey = testutil.RandHash(r) + m.state.PreimageOffset = r.Uint32() + m.state.Heap = r.Uint32() + m.state.Step = step + m.state.LastHint = testutil.RandHint(r) + m.state.StepsSinceLastContextSwitch = uint64(r.Intn(exec.SchedQuantum)) + + // Randomize threads + activeStackThreads := r.Intn(2) + 1 + inactiveStackThreads := r.Intn(3) + traverseRight := r.Intn(2) == 1 + SetupThreads(randSeed+1, 
m.state, traverseRight, activeStackThreads, inactiveStackThreads) +} + +func (m *StateMutatorMultiThreaded) SetHI(val uint32) { + m.state.GetCurrentThread().Cpu.HI = val +} + +func (m *StateMutatorMultiThreaded) SetLO(val uint32) { + m.state.GetCurrentThread().Cpu.LO = val +} + +func (m *StateMutatorMultiThreaded) SetExitCode(val uint8) { + m.state.ExitCode = val +} + +func (m *StateMutatorMultiThreaded) SetExited(val bool) { + m.state.Exited = val +} + +func (m *StateMutatorMultiThreaded) SetPC(val uint32) { + thread := m.state.GetCurrentThread() + thread.Cpu.PC = val +} + +func (m *StateMutatorMultiThreaded) SetHeap(val uint32) { + m.state.Heap = val +} + +func (m *StateMutatorMultiThreaded) SetNextPC(val uint32) { + thread := m.state.GetCurrentThread() + thread.Cpu.NextPC = val +} + +func (m *StateMutatorMultiThreaded) SetLastHint(val hexutil.Bytes) { + m.state.LastHint = val +} + +func (m *StateMutatorMultiThreaded) SetPreimageKey(val common.Hash) { + m.state.PreimageKey = val +} + +func (m *StateMutatorMultiThreaded) SetPreimageOffset(val uint32) { + m.state.PreimageOffset = val +} + +func (m *StateMutatorMultiThreaded) SetStep(val uint64) { + m.state.Step = val +} diff --git a/cannon/mipsevm/multithreaded/testutil/state.go b/cannon/mipsevm/multithreaded/testutil/state.go index d7896005eafd..b240c95c0bba 100644 --- a/cannon/mipsevm/multithreaded/testutil/state.go +++ b/cannon/mipsevm/multithreaded/testutil/state.go @@ -1,69 +1,24 @@ package testutil import ( - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) -type StateMutatorMultiThreaded struct { - state *multithreaded.State +func GetMtState(t require.TestingT, vm mipsevm.FPVM) *multithreaded.State { + state := vm.GetState() + mtState, 
ok := state.(*multithreaded.State) + if !ok { + require.Fail(t, "Failed to cast FPVMState to multithreaded State type") + } + return mtState } -var _ testutil.StateMutator = (*StateMutatorMultiThreaded)(nil) - -func NewStateMutatorMultiThreaded(state *multithreaded.State) testutil.StateMutator { - return &StateMutatorMultiThreaded{state: state} -} - -func (m *StateMutatorMultiThreaded) SetHI(val uint32) { - m.state.GetCurrentThread().Cpu.HI = val -} - -func (m *StateMutatorMultiThreaded) SetLO(val uint32) { - m.state.GetCurrentThread().Cpu.LO = val -} - -func (m *StateMutatorMultiThreaded) SetExitCode(val uint8) { - m.state.ExitCode = val -} - -func (m *StateMutatorMultiThreaded) SetExited(val bool) { - m.state.Exited = val -} - -func (m *StateMutatorMultiThreaded) SetPC(val uint32) { - thread := m.state.GetCurrentThread() - thread.Cpu.PC = val -} - -func (m *StateMutatorMultiThreaded) SetHeap(val uint32) { - m.state.Heap = val -} - -func (m *StateMutatorMultiThreaded) SetNextPC(val uint32) { - thread := m.state.GetCurrentThread() - thread.Cpu.NextPC = val -} - -func (m *StateMutatorMultiThreaded) SetLastHint(val hexutil.Bytes) { - m.state.LastHint = val -} - -func (m *StateMutatorMultiThreaded) SetPreimageKey(val common.Hash) { - m.state.PreimageKey = val -} - -func (m *StateMutatorMultiThreaded) SetPreimageOffset(val uint32) { - m.state.PreimageOffset = val -} - -func (m *StateMutatorMultiThreaded) SetStep(val uint64) { - m.state.Step = val -} - -func (m *StateMutatorMultiThreaded) GetRegistersRef() *[32]uint32 { - return m.state.GetRegistersRef() +func RandomState(seed int) *multithreaded.State { + state := multithreaded.CreateEmptyState() + mut := StateMutatorMultiThreaded{state} + mut.Randomize(int64(seed)) + return state } diff --git a/cannon/mipsevm/multithreaded/testutil/thread.go b/cannon/mipsevm/multithreaded/testutil/thread.go new file mode 100644 index 000000000000..217ab94e8185 --- /dev/null +++ b/cannon/mipsevm/multithreaded/testutil/thread.go @@ -0,0 
+1,166 @@ +package testutil + +import ( + "math/rand" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" +) + +func RandomThread(randSeed int64) *multithreaded.ThreadState { + r := rand.New(rand.NewSource(randSeed)) + thread := multithreaded.CreateEmptyThread() + + pc := testutil.RandPC(r) + + thread.Registers = *testutil.RandRegisters(r) + thread.Cpu.PC = pc + thread.Cpu.NextPC = pc + 4 + thread.Cpu.HI = r.Uint32() + thread.Cpu.LO = r.Uint32() + + return thread +} + +func InitializeSingleThread(randSeed int, state *multithreaded.State, traverseRight bool) { + singleThread := RandomThread(int64(randSeed)) + + state.NextThreadId = singleThread.ThreadId + 1 + state.TraverseRight = traverseRight + if traverseRight { + state.RightThreadStack = []*multithreaded.ThreadState{singleThread} + state.LeftThreadStack = []*multithreaded.ThreadState{} + } else { + state.RightThreadStack = []*multithreaded.ThreadState{} + state.LeftThreadStack = []*multithreaded.ThreadState{singleThread} + } +} + +func SetupThreads(randomSeed int64, state *multithreaded.State, traverseRight bool, activeStackSize, otherStackSize int) { + var activeStack, otherStack []*multithreaded.ThreadState + + tid := uint32(0) + for i := 0; i < activeStackSize; i++ { + thread := RandomThread(randomSeed + int64(i)) + thread.ThreadId = tid + activeStack = append(activeStack, thread) + tid++ + } + + for i := 0; i < otherStackSize; i++ { + thread := RandomThread(randomSeed + int64(i+activeStackSize)) + thread.ThreadId = tid + otherStack = append(otherStack, thread) + tid++ + } + + state.NextThreadId = tid + state.TraverseRight = traverseRight + if traverseRight { + state.RightThreadStack = activeStack + state.LeftThreadStack = otherStack + } else { + state.LeftThreadStack = activeStack + state.RightThreadStack = otherStack + } +} + +type ThreadIterator struct { + left []*multithreaded.ThreadState + right 
[]*multithreaded.ThreadState + traverseRight bool +} + +func NewThreadIterator(state *multithreaded.State) ThreadIterator { + return ThreadIterator{ + left: state.LeftThreadStack, + right: state.RightThreadStack, + traverseRight: state.TraverseRight, + } +} + +func (i *ThreadIterator) currentThread() *multithreaded.ThreadState { + var currentThread *multithreaded.ThreadState + if i.traverseRight { + currentThread = i.right[len(i.right)-1] + } else { + currentThread = i.left[len(i.left)-1] + } + return currentThread +} + +func (i *ThreadIterator) Next() *multithreaded.ThreadState { + rightLen := len(i.right) + leftLen := len(i.left) + activeThread := i.currentThread() + + if i.traverseRight { + i.right = i.right[:rightLen-1] + i.left = append(i.left, activeThread) + i.traverseRight = len(i.right) > 0 + } else { + i.left = i.left[:leftLen-1] + i.right = append(i.right, activeThread) + i.traverseRight = len(i.left) == 0 + } + + return i.currentThread() +} + +// FindNextThread Finds the next thread in line according to thread traversal logic +func FindNextThread(state *multithreaded.State) *multithreaded.ThreadState { + it := NewThreadIterator(state) + return it.Next() +} + +type ThreadFilter func(thread *multithreaded.ThreadState) bool + +func FindNextThreadFiltered(state *multithreaded.State, filter ThreadFilter) *multithreaded.ThreadState { + it := NewThreadIterator(state) + + // Worst case - walk all the way left, then all the way back right + // Example w 3 threads: 1,2,3,3,2,1,0 -> 7 steps to find thread 0 + maxIterations := state.ThreadCount()*2 + 1 + for i := 0; i < maxIterations; i++ { + next := it.Next() + if filter(next) { + return next + } + } + + return nil +} + +func FindNextThreadExcluding(state *multithreaded.State, threadId uint32) *multithreaded.ThreadState { + return FindNextThreadFiltered(state, func(t *multithreaded.ThreadState) bool { + return t.ThreadId != threadId + }) +} + +func FindThread(state *multithreaded.State, threadId uint32) 
*multithreaded.ThreadState { + for _, t := range GetAllThreads(state) { + if t.ThreadId == threadId { + return t + } + } + return nil +} + +func GetAllThreads(state *multithreaded.State) []*multithreaded.ThreadState { + allThreads := make([]*multithreaded.ThreadState, 0, state.ThreadCount()) + allThreads = append(allThreads, state.RightThreadStack[:]...) + allThreads = append(allThreads, state.LeftThreadStack[:]...) + + return allThreads +} + +func GetThreadStacks(state *multithreaded.State) (activeStack, inactiveStack []*multithreaded.ThreadState) { + if state.TraverseRight { + activeStack = state.RightThreadStack + inactiveStack = state.LeftThreadStack + } else { + activeStack = state.LeftThreadStack + inactiveStack = state.RightThreadStack + } + return activeStack, inactiveStack +} diff --git a/cannon/mipsevm/singlethreaded/testutil/state.go b/cannon/mipsevm/singlethreaded/testutil/state.go index 9d8909eee772..f09b56175af9 100644 --- a/cannon/mipsevm/singlethreaded/testutil/state.go +++ b/cannon/mipsevm/singlethreaded/testutil/state.go @@ -1,6 +1,8 @@ package testutil import ( + "math/rand" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -12,6 +14,24 @@ type StateMutatorSingleThreaded struct { state *singlethreaded.State } +func (m *StateMutatorSingleThreaded) Randomize(randSeed int64) { + r := rand.New(rand.NewSource(randSeed)) + + pc := testutil.RandPC(r) + step := testutil.RandStep(r) + + m.state.PreimageKey = testutil.RandHash(r) + m.state.PreimageOffset = r.Uint32() + m.state.Cpu.PC = pc + m.state.Cpu.NextPC = pc + 4 + m.state.Cpu.HI = r.Uint32() + m.state.Cpu.LO = r.Uint32() + m.state.Heap = r.Uint32() + m.state.Step = step + m.state.LastHint = testutil.RandHint(r) + m.state.Registers = *testutil.RandRegisters(r) +} + var _ testutil.StateMutator = (*StateMutatorSingleThreaded)(nil) func NewStateMutatorSingleThreaded(state *singlethreaded.State) testutil.StateMutator { @@ -61,7 +81,3 @@ func (m 
*StateMutatorSingleThreaded) SetPreimageOffset(val uint32) { func (m *StateMutatorSingleThreaded) SetStep(val uint64) { m.state.Step = val } - -func (m *StateMutatorSingleThreaded) GetRegistersRef() *[32]uint32 { - return m.state.GetRegistersRef() -} diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 445ed5031d2d..063c63c83c6c 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -10,13 +10,10 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/tracing" - "github.com/ethereum/go-ethereum/core/vm" "github.com/stretchr/testify/require" - "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" @@ -143,10 +140,10 @@ func TestEVMSingleStep(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPC(tt.pc), testutil.WithNextPC(tt.nextPC)) state := goVm.GetState() state.GetMemory().SetMemory(tt.pc, tt.insn) - curStep := state.GetStep() + step := state.GetStep() // Setup expectations - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = tt.expectNextPC @@ -159,15 +156,7 @@ func TestEVMSingleStep(t *testing.T) { // Check expectations expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evm.SetTracer(tracer) - testutil.LogStepFailureAtCleanup(t, evm) - - evmPost := evm.Step(t, stepWitness, curStep, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state 
than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) }) } } @@ -209,7 +198,7 @@ func TestEVM_MMap(t *testing.T) { state.GetRegistersRef()[5] = c.size step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -232,15 +221,7 @@ func TestEVM_MMap(t *testing.T) { // Check expectations expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evm.SetTracer(tracer) - testutil.LogStepFailureAtCleanup(t, evm) - - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) }) } } @@ -420,9 +401,9 @@ func TestEVMSysWriteHint(t *testing.T) { err := state.GetMemory().SetMemoryRange(uint32(tt.memOffset), bytes.NewReader(tt.hintData)) require.NoError(t, err) state.GetMemory().SetMemory(state.GetPC(), insn) - curStep := state.GetStep() + step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -435,15 +416,7 @@ func TestEVMSysWriteHint(t *testing.T) { expected.Validate(t, state) require.Equal(t, tt.expectedHints, oracle.Hints()) - - evm := testutil.NewMIPSEVM(v.Contracts) - evm.SetTracer(tracer) - testutil.LogStepFailureAtCleanup(t, evm) - - evmPost := evm.Step(t, stepWitness, curStep, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, 
v.StateHashFn, v.Contracts, tracer) }) } } @@ -451,7 +424,6 @@ func TestEVMSysWriteHint(t *testing.T) { func TestEVMFault(t *testing.T) { var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer - sender := common.Address{0x13, 0x37} versions := GetMipsVersionTestCases(t) cases := []struct { @@ -468,9 +440,6 @@ func TestEVMFault(t *testing.T) { for _, tt := range cases { testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) t.Run(testName, func(t *testing.T) { - env, evmState := testutil.NewEVMEnv(v.Contracts) - env.Config.Tracer = tracer - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithNextPC(tt.nextPC)) state := goVm.GetState() state.GetMemory().SetMemory(0, tt.insn) @@ -478,20 +447,7 @@ func TestEVMFault(t *testing.T) { state.GetRegistersRef()[31] = testutil.EndAddr require.Panics(t, func() { _, _ = goVm.Step(true) }) - - insnProof := state.GetMemory().MerkleProof(0) - encodedWitness, _ := state.EncodeWitness() - stepWitness := &mipsevm.StepWitness{ - State: encodedWitness, - ProofData: insnProof[:], - } - input := testutil.EncodeStepInput(t, stepWitness, mipsevm.LocalContext{}, v.Contracts.Artifacts.MIPS) - startingGas := uint64(30_000_000) - - _, _, err := env.Call(vm.AccountRef(sender), v.Contracts.Addresses.MIPS, input, startingGas, common.U2560) - require.EqualValues(t, err, vm.ErrExecutionReverted) - logs := evmState.Logs() - require.Equal(t, 0, len(logs)) + testutil.AssertEVMReverts(t, state, v.Contracts, tracer) }) } } @@ -514,7 +470,7 @@ func TestHelloEVM(t *testing.T) { start := time.Now() for i := 0; i < 400_000; i++ { - curStep := goVm.GetState().GetStep() + step := goVm.GetState().GetStep() if goVm.GetState().GetExited() { break } @@ -525,7 +481,7 @@ func TestHelloEVM(t *testing.T) { stepWitness, err := goVm.Step(true) require.NoError(t, err) - evmPost := evm.Step(t, stepWitness, curStep, v.StateHashFn) + evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) // verify the post-state 
matches. // TODO: maybe more readable to decode the evmPost state, and do attribute-wise comparison. goPost, _ := goVm.GetState().EncodeWitness() diff --git a/cannon/mipsevm/tests/evm_multithreaded_test.go b/cannon/mipsevm/tests/evm_multithreaded_test.go index 32d90b74211c..ebd5dc904e28 100644 --- a/cannon/mipsevm/tests/evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded_test.go @@ -1,20 +1,24 @@ package tests import ( + "fmt" "os" + "slices" "testing" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/tracing" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" + mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) -func TestEVM_CloneFlags(t *testing.T) { +func TestEVM_SysClone_FlagHandling(t *testing.T) { contracts := testutil.TestContractsSetup(t, testutil.MipsMultithreaded) var tracer *tracing.Hooks @@ -34,27 +38,32 @@ func TestEVM_CloneFlags(t *testing.T) { {"multiple unsupported flags", exec.CloneUntraced | exec.CloneParentSettid, false}, } - const insn = uint32(0x00_00_00_0C) // syscall instruction - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { state := multithreaded.CreateEmptyState() - state.Memory.SetMemory(state.GetPC(), insn) + state.Memory.SetMemory(state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = exec.SysClone // Set syscall number - state.GetRegistersRef()[4] = tt.flags // Set first argument + state.GetRegistersRef()[4] = c.flags // Set first argument curStep := state.Step var err error var stepWitness *mipsevm.StepWitness us := multithreaded.NewInstrumentedState(state, nil, os.Stdout, os.Stderr, nil) - if 
!tt.valid { + if !c.valid { // The VM should exit stepWitness, err = us.Step(true) require.NoError(t, err) + require.Equal(t, curStep+1, state.GetStep()) require.Equal(t, true, us.GetState().GetExited()) require.Equal(t, uint8(mipsevm.VMStatusPanic), us.GetState().GetExitCode()) + require.Equal(t, 1, state.ThreadCount()) } else { stepWitness, err = us.Step(true) require.NoError(t, err) + require.Equal(t, curStep+1, state.GetStep()) + require.Equal(t, false, us.GetState().GetExited()) + require.Equal(t, uint8(0), us.GetState().GetExitCode()) + require.Equal(t, 2, state.ThreadCount()) } evm := testutil.NewMIPSEVM(contracts) @@ -68,3 +77,886 @@ func TestEVM_CloneFlags(t *testing.T) { }) } } + +func TestEVM_SysClone_Successful(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + traverseRight bool + }{ + {"traverse left", false}, + {"traverse right", true}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + stackPtr := uint32(100) + + goVm, state, contracts := setup(t, i) + mttestutil.InitializeSingleThread(i*333, state, c.traverseRight) + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysClone // the syscall number + state.GetRegistersRef()[4] = exec.ValidCloneFlags // a0 - first argument, clone flags + state.GetRegistersRef()[5] = stackPtr // a1 - the stack pointer + step := state.GetStep() + + // Sanity-check assumptions + require.Equal(t, uint32(1), state.NextThreadId) + + // Setup expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expectedNewThread := expected.ExpectNewThread() + expected.ActiveThreadId = expectedNewThread.ThreadId + expected.StepsSinceLastContextSwitch = 0 + if c.traverseRight { + expected.RightStackSize += 1 + } else { + expected.LeftStackSize += 1 + } + // Original thread expectations + expected.PrestateActiveThread().PC = state.GetCpu().NextPC + expected.PrestateActiveThread().NextPC = state.GetCpu().NextPC + 4 + 
expected.PrestateActiveThread().Registers[2] = 1 + expected.PrestateActiveThread().Registers[7] = 0 + // New thread expectations + expectedNewThread.PC = state.GetCpu().NextPC + expectedNewThread.NextPC = state.GetCpu().NextPC + 4 + expectedNewThread.ThreadId = 1 + expectedNewThread.Registers[2] = 0 + expectedNewThread.Registers[7] = 0 + expectedNewThread.Registers[29] = stackPtr + + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + expected.Validate(t, state) + activeStack, inactiveStack := mttestutil.GetThreadStacks(state) + require.Equal(t, 2, len(activeStack)) + require.Equal(t, 0, len(inactiveStack)) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysGetTID(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + threadId uint32 + }{ + {"zero", 0}, + {"non-zero", 11}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*789) + mttestutil.InitializeSingleThread(i*789, state, false) + + state.GetCurrentThread().ThreadId = c.threadId + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysGetTID // Set syscall number + step := state.Step + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.ExpectStep() + expected.ActiveThread().Registers[2] = c.threadId + expected.ActiveThread().Registers[7] = 0 + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysExit(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + threadCount int + shouldExitGlobally bool + }{ 
+ // If we exit the last thread, the whole process should exit + {name: "one thread", threadCount: 1, shouldExitGlobally: true}, + {name: "two threads ", threadCount: 2}, + {name: "three threads ", threadCount: 3}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + exitCode := uint8(3) + + goVm, state, contracts := setup(t, i*133) + mttestutil.SetupThreads(int64(i*1111), state, i%2 == 0, c.threadCount, 0) + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysExit // Set syscall number + state.GetRegistersRef()[4] = uint32(exitCode) // The first argument (exit code) + step := state.Step + + // Set up expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.StepsSinceLastContextSwitch += 1 + expected.ActiveThread().Exited = true + expected.ActiveThread().ExitCode = exitCode + if c.shouldExitGlobally { + expected.Exited = true + expected.ExitCode = exitCode + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_PopExitedThread(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + traverseRight bool + activeStackThreadCount int + expectTraverseRightPostState bool + }{ + {name: "traverse right", traverseRight: true, activeStackThreadCount: 2, expectTraverseRightPostState: true}, + {name: "traverse right, switch directions", traverseRight: true, activeStackThreadCount: 1, expectTraverseRightPostState: false}, + {name: "traverse left", traverseRight: false, activeStackThreadCount: 2, expectTraverseRightPostState: false}, + {name: "traverse left, switch directions", traverseRight: false, activeStackThreadCount: 1, expectTraverseRightPostState: true}, + } + + for i, c := 
range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*133) + mttestutil.SetupThreads(int64(i*222), state, c.traverseRight, c.activeStackThreadCount, 1) + step := state.Step + + // Setup thread to be dropped + threadToPop := state.GetCurrentThread() + threadToPop.Exited = true + threadToPop.ExitCode = 1 + + // Set up expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.ActiveThreadId = mttestutil.FindNextThreadExcluding(state, threadToPop.ThreadId).ThreadId + expected.StepsSinceLastContextSwitch = 0 + expected.ThreadCount -= 1 + expected.TraverseRight = c.expectTraverseRightPostState + expected.Thread(threadToPop.ThreadId).Dropped = true + if c.traverseRight { + expected.RightStackSize -= 1 + } else { + expected.LeftStackSize -= 1 + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysFutex_WaitPrivate(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + address uint32 + targetValue uint32 + actualValue uint32 + timeout uint32 + shouldFail bool + shouldSetTimeout bool + }{ + {name: "successful wait, no timeout", address: 0x1234, targetValue: 0x01, actualValue: 0x01}, + {name: "memory mismatch, no timeout", address: 0x1200, targetValue: 0x01, actualValue: 0x02, shouldFail: true}, + {name: "successful wait w timeout", address: 0x1234, targetValue: 0x01, actualValue: 0x01, timeout: 1000000, shouldSetTimeout: true}, + {name: "memory mismatch w timeout", address: 0x1200, targetValue: 0x01, actualValue: 0x02, timeout: 2000000, shouldFail: true}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*1234) + step := state.GetStep() + + 
state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.Memory.SetMemory(c.address, c.actualValue) + state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[4] = c.address + state.GetRegistersRef()[5] = exec.FutexWaitPrivate + state.GetRegistersRef()[6] = c.targetValue + state.GetRegistersRef()[7] = c.timeout + + // Setup expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.StepsSinceLastContextSwitch += 1 + if c.shouldFail { + expected.ActiveThread().PC = state.GetCpu().NextPC + expected.ActiveThread().NextPC = state.GetCpu().NextPC + 4 + expected.ActiveThread().Registers[2] = exec.SysErrorSignal + expected.ActiveThread().Registers[7] = exec.MipsEAGAIN + } else { + // PC and return registers should not update on success, updates happen when wait completes + expected.ActiveThread().FutexAddr = c.address + expected.ActiveThread().FutexVal = c.targetValue + expected.ActiveThread().FutexTimeoutStep = exec.FutexNoTimeout + if c.shouldSetTimeout { + expected.ActiveThread().FutexTimeoutStep = step + exec.FutexTimeoutSteps + 1 + } + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysFutex_WakePrivate(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + address uint32 + activeThreadCount int + inactiveThreadCount int + traverseRight bool + expectTraverseRight bool + }{ + {name: "Traverse right", address: 0x6789, activeThreadCount: 2, inactiveThreadCount: 1, traverseRight: true}, + {name: "Traverse right, no left threads", address: 0x6789, activeThreadCount: 2, inactiveThreadCount: 0, traverseRight: true}, + {name: "Traverse right, single thread", address: 0x6789, activeThreadCount: 1, 
inactiveThreadCount: 0, traverseRight: true}, + {name: "Traverse left", address: 0x6789, activeThreadCount: 2, inactiveThreadCount: 1, traverseRight: false}, + {name: "Traverse left, switch directions", address: 0x6789, activeThreadCount: 1, inactiveThreadCount: 1, traverseRight: false, expectTraverseRight: true}, + {name: "Traverse left, single thread", address: 0x6789, activeThreadCount: 1, inactiveThreadCount: 0, traverseRight: false, expectTraverseRight: true}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*1122) + mttestutil.SetupThreads(int64(i*2244), state, c.traverseRight, c.activeThreadCount, c.inactiveThreadCount) + step := state.Step + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[4] = c.address + state.GetRegistersRef()[5] = exec.FutexWakePrivate + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.ExpectStep() + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + expected.Wakeup = c.address + expected.ExpectPreemption(state) + expected.TraverseRight = c.expectTraverseRight + if c.traverseRight != c.expectTraverseRight { + // If we preempt the current thread and then switch directions, the same + // thread will remain active + expected.ActiveThreadId = state.GetCurrentThread().ThreadId + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { + var tracer *tracing.Hooks + + // From: https://github.com/torvalds/linux/blob/5be63fc19fcaa4c236b307420483578a56986a37/include/uapi/linux/futex.h + const 
FUTEX_PRIVATE_FLAG = 128 + const FUTEX_WAIT = 0 + const FUTEX_WAKE = 1 + const FUTEX_FD = 2 + const FUTEX_REQUEUE = 3 + const FUTEX_CMP_REQUEUE = 4 + const FUTEX_WAKE_OP = 5 + const FUTEX_LOCK_PI = 6 + const FUTEX_UNLOCK_PI = 7 + const FUTEX_TRYLOCK_PI = 8 + const FUTEX_WAIT_BITSET = 9 + const FUTEX_WAKE_BITSET = 10 + const FUTEX_WAIT_REQUEUE_PI = 11 + const FUTEX_CMP_REQUEUE_PI = 12 + const FUTEX_LOCK_PI2 = 13 + + unsupportedFutexOps := map[string]uint32{ + "FUTEX_WAIT": FUTEX_WAIT, + "FUTEX_WAKE": FUTEX_WAKE, + "FUTEX_FD": FUTEX_FD, + "FUTEX_REQUEUE": FUTEX_REQUEUE, + "FUTEX_CMP_REQUEUE": FUTEX_CMP_REQUEUE, + "FUTEX_WAKE_OP": FUTEX_WAKE_OP, + "FUTEX_LOCK_PI": FUTEX_LOCK_PI, + "FUTEX_UNLOCK_PI": FUTEX_UNLOCK_PI, + "FUTEX_TRYLOCK_PI": FUTEX_TRYLOCK_PI, + "FUTEX_WAIT_BITSET": FUTEX_WAIT_BITSET, + "FUTEX_WAKE_BITSET": FUTEX_WAKE_BITSET, + "FUTEX_WAIT_REQUEUE_PI": FUTEX_WAIT_REQUEUE_PI, + "FUTEX_CMP_REQUEUE_PI": FUTEX_CMP_REQUEUE_PI, + "FUTEX_LOCK_PI2": FUTEX_LOCK_PI2, + "FUTEX_REQUEUE_PRIVATE": (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG), + "FUTEX_CMP_REQUEUE_PRIVATE": (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG), + "FUTEX_WAKE_OP_PRIVATE": (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG), + "FUTEX_LOCK_PI_PRIVATE": (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG), + "FUTEX_LOCK_PI2_PRIVATE": (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG), + "FUTEX_UNLOCK_PI_PRIVATE": (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG), + "FUTEX_TRYLOCK_PI_PRIVATE": (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG), + "FUTEX_WAIT_BITSET_PRIVATE": (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG), + "FUTEX_WAKE_BITSET_PRIVATE": (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG), + "FUTEX_WAIT_REQUEUE_PI_PRIVATE": (FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG), + "FUTEX_CMP_REQUEUE_PI_PRIVATE": (FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG), + } + + for name, op := range unsupportedFutexOps { + t.Run(name, func(t *testing.T) { + goVm, state, contracts := setup(t, int(op)) + step := state.GetStep() + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + 
state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[5] = op + + // Setup expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.StepsSinceLastContextSwitch += 1 + expected.ActiveThread().PC = state.GetCpu().NextPC + expected.ActiveThread().NextPC = state.GetCpu().NextPC + 4 + expected.ActiveThread().Registers[2] = exec.SysErrorSignal + expected.ActiveThread().Registers[7] = exec.MipsEINVAL + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_SysYield(t *testing.T) { + runPreemptSyscall(t, "SysSchedYield", exec.SysSchedYield) +} + +func TestEVM_SysNanosleep(t *testing.T) { + runPreemptSyscall(t, "SysNanosleep", exec.SysNanosleep) +} + +func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { + var tracer *tracing.Hooks + cases := []struct { + name string + traverseRight bool + activeThreads int + inactiveThreads int + }{ + {name: "Last active thread", activeThreads: 1, inactiveThreads: 2}, + {name: "Only thread", activeThreads: 1, inactiveThreads: 0}, + {name: "Do not change directions", activeThreads: 2, inactiveThreads: 2}, + {name: "Do not change directions", activeThreads: 3, inactiveThreads: 0}, + } + + for i, c := range cases { + for _, traverseRight := range []bool{true, false} { + testName := fmt.Sprintf("%v: %v (traverseRight = %v)", syscallName, c.name, traverseRight) + t.Run(testName, func(t *testing.T) { + goVm, state, contracts := setup(t, i*789) + mttestutil.SetupThreads(int64(i*3259), state, traverseRight, c.activeThreads, c.inactiveThreads) + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = syscallNum // Set syscall number + step := state.Step + 
+ // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.ExpectStep() + expected.ExpectPreemption(state) + expected.PrestateActiveThread().Registers[2] = 0 + expected.PrestateActiveThread().Registers[7] = 0 + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } + } +} + +func TestEVM_SysOpen(t *testing.T) { + var tracer *tracing.Hooks + + goVm, state, contracts := setup(t, 5512) + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysOpen // Set syscall number + step := state.Step + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.ExpectStep() + expected.ActiveThread().Registers[2] = exec.SysErrorSignal + expected.ActiveThread().Registers[7] = exec.MipsEBADF + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) +} + +var NoopSyscalls = map[string]uint32{ + "SysGetAffinity": 4240, + "SysMadvise": 4218, + "SysRtSigprocmask": 4195, + "SysSigaltstack": 4206, + "SysRtSigaction": 4194, + "SysPrlimit64": 4338, + "SysClose": 4006, + "SysPread64": 4200, + "SysFstat64": 4215, + "SysOpenAt": 4288, + "SysReadlink": 4085, + "SysReadlinkAt": 4298, + "SysIoctl": 4054, + "SysEpollCreate1": 4326, + "SysPipe2": 4328, + "SysEpollCtl": 4249, + "SysEpollPwait": 4313, + "SysGetRandom": 4353, + "SysUname": 4122, + "SysStat64": 4213, + "SysGetuid": 4024, + "SysGetgid": 4047, + "SysLlseek": 4140, + "SysMinCore": 4217, + "SysTgkill": 4266, + "SysMunmap": 4091, + 
"SysSetITimer": 4104, + "SysTimerCreate": 4257, + "SysTimerSetTime": 4258, + "SysTimerDelete": 4261, + "SysClockGetTime": 4263, +} + +func TestEVM_NoopSyscall(t *testing.T) { + var tracer *tracing.Hooks + for noopName, noopVal := range NoopSyscalls { + t.Run(noopName, func(t *testing.T) { + goVm, state, contracts := setup(t, int(noopVal)) + + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = noopVal // Set syscall number + step := state.Step + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.ExpectStep() + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + + } +} + +func TestEVM_UnsupportedSyscall(t *testing.T) { + var tracer *tracing.Hooks + + var NoopSyscallNums = maps.Values(NoopSyscalls) + var SupportedSyscalls = []uint32{exec.SysMmap, exec.SysBrk, exec.SysClone, exec.SysExitGroup, exec.SysRead, exec.SysWrite, exec.SysFcntl, exec.SysExit, exec.SysSchedYield, exec.SysGetTID, exec.SysFutex, exec.SysOpen, exec.SysNanosleep} + unsupportedSyscalls := make([]uint32, 0, 400) + for i := 4000; i < 4400; i++ { + candidate := uint32(i) + if slices.Contains(SupportedSyscalls, candidate) || slices.Contains(NoopSyscallNums, candidate) { + continue + } + unsupportedSyscalls = append(unsupportedSyscalls, candidate) + } + + for i, syscallNum := range unsupportedSyscalls { + testName := fmt.Sprintf("Unsupported syscallNum %v", syscallNum) + t.Run(testName, func(t *testing.T) { + goVm, state, contracts := setup(t, i*3434) + // Setup basic getThreadId syscall instruction + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = syscallNum + 
+ // Set up post-state expectations + require.Panics(t, func() { _, _ = goVm.Step(true) }) + testutil.AssertEVMReverts(t, state, contracts, tracer) + }) + } +} + +func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + step uint64 + activeStackSize int + otherStackSize int + futexAddr uint32 + targetValue uint32 + actualValue uint32 + timeoutStep uint64 + shouldWakeup bool + shouldTimeout bool + }{ + {name: "Preempt, no timeout #1", step: 100, activeStackSize: 1, otherStackSize: 0, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x01, timeoutStep: exec.FutexNoTimeout}, + {name: "Preempt, no timeout #2", step: 100, activeStackSize: 1, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x01, timeoutStep: exec.FutexNoTimeout}, + {name: "Preempt, no timeout #3", step: 100, activeStackSize: 2, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x01, timeoutStep: exec.FutexNoTimeout}, + {name: "Preempt, with timeout #1", step: 100, activeStackSize: 2, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x01, timeoutStep: 101}, + {name: "Preempt, with timeout #2", step: 100, activeStackSize: 1, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x01, timeoutStep: 150}, + {name: "Wakeup, no timeout #1", step: 100, activeStackSize: 1, otherStackSize: 0, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x02, timeoutStep: exec.FutexNoTimeout, shouldWakeup: true}, + {name: "Wakeup, no timeout #2", step: 100, activeStackSize: 2, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x02, timeoutStep: exec.FutexNoTimeout, shouldWakeup: true}, + {name: "Wakeup with timeout #1", step: 100, activeStackSize: 2, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x01, actualValue: 0x02, timeoutStep: 100, shouldWakeup: true, shouldTimeout: true}, + {name: "Wakeup with timeout #2", step: 100, activeStackSize: 2, 
otherStackSize: 1, futexAddr: 0x100, targetValue: 0x02, actualValue: 0x02, timeoutStep: 100, shouldWakeup: true, shouldTimeout: true}, + {name: "Wakeup with timeout #3", step: 100, activeStackSize: 2, otherStackSize: 1, futexAddr: 0x100, targetValue: 0x02, actualValue: 0x02, timeoutStep: 50, shouldWakeup: true, shouldTimeout: true}, + } + + for _, c := range cases { + for i, traverseRight := range []bool{true, false} { + testName := fmt.Sprintf("%v (traverseRight=%v)", c.name, traverseRight) + t.Run(testName, func(t *testing.T) { + // Sanity check + if !c.shouldWakeup && c.shouldTimeout { + require.Fail(t, "Invalid test case - cannot expect a timeout with no wakeup") + } + + goVm, state, contracts := setup(t, i) + mttestutil.SetupThreads(int64(i*101), state, traverseRight, c.activeStackSize, c.otherStackSize) + state.Step = c.step + + activeThread := state.GetCurrentThread() + activeThread.FutexAddr = c.futexAddr + activeThread.FutexVal = c.targetValue + activeThread.FutexTimeoutStep = c.timeoutStep + state.GetMemory().SetMemory(c.futexAddr, c.actualValue) + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + if c.shouldWakeup { + expected.ActiveThread().FutexAddr = exec.FutexEmptyAddr + expected.ActiveThread().FutexVal = 0 + expected.ActiveThread().FutexTimeoutStep = 0 + // PC and return registers are updated onWaitComplete + expected.ActiveThread().PC = state.GetCpu().NextPC + expected.ActiveThread().NextPC = state.GetCpu().NextPC + 4 + if c.shouldTimeout { + expected.ActiveThread().Registers[2] = exec.SysErrorSignal + expected.ActiveThread().Registers[7] = exec.MipsETIMEDOUT + } else { + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + } + } else { + expected.ExpectPreemption(state) + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + 
expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, c.step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + + } + } +} + +func TestEVM_NormalTraversal_Full(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + threadCount int + }{ + {"1 thread", 1}, + {"2 threads", 2}, + {"3 threads", 3}, + } + + for i, c := range cases { + for _, traverseRight := range []bool{true, false} { + testName := fmt.Sprintf("%v (traverseRight = %v)", c.name, traverseRight) + t.Run(testName, func(t *testing.T) { + // Setup + goVm, state, contracts := setup(t, i*789) + mttestutil.SetupThreads(int64(i*2947), state, traverseRight, c.threadCount, 0) + // Put threads into a waiting state so that we just traverse through them + for _, thread := range mttestutil.GetAllThreads(state) { + thread.FutexAddr = 0x04 + thread.FutexTimeoutStep = exec.FutexNoTimeout + } + step := state.Step + + initialState := mttestutil.NewExpectedMTState(state) + + // Loop through all the threads to get back to the starting state + iterations := c.threadCount * 2 + for i := 0; i < iterations; i++ { + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.ExpectPreemption(state) + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + } + + // We should be back to the original state with only a few modifications + initialState.Step += uint64(iterations) + initialState.StepsSinceLastContextSwitch = 0 + initialState.Validate(t, state) + }) + } + } +} + +func TestEVM_WakeupTraversalStep(t *testing.T) { + wakeupAddr := uint32(0x1234) + wakeupVal := uint32(0x999) + var tracer *tracing.Hooks + cases := []struct { + name string + futexAddr uint32 + targetVal uint32 + 
traverseRight bool + activeStackSize int + otherStackSize int + shouldClearWakeup bool + shouldPreempt bool + }{ + {name: "Matching addr, not wakeable, first thread", futexAddr: wakeupAddr, targetVal: wakeupVal, traverseRight: false, activeStackSize: 3, otherStackSize: 0, shouldClearWakeup: true}, + {name: "Matching addr, wakeable, first thread", futexAddr: wakeupAddr, targetVal: wakeupVal + 1, traverseRight: false, activeStackSize: 3, otherStackSize: 0, shouldClearWakeup: true}, + {name: "Matching addr, not wakeable, last thread", futexAddr: wakeupAddr, targetVal: wakeupVal, traverseRight: true, activeStackSize: 1, otherStackSize: 2, shouldClearWakeup: true}, + {name: "Matching addr, wakeable, last thread", futexAddr: wakeupAddr, targetVal: wakeupVal + 1, traverseRight: true, activeStackSize: 1, otherStackSize: 2, shouldClearWakeup: true}, + {name: "Matching addr, not wakeable, intermediate thread", futexAddr: wakeupAddr, targetVal: wakeupVal, traverseRight: false, activeStackSize: 2, otherStackSize: 2, shouldClearWakeup: true}, + {name: "Matching addr, wakeable, intermediate thread", futexAddr: wakeupAddr, targetVal: wakeupVal + 1, traverseRight: true, activeStackSize: 2, otherStackSize: 2, shouldClearWakeup: true}, + {name: "Mismatched addr, last thread", futexAddr: wakeupAddr + 4, traverseRight: true, activeStackSize: 1, otherStackSize: 2, shouldPreempt: true, shouldClearWakeup: true}, + {name: "Mismatched addr", futexAddr: wakeupAddr + 4, traverseRight: true, activeStackSize: 2, otherStackSize: 2, shouldPreempt: true}, + {name: "Mismatched addr", futexAddr: wakeupAddr + 4, traverseRight: false, activeStackSize: 2, otherStackSize: 0, shouldPreempt: true}, + {name: "Mismatched addr", futexAddr: wakeupAddr + 4, traverseRight: false, activeStackSize: 1, otherStackSize: 0, shouldPreempt: true}, + {name: "Non-waiting thread", futexAddr: exec.FutexEmptyAddr, traverseRight: false, activeStackSize: 1, otherStackSize: 0, shouldPreempt: true}, + {name: "Non-waiting 
thread", futexAddr: exec.FutexEmptyAddr, traverseRight: true, activeStackSize: 2, otherStackSize: 1, shouldPreempt: true}, + {name: "Non-waiting thread, last thread", futexAddr: exec.FutexEmptyAddr, traverseRight: true, activeStackSize: 1, otherStackSize: 1, shouldPreempt: true, shouldClearWakeup: true}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*2000) + mttestutil.SetupThreads(int64(i*101), state, c.traverseRight, c.activeStackSize, c.otherStackSize) + step := state.Step + + state.Wakeup = wakeupAddr + state.GetMemory().SetMemory(wakeupAddr, wakeupVal) + activeThread := state.GetCurrentThread() + activeThread.FutexAddr = c.futexAddr + activeThread.FutexVal = c.targetVal + activeThread.FutexTimeoutStep = exec.FutexNoTimeout + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + if c.shouldClearWakeup { + expected.Wakeup = exec.FutexEmptyAddr + } + if c.shouldPreempt { + // Just preempt the current thread + expected.ExpectPreemption(state) + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func TestEVM_WakeupTraversal_Full(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + threadCount int + }{ + {"1 thread", 1}, + {"2 threads", 2}, + {"3 threads", 3}, + } + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + // Setup + goVm, state, contracts := setup(t, i*789) + mttestutil.SetupThreads(int64(i*2947), state, false, c.threadCount, 0) + state.Wakeup = 0x08 + step := state.Step + + initialState := mttestutil.NewExpectedMTState(state) + + // Loop through all the threads to get back to the starting state + iterations := c.threadCount * 2 + for 
i := 0; i < iterations; i++ { + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + expected.Step += 1 + expected.ExpectPreemption(state) + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // We should clear the wakeup on the last step + if i == iterations-1 { + expected.Wakeup = exec.FutexEmptyAddr + } + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + } + + // We should be back to the original state with only a few modifications + initialState.Step += uint64(iterations) + initialState.StepsSinceLastContextSwitch = 0 + initialState.Wakeup = exec.FutexEmptyAddr + initialState.Validate(t, state) + }) + } +} + +func TestEVM_SchedQuantumThreshold(t *testing.T) { + var tracer *tracing.Hooks + cases := []struct { + name string + stepsSinceLastContextSwitch uint64 + shouldPreempt bool + }{ + {name: "just under threshold", stepsSinceLastContextSwitch: exec.SchedQuantum - 1}, + {name: "at threshold", stepsSinceLastContextSwitch: exec.SchedQuantum, shouldPreempt: true}, + {name: "beyond threshold", stepsSinceLastContextSwitch: exec.SchedQuantum + 1, shouldPreempt: true}, + } + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + goVm, state, contracts := setup(t, i*789) + // Setup basic getThreadId syscall instruction + state.Memory.SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysGetTID // Set syscall number + state.StepsSinceLastContextSwitch = c.stepsSinceLastContextSwitch + step := state.Step + + // Set up post-state expectations + expected := mttestutil.NewExpectedMTState(state) + if c.shouldPreempt { + expected.Step += 1 + expected.ExpectPreemption(state) + } else { + // Otherwise just expect a normal step + expected.ExpectStep() + expected.ActiveThread().Registers[2] = 
state.GetCurrentThread().ThreadId + expected.ActiveThread().Registers[7] = 0 + } + + // State transition + var err error + var stepWitness *mipsevm.StepWitness + stepWitness, err = goVm.Step(true) + require.NoError(t, err) + + // Validate post-state + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) + }) + } +} + +func setup(t require.TestingT, randomSeed int) (mipsevm.FPVM, *multithreaded.State, *testutil.ContractMetadata) { + v := GetMultiThreadedTestCase(t) + vm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(randomSeed))) + state := mttestutil.GetMtState(t, vm) + + return vm, state, v.Contracts + +} diff --git a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index e96aa54d0aa4..2f68706b60d3 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" @@ -30,7 +29,7 @@ func FuzzStateSyscallBrk(f *testing.F) { state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -42,12 +41,7 @@ func FuzzStateSyscallBrk(f *testing.F) { require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, 
v.StateHashFn, v.Contracts, nil) }) } }) @@ -74,7 +68,7 @@ func FuzzStateSyscallMmap(f *testing.F) { state.GetRegistersRef()[5] = siz state.GetMemory().SetMemory(state.GetPC(), syscallInsn) - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -102,12 +96,7 @@ func FuzzStateSyscallMmap(f *testing.F) { require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -126,7 +115,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.Exited = true expected.ExitCode = exitCode @@ -136,12 +125,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -161,7 +145,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = 
state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -187,12 +171,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -217,7 +196,7 @@ func FuzzStateHintRead(f *testing.F) { state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -229,12 +208,7 @@ func FuzzStateHintRead(f *testing.F) { require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -273,7 +247,7 @@ func FuzzStatePreimageRead(f *testing.F) { writeLen = preimageDataLen - preimageOffset } - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -292,12 +266,7 @@ func FuzzStatePreimageRead(f *testing.F) { // modify memory - it's possible we just write the leading zero bytes of the length prefix require.Equal(t, expected.MemoryRoot, common.Hash(state.GetMemory().MerkleRoot())) } - - evm := 
testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -329,7 +298,7 @@ func FuzzStateHintWrite(f *testing.F) { // Set instruction state.GetMemory().SetMemory(state.GetPC(), syscallInsn) - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -342,12 +311,7 @@ func FuzzStateHintWrite(f *testing.F) { // TODO(cp-983) - validate expected hints expected.Validate(t, state, testutil.SkipHintValidation) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) }) } }) @@ -377,7 +341,7 @@ func FuzzStatePreimageWrite(f *testing.F) { count = sz } - expected := testutil.CreateExpectedState(state) + expected := testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 @@ -391,12 +355,7 @@ func FuzzStatePreimageWrite(f *testing.F) { // TODO(cp-983) - validate preimage key expected.Validate(t, state, testutil.SkipPreimageKeyValidation) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, 
v.StateHashFn, v.Contracts, nil) }) } }) diff --git a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go index 2b8224b10725..828f9c558739 100644 --- a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go @@ -4,44 +4,63 @@ import ( "os" "testing" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" + mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) -// TODO func FuzzStateSyscallCloneMT(f *testing.F) { v := GetMultiThreadedTestCase(f) - // t.Skip is causing linting check to fail, disable for now - //nolint:staticcheck - f.Fuzz(func(t *testing.T, seed int64) { - // TODO(cp-903) Customize test for multi-threaded vm - t.Skip("TODO - customize this test for MTCannon") + f.Fuzz(func(t *testing.T, nextThreadId, stackPtr uint32, seed int64) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) - state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysClone + state := mttestutil.GetMtState(t, goVm) + // Update existing threads to avoid collision with nextThreadId + if mttestutil.FindThread(state, nextThreadId) != nil { + for i, t := range mttestutil.GetAllThreads(state) { + t.ThreadId = nextThreadId - uint32(i+1) + } + } + + // Setup + state.NextThreadId = nextThreadId state.GetMemory().SetMemory(state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = exec.SysClone + state.GetRegistersRef()[4] = exec.ValidCloneFlags + state.GetRegistersRef()[5] = stackPtr step := state.GetStep() - expected := testutil.CreateExpectedState(state) + // Set up expectations + expected := mttestutil.NewExpectedMTState(state) expected.Step += 1 - 
expected.PC = state.GetCpu().NextPC - expected.NextPC = state.GetCpu().NextPC + 4 - expected.Registers[2] = 0x1 - expected.Registers[7] = 0 + // Set original thread expectations + expected.PrestateActiveThread().PC = state.GetCpu().NextPC + expected.PrestateActiveThread().NextPC = state.GetCpu().NextPC + 4 + expected.PrestateActiveThread().Registers[2] = nextThreadId + expected.PrestateActiveThread().Registers[7] = 0 + // Set expectations for new, cloned thread + expected.ActiveThreadId = nextThreadId + epxectedNewThread := expected.ExpectNewThread() + epxectedNewThread.PC = state.GetCpu().NextPC + epxectedNewThread.NextPC = state.GetCpu().NextPC + 4 + epxectedNewThread.Registers[2] = 0 + epxectedNewThread.Registers[7] = 0 + epxectedNewThread.Registers[29] = stackPtr + expected.NextThreadId = nextThreadId + 1 + expected.StepsSinceLastContextSwitch = 0 + if state.TraverseRight { + expected.RightStackSize += 1 + } else { + expected.LeftStackSize += 1 + } stepWitness, err := goVm.Step(true) require.NoError(t, err) require.False(t, stepWitness.HasPreimage()) expected.Validate(t, state) - - evm := testutil.NewMIPSEVM(v.Contracts) - evmPost := evm.Step(t, stepWitness, step, v.StateHashFn) - goPost, _ := goVm.GetState().EncodeWitness() - require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), - "mipsevm produced different state than EVM") + testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts, nil) }) } diff --git a/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go index be5c995d352c..cc30c0040196 100644 --- a/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go @@ -20,7 +20,7 @@ func FuzzStateSyscallCloneST(f *testing.F) { state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() - expected := testutil.CreateExpectedState(state) + expected := 
testutil.NewExpectedState(state) expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 diff --git a/cannon/mipsevm/testutil/mips.go b/cannon/mipsevm/testutil/mips.go index 39d8fbed0b46..f596e0e4de7b 100644 --- a/cannon/mipsevm/testutil/mips.go +++ b/cannon/mipsevm/testutil/mips.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/vm" @@ -151,3 +152,35 @@ func LogStepFailureAtCleanup(t *testing.T, mipsEvm *MIPSEVM) { } }) } + +// ValidateEVM runs a single evm step and validates against an FPVM poststate +func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, goVm mipsevm.FPVM, hashFn mipsevm.HashFn, contracts *ContractMetadata, tracer *tracing.Hooks) { + evm := NewMIPSEVM(contracts) + evm.SetTracer(tracer) + LogStepFailureAtCleanup(t, evm) + + evmPost := evm.Step(t, stepWitness, step, hashFn) + goPost, _ := goVm.GetState().EncodeWitness() + require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), + "mipsevm produced different state than EVM") +} + +// AssertEVMReverts runs a single evm step from an FPVM prestate and asserts that the VM panics +func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *ContractMetadata, tracer *tracing.Hooks) { + insnProof := state.GetMemory().MerkleProof(state.GetPC()) + encodedWitness, _ := state.EncodeWitness() + stepWitness := &mipsevm.StepWitness{ + State: encodedWitness, + ProofData: insnProof[:], + } + input := EncodeStepInput(t, stepWitness, mipsevm.LocalContext{}, contracts.Artifacts.MIPS) + startingGas := uint64(30_000_000) + + env, evmState := NewEVMEnv(contracts) + env.Config.Tracer = tracer + sender := common.Address{0x13, 0x37} + _, _, err := env.Call(vm.AccountRef(sender), contracts.Addresses.MIPS, input, 
startingGas, common.U2560) + require.EqualValues(t, err, vm.ErrExecutionReverted) + logs := evmState.Logs() + require.Equal(t, 0, len(logs)) +} diff --git a/cannon/mipsevm/testutil/rand.go b/cannon/mipsevm/testutil/rand.go new file mode 100644 index 000000000000..2364c7ad5e92 --- /dev/null +++ b/cannon/mipsevm/testutil/rand.go @@ -0,0 +1,53 @@ +package testutil + +import ( + "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func RandHash(r *rand.Rand) common.Hash { + var bytes [32]byte + _, err := r.Read(bytes[:]) + if err != nil { + panic(err) + } + return bytes +} + +func RandHint(r *rand.Rand) []byte { + count := r.Intn(10) + + bytes := make([]byte, count) + _, err := r.Read(bytes[:]) + if err != nil { + panic(err) + } + return bytes +} + +func RandRegisters(r *rand.Rand) *[32]uint32 { + registers := new([32]uint32) + for i := 0; i < 32; i++ { + registers[i] = r.Uint32() + } + return registers +} + +func RandomBytes(t require.TestingT, seed int64, length uint32) []byte { + r := rand.New(rand.NewSource(seed)) + randBytes := make([]byte, length) + if _, err := r.Read(randBytes); err != nil { + require.NoError(t, err) + } + return randBytes +} + +func RandPC(r *rand.Rand) uint32 { + return AlignPC(r.Uint32()) +} + +func RandStep(r *rand.Rand) uint64 { + return BoundStep(r.Uint64()) +} diff --git a/cannon/mipsevm/testutil/state.go b/cannon/mipsevm/testutil/state.go index da0c9d1657ee..3b912c8dd067 100644 --- a/cannon/mipsevm/testutil/state.go +++ b/cannon/mipsevm/testutil/state.go @@ -2,7 +2,6 @@ package testutil import ( "fmt" - "math/rand" "slices" "testing" @@ -31,7 +30,7 @@ type StateMutator interface { SetExited(val bool) SetStep(val uint64) SetLastHint(val hexutil.Bytes) - GetRegistersRef() *[32]uint32 + Randomize(randSeed int64) } type StateOption func(state StateMutator) @@ -80,36 +79,26 @@ func WithStep(step uint64) StateOption { func WithRandomization(seed int64) StateOption { return func(mut 
StateMutator) { - RandomizeState(seed, mut) + mut.Randomize(seed) } } -func RandomizeState(seed int64, mut StateMutator) { - r := rand.New(rand.NewSource(seed)) - +func AlignPC(pc uint32) uint32 { // Memory-align random pc and leave room for nextPC - pc := r.Uint32() & 0xFF_FF_FF_FC // Align address + pc = pc & 0xFF_FF_FF_FC // Align address if pc >= 0xFF_FF_FF_FC { // Leave room to set and then increment nextPC pc = 0xFF_FF_FF_FC - 8 } + return pc +} - // Set random step, but leave room to increment - step := r.Uint64() +func BoundStep(step uint64) uint64 { + // Leave room to increment step at least once if step == ^uint64(0) { step -= 1 } - - mut.SetPreimageKey(randHash(r)) - mut.SetPreimageOffset(r.Uint32()) - mut.SetPC(pc) - mut.SetNextPC(pc + 4) - mut.SetHI(r.Uint32()) - mut.SetLO(r.Uint32()) - mut.SetHeap(r.Uint32()) - mut.SetStep(step) - mut.SetLastHint(randHint(r)) - *mut.GetRegistersRef() = *randRegisters(r) + return step } type ExpectedState struct { @@ -128,7 +117,7 @@ type ExpectedState struct { MemoryRoot common.Hash } -func CreateExpectedState(fromState mipsevm.FPVMState) *ExpectedState { +func NewExpectedState(fromState mipsevm.FPVMState) *ExpectedState { return &ExpectedState{ PreimageKey: fromState.GetPreimageKey(), PreimageOffset: fromState.GetPreimageOffset(), @@ -176,40 +165,3 @@ func (e *ExpectedState) Validate(t testing.TB, actualState mipsevm.FPVMState, fl require.Equal(t, e.MemoryRoot, common.Hash(actualState.GetMemory().MerkleRoot()), fmt.Sprintf("Expect memory root = %v", e.MemoryRoot)) } } - -func randHash(r *rand.Rand) common.Hash { - var bytes [32]byte - _, err := r.Read(bytes[:]) - if err != nil { - panic(err) - } - return bytes -} - -func randHint(r *rand.Rand) []byte { - count := r.Intn(10) - - bytes := make([]byte, count) - _, err := r.Read(bytes[:]) - if err != nil { - panic(err) - } - return bytes -} - -func randRegisters(r *rand.Rand) *[32]uint32 { - registers := new([32]uint32) - for i := 0; i < 32; i++ { - registers[i] = 
r.Uint32() - } - return registers -} - -func RandomBytes(t require.TestingT, seed int64, length uint32) []byte { - r := rand.New(rand.NewSource(seed)) - randBytes := make([]byte, length) - if _, err := r.Read(randBytes); err != nil { - require.NoError(t, err) - } - return randBytes -} diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index c9fe50d36c6e..59def1ae86cf 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -148,8 +148,8 @@ "sourceCodeHash": "0xb6e219e8c2d81d75c48a1459907609e9096fe032a7447c88cd3e0d134752ac8e" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xbb425bd1c3cad13a77f5c9676b577606e2f8f320687739f529b257a042f58d85", - "sourceCodeHash": "0xe66f19942947f53ccd658b94c1ef6db39e947419d4ec7436067c6cc44452ff73" + "initCodeHash": "0x36b7c32cf9eba05e6db44910a25c800b801c075f8e053eca9515c6e0e4d8a902", + "sourceCodeHash": "0xa307c44a2d67bc84e75f4b7341345ed236da2e63c1f3f442416f14cd262126bf" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0xce7a1c3265e457a05d17b6d1a2ef93c4639caac3733c9cf88bfd192eae2c5788", diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index 142af042b99a..a1bcea86edb8 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -51,8 +51,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.5 + string public constant version = "1.0.0-beta.5"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -202,7 +202,7 @@ contract MIPS2 is ISemver { } } - if (state.stepsSinceLastContextSwitch == sys.SCHED_QUANTUM) { + if (state.stepsSinceLastContextSwitch >= sys.SCHED_QUANTUM) { preemptThread(state, thread); return outputState(); } @@ -338,7 +338,6 @@ contract MIPS2 is ISemver { } else if (syscall_no == sys.SYS_FUTEX) { // args: a0 = addr, a1 = op, a2 = val, a3 = timeout if (a1 == sys.FUTEX_WAIT_PRIVATE) { - thread.futexAddr = a0; uint32 mem = MIPSMemory.readMem( state.memRoot, a0 & 0xFFffFFfc, MIPSMemory.memoryProofOffset(MEM_PROOF_OFFSET, 1) ); @@ -346,6 +345,7 @@ contract MIPS2 is ISemver { v0 = sys.SYS_ERROR_SIGNAL; v1 = sys.EAGAIN; } else { + thread.futexAddr = a0; thread.futexVal = a2; thread.futexTimeoutStep = a3 == 0 ? sys.FUTEX_NO_TIMEOUT : state.step + sys.FUTEX_TIMEOUT_STEPS; // Leave cpu scalars as-is. This instruction will be completed by `onWaitComplete` @@ -381,9 +381,9 @@ contract MIPS2 is ISemver { } else if (syscall_no == sys.SYS_OPEN) { v0 = sys.SYS_ERROR_SIGNAL; v1 = sys.EBADF; - } else if (syscall_no == sys.SYS_CLOCK_GETTIME) { + } else if (syscall_no == sys.SYS_MUNMAP) { // ignored - } else if (syscall_no == sys.SYS_GET_AFFINITY) { + } else if (syscall_no == sys.SYS_GETAFFINITY) { // ignored } else if (syscall_no == sys.SYS_MADVISE) { // ignored @@ -443,8 +443,6 @@ contract MIPS2 is ISemver { // ignored } else if (syscall_no == sys.SYS_CLOCKGETTIME) { // ignored - } else if (syscall_no == sys.SYS_MUNMAP) { - // ignored } else { revert("MIPS2: unimplemented syscall"); } diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index f12f9b789a23..0576dbc7ae2c 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -42,8 +42,7 @@ library MIPSSyscalls { uint32 internal constant SYS_OPEN = 4005; uint32 internal constant 
SYS_NANOSLEEP = 4166; // unused syscalls - uint32 internal constant SYS_CLOCK_GETTIME = 4263; - uint32 internal constant SYS_GET_AFFINITY = 4240; + uint32 internal constant SYS_MUNMAP = 4091; uint32 internal constant SYS_GETAFFINITY = 4240; uint32 internal constant SYS_MADVISE = 4218; uint32 internal constant SYS_RTSIGPROCMASK = 4195; @@ -69,13 +68,13 @@ library MIPSSyscalls { uint32 internal constant SYS_LLSEEK = 4140; uint32 internal constant SYS_MINCORE = 4217; uint32 internal constant SYS_TGKILL = 4266; + // profiling-related syscalls - ignored uint32 internal constant SYS_SETITIMER = 4104; uint32 internal constant SYS_TIMERCREATE = 4257; uint32 internal constant SYS_TIMERSETTIME = 4258; uint32 internal constant SYS_TIMERDELETE = 4261; uint32 internal constant SYS_CLOCKGETTIME = 4263; - uint32 internal constant SYS_MUNMAP = 4091; uint32 internal constant FD_STDIN = 0; uint32 internal constant FD_STDOUT = 1; diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 30d6899caad6..dc377dcea8fa 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -435,7 +435,7 @@ contract MIPS2_Test is CommonTest { MIPS2.ThreadState memory expectThread = copyThread(thread); expectThread.pc = thread.nextPC; expectThread.nextPC = thread.nextPC + 4; - expectThread.futexAddr = futexAddr; + expectThread.futexAddr = sys.FUTEX_EMPTY_ADDR; expectThread.registers[2] = sys.SYS_ERROR_SIGNAL; expectThread.registers[7] = sys.EAGAIN; // errno threading.replaceCurrent(expectThread); From c7b91ab24fdbe83c43a8e716962186863606cef9 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Fri, 30 Aug 2024 01:55:32 +1000 Subject: [PATCH 04/19] Conductor and sequencer p2p refactoring (#11455) * Shutdown sequencer before stopping p2p * Check p2p isn't also disabled Co-authored-by: Sebastian Stammler * Remove missed time.Sleep * Fix up use of SetupP2P.Disabled * Revert error 
check after RPC boundary * Add comment about context for StopSequencer * Add Config.p2pEnabled * op-node: Make Config.P2PEnabled public --------- Co-authored-by: Sebastian Stammler --- op-chain-ops/script/console2_gen.go | 1 - op-conductor/conductor/service.go | 36 ++++-- op-e2e/setup.go | 2 +- op-node/node/config.go | 4 + op-node/node/node.go | 62 +++++++--- op-node/p2p/config.go | 1 + op-node/p2p/node.go | 172 ++++++++++++++++------------ 7 files changed, 176 insertions(+), 102 deletions(-) diff --git a/op-chain-ops/script/console2_gen.go b/op-chain-ops/script/console2_gen.go index f8b8aecf7497..0c6dcc44f462 100644 --- a/op-chain-ops/script/console2_gen.go +++ b/op-chain-ops/script/console2_gen.go @@ -1511,4 +1511,3 @@ func (c *ConsolePrecompile) Log_59cfcbe3(p0 *big.Int, p1 *big.Int, p2 *big.Int, func (c *ConsolePrecompile) Log_193fb800(p0 *big.Int, p1 *big.Int, p2 *big.Int, p3 *big.Int) { c.log(p0, p1, p2, p3) } - diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index a1ccef871538..2a39193c6bcc 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -647,9 +647,12 @@ func (oc *OpConductor) action() { oc.log.Debug("exiting action with status and error", "status", status, "err", err) if err != nil { - oc.log.Error("failed to execute step, queueing another one to retry", "err", err, "status", status) - time.Sleep(oc.retryBackoff()) - oc.queueAction() + select { + case <-oc.shutdownCtx.Done(): + case <-time.After(oc.retryBackoff()): + oc.log.Error("failed to execute step, queueing another one to retry", "err", err, "status", status) + oc.queueAction() + } return } @@ -683,18 +686,33 @@ func (oc *OpConductor) transferLeader() error { } func (oc *OpConductor) stopSequencer() error { - oc.log.Info("stopping sequencer", "server", oc.cons.ServerID(), "leader", oc.leader.Load(), "healthy", oc.healthy.Load(), "active", oc.seqActive.Load()) - - _, err := oc.ctrl.StopSequencer(context.Background()) - if err 
!= nil { + oc.log.Info( + "stopping sequencer", + "server", oc.cons.ServerID(), + "leader", oc.leader.Load(), + "healthy", oc.healthy.Load(), + "active", oc.seqActive.Load()) + + // Quoting (@zhwrd): StopSequencer is called after conductor loses leadership. In the event that + // the StopSequencer call fails, it actually has little real consequences because the sequencer + // cant produce a block and gossip / commit it to the raft log (requires leadership). Once + // conductor comes back up it will check its leader and sequencer state and attempt to stop the + // sequencer again. So it is "okay" to fail to stop a sequencer, the state will eventually be + // rectified and we won't have two active sequencers that are actually producing blocks. + // + // To that end we allow to cancel the StopSequencer call if we're shutting down. + latestHead, err := oc.ctrl.StopSequencer(oc.shutdownCtx) + if err == nil { + // None of the consensus state should have changed here so don't log it again. + oc.log.Info("stopped sequencer", "latestHead", latestHead) + } else { if strings.Contains(err.Error(), driver.ErrSequencerAlreadyStopped.Error()) { - oc.log.Warn("sequencer already stopped.", "err", err) + oc.log.Warn("sequencer already stopped", "err", err) } else { return errors.Wrap(err, "failed to stop sequencer") } } oc.metrics.RecordStopSequencer(err == nil) - oc.seqActive.Store(false) return nil } diff --git a/op-e2e/setup.go b/op-e2e/setup.go index c0168b7d207d..cd07e081d0dd 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -415,7 +415,7 @@ func (sys *System) Close() { } for name, node := range sys.RollupNodes { - if err := node.Stop(postCtx); err != nil && !errors.Is(err, rollupNode.ErrAlreadyClosed) { + if err := node.Stop(postCtx); err != nil && !errors.Is(err, rollupNode.ErrAlreadyClosed) && !errors.Is(err, postCtx.Err()) { combinedErr = errors.Join(combinedErr, fmt.Errorf("stop rollup node %v: %w", name, err)) } } diff --git a/op-node/node/config.go 
b/op-node/node/config.go index 5ca724d905c6..6b7be8f268fd 100644 --- a/op-node/node/config.go +++ b/op-node/node/config.go @@ -172,3 +172,7 @@ func (cfg *Config) Check() error { } return nil } + +func (cfg *Config) P2PEnabled() bool { + return cfg.P2P != nil && !cfg.P2P.Disabled() +} diff --git a/op-node/node/node.go b/op-node/node/node.go index a4fd3e8db08f..0f174c7a372c 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -8,6 +8,8 @@ import ( "sync/atomic" "time" + "github.com/ethereum-optimism/optimism/op-node/rollup/sequencing" + "github.com/hashicorp/go-multierror" "github.com/libp2p/go-libp2p/core/peer" @@ -40,6 +42,8 @@ type closableSafeDB interface { } type OpNode struct { + // Retain the config to test for active features rather than test for runtime state. + cfg *Config log log.Logger appVersion string metrics *metrics.Metrics @@ -93,6 +97,7 @@ func New(ctx context.Context, cfg *Config, log log.Logger, appVersion string, m } n := &OpNode{ + cfg: cfg, log: log, appVersion: appVersion, metrics: m, @@ -134,7 +139,7 @@ func (n *OpNode) init(ctx context.Context, cfg *Config) error { if err := n.initP2PSigner(ctx, cfg); err != nil { return fmt.Errorf("failed to init the P2P signer: %w", err) } - if err := n.initP2P(ctx, cfg); err != nil { + if err := n.initP2P(cfg); err != nil { return fmt.Errorf("failed to init the P2P stack: %w", err) } // Only expose the server at the end, ensuring all RPC backend components are initialized. 
@@ -407,7 +412,7 @@ func (n *OpNode) initRPCServer(cfg *Config) error { if err != nil { return err } - if n.p2pNode != nil { + if n.p2pEnabled() { server.EnableP2P(p2p.NewP2PAPIBackend(n.p2pNode, n.log, n.metrics)) } if cfg.RPC.EnableAdmin { @@ -454,14 +459,20 @@ func (n *OpNode) initPProf(cfg *Config) error { return nil } -func (n *OpNode) initP2P(ctx context.Context, cfg *Config) error { - if cfg.P2P != nil { +func (n *OpNode) p2pEnabled() bool { + return n.cfg.P2PEnabled() +} + +func (n *OpNode) initP2P(cfg *Config) (err error) { + if n.p2pNode != nil { + panic("p2p node already initialized") + } + if n.p2pEnabled() { // TODO(protocol-quest/97): Use EL Sync instead of CL Alt sync for fetching missing blocks in the payload queue. - p2pNode, err := p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, n, n.l2Source, n.runCfg, n.metrics, false) - if err != nil || p2pNode == nil { - return err + n.p2pNode, err = p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, n, n.l2Source, n.runCfg, n.metrics, false) + if err != nil { + return } - n.p2pNode = p2pNode if n.p2pNode.Dv5Udp() != nil { go n.p2pNode.DiscoveryProcess(n.resourcesCtx, n.log, &cfg.Rollup, cfg.P2P.TargetPeers()) } @@ -469,15 +480,14 @@ func (n *OpNode) initP2P(ctx context.Context, cfg *Config) error { return nil } -func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) error { +func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) (err error) { // the p2p signer setup is optional if cfg.P2PSigner == nil { - return nil + return } // p2pSigner may still be nil, the signer setup may not create any signer, the signer is optional - var err error n.p2pSigner, err = cfg.P2PSigner.SetupSigner(ctx) - return err + return } func (n *OpNode) Start(ctx context.Context) error { @@ -533,7 +543,7 @@ func (n *OpNode) PublishL2Payload(ctx context.Context, envelope *eth.ExecutionPa n.tracer.OnPublishL2Payload(ctx, envelope) // publish to p2p, if we are running p2p at all - if n.p2pNode != 
nil { + if n.p2pEnabled() { payload := envelope.ExecutionPayload if n.p2pSigner == nil { return fmt.Errorf("node has no p2p signer, payload %s cannot be published", payload.ID()) @@ -547,7 +557,7 @@ func (n *OpNode) PublishL2Payload(ctx context.Context, envelope *eth.ExecutionPa func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, envelope *eth.ExecutionPayloadEnvelope) error { // ignore if it's from ourselves - if n.p2pNode != nil && from == n.p2pNode.Host().ID() { + if n.p2pEnabled() && from == n.p2pNode.Host().ID() { return nil } @@ -568,9 +578,13 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, envelope * } func (n *OpNode) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error { - if n.p2pNode != nil && n.p2pNode.AltSyncEnabled() { + if n.p2pEnabled() && n.p2pNode.AltSyncEnabled() { if unixTimeStale(start.Time, 12*time.Hour) { - n.log.Debug("ignoring request to sync L2 range, timestamp is too old for p2p", "start", start, "end", end, "start_time", start.Time) + n.log.Debug( + "ignoring request to sync L2 range, timestamp is too old for p2p", + "start", start, + "end", end, + "start_time", start.Time) return nil } return n.p2pNode.RequestL2Range(ctx, start, end) @@ -606,10 +620,26 @@ func (n *OpNode) Stop(ctx context.Context) error { result = multierror.Append(result, fmt.Errorf("failed to close RPC server: %w", err)) } } + + // Stop sequencer and report last hash. l2Driver can be nil if we're cleaning up a failed init. 
+ if n.l2Driver != nil { + latestHead, err := n.l2Driver.StopSequencer(ctx) + switch { + case errors.Is(err, sequencing.ErrSequencerNotEnabled): + case errors.Is(err, driver.ErrSequencerAlreadyStopped): + n.log.Info("stopping node: sequencer already stopped", "latestHead", latestHead) + case err == nil: + n.log.Info("stopped sequencer", "latestHead", latestHead) + default: + result = multierror.Append(result, fmt.Errorf("error stopping sequencer: %w", err)) + } + } if n.p2pNode != nil { if err := n.p2pNode.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err)) } + // Prevent further use of p2p. + n.p2pNode = nil } if n.p2pSigner != nil { if err := n.p2pSigner.Close(); err != nil { diff --git a/op-node/p2p/config.go b/op-node/p2p/config.go index 94b75a95de26..ee21ba20fc39 100644 --- a/op-node/p2p/config.go +++ b/op-node/p2p/config.go @@ -48,6 +48,7 @@ type HostMetrics interface { // SetupP2P provides a host and discovery service for usage in the rollup node. type SetupP2P interface { Check() error + // Looks like this was started to prevent partially inited p2p. Disabled() bool // Host creates a libp2p host service. Returns nil, nil if p2p is disabled. Host(log log.Logger, reporter metrics.Reporter, metrics HostMetrics) (host.Host, error) diff --git a/op-node/p2p/node.go b/op-node/p2p/node.go index 4c88556ddd9c..70f7dbc67c0b 100644 --- a/op-node/p2p/node.go +++ b/op-node/p2p/node.go @@ -52,10 +52,23 @@ type NodeP2P struct { // NewNodeP2P creates a new p2p node, and returns a reference to it. If the p2p is disabled, it returns nil. // If metrics are configured, a bandwidth monitor will be spawned in a goroutine. 
-func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer, elSyncEnabled bool) (*NodeP2P, error) { +func NewNodeP2P( + resourcesCtx context.Context, + rollupCfg *rollup.Config, + log log.Logger, + setup SetupP2P, + gossipIn GossipIn, + l2Chain L2Chain, + runCfg GossipRuntimeConfig, + metrics metrics.Metricer, + elSyncEnabled bool, +) (*NodeP2P, error) { if setup == nil { return nil, errors.New("p2p node cannot be created without setup") } + if setup.Disabled() { + return nil, errors.New("SetupP2P.Disabled is true") + } var n NodeP2P if err := n.init(resourcesCtx, rollupCfg, log, setup, gossipIn, l2Chain, runCfg, metrics, elSyncEnabled); err != nil { closeErr := n.Close() @@ -65,12 +78,24 @@ func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log. return nil, err } if n.host == nil { - return nil, nil + // See prior comment about n.host optionality: + // TODO(CLI-4016): host is not optional, NodeP2P as a whole is. + panic("host is not optional if p2p is enabled") } return &n, nil } -func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer, elSyncEnabled bool) error { +func (n *NodeP2P) init( + resourcesCtx context.Context, + rollupCfg *rollup.Config, + log log.Logger, + setup SetupP2P, + gossipIn GossipIn, + l2Chain L2Chain, + runCfg GossipRuntimeConfig, + metrics metrics.Metricer, + elSyncEnabled bool, +) error { bwc := p2pmetrics.NewBandwidthCounter() n.log = log @@ -85,86 +110,83 @@ func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, l return fmt.Errorf("failed to start p2p host: %w", err) } - // TODO(CLI-4016): host is not optional, NodeP2P as a whole is. This if statement is wrong - if n.host != nil { - // Enable extra features, if any. 
During testing we don't setup the most advanced host all the time. - if extra, ok := n.host.(ExtraHostFeatures); ok { - n.gater = extra.ConnectionGater() - n.connMgr = extra.ConnectionManager() - } - eps, ok := n.host.Peerstore().(store.ExtendedPeerstore) - if !ok { - return fmt.Errorf("cannot init without extended peerstore: %w", err) - } - n.store = eps - scoreParams := setup.PeerScoringParams() + // Enable extra features, if any. During testing we don't setup the most advanced host all the time. + if extra, ok := n.host.(ExtraHostFeatures); ok { + n.gater = extra.ConnectionGater() + n.connMgr = extra.ConnectionManager() + } + eps, ok := n.host.Peerstore().(store.ExtendedPeerstore) + if !ok { + return fmt.Errorf("cannot init without extended peerstore: %w", err) + } + n.store = eps + scoreParams := setup.PeerScoringParams() - if scoreParams != nil { - n.appScorer = newPeerApplicationScorer(resourcesCtx, log, clock.SystemClock, &scoreParams.ApplicationScoring, eps, n.host.Network().Peers) - } else { - n.appScorer = &NoopApplicationScorer{} - } - // Activate the P2P req-resp sync if enabled by feature-flag. 
- if setup.ReqRespSyncEnabled() && !elSyncEnabled { - n.syncCl = NewSyncClient(log, rollupCfg, n.host, gossipIn.OnUnsafeL2Payload, metrics, n.appScorer) - n.host.Network().Notify(&network.NotifyBundle{ - ConnectedF: func(nw network.Network, conn network.Conn) { - n.syncCl.AddPeer(conn.RemotePeer()) - }, - DisconnectedF: func(nw network.Network, conn network.Conn) { - // only when no connection is available, we can remove the peer - if nw.Connectedness(conn.RemotePeer()) == network.NotConnected { - n.syncCl.RemovePeer(conn.RemotePeer()) - } - }, - }) - n.syncCl.Start() - // the host may already be connected to peers, add them all to the sync client - for _, peerID := range n.host.Network().Peers() { - n.syncCl.AddPeer(peerID) - } - if l2Chain != nil { // Only enable serving side of req-resp sync if we have a data-source, to make minimal P2P testing easy - n.syncSrv = NewReqRespServer(rollupCfg, l2Chain, metrics) - // register the sync protocol with libp2p host - payloadByNumber := MakeStreamHandler(resourcesCtx, log.New("serve", "payloads_by_number"), n.syncSrv.HandleSyncRequest) - n.host.SetStreamHandler(PayloadByNumberProtocolID(rollupCfg.L2ChainID), payloadByNumber) - } + if scoreParams != nil { + n.appScorer = newPeerApplicationScorer(resourcesCtx, log, clock.SystemClock, &scoreParams.ApplicationScoring, eps, n.host.Network().Peers) + } else { + n.appScorer = &NoopApplicationScorer{} + } + // Activate the P2P req-resp sync if enabled by feature-flag. 
+ if setup.ReqRespSyncEnabled() && !elSyncEnabled { + n.syncCl = NewSyncClient(log, rollupCfg, n.host, gossipIn.OnUnsafeL2Payload, metrics, n.appScorer) + n.host.Network().Notify(&network.NotifyBundle{ + ConnectedF: func(nw network.Network, conn network.Conn) { + n.syncCl.AddPeer(conn.RemotePeer()) + }, + DisconnectedF: func(nw network.Network, conn network.Conn) { + // only when no connection is available, we can remove the peer + if nw.Connectedness(conn.RemotePeer()) == network.NotConnected { + n.syncCl.RemovePeer(conn.RemotePeer()) + } + }, + }) + n.syncCl.Start() + // the host may already be connected to peers, add them all to the sync client + for _, peerID := range n.host.Network().Peers() { + n.syncCl.AddPeer(peerID) } - n.scorer = NewScorer(rollupCfg, eps, metrics, n.appScorer, log) - // notify of any new connections/streams/etc. - n.host.Network().Notify(NewNetworkNotifier(log, metrics)) - // note: the IDDelta functionality was removed from libP2P, and no longer needs to be explicitly disabled. 
- n.gs, err = NewGossipSub(resourcesCtx, n.host, rollupCfg, setup, n.scorer, metrics, log) - if err != nil { - return fmt.Errorf("failed to start gossipsub router: %w", err) + if l2Chain != nil { // Only enable serving side of req-resp sync if we have a data-source, to make minimal P2P testing easy + n.syncSrv = NewReqRespServer(rollupCfg, l2Chain, metrics) + // register the sync protocol with libp2p host + payloadByNumber := MakeStreamHandler(resourcesCtx, log.New("serve", "payloads_by_number"), n.syncSrv.HandleSyncRequest) + n.host.SetStreamHandler(PayloadByNumberProtocolID(rollupCfg.L2ChainID), payloadByNumber) } - n.gsOut, err = JoinGossip(n.host.ID(), n.gs, log, rollupCfg, runCfg, gossipIn) - if err != nil { - return fmt.Errorf("failed to join blocks gossip topic: %w", err) - } - log.Info("started p2p host", "addrs", n.host.Addrs(), "peerID", n.host.ID().String()) + } + n.scorer = NewScorer(rollupCfg, eps, metrics, n.appScorer, log) + // notify of any new connections/streams/etc. + n.host.Network().Notify(NewNetworkNotifier(log, metrics)) + // note: the IDDelta functionality was removed from libP2P, and no longer needs to be explicitly disabled. + n.gs, err = NewGossipSub(resourcesCtx, n.host, rollupCfg, setup, n.scorer, metrics, log) + if err != nil { + return fmt.Errorf("failed to start gossipsub router: %w", err) + } + n.gsOut, err = JoinGossip(n.host.ID(), n.gs, log, rollupCfg, runCfg, gossipIn) + if err != nil { + return fmt.Errorf("failed to join blocks gossip topic: %w", err) + } + log.Info("started p2p host", "addrs", n.host.Addrs(), "peerID", n.host.ID().String()) - tcpPort, err := FindActiveTCPPort(n.host) - if err != nil { - log.Warn("failed to find what TCP port p2p is binded to", "err", err) - } + tcpPort, err := FindActiveTCPPort(n.host) + if err != nil { + log.Warn("failed to find what TCP port p2p is binded to", "err", err) + } - // All nil if disabled. 
- n.dv5Local, n.dv5Udp, err = setup.Discovery(log.New("p2p", "discv5"), rollupCfg, tcpPort) - if err != nil { - return fmt.Errorf("failed to start discv5: %w", err) - } + // All nil if disabled. + n.dv5Local, n.dv5Udp, err = setup.Discovery(log.New("p2p", "discv5"), rollupCfg, tcpPort) + if err != nil { + return fmt.Errorf("failed to start discv5: %w", err) + } - if metrics != nil { - go metrics.RecordBandwidth(resourcesCtx, bwc) - } + if metrics != nil { + go metrics.RecordBandwidth(resourcesCtx, bwc) + } - if setup.BanPeers() { - n.peerMonitor = monitor.NewPeerMonitor(resourcesCtx, log, clock.SystemClock, n, setup.BanThreshold(), setup.BanDuration()) - n.peerMonitor.Start() - } - n.appScorer.start() + if setup.BanPeers() { + n.peerMonitor = monitor.NewPeerMonitor(resourcesCtx, log, clock.SystemClock, n, setup.BanThreshold(), setup.BanDuration()) + n.peerMonitor.Start() } + n.appScorer.start() return nil } From 323e688d93c3ec4c67d39349c23dcd239af2fbdc Mon Sep 17 00:00:00 2001 From: AgusDuha <81362284+agusduha@users.noreply.github.com> Date: Thu, 29 Aug 2024 13:17:41 -0300 Subject: [PATCH 05/19] test: fix standard bridge interop tests (#11668) * test: fix L2 standard bridge interop tests * test: mock factory implementation instead of proxy --- .../contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol index 812ab36deed0..d60ceb264a73 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol @@ -29,6 +29,10 @@ contract L2StandardBridgeInterop_Test is Bridge_Initializer { function setUp() public virtual override { super.enableInterop(); super.setUp(); + + // TODO: Remove it once the `OptimismSuperchainERC20Factory` is added to predeploys. 
+ // Ensure OPTIMISM_SUPERCHAIN_ERC20_FACTORY's code is not empty. + vm.etch(Predeploys.predeployToCodeNamespace(Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY), address(this).code); } /// @notice Helper function to setup a mock and expect a call to it. From 7373ce7615b3607b328ad22672b1adb3c7cf3701 Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Thu, 29 Aug 2024 09:37:52 -0700 Subject: [PATCH 06/19] - make state publishing loop abort if the txpool state is not good (#11633) - protect txpool state vars with a mutex so they can be automically updated to avoid potential race condition --- op-batcher/batcher/driver.go | 66 ++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 39ebf2f25b24..0af4c86ca40a 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -8,7 +8,6 @@ import ( "math/big" _ "net/http/pprof" "sync" - "sync/atomic" "time" altda "github.com/ethereum-optimism/optimism/op-alt-da" @@ -83,6 +82,10 @@ type BatchSubmitter struct { mutex sync.Mutex running bool + txpoolMutex sync.Mutex // guards txpoolState and txpoolBlockedBlob + txpoolState int + txpoolBlockedBlob bool + // lastStoredBlock is the last block loaded into `state`. If it is empty it should be set to the l2 safe head. lastStoredBlock eth.BlockID lastL1Tip eth.L1BlockRef @@ -289,7 +292,7 @@ const ( // send a cancellation transaction. // TxpoolCancelPending -> TxpoolGood: // happens once the cancel transaction completes, whether successfully or in error. 
- TxpoolGood int32 = iota + TxpoolGood int = iota TxpoolBlocked TxpoolCancelPending ) @@ -304,23 +307,25 @@ func (l *BatchSubmitter) loop() { receiptLoopDone := make(chan struct{}) defer close(receiptLoopDone) // shut down receipt loop - var ( - txpoolState atomic.Int32 - txpoolBlockedBlob bool - ) - txpoolState.Store(TxpoolGood) + l.txpoolMutex.Lock() + l.txpoolState = TxpoolGood + l.txpoolMutex.Unlock() go func() { for { select { case r := <-receiptsCh: - if errors.Is(r.Err, txpool.ErrAlreadyReserved) && txpoolState.CompareAndSwap(TxpoolGood, TxpoolBlocked) { - txpoolBlockedBlob = r.ID.isBlob - l.Log.Info("incompatible tx in txpool") - } else if r.ID.isCancel && txpoolState.CompareAndSwap(TxpoolCancelPending, TxpoolGood) { + l.txpoolMutex.Lock() + if errors.Is(r.Err, txpool.ErrAlreadyReserved) && l.txpoolState == TxpoolGood { + l.txpoolState = TxpoolBlocked + l.txpoolBlockedBlob = r.ID.isBlob + l.Log.Info("incompatible tx in txpool", "is_blob", r.ID.isBlob) + } else if r.ID.isCancel && l.txpoolState == TxpoolCancelPending { // Set state to TxpoolGood even if the cancellation transaction ended in error // since the stuck transaction could have cleared while we were waiting. + l.txpoolState = TxpoolGood l.Log.Info("txpool may no longer be blocked", "err", r.Err) } + l.txpoolMutex.Unlock() l.Log.Info("Handling receipt", "id", r.ID) l.handleReceipt(r) case <-receiptLoopDone: @@ -345,13 +350,7 @@ func (l *BatchSubmitter) loop() { for { select { case <-ticker.C: - if txpoolState.CompareAndSwap(TxpoolBlocked, TxpoolCancelPending) { - // txpoolState is set to Blocked only if Send() is returning - // ErrAlreadyReserved. In this case, the TxMgr nonce should be reset to nil, - // allowing us to send a cancellation transaction. 
- l.cancelBlockingTx(queue, receiptsCh, txpoolBlockedBlob) - } - if txpoolState.Load() != TxpoolGood { + if !l.checkTxpool(queue, receiptsCh) { continue } if err := l.loadBlocksIntoState(l.shutdownCtx); errors.Is(err, ErrReorg) { @@ -433,7 +432,12 @@ func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh l.Log.Info("Txmgr is closed, aborting state publishing") return } + if !l.checkTxpool(queue, receiptsCh) { + l.Log.Info("txpool state is not good, aborting state publishing") + return + } err := l.publishTxToL1(l.killCtx, queue, receiptsCh) + if err != nil { if err != io.EOF { l.Log.Error("Error publishing tx to l1", "err", err) @@ -545,10 +549,11 @@ func (l *BatchSubmitter) cancelBlockingTx(queue *txmgr.Queue[txRef], receiptsCh panic(err) // this error should not happen } l.Log.Warn("sending a cancellation transaction to unblock txpool", "blocked_blob", isBlockedBlob) - l.queueTx(txData{}, true, candidate, queue, receiptsCh) + l.sendTx(txData{}, true, candidate, queue, receiptsCh) } // sendTransaction creates & queues for sending a transaction to the batch inbox address with the given `txData`. +// This call will block if the txmgr queue is at the max-pending limit. // The method will block if the queue's MaxPendingTransactions is exceeded. func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) error { var err error @@ -585,11 +590,13 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que candidate = l.calldataTxCandidate(data) } - l.queueTx(txdata, false, candidate, queue, receiptsCh) + l.sendTx(txdata, false, candidate, queue, receiptsCh) return nil } -func (l *BatchSubmitter) queueTx(txdata txData, isCancel bool, candidate *txmgr.TxCandidate, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) { +// sendTx uses the txmgr queue to send the given transaction candidate after setting its +// gaslimit. 
It will block if the txmgr queue has reached its MaxPendingTransactions limit. +func (l *BatchSubmitter) sendTx(txdata txData, isCancel bool, candidate *txmgr.TxCandidate, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) { intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, false, true, true, false) if err != nil { // we log instead of return an error here because txmgr can do its own gas estimation @@ -665,6 +672,23 @@ func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) { return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), nil } +func (l *BatchSubmitter) checkTxpool(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) bool { + l.txpoolMutex.Lock() + if l.txpoolState == TxpoolBlocked { + // txpoolState is set to Blocked only if Send() is returning + // ErrAlreadyReserved. In this case, the TxMgr nonce should be reset to nil, + // allowing us to send a cancellation transaction. + l.txpoolState = TxpoolCancelPending + isBlob := l.txpoolBlockedBlob + l.txpoolMutex.Unlock() + l.cancelBlockingTx(queue, receiptsCh, isBlob) + return false + } + r := l.txpoolState == TxpoolGood + l.txpoolMutex.Unlock() + return r +} + func logFields(xs ...any) (fs []any) { for _, x := range xs { switch v := x.(type) { From db8154b098cc417099f13c2b4722f593753833c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 17:48:46 -0600 Subject: [PATCH 07/19] dependabot(gomod): bump github.com/hashicorp/raft from 1.7.0 to 1.7.1 (#11670) Bumps [github.com/hashicorp/raft](https://github.com/hashicorp/raft) from 1.7.0 to 1.7.1. 
- [Release notes](https://github.com/hashicorp/raft/releases) - [Changelog](https://github.com/hashicorp/raft/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/raft/compare/v1.7.0...v1.7.1) --- updated-dependencies: - dependency-name: github.com/hashicorp/raft dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index e8a150cb6a1d..95845e360db8 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/hashicorp/raft v1.7.0 + github.com/hashicorp/raft v1.7.1 github.com/hashicorp/raft-boltdb/v2 v2.3.0 github.com/holiman/uint256 v1.3.1 github.com/ipfs/go-datastore v0.6.0 @@ -113,7 +113,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-immutable-radix v1.0.0 // indirect - github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/hashicorp/golang-lru v0.5.0 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e // indirect diff --git a/go.sum b/go.sum index 0c661e610830..fcbc2cc0172e 100644 --- a/go.sum +++ b/go.sum @@ -321,8 +321,8 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
-github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= -github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= @@ -335,8 +335,8 @@ github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= -github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY= +github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM= github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e h1:SK4y8oR4ZMHPvwVHryKI88kJPJda4UyWYvG5A6iEQxc= github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e/go.mod h1:EMz/UIuG93P0MBeHh6CbXQAEe8ckVJLZjhD17lBzK5Q= github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= From d520441b008833d8ee8ee69bbe2e8158366279d0 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Fri, 30 Aug 2024 11:16:49 +1000 Subject: [PATCH 08/19] Use context interrupts consistently in more places (#11511) * Use context interrupts consistently in more places * Fix CI lint errors 
(cherry picked from commit 0410b7e448e063fa9e30295fbe423ff1c0171d12) * op-service/ctxinterrupt: address review comments --------- Co-authored-by: protolambda --- cannon/main.go | 16 +--- op-alt-da/cmd/daserver/entrypoint.go | 6 +- op-alt-da/cmd/daserver/main.go | 4 +- op-batcher/cmd/main.go | 4 +- op-bootnode/bootnode/entrypoint.go | 6 +- op-chain-ops/cmd/check-ecotone/main.go | 4 +- op-chain-ops/cmd/check-fjord/main.go | 4 +- op-chain-ops/cmd/op-simulate/main.go | 4 +- op-challenger/cmd/main.go | 4 +- op-challenger/cmd/utils.go | 4 +- op-conductor/cmd/main.go | 4 +- op-conductor/consensus/raft_fsm.go | 2 +- op-dispute-mon/cmd/main.go | 4 +- op-e2e/external_geth/main.go | 3 +- op-node/cmd/main.go | 4 +- op-proposer/cmd/main.go | 6 +- op-service/cliapp/lifecycle.go | 35 ++----- op-service/cliapp/lifecycle_test.go | 15 ++- op-service/ctxinterrupt/context.go | 29 ++++++ op-service/ctxinterrupt/context_test.go | 19 ++++ op-service/ctxinterrupt/doc.go | 3 + op-service/ctxinterrupt/funcs.go | 55 +++++++++++ op-service/ctxinterrupt/signal-waiter.go | 50 ++++++++++ op-service/ctxinterrupt/waiter.go | 38 ++++++++ op-service/opio/interrupts.go | 114 ----------------------- op-service/util.go | 38 +++----- op-supervisor/cmd/main.go | 4 +- op-wheel/commands.go | 4 +- op-wheel/engine/engine.go | 11 ++- 29 files changed, 273 insertions(+), 221 deletions(-) create mode 100644 op-service/ctxinterrupt/context.go create mode 100644 op-service/ctxinterrupt/context_test.go create mode 100644 op-service/ctxinterrupt/doc.go create mode 100644 op-service/ctxinterrupt/funcs.go create mode 100644 op-service/ctxinterrupt/signal-waiter.go create mode 100644 op-service/ctxinterrupt/waiter.go delete mode 100644 op-service/opio/interrupts.go diff --git a/cannon/main.go b/cannon/main.go index 769c1da0b6fe..176ce315708f 100644 --- a/cannon/main.go +++ b/cannon/main.go @@ -5,9 +5,8 @@ import ( "errors" "fmt" "os" - "os/signal" - "syscall" + 
"github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/cannon/cmd" @@ -23,18 +22,7 @@ func main() { cmd.WitnessCommand, cmd.RunCommand, } - ctx, cancel := context.WithCancel(context.Background()) - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) - go func() { - for { - <-c - cancel() - fmt.Println("\r\nExiting...") - } - }() - + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := app.RunContext(ctx, os.Args) if err != nil { if errors.Is(err, ctx.Err()) { diff --git a/op-alt-da/cmd/daserver/entrypoint.go b/op-alt-da/cmd/daserver/entrypoint.go index 96cc9fe48db1..32ff7d29f651 100644 --- a/op-alt-da/cmd/daserver/entrypoint.go +++ b/op-alt-da/cmd/daserver/entrypoint.go @@ -6,8 +6,8 @@ import ( "github.com/urfave/cli/v2" altda "github.com/ethereum-optimism/optimism/op-alt-da" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" ) func StartDAServer(cliCtx *cli.Context) error { @@ -55,7 +55,5 @@ func StartDAServer(cliCtx *cli.Context) error { } }() - opio.BlockOnInterrupts() - - return nil + return ctxinterrupt.Wait(cliCtx.Context) } diff --git a/op-alt-da/cmd/daserver/main.go b/op-alt-da/cmd/daserver/main.go index f45c63ffebef..3ed37bd05321 100644 --- a/op-alt-da/cmd/daserver/main.go +++ b/op-alt-da/cmd/daserver/main.go @@ -9,8 +9,8 @@ import ( opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" ) var Version = "v0.0.1" @@ -26,7 +26,7 @@ func main() { app.Description = "Service for storing AltDA inputs" app.Action = StartDAServer - ctx := 
opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := app.RunContext(ctx, os.Args) if err != nil { log.Crit("Application failed", "message", err) diff --git a/op-batcher/cmd/main.go b/op-batcher/cmd/main.go index 91e032ff36b4..82472006da27 100644 --- a/op-batcher/cmd/main.go +++ b/op-batcher/cmd/main.go @@ -11,9 +11,9 @@ import ( "github.com/ethereum-optimism/optimism/op-batcher/metrics" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics/doc" - "github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum/go-ethereum/log" ) @@ -40,7 +40,7 @@ func main() { }, } - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := app.RunContext(ctx, os.Args) if err != nil { log.Crit("Application failed", "message", err) diff --git a/op-bootnode/bootnode/entrypoint.go b/op-bootnode/bootnode/entrypoint.go index 0dcf356fa1f3..0c33383c70c7 100644 --- a/op-bootnode/bootnode/entrypoint.go +++ b/op-bootnode/bootnode/entrypoint.go @@ -17,10 +17,10 @@ import ( "github.com/ethereum-optimism/optimism/op-node/p2p" p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" "github.com/ethereum-optimism/optimism/op-service/eth" oplog "github.com/ethereum-optimism/optimism/op-service/log" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/opio" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" ) @@ -116,9 +116,7 @@ func Main(cliCtx *cli.Context) error { m.RecordUp() } - 
opio.BlockOnInterrupts() - - return nil + return ctxinterrupt.Wait(ctx) } // validateConfig ensures the minimal config required to run a bootnode diff --git a/op-chain-ops/cmd/check-ecotone/main.go b/op-chain-ops/cmd/check-ecotone/main.go index 2f46f03d29d0..58a11e9ce1ad 100644 --- a/op-chain-ops/cmd/check-ecotone/main.go +++ b/op-chain-ops/cmd/check-ecotone/main.go @@ -32,10 +32,10 @@ import ( op_service "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/eth" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-service/predeploys" "github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-service/sources" @@ -168,7 +168,7 @@ func makeCommandAction(fn CheckAction) func(c *cli.Context) error { logCfg := oplog.ReadCLIConfig(c) logger := oplog.NewLogger(c.App.Writer, logCfg) - c.Context = opio.CancelOnInterrupt(c.Context) + c.Context = ctxinterrupt.WithCancelOnInterrupt(c.Context) l1Cl, err := ethclient.DialContext(c.Context, c.String(EndpointL1.Name)) if err != nil { return fmt.Errorf("failed to dial L1 RPC: %w", err) diff --git a/op-chain-ops/cmd/check-fjord/main.go b/op-chain-ops/cmd/check-fjord/main.go index 5a2ef66fbcb9..c63cc43493f5 100644 --- a/op-chain-ops/cmd/check-fjord/main.go +++ b/op-chain-ops/cmd/check-fjord/main.go @@ -9,8 +9,8 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-fjord/checks" op_service "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog 
"github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/urfave/cli/v2" @@ -54,7 +54,7 @@ func makeCommandAction(fn CheckAction) func(c *cli.Context) error { logCfg := oplog.ReadCLIConfig(c) logger := oplog.NewLogger(c.App.Writer, logCfg) - c.Context = opio.CancelOnInterrupt(c.Context) + c.Context = ctxinterrupt.WithCancelOnInterrupt(c.Context) l2Cl, err := ethclient.DialContext(c.Context, c.String(EndpointL2.Name)) if err != nil { return fmt.Errorf("failed to dial L2 RPC: %w", err) diff --git a/op-chain-ops/cmd/op-simulate/main.go b/op-chain-ops/cmd/op-simulate/main.go index 83aab8714218..8b0986e4554f 100644 --- a/op-chain-ops/cmd/op-simulate/main.go +++ b/op-chain-ops/cmd/op-simulate/main.go @@ -33,8 +33,8 @@ import ( op_service "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" ) var EnvPrefix = "OP_SIMULATE" @@ -82,7 +82,7 @@ func main() { } func mainAction(c *cli.Context) error { - ctx := opio.CancelOnInterrupt(c.Context) + ctx := ctxinterrupt.WithCancelOnInterrupt(c.Context) logCfg := oplog.ReadCLIConfig(c) logger := oplog.NewLogger(c.App.Writer, logCfg) diff --git a/op-challenger/cmd/main.go b/op-challenger/cmd/main.go index 50d842592ab5..dda1fa641757 100644 --- a/op-challenger/cmd/main.go +++ b/op-challenger/cmd/main.go @@ -15,8 +15,8 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/version" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - 
"github.com/ethereum-optimism/optimism/op-service/opio" ) var ( @@ -29,7 +29,7 @@ var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDat func main() { args := os.Args - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) if err := run(ctx, args, func(ctx context.Context, l log.Logger, config *config.Config) (cliapp.Lifecycle, error) { return challenger.Main(ctx, l, config, metrics.NewMetrics()) }); err != nil { diff --git a/op-challenger/cmd/utils.go b/op-challenger/cmd/utils.go index 110f1dddd641..82c35f7fa0f5 100644 --- a/op-challenger/cmd/utils.go +++ b/op-challenger/cmd/utils.go @@ -7,8 +7,8 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/flags" contractMetrics "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" "github.com/ethereum-optimism/optimism/op-service/dial" - "github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" @@ -20,7 +20,7 @@ type ContractCreator[T any] func(context.Context, contractMetrics.ContractMetric func Interruptible(action cli.ActionFunc) cli.ActionFunc { return func(ctx *cli.Context) error { - ctx.Context = opio.CancelOnInterrupt(ctx.Context) + ctx.Context = ctxinterrupt.WithCancelOnInterrupt(ctx.Context) return action(ctx) } } diff --git a/op-conductor/cmd/main.go b/op-conductor/cmd/main.go index 508ffe220df0..3497e85f7917 100644 --- a/op-conductor/cmd/main.go +++ b/op-conductor/cmd/main.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum-optimism/optimism/op-conductor/flags" opservice "github.com/ethereum-optimism/optimism/op-service" 
"github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" ) var ( @@ -34,7 +34,7 @@ func main() { app.Action = cliapp.LifecycleCmd(OpConductorMain) app.Commands = []*cli.Command{} - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := app.RunContext(ctx, os.Args) if err != nil { log.Crit("Application failed", "message", err) diff --git a/op-conductor/consensus/raft_fsm.go b/op-conductor/consensus/raft_fsm.go index 31631d2c91e6..28c85c36d904 100644 --- a/op-conductor/consensus/raft_fsm.go +++ b/op-conductor/consensus/raft_fsm.go @@ -29,7 +29,7 @@ func NewUnsafeHeadTracker(log log.Logger) *unsafeHeadTracker { // Apply implements raft.FSM, it applies the latest change (latest unsafe head payload) to FSM. func (t *unsafeHeadTracker) Apply(l *raft.Log) interface{} { - if l.Data == nil || len(l.Data) == 0 { + if len(l.Data) == 0 { return fmt.Errorf("log data is nil or empty") } diff --git a/op-dispute-mon/cmd/main.go b/op-dispute-mon/cmd/main.go index 4b2c1f46bd17..619fad23592e 100644 --- a/op-dispute-mon/cmd/main.go +++ b/op-dispute-mon/cmd/main.go @@ -14,8 +14,8 @@ import ( "github.com/ethereum-optimism/optimism/op-dispute-mon/version" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/opio" ) var ( @@ -28,7 +28,7 @@ var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDat func main() { args := os.Args - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) if err := run(ctx, args, 
monitor.Main); err != nil { log.Crit("Application failed", "err", err) } diff --git a/op-e2e/external_geth/main.go b/op-e2e/external_geth/main.go index c97061d868ff..c8921b9b3ece 100644 --- a/op-e2e/external_geth/main.go +++ b/op-e2e/external_geth/main.go @@ -73,7 +73,8 @@ func run(configPath string) error { fmt.Printf("================== op-geth shim awaiting termination ==========================\n") sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigs) + signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) select { case <-sigs: diff --git a/op-node/cmd/main.go b/op-node/cmd/main.go index adb3d3aeb636..8f6688b51cbf 100644 --- a/op-node/cmd/main.go +++ b/op-node/cmd/main.go @@ -20,9 +20,9 @@ import ( "github.com/ethereum-optimism/optimism/op-node/version" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics/doc" - "github.com/ethereum-optimism/optimism/op-service/opio" ) var ( @@ -64,7 +64,7 @@ func main() { }, } - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := app.RunContext(ctx, os.Args) if err != nil { log.Crit("Application failed", "message", err) diff --git a/op-proposer/cmd/main.go b/op-proposer/cmd/main.go index bdfc730b4bf3..cbb21fb28516 100644 --- a/op-proposer/cmd/main.go +++ b/op-proposer/cmd/main.go @@ -1,8 +1,11 @@ package main import ( + "context" "os" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/urfave/cli/v2" @@ -38,7 +41,8 @@ func main() { }, } - err := app.Run(os.Args) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) + err := app.RunContext(ctx, os.Args) 
if err != nil { log.Crit("Application failed", "message", err) } diff --git a/op-service/cliapp/lifecycle.go b/op-service/cliapp/lifecycle.go index 2154c025645d..a1f134979134 100644 --- a/op-service/cliapp/lifecycle.go +++ b/op-service/cliapp/lifecycle.go @@ -7,7 +7,7 @@ import ( "github.com/urfave/cli/v2" - "github.com/ethereum-optimism/optimism/op-service/opio" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" ) type Lifecycle interface { @@ -29,36 +29,26 @@ type Lifecycle interface { // a shutdown when the Stop context is not expired. type LifecycleAction func(ctx *cli.Context, close context.CancelCauseFunc) (Lifecycle, error) -var interruptErr = errors.New("interrupt signal") - // LifecycleCmd turns a LifecycleAction into an CLI action, -// by instrumenting it with CLI context and signal based termination. -// The signals are caught with the opio.BlockFn attached to the context, if any. -// If no block function is provided, it adds default interrupt handling. +// by instrumenting it with CLI context and signal based cancellation. +// The signals are caught with the ctxinterrupt.waiter attached to the context, or default +// interrupt signal handling if not already provided. // The app may continue to run post-processing until fully shutting down. // The user can force an early shut-down during post-processing by sending a second interruption signal. func LifecycleCmd(fn LifecycleAction) cli.ActionFunc { return func(ctx *cli.Context) error { - hostCtx := ctx.Context - blockOnInterrupt := opio.BlockerFromContext(hostCtx) - if blockOnInterrupt == nil { // add default interrupt blocker to context if none is set. 
- hostCtx = opio.WithInterruptBlocker(hostCtx) - blockOnInterrupt = opio.BlockerFromContext(hostCtx) - } - appCtx, appCancel := context.WithCancelCause(hostCtx) + hostCtx, stop := ctxinterrupt.WithSignalWaiter(ctx.Context) + defer stop() + appCtx, appCancel := context.WithCancelCause(ctxinterrupt.WithCancelOnInterrupt(hostCtx)) + // This is updated so the fn callback cli.Context uses the appCtx we just made. ctx.Context = appCtx - go func() { - blockOnInterrupt(appCtx) - appCancel(interruptErr) - }() - appLifecycle, err := fn(ctx, appCancel) if err != nil { // join errors to include context cause (nil errors are dropped) return errors.Join( fmt.Errorf("failed to setup: %w", err), - context.Cause(appCtx), + context.Cause(ctx.Context), ) } @@ -75,15 +65,10 @@ func LifecycleCmd(fn LifecycleAction) cli.ActionFunc { // Graceful stop context. // This allows the service to idle before shutdown, if halted. User may interrupt. - stopCtx, stopCancel := context.WithCancelCause(hostCtx) - go func() { - blockOnInterrupt(stopCtx) - stopCancel(interruptErr) - }() + stopCtx := ctxinterrupt.WithCancelOnInterrupt(hostCtx) // Execute graceful stop. stopErr := appLifecycle.Stop(stopCtx) - stopCancel(nil) // note: Stop implementation may choose to suppress a context error, // if it handles it well (e.g. stop idling after a halt). 
if stopErr != nil { diff --git a/op-service/cliapp/lifecycle_test.go b/op-service/cliapp/lifecycle_test.go index 4c421a35493e..d9ffd086ed73 100644 --- a/op-service/cliapp/lifecycle_test.go +++ b/op-service/cliapp/lifecycle_test.go @@ -9,9 +9,11 @@ import ( "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" - "github.com/ethereum-optimism/optimism/op-service/opio" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" ) +var mockInterruptErr = errors.New("mock interrupt") + type fakeLifecycle struct { startCh, stopCh chan error stopped bool @@ -85,11 +87,14 @@ func TestLifecycleCmd(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) // puppeteer system signal interrupts by hooking up the test signal channel as "blocker" for the app to use. - ctx = opio.WithBlocker(ctx, func(ctx context.Context) { + ctx = ctxinterrupt.WithWaiterFunc(ctx, func(ctx context.Context) (interrupt, ctxErr error) { select { case <-ctx.Done(): + ctxErr = context.Cause(ctx) case <-signalCh: + interrupt = mockInterruptErr } + return }) t.Cleanup(cancel) @@ -124,7 +129,7 @@ func TestLifecycleCmd(t *testing.T) { signalCh, _, _, _, resultCh, _ := appSetup(t) signalCh <- struct{}{} res := <-resultCh - require.ErrorIs(t, res, interruptErr) + require.ErrorIs(t, res, mockInterruptErr) require.ErrorContains(t, res, "failed to setup") }) t.Run("failed init", func(t *testing.T) { @@ -142,7 +147,7 @@ func TestLifecycleCmd(t *testing.T) { require.False(t, app.Stopped()) signalCh <- struct{}{} res := <-resultCh - require.ErrorIs(t, res, interruptErr) + require.ErrorIs(t, res, mockInterruptErr) require.ErrorContains(t, res, "failed to start") require.True(t, app.Stopped()) }) @@ -178,7 +183,7 @@ func TestLifecycleCmd(t *testing.T) { signalCh <- struct{}{} // start graceful shutdown signalCh <- struct{}{} // interrupt before the shutdown process is allowed to complete res := <-resultCh - require.ErrorIs(t, res, interruptErr) + require.ErrorIs(t, res, 
mockInterruptErr) require.ErrorContains(t, res, "failed to stop") require.True(t, app.Stopped()) // still fully closes, interrupts only accelerate shutdown where possible. }) diff --git a/op-service/ctxinterrupt/context.go b/op-service/ctxinterrupt/context.go new file mode 100644 index 000000000000..d7e7446d0dc3 --- /dev/null +++ b/op-service/ctxinterrupt/context.go @@ -0,0 +1,29 @@ +package ctxinterrupt + +import ( + "context" +) + +// Newtyping empty struct prevents collision with other empty struct keys in the Context. +type interruptWaiterContextKeyType struct{} + +var waiterContextKey = interruptWaiterContextKeyType{} + +// WithInterruptWaiter overrides the interrupt waiter value, e.g. to insert a function that mocks +// interrupt signals for testing CLI shutdown without actual process signals. +func WithWaiterFunc(ctx context.Context, fn WaiterFunc) context.Context { + return withInterruptWaiter(ctx, fn) +} + +func withInterruptWaiter(ctx context.Context, value waiter) context.Context { + return context.WithValue(ctx, waiterContextKey, value) +} + +// contextInterruptWaiter returns a interruptWaiter that blocks on interrupts when called. 
+func contextInterruptWaiter(ctx context.Context) waiter { + v := ctx.Value(waiterContextKey) + if v == nil { + return nil + } + return v.(waiter) +} diff --git a/op-service/ctxinterrupt/context_test.go b/op-service/ctxinterrupt/context_test.go new file mode 100644 index 000000000000..c853a3c5f7c6 --- /dev/null +++ b/op-service/ctxinterrupt/context_test.go @@ -0,0 +1,19 @@ +package ctxinterrupt + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestContextKeyIsUnique(t *testing.T) { + ass := require.New(t) + ctx := context.Background() + ass.Nil(ctx.Value(waiterContextKey)) + ctx = context.WithValue(ctx, waiterContextKey, 1) + ass.Equal(ctx.Value(waiterContextKey), 1) + ctx = context.WithValue(ctx, waiterContextKey, 2) + ass.Equal(ctx.Value(waiterContextKey), 2) + ass.Nil(ctx.Value(struct{}{})) +} diff --git a/op-service/ctxinterrupt/doc.go b/op-service/ctxinterrupt/doc.go new file mode 100644 index 000000000000..4846d544eba8 --- /dev/null +++ b/op-service/ctxinterrupt/doc.go @@ -0,0 +1,3 @@ +// Implements interrupts: events that normally signal intent to cancel a Context, but may be +// repeated to encourage closure of new Contexts used to clean up resources. +package ctxinterrupt diff --git a/op-service/ctxinterrupt/funcs.go b/op-service/ctxinterrupt/funcs.go new file mode 100644 index 000000000000..90ab19e19df1 --- /dev/null +++ b/op-service/ctxinterrupt/funcs.go @@ -0,0 +1,55 @@ +package ctxinterrupt + +import ( + "context" +) + +// Wait blocks until an interrupt is received, defaulting to interrupting on the default +// signals if no interrupt blocker is present in the Context. Returns nil if an interrupt occurs, +// else the Context error when it's done. 
+func Wait(ctx context.Context) error { + iw := contextInterruptWaiter(ctx) + if iw == nil { + catcher := newSignalWaiter() + defer catcher.Stop() + iw = catcher + } + return iw.waitForInterrupt(ctx).CtxError +} + +// WithSignalWaiter attaches an interrupt signal handler to the context which continues to receive +// signals after every wait, and also prevents the interrupt signals being handled before we're +// ready to wait for them. This helps functions wait on individual consecutive interrupts. +func WithSignalWaiter(ctx context.Context) (_ context.Context, stop func()) { + if ctx.Value(waiterContextKey) != nil { // already has an interrupt waiter + return ctx, func() {} + } + catcher := newSignalWaiter() + return withInterruptWaiter(ctx, catcher), catcher.Stop +} + +// WithSignalWaiterMain returns a Context with a signal interrupt blocker and leaks the destructor. Intended for use in +// main functions where we exit right after using the returned context anyway. +func WithSignalWaiterMain(ctx context.Context) context.Context { + ctx, _ = WithSignalWaiter(ctx) + return ctx +} + +// WithCancelOnInterrupt returns a Context that is cancelled when Wait returns on the waiter in ctx. +// If there's no waiter, the default interrupt signals are used: In this case the signal hooking is +// not stopped until the original ctx is cancelled. 
+func WithCancelOnInterrupt(ctx context.Context) context.Context { + interruptWaiter := contextInterruptWaiter(ctx) + ctx, cancel := context.WithCancelCause(ctx) + stop := func() {} + if interruptWaiter == nil { + catcher := newSignalWaiter() + stop = catcher.Stop + interruptWaiter = catcher + } + go func() { + defer stop() + cancel(interruptWaiter.waitForInterrupt(ctx).Cause()) + }() + return ctx +} diff --git a/op-service/ctxinterrupt/signal-waiter.go b/op-service/ctxinterrupt/signal-waiter.go new file mode 100644 index 000000000000..a37dde7a981b --- /dev/null +++ b/op-service/ctxinterrupt/signal-waiter.go @@ -0,0 +1,50 @@ +package ctxinterrupt + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" +) + +// defaultSignals is a set of default interrupt signals. +var defaultSignals = []os.Signal{ + // Let's not catch SIGQUIT as it's expected to terminate with a stack trace in Go. os.Kill + // should not/cannot be caught on most systems. + os.Interrupt, + syscall.SIGTERM, +} + +type signalWaiter struct { + incoming chan os.Signal +} + +func newSignalWaiter() signalWaiter { + catcher := signalWaiter{ + // Buffer, in case we are slow to act on older signals, + // but still want to handle repeat-signals as special case (e.g. to force shutdown) + incoming: make(chan os.Signal, 10), + } + signal.Notify(catcher.incoming, defaultSignals...) + return catcher +} + +func (me signalWaiter) Stop() { + signal.Stop(me.incoming) +} + +// Block blocks until either an interrupt signal is received, or the context is cancelled. +// No error is returned on interrupt. +func (me signalWaiter) waitForInterrupt(ctx context.Context) waitResult { + select { + case signalValue, ok := <-me.incoming: + if !ok { + // Signal channels are not closed. 
+ panic("signal channel closed") + } + return waitResult{Interrupt: fmt.Errorf("received interrupt signal %v", signalValue)} + case <-ctx.Done(): + return waitResult{CtxError: context.Cause(ctx)} + } +} diff --git a/op-service/ctxinterrupt/waiter.go b/op-service/ctxinterrupt/waiter.go new file mode 100644 index 000000000000..4e32a84b0a44 --- /dev/null +++ b/op-service/ctxinterrupt/waiter.go @@ -0,0 +1,38 @@ +package ctxinterrupt + +import ( + "context" + "fmt" +) + +// waiter describes a value that can wait for interrupts and context cancellation at the same time. +type waiter interface { + waitForInterrupt(ctx context.Context) waitResult +} + +// Waits for an interrupt or context cancellation. ctxErr should be the context.Cause of ctx when it +// is done. interrupt is only inspected if ctxErr is nil, and is not required to be set. +type WaiterFunc func(ctx context.Context) (interrupt, ctxErr error) + +func (me WaiterFunc) waitForInterrupt(ctx context.Context) (res waitResult) { + res.Interrupt, res.CtxError = me(ctx) + return +} + +// Either CtxError is not nil and is set to the context error cause, or the wait was interrupted. +type waitResult struct { + // Not required to be non-nil on an interrupt. + Interrupt error + // Maybe set this using context.Cause. + CtxError error +} + +func (me waitResult) Cause() error { + if me.CtxError != nil { + return me.CtxError + } + if me.Interrupt != nil { + return fmt.Errorf("interrupted: %w", me.Interrupt) + } + return nil +} diff --git a/op-service/opio/interrupts.go b/op-service/opio/interrupts.go deleted file mode 100644 index cd1b8485791d..000000000000 --- a/op-service/opio/interrupts.go +++ /dev/null @@ -1,114 +0,0 @@ -package opio - -import ( - "context" - "os" - "os/signal" - "syscall" -) - -// DefaultInterruptSignals is a set of default interrupt signals. 
-var DefaultInterruptSignals = []os.Signal{ - os.Interrupt, - os.Kill, - syscall.SIGTERM, - syscall.SIGQUIT, -} - -// BlockOnInterrupts blocks until a SIGTERM is received. -// Passing in signals will override the default signals. -func BlockOnInterrupts(signals ...os.Signal) { - if len(signals) == 0 { - signals = DefaultInterruptSignals - } - interruptChannel := make(chan os.Signal, 1) - signal.Notify(interruptChannel, signals...) - <-interruptChannel -} - -// BlockOnInterruptsContext blocks until a SIGTERM is received. -// Passing in signals will override the default signals. -// The function will stop blocking if the context is closed. -func BlockOnInterruptsContext(ctx context.Context, signals ...os.Signal) { - if len(signals) == 0 { - signals = DefaultInterruptSignals - } - interruptChannel := make(chan os.Signal, 1) - signal.Notify(interruptChannel, signals...) - select { - case <-interruptChannel: - case <-ctx.Done(): - signal.Stop(interruptChannel) - } -} - -type interruptContextKeyType struct{} - -var blockerContextKey = interruptContextKeyType{} - -type interruptCatcher struct { - incoming chan os.Signal -} - -// Block blocks until either an interrupt signal is received, or the context is cancelled. -// No error is returned on interrupt. -func (c *interruptCatcher) Block(ctx context.Context) { - select { - case <-c.incoming: - case <-ctx.Done(): - } -} - -// WithInterruptBlocker attaches an interrupt handler to the context, -// which continues to receive signals after every block. -// This helps functions block on individual consecutive interrupts. -func WithInterruptBlocker(ctx context.Context) context.Context { - if ctx.Value(blockerContextKey) != nil { // already has an interrupt handler - return ctx - } - catcher := &interruptCatcher{ - incoming: make(chan os.Signal, 10), - } - signal.Notify(catcher.incoming, DefaultInterruptSignals...) 
- - return context.WithValue(ctx, blockerContextKey, BlockFn(catcher.Block)) -} - -// WithBlocker overrides the interrupt blocker value, -// e.g. to insert a block-function for testing CLI shutdown without actual process signals. -func WithBlocker(ctx context.Context, fn BlockFn) context.Context { - return context.WithValue(ctx, blockerContextKey, fn) -} - -// BlockFn simply blocks until the implementation of the blocker interrupts it, or till the given context is cancelled. -type BlockFn func(ctx context.Context) - -// BlockerFromContext returns a BlockFn that blocks on interrupts when called. -func BlockerFromContext(ctx context.Context) BlockFn { - v := ctx.Value(blockerContextKey) - if v == nil { - return nil - } - return v.(BlockFn) -} - -// CancelOnInterrupt cancels the given context on interrupt. -// If a BlockFn is attached to the context, this is used as interrupt-blocking. -// If not, then the context blocks on a manually handled interrupt signal. -func CancelOnInterrupt(ctx context.Context) context.Context { - inner, cancel := context.WithCancel(ctx) - - blockOnInterrupt := BlockerFromContext(ctx) - if blockOnInterrupt == nil { - blockOnInterrupt = func(ctx context.Context) { - BlockOnInterruptsContext(ctx) // default signals - } - } - - go func() { - blockOnInterrupt(ctx) - cancel() - }() - - return inner -} diff --git a/op-service/util.go b/op-service/util.go index 0a51147fd7d8..e9dae2ec033f 100644 --- a/op-service/util.go +++ b/op-service/util.go @@ -5,13 +5,13 @@ import ( "errors" "fmt" "os" - "os/signal" "path/filepath" "reflect" "strings" - "syscall" "time" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/urfave/cli/v2" @@ -86,37 +86,27 @@ func ParseAddress(address string) (common.Address, error) { return common.Address{}, fmt.Errorf("invalid address: %v", address) } -// CloseAction runs the function in the background, until it finishes or 
until it is closed by the user with an interrupt. -func CloseAction(fn func(ctx context.Context, shutdown <-chan struct{}) error) error { - stopped := make(chan error, 1) - shutdown := make(chan struct{}, 1) - - ctx, cancel := context.WithCancel(context.Background()) +// CloseAction runs the function in the background, until it finishes or until it is closed by the +// user with an interrupt. +func CloseAction(ctx context.Context, fn func(ctx context.Context) error) error { + ctx, stop := ctxinterrupt.WithSignalWaiter(ctx) + defer stop() + finished := make(chan error, 1) go func() { - stopped <- fn(ctx, shutdown) + finished <- fn(ctx) }() - doneCh := make(chan os.Signal, 1) - signal.Notify(doneCh, []os.Signal{ - os.Interrupt, - os.Kill, - syscall.SIGTERM, - syscall.SIGQUIT, - }...) - select { - case <-doneCh: - cancel() - shutdown <- struct{}{} - + case <-ctx.Done(): + // Stop catching interrupts. + stop() select { - case err := <-stopped: + case err := <-finished: return err case <-time.After(time.Second * 10): return errors.New("command action is unresponsive for more than 10 seconds... 
shutting down") } - case err := <-stopped: - cancel() + case err := <-finished: return err } } diff --git a/op-supervisor/cmd/main.go b/op-supervisor/cmd/main.go index 01444e01b925..8e306bf9009d 100644 --- a/op-supervisor/cmd/main.go +++ b/op-supervisor/cmd/main.go @@ -11,9 +11,9 @@ import ( opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics/doc" - "github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-supervisor/flags" "github.com/ethereum-optimism/optimism/op-supervisor/metrics" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" @@ -26,7 +26,7 @@ var ( ) func main() { - ctx := opio.WithInterruptBlocker(context.Background()) + ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) err := run(ctx, os.Args, fromConfig) if err != nil { log.Crit("Application failed", "message", err) diff --git a/op-wheel/commands.go b/op-wheel/commands.go index 5a643b7ec12d..521578a34cb6 100644 --- a/op-wheel/commands.go +++ b/op-wheel/commands.go @@ -504,7 +504,7 @@ var ( metricsCfg := opmetrics.ReadCLIConfig(ctx) - return opservice.CloseAction(func(ctx context.Context, shutdown <-chan struct{}) error { + return opservice.CloseAction(ctx.Context, func(ctx context.Context) error { registry := opmetrics.NewRegistry() metrics := engine.NewMetrics("wheel", registry) if metricsCfg.Enabled { @@ -519,7 +519,7 @@ var ( } }() } - return engine.Auto(ctx, metrics, client, l, shutdown, settings) + return engine.Auto(ctx, metrics, client, l, settings) }) }), } diff --git a/op-wheel/engine/engine.go b/op-wheel/engine/engine.go index 8518a48031b0..8a00e212b3ca 100644 --- a/op-wheel/engine/engine.go +++ b/op-wheel/engine/engine.go @@ -189,7 +189,13 @@ func newPayloadAttributes(evp 
sources.EngineVersionProvider, timestamp uint64, p return pa } -func Auto(ctx context.Context, metrics Metricer, client *sources.EngineAPIClient, log log.Logger, shutdown <-chan struct{}, settings *BlockBuildingSettings) error { +func Auto( + ctx context.Context, + metrics Metricer, + client *sources.EngineAPIClient, + log log.Logger, + settings *BlockBuildingSettings, +) error { ticker := time.NewTicker(time.Millisecond * 100) defer ticker.Stop() @@ -197,9 +203,6 @@ func Auto(ctx context.Context, metrics Metricer, client *sources.EngineAPIClient var buildErr error for { select { - case <-shutdown: - log.Info("shutting down") - return nil case <-ctx.Done(): log.Info("context closed", "err", ctx.Err()) return ctx.Err() From f370113e8de436ad2af5dd093b45366bc8ab352f Mon Sep 17 00:00:00 2001 From: AgusDuha <81362284+agusduha@users.noreply.github.com> Date: Fri, 30 Aug 2024 17:35:38 -0300 Subject: [PATCH 09/19] test: fix superchain erc20 invariants (#11688) --------- Co-authored-by: 0xDiscotech <131301107+0xDiscotech@users.noreply.github.com> --- .../invariant-docs/OptimismSuperchainERC20.md | 4 ++-- .../test/invariants/OptimismSuperchainERC20.t.sol | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/contracts-bedrock/invariant-docs/OptimismSuperchainERC20.md b/packages/contracts-bedrock/invariant-docs/OptimismSuperchainERC20.md index 0e3150624da5..13d03f304d45 100644 --- a/packages/contracts-bedrock/invariant-docs/OptimismSuperchainERC20.md +++ b/packages/contracts-bedrock/invariant-docs/OptimismSuperchainERC20.md @@ -1,10 +1,10 @@ # `OptimismSuperchainERC20` Invariants ## Calls to sendERC20 should always succeed as long as the actor has enough balance. Actor's balance should also not increase out of nowhere but instead should decrease by the amount sent. 
-**Test:** [`OptimismSuperchainERC20.t.sol#L194`](../test/invariants/OptimismSuperchainERC20.t.sol#L194) +**Test:** [`OptimismSuperchainERC20.t.sol#L196`](../test/invariants/OptimismSuperchainERC20.t.sol#L196) ## Calls to relayERC20 should always succeeds when a message is received from another chain. Actor's balance should only increase by the amount relayed. -**Test:** [`OptimismSuperchainERC20.t.sol#L212`](../test/invariants/OptimismSuperchainERC20.t.sol#L212) +**Test:** [`OptimismSuperchainERC20.t.sol#L214`](../test/invariants/OptimismSuperchainERC20.t.sol#L214) diff --git a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20.t.sol index 028a0124e6ca..70081560c59f 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20.t.sol @@ -62,6 +62,8 @@ contract OptimismSuperchainERC20_User is StdUtils { return; } + if (_chainId == block.chainid) return; + // Bound send amount to our ERC20 balance. 
_amount = bound(_amount, 0, superchainERC20.balanceOf(address(this))); From 814c9de7d8cda1b9eaf11ffbc1f6191090b63269 Mon Sep 17 00:00:00 2001 From: AgusDuha <81362284+agusduha@users.noreply.github.com> Date: Fri, 30 Aug 2024 18:34:40 -0300 Subject: [PATCH 10/19] feat: add createX preinstall (#29) (#11618) * feat: add createX preinstall * feat: change name from CreateXDeployer to CreateX --- packages/contracts-bedrock/scripts/L2Genesis.s.sol | 1 + packages/contracts-bedrock/src/libraries/Preinstalls.sol | 8 ++++++++ packages/contracts-bedrock/test/L2Genesis.t.sol | 2 +- packages/contracts-bedrock/test/Preinstalls.t.sol | 4 ++++ packages/contracts-bedrock/test/setup/Setup.sol | 1 + 5 files changed, 15 insertions(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 7d4568653936..ba2c5e4bf4f4 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -525,6 +525,7 @@ contract L2Genesis is Deployer { _setPreinstallCode(Preinstalls.SenderCreator_v070); // ERC 4337 v0.7.0 _setPreinstallCode(Preinstalls.EntryPoint_v070); // ERC 4337 v0.7.0 _setPreinstallCode(Preinstalls.BeaconBlockRoots); + _setPreinstallCode(Preinstalls.CreateX); // 4788 sender nonce must be incremented, since it's part of later upgrade-transactions. // For the upgrade-tx to not create a contract that conflicts with an already-existing copy, // the nonce must be bumped. diff --git a/packages/contracts-bedrock/src/libraries/Preinstalls.sol b/packages/contracts-bedrock/src/libraries/Preinstalls.sol index 327b34c0abf4..db886f57f10a 100644 --- a/packages/contracts-bedrock/src/libraries/Preinstalls.sol +++ b/packages/contracts-bedrock/src/libraries/Preinstalls.sol @@ -44,6 +44,9 @@ library Preinstalls { /// @notice Address of the EntryPoint_v070 predeploy. 
address internal constant EntryPoint_v070 = 0x0000000071727De22E5E9d8BAf0edAc6f37da032; + /// @notice Address of the CreateX predeploy. + address internal constant CreateX = 0xba5Ed099633D3B313e4D5F7bdc1305d3c28ba5Ed; + /// @notice Address of beacon block roots contract, introduced in the Cancun upgrade. /// See BEACON_ROOTS_ADDRESS in EIP-4788. /// This contract is introduced in L2 through an Ecotone upgrade transaction, if not already in genesis. @@ -99,6 +102,9 @@ library Preinstalls { bytes internal constant EntryPoint_v070Code = hex"60806040526004361015610024575b361561001957600080fd5b61002233612748565b005b60003560e01c806242dc5314611b0057806301ffc9a7146119ae5780630396cb60146116765780630bd28e3b146115fa5780631b2e01b814611566578063205c2878146113d157806322cdde4c1461136b57806335567e1a146112b35780635287ce12146111a557806370a0823114611140578063765e827f14610e82578063850aaf6214610dc35780639b249f6914610c74578063b760faf914610c3a578063bb9fe6bf14610a68578063c23a5cea146107c4578063dbed18e0146101a15763fc7e286d0361000e573461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff61013a61229f565b16600052600060205260a0604060002065ffffffffffff6001825492015460405192835260ff8116151560208401526dffffffffffffffffffffffffffff8160081c16604084015263ffffffff8160781c16606084015260981c166080820152f35b600080fd5b3461019c576101af36612317565b906101b86129bd565b60009160005b82811061056f57506101d08493612588565b6000805b8481106102fc5750507fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f972600080a16000809360005b81811061024757610240868660007f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d8180a2613ba7565b6001600255005b6102a261025582848a612796565b73ffffffffffffffffffffffffffffffffffffffff6102766020830161282a565b167f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d600080a2806127d6565b906000915b8083106102b957505050600101610209565b909194976102f36102ed6001926102e78c8b6102e0826102da8e8b8d6126
9d565b9261265a565b5191613597565b90612409565b99612416565b950191906102a7565b6020610309828789612796565b61031f61031682806127d6565b9390920161282a565b9160009273ffffffffffffffffffffffffffffffffffffffff8091165b8285106103505750505050506001016101d4565b909192939561037f83610378610366848c61265a565b516103728b898b61269d565b856129f6565b9290613dd7565b9116840361050a576104a5576103958491613dd7565b9116610440576103b5576103aa600191612416565b96019392919061033c565b60a487604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602160448201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560648201527f65000000000000000000000000000000000000000000000000000000000000006084820152fd5b608488604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413334207369676e6174757265206572726f720000000000000000000000006064820152fd5b608488604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f414132322065787069726564206f72206e6f74206475650000000000000000006064820152fd5b608489604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413234207369676e6174757265206572726f720000000000000000000000006064820152fd5b61057a818487612796565b9361058585806127d6565b919095602073ffffffffffffffffffffffffffffffffffffffff6105aa82840161282a565b1697600192838a1461076657896105da575b5050505060019293949550906105d191612409565b939291016101be565b8060406105e892019061284b565b918a3b1561019c57929391906040519485937f2dd8113300000000000000000000000000000000000000000000000000000000855288604486016040600488015252606490818601918a60051b8701019680936000915b8c83106106e657505050505050838392610684927ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc8560009803016024860152612709565b03818a5afa90816106d7575b506106c657602486604051907f86a9f750000000000000000000000000000000000000000000000000000000008252
6004820152fd5b93945084936105d1600189806105bc565b6106e0906121bd565b88610690565b91939596977fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c908a9294969a0301865288357ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee18336030181121561019c57836107538793858394016128ec565b9a0196019301909189979695949261063f565b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601760248201527f4141393620696e76616c69642061676772656761746f720000000000000000006044820152fd5b3461019c576020807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c576107fc61229f565b33600052600082526001604060002001908154916dffffffffffffffffffffffffffff8360081c16928315610a0a5765ffffffffffff8160981c1680156109ac57421061094e5760009373ffffffffffffffffffffffffffffffffffffffff859485947fffffffffffffff000000000000000000000000000000000000000000000000ff86951690556040517fb7c918e0e249f999e965cafeb6c664271b3f4317d296461500e71da39f0cbda33391806108da8786836020909392919373ffffffffffffffffffffffffffffffffffffffff60408201951681520152565b0390a2165af16108e8612450565b50156108f057005b606490604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601860248201527f6661696c656420746f207769746864726177207374616b6500000000000000006044820152fd5b606485604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601b60248201527f5374616b65207769746864726177616c206973206e6f742064756500000000006044820152fd5b606486604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601d60248201527f6d7573742063616c6c20756e6c6f636b5374616b6528292066697273740000006044820152fd5b606485604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601460248201527f4e6f207374616b6520746f2077697468647261770000000000000000000000006044820152fd5b3461019c5760007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5733600052600060205260016040600020
01805463ffffffff8160781c16908115610bdc5760ff1615610b7e5765ffffffffffff908142160191818311610b4f5780547fffffffffffffff000000000000ffffffffffffffffffffffffffffffffffff001678ffffffffffff00000000000000000000000000000000000000609885901b161790556040519116815233907ffa9b3c14cc825c412c9ed81b3ba365a5b459439403f18829e572ed53a4180f0a90602090a2005b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f616c726561647920756e7374616b696e670000000000000000000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600a60248201527f6e6f74207374616b6564000000000000000000000000000000000000000000006044820152fd5b60207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c57610022610c6f61229f565b612748565b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043567ffffffffffffffff811161019c576020610cc8610d1b9236906004016122c2565b919073ffffffffffffffffffffffffffffffffffffffff9260405194859283927f570e1a360000000000000000000000000000000000000000000000000000000084528560048501526024840191612709565b03816000857f000000000000000000000000efc2c1444ebcc4db75e7613d20c6a62ff67a167c165af1908115610db757602492600092610d86575b50604051917f6ca7b806000000000000000000000000000000000000000000000000000000008352166004820152fd5b610da991925060203d602011610db0575b610da181836121ed565b8101906126dd565b9083610d56565b503d610d97565b6040513d6000823e3d90fd5b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c57610dfa61229f565b60243567ffffffffffffffff811161019c57600091610e1e839236906004016122c2565b90816040519283928337810184815203915af4610e39612450565b90610e7e6040519283927f99410554000000000000000000000000000000000000000000000000000000008452151560048401526040602484015260448301906123c6565b0390fd5b3461019c57610e903661231756
5b610e9b9291926129bd565b610ea483612588565b60005b848110610f1c57506000927fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f972600080a16000915b858310610eec576102408585613ba7565b909193600190610f12610f0087898761269d565b610f0a888661265a565b519088613597565b0194019190610edb565b610f47610f40610f2e8385979561265a565b51610f3a84898761269d565b846129f6565b9190613dd7565b73ffffffffffffffffffffffffffffffffffffffff929183166110db5761107657610f7190613dd7565b911661101157610f8657600101929092610ea7565b60a490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602160448201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560648201527f65000000000000000000000000000000000000000000000000000000000000006084820152fd5b608482604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413334207369676e6174757265206572726f720000000000000000000000006064820152fd5b608483604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f414132322065787069726564206f72206e6f74206475650000000000000000006064820152fd5b608484604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413234207369676e6174757265206572726f720000000000000000000000006064820152fd5b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff61118c61229f565b1660005260006020526020604060002054604051908152f35b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff6111f161229f565b6000608060405161120181612155565b828152826020820152826040820152826060820152015216600052600060205260a06040600020608060405161123681612155565b6001835493848352015490602081019060ff8316151582526dffffffffffffffffffffffffffff60408201818560081c16815263ffffffff936060840193858760
781c16855265ffffffffffff978891019660981c1686526040519788525115156020880152511660408601525116606084015251166080820152f35b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760206112ec61229f565b73ffffffffffffffffffffffffffffffffffffffff6113096122f0565b911660005260018252604060002077ffffffffffffffffffffffffffffffffffffffffffffffff821660005282526040600020547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000006040519260401b16178152f35b3461019c577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc60208136011261019c576004359067ffffffffffffffff821161019c5761012090823603011261019c576113c9602091600401612480565b604051908152f35b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5761140861229f565b60243590336000526000602052604060002090815491828411611508576000808573ffffffffffffffffffffffffffffffffffffffff8295839561144c848a612443565b90556040805173ffffffffffffffffffffffffffffffffffffffff831681526020810185905233917fd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb91a2165af16114a2612450565b50156114aa57005b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6661696c656420746f20776974686472617700000000000000000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f576974686472617720616d6f756e7420746f6f206c61726765000000000000006044820152fd5b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5761159d61229f565b73ffffffffffffffffffffffffffffffffffffffff6115ba6122f0565b9116600052600160205277ffffffffffffffffffffffffffffffffffffffffffffffff604060002091166000526020526020604060002054604051908152f35b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043577ffffffffffffffffffffffffffffffffffffffffffffffff811680910361019c573360005260016020526040600020906000
5260205260406000206116728154612416565b9055005b6020807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043563ffffffff9182821680920361019c5733600052600081526040600020928215611950576001840154908160781c1683106118f2576116f86dffffffffffffffffffffffffffff9182349160081c16612409565b93841561189457818511611836579065ffffffffffff61180592546040519061172082612155565b8152848101926001845260408201908816815260608201878152600160808401936000855233600052600089526040600020905181550194511515917fffffffffffffffffffffffffff0000000000000000000000000000000000000060ff72ffffffff0000000000000000000000000000006effffffffffffffffffffffffffff008954945160081b16945160781b1694169116171717835551167fffffffffffffff000000000000ffffffffffffffffffffffffffffffffffffff78ffffffffffff0000000000000000000000000000000000000083549260981b169116179055565b6040519283528201527fa5ae833d0bb1dcd632d98a8b70973e8516812898e19bf27b70071ebc8dc52c0160403392a2005b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152600e60248201527f7374616b65206f766572666c6f770000000000000000000000000000000000006044820152fd5b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601260248201527f6e6f207374616b652073706563696669656400000000000000000000000000006044820152fd5b606482604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601c60248201527f63616e6e6f7420646563726561736520756e7374616b652074696d65000000006044820152fd5b606482604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601a60248201527f6d757374207370656369667920756e7374616b652064656c61790000000000006044820152fd5b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c576004357fffffffff00000000000000000000000000000000000000000000000000000000811680910361019c57807f60fc6b6e0000000000000000000000000000000000000000000000000000000060209214908115611ad6575b8115611aac575b8115611a82575b81
15611a58575b506040519015158152f35b7f01ffc9a70000000000000000000000000000000000000000000000000000000091501482611a4d565b7f3e84f0210000000000000000000000000000000000000000000000000000000081149150611a46565b7fcf28ef970000000000000000000000000000000000000000000000000000000081149150611a3f565b7f915074d80000000000000000000000000000000000000000000000000000000081149150611a38565b3461019c576102007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5767ffffffffffffffff60043581811161019c573660238201121561019c57611b62903690602481600401359101612268565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc36016101c0811261019c5761014060405191611b9e83612155565b1261019c5760405192611bb0846121a0565b60243573ffffffffffffffffffffffffffffffffffffffff8116810361019c578452602093604435858201526064356040820152608435606082015260a435608082015260c43560a082015260e43560c08201526101043573ffffffffffffffffffffffffffffffffffffffff8116810361019c5760e08201526101243561010082015261014435610120820152825261016435848301526101843560408301526101a43560608301526101c43560808301526101e43590811161019c57611c7c9036906004016122c2565b905a3033036120f7578351606081015195603f5a0260061c61271060a0840151890101116120ce5760009681519182611ff0575b5050505090611cca915a9003608085015101923691612268565b925a90600094845193611cdc85613ccc565b9173ffffffffffffffffffffffffffffffffffffffff60e0870151168015600014611ea957505073ffffffffffffffffffffffffffffffffffffffff855116935b5a9003019360a06060820151910151016080860151850390818111611e95575b50508302604085015192818410600014611dce5750506003811015611da157600203611d79576113c99293508093611d7481613d65565b613cf6565b5050507fdeadaa51000000000000000000000000000000000000000000000000000000008152fd5b6024857f4e487b710000000000000000000000000000000000000000000000000000000081526021600452fd5b81611dde92979396940390613c98565b506003841015611e6857507f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f60808683015192519473ffffffffffffffffffffffffffffffffffffffff865116948873ff
ffffffffffffffffffffffffffffffffffffff60e0890151169701519160405192835215898301528760408301526060820152a46113c9565b807f4e487b7100000000000000000000000000000000000000000000000000000000602492526021600452fd5b6064919003600a0204909301928780611d3d565b8095918051611eba575b5050611d1d565b6003861015611fc1576002860315611eb35760a088015190823b1561019c57600091611f2491836040519586809581947f7c627b210000000000000000000000000000000000000000000000000000000083528d60048401526080602484015260848301906123c6565b8b8b0260448301528b60648301520393f19081611fad575b50611fa65787893d610800808211611f9e575b506040519282828501016040528184528284013e610e7e6040519283927fad7954bc000000000000000000000000000000000000000000000000000000008452600484015260248301906123c6565b905083611f4f565b8980611eb3565b611fb89199506121bd565b6000978a611f3c565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91600092918380938c73ffffffffffffffffffffffffffffffffffffffff885116910192f115612023575b808080611cb0565b611cca929195503d6108008082116120c6575b5060405190888183010160405280825260008983013e805161205f575b5050600194909161201b565b7f1c4fada7374c0a9ee8841fc38afe82932dc0f8e69012e927f061a8bae611a20188870151918973ffffffffffffffffffffffffffffffffffffffff8551169401516120bc604051928392835260408d84015260408301906123c6565b0390a38680612053565b905088612036565b877fdeaddead000000000000000000000000000000000000000000000000000000006000526000fd5b606486604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601760248201527f4141393220696e7465726e616c2063616c6c206f6e6c790000000000000000006044820152fd5b60a0810190811067ffffffffffffffff82111761217157604052565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b610140810190811067ffffffffffffffff82111761217157604052565b67ffffffffffffffff811161217157604052565b6060810190811067ffffffffffffffff82111761217157604052565b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190
811067ffffffffffffffff82111761217157604052565b67ffffffffffffffff811161217157601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b9291926122748261222e565b9161228260405193846121ed565b82948184528183011161019c578281602093846000960137010152565b6004359073ffffffffffffffffffffffffffffffffffffffff8216820361019c57565b9181601f8401121561019c5782359167ffffffffffffffff831161019c576020838186019501011161019c57565b6024359077ffffffffffffffffffffffffffffffffffffffffffffffff8216820361019c57565b9060407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc83011261019c5760043567ffffffffffffffff9283821161019c578060238301121561019c57816004013593841161019c5760248460051b8301011161019c57602401919060243573ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b60005b8381106123b65750506000910152565b81810151838201526020016123a6565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f602093612402815180928187528780880191016123a3565b0116010190565b91908201809211610b4f57565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610b4f5760010190565b91908203918211610b4f57565b3d1561247b573d906124618261222e565b9161246f60405193846121ed565b82523d6000602084013e565b606090565b604061248e8183018361284b565b90818351918237206124a3606084018461284b565b90818451918237209260c06124bb60e083018361284b565b908186519182372091845195602087019473ffffffffffffffffffffffffffffffffffffffff833516865260208301358789015260608801526080870152608081013560a087015260a081013582870152013560e08501526101009081850152835261012083019167ffffffffffffffff918484108385111761217157838252845190206101408501908152306101608601524661018086015260608452936101a00191821183831017612171575251902090565b67ffffffffffffffff81116121715760051b60200190565b9061259282612570565b6040906125a260405191826121ed565b8381527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06125d08295612570565b019160005b8381106125e25750505050565b60209082516125f081612155565b83516125fb816121a0565b60008152
6000849181838201528187820152816060818184015260809282848201528260a08201528260c08201528260e082015282610100820152826101208201528652818587015281898701528501528301528286010152016125d5565b805182101561266e5760209160051b010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b919081101561266e5760051b810135907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee18136030182121561019c570190565b9081602091031261019c575173ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b601f82602094937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0938186528686013760008582860101520116010190565b7f2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c4602073ffffffffffffffffffffffffffffffffffffffff61278a3485613c98565b936040519485521692a2565b919081101561266e5760051b810135907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa18136030182121561019c570190565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561019c570180359067ffffffffffffffff821161019c57602001918160051b3603831361019c57565b3573ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561019c570180359067ffffffffffffffff821161019c5760200191813603831361019c57565b90357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18236030181121561019c57016020813591019167ffffffffffffffff821161019c57813603831361019c57565b61012091813573ffffffffffffffffffffffffffffffffffffffff811680910361019c576129626129476129ba9561299b93855260208601356020860152612937604087018761289c565b9091806040880152860191612709565b612954606086018661289c565b908583036060870152612709565b6080840135608084015260a084013560a084015260c084013560c084015261298d60e085018561289c565b9084830360e0860152612709565b916129ac610100918281019061289c565b929091818503910152612709565b90565b60028054146129cc5760028055565b60046040517f3ee5aeb50000000000000000000000000000000000
00000000000000000000008152fd5b926000905a93805194843573ffffffffffffffffffffffffffffffffffffffff811680910361019c5786526020850135602087015260808501356fffffffffffffffffffffffffffffffff90818116606089015260801c604088015260a086013560c088015260c086013590811661010088015260801c610120870152612a8060e086018661284b565b801561357b576034811061351d578060141161019c578060241161019c5760341161019c57602481013560801c60a0880152601481013560801c60808801523560601c60e08701525b612ad285612480565b60208301526040860151946effffffffffffffffffffffffffffff8660c08901511760608901511760808901511760a0890151176101008901511761012089015117116134bf57604087015160608801510160808801510160a08801510160c0880151016101008801510296835173ffffffffffffffffffffffffffffffffffffffff81511690612b66604085018561284b565b806131e4575b505060e0015173ffffffffffffffffffffffffffffffffffffffff1690600082156131ac575b6020612bd7918b828a01516000868a604051978896879586937f19822f7c00000000000000000000000000000000000000000000000000000000855260048501613db5565b0393f160009181613178575b50612c8b573d8c610800808311612c83575b50604051916020818401016040528083526000602084013e610e7e6040519283927f65c8fd4d000000000000000000000000000000000000000000000000000000008452600484015260606024840152600d60648401527f4141323320726576657274656400000000000000000000000000000000000000608484015260a0604484015260a48301906123c6565b915082612bf5565b9a92939495969798999a91156130f2575b509773ffffffffffffffffffffffffffffffffffffffff835116602084015190600052600160205260406000208160401c60005260205267ffffffffffffffff604060002091825492612cee84612416565b9055160361308d575a8503116130285773ffffffffffffffffffffffffffffffffffffffff60e0606093015116612d42575b509060a09184959697986040608096015260608601520135905a900301910152565b969550505a9683519773ffffffffffffffffffffffffffffffffffffffff60e08a01511680600052600060205260406000208054848110612fc3576080612dcd9a9b9c600093878094039055015192602089015183604051809d819582947f52b7512c0000000000000000000000000000000000000000000000000000000084528c60048501613db5565b0392
86f1978860009160009a612f36575b50612e86573d8b610800808311612e7e575b50604051916020818401016040528083526000602084013e610e7e6040519283927f65c8fd4d000000000000000000000000000000000000000000000000000000008452600484015260606024840152600d60648401527f4141333320726576657274656400000000000000000000000000000000000000608484015260a0604484015260a48301906123c6565b915082612df0565b9991929394959697989998925a900311612eab57509096959094939291906080612d20565b60a490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602760448201527f41413336206f766572207061796d6173746572566572696669636174696f6e4760648201527f61734c696d6974000000000000000000000000000000000000000000000000006084820152fd5b915098503d90816000823e612f4b82826121ed565b604081838101031261019c5780519067ffffffffffffffff821161019c57828101601f83830101121561019c578181015191612f868361222e565b93612f9460405195866121ed565b838552820160208483850101011161019c57602092612fba9184808701918501016123a3565b01519838612dde565b60848b604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601e60448201527f41413331207061796d6173746572206465706f73697420746f6f206c6f7700006064820152fd5b608490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601e60448201527f41413236206f76657220766572696669636174696f6e4761734c696d697400006064820152fd5b608482604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601a60448201527f4141323520696e76616c6964206163636f756e74206e6f6e63650000000000006064820152fd5b600052600060205260406000208054808c11613113578b9003905538612c9c565b608484604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f41413231206469646e2774207061792070726566756e640000000000000000006064820152fd5b9091506020813d6020116131a4575b81613194602093836121ed565b8101031261019c57519038612be3565b3d9150613187565b5080600052600060205260
40600020548a81116000146131d75750612bd7602060005b915050612b92565b6020612bd7918c036131cf565b833b61345a57604088510151602060405180927f570e1a360000000000000000000000000000000000000000000000000000000082528260048301528160008161323260248201898b612709565b039273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000efc2c1444ebcc4db75e7613d20c6a62ff67a167c1690f1908115610db75760009161343b575b5073ffffffffffffffffffffffffffffffffffffffff811680156133d6578503613371573b1561330c5760141161019c5773ffffffffffffffffffffffffffffffffffffffff9183887fd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d604060e0958787602086015195510151168251913560601c82526020820152a391612b6c565b60848d604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602060448201527f4141313520696e6974436f6465206d757374206372656174652073656e6465726064820152fd5b60848e604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602060448201527f4141313420696e6974436f6465206d7573742072657475726e2073656e6465726064820152fd5b60848f604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601b60448201527f4141313320696e6974436f6465206661696c6564206f72204f4f4700000000006064820152fd5b613454915060203d602011610db057610da181836121ed565b3861327c565b60848d604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601f60448201527f414131302073656e64657220616c726561647920636f6e7374727563746564006064820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f41413934206761732076616c756573206f766572666c6f7700000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f4141393320696e76616c6964207061796d6173746572416e64446174610000006044820152fd5b5050600060e087015260006080870152600060a0870152612ac9565b9092915a90606081
0151916040928351967fffffffff00000000000000000000000000000000000000000000000000000000886135d7606084018461284b565b600060038211613b9f575b7f8dd7712f0000000000000000000000000000000000000000000000000000000094168403613a445750505061379d6000926136b292602088015161363a8a5193849360208501528b602485015260648401906128ec565b90604483015203906136727fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0928381018352826121ed565b61379189519485927e42dc5300000000000000000000000000000000000000000000000000000000602085015261020060248501526102248401906123c6565b613760604484018b60806101a091805173ffffffffffffffffffffffffffffffffffffffff808251168652602082015160208701526040820151604087015260608201516060870152838201518487015260a082015160a087015260c082015160c087015260e08201511660e0860152610100808201519086015261012080910151908501526020810151610140850152604081015161016085015260608101516101808501520151910152565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc83820301610204840152876123c6565b039081018352826121ed565b6020918183809351910182305af1600051988652156137bf575b505050505050565b909192939495965060003d8214613a3a575b7fdeaddead00000000000000000000000000000000000000000000000000000000810361385b57608487878051917f220266b600000000000000000000000000000000000000000000000000000000835260048301526024820152600f60448201527f41413935206f7574206f662067617300000000000000000000000000000000006064820152fd5b7fdeadaa510000000000000000000000000000000000000000000000000000000091929395949650146000146138c55750506138a961389e6138b8935a90612443565b608085015190612409565b9083015183611d748295613d65565b905b3880808080806137b7565b909261395290828601518651907ff62676f440ff169a3a9afdbf812e89e7f95975ee8e5c31214ffdef631c5f479273ffffffffffffffffffffffffffffffffffffffff9580878551169401516139483d610800808211613a32575b508a519084818301018c5280825260008583013e8a805194859485528401528a8301906123c6565b0390a35a90612443565b916139636080860193845190612409565b926000905a94829488519761397789613ccc565b948260e08b0151168015600014613a18
57505050875116955b5a9003019560a06060820151910151019051860390818111613a04575b5050840290850151928184106000146139de57505080611e68575090816139d89293611d7481613d65565b906138ba565b6139ee9082849397950390613c98565b50611e68575090826139ff92613cf6565b6139d8565b6064919003600a02049094019338806139ad565b90919892509751613a2a575b50613990565b955038613a24565b905038613920565b8181803e516137d1565b613b97945082935090613a8c917e42dc53000000000000000000000000000000000000000000000000000000006020613b6b9501526102006024860152610224850191612709565b613b3a604484018860806101a091805173ffffffffffffffffffffffffffffffffffffffff808251168652602082015160208701526040820151604087015260608201516060870152838201518487015260a082015160a087015260c082015160c087015260e08201511660e0860152610100808201519086015261012080910151908501526020810151610140850152604081015161016085015260608101516101808501520151910152565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc83820301610204840152846123c6565b037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081018952886121ed565b60008761379d565b5081356135e2565b73ffffffffffffffffffffffffffffffffffffffff168015613c3a57600080809381935af1613bd4612450565b5015613bdc57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f41413931206661696c65642073656e6420746f2062656e6566696369617279006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f4141393020696e76616c69642062656e656669636961727900000000000000006044820152fd5b73ffffffffffffffffffffffffffffffffffffffff166000526000602052613cc66040600020918254612409565b80915590565b610120610100820151910151808214613cf257480180821015613ced575090565b905090565b5090565b9190917f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f6080602083015192519473ffffffffffffffffffffffffffffffffffffffff946020868851169660e089015116970151916040519283526000602084015260408301526060820152a4565b60208101519051907f67
b4fa9642f42120bf031f3051d1824b0fe25627945b27b8a6a65d5761d5482e60208073ffffffffffffffffffffffffffffffffffffffff855116940151604051908152a3565b613dcd604092959493956060835260608301906128ec565b9460208201520152565b8015613e6457600060408051613dec816121d1565b828152826020820152015273ffffffffffffffffffffffffffffffffffffffff811690604065ffffffffffff91828160a01c16908115613e5c575b60d01c92825191613e37836121d1565b8583528460208401521691829101524211908115613e5457509091565b905042109091565b839150613e27565b5060009060009056fea2646970667358221220b094fd69f04977ae9458e5ba422d01cd2d20dbcfca0992ff37f19aa07deec25464736f6c63430008170033"; + bytes internal constant CreateXCode = + hex"60806040526004361061018a5760003560e01c806381503da1116100d6578063d323826a1161007f578063e96deee411610059578063e96deee414610395578063f5745aba146103a8578063f9664498146103bb57600080fd5b8063d323826a1461034f578063ddda0acb1461036f578063e437252a1461038257600080fd5b80639c36a286116100b05780639c36a28614610316578063a7db93f214610329578063c3fe107b1461033c57600080fd5b806381503da1146102d0578063890c283b146102e357806398e810771461030357600080fd5b80632f990e3f116101385780636cec2536116101125780636cec25361461027d57806374637a7a1461029d5780637f565360146102bd57600080fd5b80632f990e3f1461023757806331a7c8c81461024a57806342d654fc1461025d57600080fd5b806327fe18221161016957806327fe1822146101f15780632852527a1461020457806328ddd0461461021757600080fd5b8062d84acb1461018f57806326307668146101cb57806326a32fc7146101de575b600080fd5b6101a261019d366004612915565b6103ce565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b6101a26101d9366004612994565b6103e6565b6101a26101ec3660046129db565b610452565b6101a26101ff3660046129db565b6104de565b6101a2610212366004612a39565b610539565b34801561022357600080fd5b506101a2610232366004612a90565b6106fe565b6101a2610245366004612aa9565b61072a565b6101a2610258366004612aa9565b6107bb565b34801561026957600080fd5b506101a2610278366004612b1e565b6107c9565b34801561028957600080fd5b506101a2610298366004612a90565b6108235
65b3480156102a957600080fd5b506101a26102b8366004612b4a565b61084f565b6101a26102cb3660046129db565b611162565b6101a26102de366004612b74565b6111e8565b3480156102ef57600080fd5b506101a26102fe366004612bac565b611276565b6101a2610311366004612bce565b6112a3565b6101a2610324366004612994565b611505565b6101a2610337366004612c49565b6116f1565b6101a261034a366004612aa9565b611964565b34801561035b57600080fd5b506101a261036a366004612cd9565b6119ed565b6101a261037d366004612c49565b611a17565b6101a2610390366004612bce565b611e0c565b6101a26103a3366004612915565b611e95565b6101a26103b6366004612bce565b611ea4565b6101a26103c9366004612b74565b611f2d565b60006103dd8585858533611a17565b95945050505050565b6000806103f2846120db565b90508083516020850134f59150610408826123d3565b604051819073ffffffffffffffffffffffffffffffffffffffff8416907fb8fda7e00c6b06a2b54e58521bc5894fee35f1090e5a3bb6390bfe2b98b497f790600090a35092915050565b60006104d86104d260408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b836103e6565b92915050565b600081516020830134f090506104f3816123d3565b60405173ffffffffffffffffffffffffffffffffffffffff8216907f4db17dd5e4732fb6da34a148104a592783ca119a1e7bb8829eba6cbadef0b51190600090a2919050565b600080610545856120db565b905060008460601b90506040517f3d602d80600a3d3981f3363d3d373d3d3d363d7300000000000000000000000081528160148201527f5af43d82803e903d91602b57fd5bf300000000000000000000000000000000006028820152826037826000f593505073ffffffffffffffffffffffffffffffffffffffff8316610635576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed1660048201526024015b60405180910390fd5b604051829073ffffffffffffffffffffffffffffffffffffffff8516907fb8fda7e00c6b06a2b54e58521bc5894fee35f1090e5a3bb6390bfe2b98b497f790600090a36000808473fffffffffffff
fffffffffffffffffffffffffff1634876040516106a19190612d29565b60006040518083038185875af1925050503d80600081146106de576040519150601f19603f3d011682016040523d82523d6000602084013e6106e3565b606091505b50915091506106f382828961247d565b505050509392505050565b60006104d87f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed8361084f565b60006107b36107aa60408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b85858533611a17565b949350505050565b60006107b3848484336112a3565b60006040518260005260ff600b53836020527f21c35dbe1b344a2488cf3321d6ce542f8e9f305544ff09e4993a62319a497c1f6040526055600b20601452806040525061d694600052600160345350506017601e20919050565b60006104d8827f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed6107c9565b600060607f9400000000000000000000000000000000000000000000000000000000000000610887600167ffffffffffffffff612d45565b67ffffffffffffffff16841115610902576040517f3c55ab3b00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b836000036109c7576040517fd60000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f800000000000000000000000000000000000000000000000000000000000000060368201526037015b6040516020818303038152906040529150611152565b607f8411610a60576040517fd60000000000000000000000000000000000000000000000000000000000000060208201527fff0000000000000000000000000000000000000000000000000000000000000080831660218301527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b16602283015260f886901b1660368201526037016109b1565b60ff8411610b1f576040517
fd70000000000000000000000000000000000000000000000000000000000000060208201527fff0000000000000000000000000000000000000000000000000000000000000080831660218301527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b1660228301527f8100000000000000000000000000000000000000000000000000000000000000603683015260f886901b1660378201526038016109b1565b61ffff8411610bff576040517fd80000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f820000000000000000000000000000000000000000000000000000000000000060368201527fffff00000000000000000000000000000000000000000000000000000000000060f086901b1660378201526039016109b1565b62ffffff8411610ce0576040517fd90000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f830000000000000000000000000000000000000000000000000000000000000060368201527fffffff000000000000000000000000000000000000000000000000000000000060e886901b166037820152603a016109b1565b63ffffffff8411610dc2576040517fda0000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f840000000000000000000000000000000000000000000000000000000000000060368201527fffffffff0000000000000000000000000000000000000000000000000000000060e086901b166037820152603b016109b1565b64ffffffffff8411610ea5576040517fdb0000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f850000000000000
000000000000000000000000000000000000000000000000060368201527fffffffffff00000000000000000000000000000000000000000000000000000060d886901b166037820152603c016109b1565b65ffffffffffff8411610f89576040517fdc0000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f860000000000000000000000000000000000000000000000000000000000000060368201527fffffffffffff000000000000000000000000000000000000000000000000000060d086901b166037820152603d016109b1565b66ffffffffffffff841161106e576040517fdd0000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f870000000000000000000000000000000000000000000000000000000000000060368201527fffffffffffffff0000000000000000000000000000000000000000000000000060c886901b166037820152603e016109b1565b6040517fde0000000000000000000000000000000000000000000000000000000000000060208201527fff00000000000000000000000000000000000000000000000000000000000000821660218201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606087901b1660228201527f880000000000000000000000000000000000000000000000000000000000000060368201527fffffffffffffffff00000000000000000000000000000000000000000000000060c086901b166037820152603f0160405160208183030381529060405291505b5080516020909101209392505050565b60006104d86111e260408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b83611505565b600061126f61126860408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a0820152466
0c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b8484610539565b9392505050565b600061126f83837f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed6119ed565b60008451602086018451f090506112b9816123d3565b60405173ffffffffffffffffffffffffffffffffffffffff8216907f4db17dd5e4732fb6da34a148104a592783ca119a1e7bb8829eba6cbadef0b51190600090a26000808273ffffffffffffffffffffffffffffffffffffffff168560200151876040516113279190612d29565b60006040518083038185875af1925050503d8060008114611364576040519150601f19603f3d011682016040523d82523d6000602084013e611369565b606091505b5091509150816113c9577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fa57ca23900000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b73ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed1631156114fb578373ffffffffffffffffffffffffffffffffffffffff167f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed73ffffffffffffffffffffffffffffffffffffffff163160405160006040518083038185875af1925050503d8060008114611495576040519150601f19603f3d011682016040523d82523d6000602084013e61149a565b606091505b509092509050816114fb577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fc2b3f44500000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b5050949350505050565b600080611511846120db565b905060006040518060400160405280601081526020017f67363d3d37363d34f03d5260086018f30000000000000000000000000000000081525090506000828251602084016000f5905073ffffffffffffffffffffffffffffffffffffffff81166115e0576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b604051839073ffffffffffffffffffffffffffffffffffffffff8316907f2feea65dd4e9f9cbd86b74b7734210c59a1b2981b5b137bd0ee3e208200c906790600090a36
1162c83610823565b935060008173ffffffffffffffffffffffffffffffffffffffff1634876040516116569190612d29565b60006040518083038185875af1925050503d8060008114611693576040519150601f19603f3d011682016040523d82523d6000602084013e611698565b606091505b505090506116a681866124ff565b60405173ffffffffffffffffffffffffffffffffffffffff8616907f4db17dd5e4732fb6da34a148104a592783ca119a1e7bb8829eba6cbadef0b51190600090a25050505092915050565b6000806116fd876120db565b9050808651602088018651f59150611714826123d3565b604051819073ffffffffffffffffffffffffffffffffffffffff8416907fb8fda7e00c6b06a2b54e58521bc5894fee35f1090e5a3bb6390bfe2b98b497f790600090a36000808373ffffffffffffffffffffffffffffffffffffffff168660200151886040516117849190612d29565b60006040518083038185875af1925050503d80600081146117c1576040519150601f19603f3d011682016040523d82523d6000602084013e6117c6565b606091505b509150915081611826577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fa57ca23900000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b73ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed163115611958578473ffffffffffffffffffffffffffffffffffffffff167f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed73ffffffffffffffffffffffffffffffffffffffff163160405160006040518083038185875af1925050503d80600081146118f2576040519150601f19603f3d011682016040523d82523d6000602084013e6118f7565b606091505b50909250905081611958577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fc2b3f44500000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b50505095945050505050565b60006107b36119e460408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b858585336116f1565b6000604051836040820152846020820152828152600b8101905060ff81536
0559020949350505050565b600080611a23876120db565b905060006040518060400160405280601081526020017f67363d3d37363d34f03d5260086018f30000000000000000000000000000000081525090506000828251602084016000f5905073ffffffffffffffffffffffffffffffffffffffff8116611af2576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b604051839073ffffffffffffffffffffffffffffffffffffffff8316907f2feea65dd4e9f9cbd86b74b7734210c59a1b2981b5b137bd0ee3e208200c906790600090a3611b3e83610823565b935060008173ffffffffffffffffffffffffffffffffffffffff1687600001518a604051611b6c9190612d29565b60006040518083038185875af1925050503d8060008114611ba9576040519150601f19603f3d011682016040523d82523d6000602084013e611bae565b606091505b50509050611bbc81866124ff565b60405173ffffffffffffffffffffffffffffffffffffffff8616907f4db17dd5e4732fb6da34a148104a592783ca119a1e7bb8829eba6cbadef0b51190600090a260608573ffffffffffffffffffffffffffffffffffffffff1688602001518a604051611c299190612d29565b60006040518083038185875af1925050503d8060008114611c66576040519150601f19603f3d011682016040523d82523d6000602084013e611c6b565b606091505b50909250905081611ccc577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fa57ca23900000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b73ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed163115611dfe578673ffffffffffffffffffffffffffffffffffffffff167f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed73ffffffffffffffffffffffffffffffffffffffff163160405160006040518083038185875af1925050503d8060008114611d98576040519150601f19603f3d011682016040523d82523d6000602084013e611d9d565b606091505b50909250905081611dfe577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed816040517fc2b3f4450000000000000000000000000000000000000000000000000000000081526004016
1062c929190612d94565b505050505095945050505050565b60006103dd611e8c60408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b868686866116f1565b60006103dd85858585336116f1565b60006103dd611f2460408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b86868686611a17565b6000808360601b90506040517f3d602d80600a3d3981f3363d3d373d3d3d363d7300000000000000000000000081528160148201527f5af43d82803e903d91602b57fd5bf3000000000000000000000000000000000060288201526037816000f092505073ffffffffffffffffffffffffffffffffffffffff8216612016576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b60405173ffffffffffffffffffffffffffffffffffffffff8316907f4db17dd5e4732fb6da34a148104a592783ca119a1e7bb8829eba6cbadef0b51190600090a26000808373ffffffffffffffffffffffffffffffffffffffff1634866040516120809190612d29565b60006040518083038185875af1925050503d80600081146120bd576040519150601f19603f3d011682016040523d82523d6000602084013e6120c2565b606091505b50915091506120d282828861247d565b50505092915050565b60008060006120e9846125b3565b9092509050600082600281111561210257612102612e02565b1480156121205750600081600281111561211e5761211e612e02565b145b1561215e57604080513360208201524691810191909152606081018590526080016040516020818303038152906040528051906020012092506123cc565b600082600281111561217257612172612e02565b1480156121905750600181600281111561218e5761218e612e02565b145b156121b0576121a9338560009182526020526040902090565b92506123cc565b60008260028111156121c4576121c4612e02565b03612233576040517f13b3a2a1000000000000000000000
00000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b600182600281111561224757612247612e02565b1480156122655750600081600281111561226357612263612e02565b145b1561227e576121a9468560009182526020526040902090565b600182600281111561229257612292612e02565b1480156122b0575060028160028111156122ae576122ae612e02565b145b1561231f576040517f13b3a2a100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b61239a60408051437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101406020830152419282019290925260608101919091524260808201524460a08201524660c08201523360e08201526000906101000160405160208183030381529060405280519060200120905090565b84036123a657836123c9565b604080516020810186905201604051602081830303815290604052805190602001205b92505b5050919050565b73ffffffffffffffffffffffffffffffffffffffff8116158061240b575073ffffffffffffffffffffffffffffffffffffffff81163b155b1561247a576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b50565b82158061249f575073ffffffffffffffffffffffffffffffffffffffff81163b155b156124fa577f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed826040517fa57ca23900000000000000000000000000000000000000000000000000000000815260040161062c929190612d94565b505050565b811580612520575073ffffffffffffffffffffffffffffffffffffffff8116155b80612540575073ffffffffffffffffffffffffffffffffffffffff81163b155b156125af576040517fc05cee7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ba5ed099633d3b313e4d5f7bdc1305d3c28ba5ed16600482015260240161062c565b5050565b600080606083901c33148015612610575
08260141a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19167f0100000000000000000000000000000000000000000000000000000000000000145b1561262057506000905080915091565b606083901c3314801561265a57507fff00000000000000000000000000000000000000000000000000000000000000601484901a60f81b16155b1561266b5750600090506001915091565b33606084901c036126825750600090506002915091565b606083901c1580156126db57508260141a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19167f0100000000000000000000000000000000000000000000000000000000000000145b156126ec5750600190506000915091565b606083901c15801561272557507fff00000000000000000000000000000000000000000000000000000000000000601484901a60f81b16155b1561273557506001905080915091565b606083901c61274a5750600190506002915091565b8260141a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19167f0100000000000000000000000000000000000000000000000000000000000000036127a55750600290506000915091565b8260141a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166000036127e15750600290506001915091565b506002905080915091565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f83011261282c57600080fd5b813567ffffffffffffffff80821115612847576128476127ec565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190828211818310171561288d5761288d6127ec565b816040528381528660208588010111156128a657600080fd5b836020870160208301376000602085830101528094505050505092915050565b6000604082840312156128d857600080fd5b6040516040810181811067ffffffffffffffff821117156128fb576128fb6127ec565b604052823581526020928301359281019290925250919050565b60008060008060a0858703121561292b57600080fd5b84359350602085013567ffffffffffffffff8082111561294a57600080fd5b6129568883890161281b565b9450604087013591508082111561296c57600080fd5b506129798782880161281b565b92505061298986606087016128c6565b905092959194509250565b600080604083850312156129a757600080fd5b823591506
02083013567ffffffffffffffff8111156129c557600080fd5b6129d18582860161281b565b9150509250929050565b6000602082840312156129ed57600080fd5b813567ffffffffffffffff811115612a0457600080fd5b6107b38482850161281b565b803573ffffffffffffffffffffffffffffffffffffffff81168114612a3457600080fd5b919050565b600080600060608486031215612a4e57600080fd5b83359250612a5e60208501612a10565b9150604084013567ffffffffffffffff811115612a7a57600080fd5b612a868682870161281b565b9150509250925092565b600060208284031215612aa257600080fd5b5035919050565b600080600060808486031215612abe57600080fd5b833567ffffffffffffffff80821115612ad657600080fd5b612ae28783880161281b565b94506020860135915080821115612af857600080fd5b50612b058682870161281b565b925050612b1585604086016128c6565b90509250925092565b60008060408385031215612b3157600080fd5b82359150612b4160208401612a10565b90509250929050565b60008060408385031215612b5d57600080fd5b612b6683612a10565b946020939093013593505050565b60008060408385031215612b8757600080fd5b612b9083612a10565b9150602083013567ffffffffffffffff8111156129c557600080fd5b60008060408385031215612bbf57600080fd5b50508035926020909101359150565b60008060008060a08587031215612be457600080fd5b843567ffffffffffffffff80821115612bfc57600080fd5b612c088883890161281b565b95506020870135915080821115612c1e57600080fd5b50612c2b8782880161281b565b935050612c3b86604087016128c6565b915061298960808601612a10565b600080600080600060c08688031215612c6157600080fd5b85359450602086013567ffffffffffffffff80821115612c8057600080fd5b612c8c89838a0161281b565b95506040880135915080821115612ca257600080fd5b50612caf8882890161281b565b935050612cbf87606088016128c6565b9150612ccd60a08701612a10565b90509295509295909350565b600080600060608486031215612cee57600080fd5b8335925060208401359150612b1560408501612a10565b60005b83811015612d20578181015183820152602001612d08565b50506000910152565b60008251612d3b818460208701612d05565b9190910192915050565b67ffffffffffffffff828116828216039080821115612d8d577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5092915050565
b73ffffffffffffffffffffffffffffffffffffffff831681526040602082015260008251806040840152612dcf816060850160208701612d05565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016060019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fdfea164736f6c6343000817000a"; + bytes internal constant BeaconBlockRootsCode = hex"3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"; @@ -118,6 +124,7 @@ library Preinstalls { if (_addr == Permit2) return getPermit2Code(_chainID); if (_addr == BeaconBlockRoots) return BeaconBlockRootsCode; + if (_addr == CreateX) return CreateXCode; revert("Preinstalls: unknown preinstall"); } @@ -138,6 +145,7 @@ library Preinstalls { if (_addr == SenderCreator_v070) return "SenderCreator_v070"; if (_addr == EntryPoint_v070) return "EntryPoint_v070"; if (_addr == BeaconBlockRoots) return "BeaconBlockRoots"; + if (_addr == CreateX) return "CreateX"; revert("Preinstalls: unnamed preinstall"); } diff --git a/packages/contracts-bedrock/test/L2Genesis.t.sol b/packages/contracts-bedrock/test/L2Genesis.t.sol index 23c457da6a2f..be5cc0c50edf 100644 --- a/packages/contracts-bedrock/test/L2Genesis.t.sol +++ b/packages/contracts-bedrock/test/L2Genesis.t.sol @@ -188,7 +188,7 @@ contract L2GenesisTest is Test { expected += 2048 - 2; // predeploy proxies expected += 21; // predeploy implementations (excl. 
legacy erc20-style eth and legacy message sender) expected += 256; // precompiles - expected += 12; // preinstalls + expected += 13; // preinstalls expected += 1; // 4788 deployer account // 16 prefunded dev accounts are excluded assertEq(expected, getJSONKeyCount(_path), "key count check"); diff --git a/packages/contracts-bedrock/test/Preinstalls.t.sol b/packages/contracts-bedrock/test/Preinstalls.t.sol index eca9a063b21c..a0da8c5e32cc 100644 --- a/packages/contracts-bedrock/test/Preinstalls.t.sol +++ b/packages/contracts-bedrock/test/Preinstalls.t.sol @@ -118,4 +118,8 @@ contract PreinstallsTest is CommonTest { assertPreinstall(Preinstalls.BeaconBlockRoots, Preinstalls.BeaconBlockRootsCode); assertEq(vm.getNonce(Preinstalls.BeaconBlockRootsSender), 1, "4788 sender must have nonce=1"); } + + function test_preinstall_createX_succeeds() external view { + assertPreinstall(Preinstalls.CreateX, Preinstalls.CreateXCode); + } } diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index 0dfb6bd47f24..325281f890df 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -233,6 +233,7 @@ contract Setup { labelPreinstall(Preinstalls.SenderCreator_v070); labelPreinstall(Preinstalls.EntryPoint_v070); labelPreinstall(Preinstalls.BeaconBlockRoots); + labelPreinstall(Preinstalls.CreateX); console.log("Setup: completed L2 genesis"); } From 3e68cf018d8b9b474e918def32a56d1dbf028d83 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 30 Aug 2024 15:35:34 -0600 Subject: [PATCH 11/19] Update contracts publish job to use env vars rather than contexts (#11687) Contexts are scoped to a specific GitHub user group, which doesn't work with the GitHub merge queue or OSS contributors. This PR updates the packaging job to use raw project-level env vars instead (which are not user-scoped), and to only run on commits to `develop`. 
--- .circleci/config.yml | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 82b3119acc8b..ed4a77f196ec 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,6 +35,9 @@ parameters: docker_publish_dispatch: type: boolean default: false + publish_contract_artifacts_dispatch: + type: boolean + default: false orbs: go: circleci/go@1.8.0 @@ -1474,10 +1477,20 @@ jobs: - gcp-oidc-authenticate: gcp_cred_config_file_path: /root/gcp_cred_config.json oidc_token_file_path: /root/oidc_token.json - service_account_email: GCP_SERVICE_CONTRACTS_ACCOUNT_EMAIL + project_id: GCP_TOOLS_ARTIFACTS_PROJECT_ID + service_account_email: GCP_CONTRACTS_PUBLISHER_SERVICE_ACCOUNT_EMAIL - checkout - - attach_workspace: { at: "." } - install-contracts-dependencies + - run: + name: Pull artifacts + command: bash scripts/ops/pull-artifacts.sh + working_directory: packages/contracts-bedrock + - run: + name: Build contracts + environment: + FOUNDRY_PROFILE: ci + command: just build + working_directory: packages/contracts-bedrock - run: name: Publish artifacts command: bash scripts/ops/publish-artifacts.sh @@ -1497,11 +1510,6 @@ workflows: jobs: - pnpm-monorepo: name: pnpm-monorepo - - publish-contract-artifacts: - requires: - - pnpm-monorepo - context: - - oplabs-gcr-release - contracts-bedrock-tests - contracts-bedrock-coverage - contracts-bedrock-checks: @@ -1955,6 +1963,14 @@ workflows: - slack - oplabs-fpp-nodes + develop-publish-contract-artifacts: + when: + or: + - equal: [ "develop", <> ] + - equal: [ true, <> ] + jobs: + - publish-contract-artifacts + develop-fault-proofs: when: and: From 6acfb99cf85eecfc2d225ef3349a3d9cc3b80c68 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Thu, 22 Aug 2024 15:52:20 -0700 Subject: [PATCH 12/19] feat: initial goroutine blob submission implementation test(batcher): add e2e test for concurrent altda requests doc: add explanation comment for 
FakeDAServer chore: fix if condition in altda sendTransaction path feat: add maxConcurrentDaRequests config flag + semaphore refactor: batcher to use errgroup for da instead of separate semaphore/waitgroup fix: nil pointer bug after using wrong function after rebase fix: defn of maxConcurrentDaRequests=0 fix: TestBatcherConcurrentAltDARequests chore: remove unneeded if statement around time.Sleep refactor: use TryGo instead of Go to make logic local and easier to read chore: clean up some comments in batcher chore: make batcher shutdown cancel pending altda requests by using shutdownCtx instead of killCtx --- op-alt-da/cli.go | 56 +++++++++++++++------ op-alt-da/daclient.go | 18 +++++-- op-alt-da/daclient_test.go | 38 +-------------- op-alt-da/damock.go | 85 ++++++++++++++++++++++++++++++++ op-alt-da/daserver.go | 4 +- op-batcher/batcher/driver.go | 91 ++++++++++++++++++++++++++--------- op-batcher/batcher/service.go | 2 + op-e2e/setup.go | 67 ++++++++++++++++++++------ op-e2e/system_test.go | 57 +++++++++++++++++++++- 9 files changed, 321 insertions(+), 97 deletions(-) diff --git a/op-alt-da/cli.go b/op-alt-da/cli.go index e931707b67f5..30ce2168f570 100644 --- a/op-alt-da/cli.go +++ b/op-alt-da/cli.go @@ -3,15 +3,19 @@ package altda import ( "fmt" "net/url" + "time" "github.com/urfave/cli/v2" ) var ( - EnabledFlagName = altDAFlags("enabled") - DaServerAddressFlagName = altDAFlags("da-server") - VerifyOnReadFlagName = altDAFlags("verify-on-read") - DaServiceFlag = altDAFlags("da-service") + EnabledFlagName = altDAFlags("enabled") + DaServerAddressFlagName = altDAFlags("da-server") + VerifyOnReadFlagName = altDAFlags("verify-on-read") + DaServiceFlagName = altDAFlags("da-service") + PutTimeoutFlagName = altDAFlags("put-timeout") + GetTimeoutFlagName = altDAFlags("get-timeout") + MaxConcurrentRequestsFlagName = altDAFlags("max-concurrent-da-requests") ) // altDAFlags returns the flag names for altDA @@ -46,20 +50,41 @@ func CLIFlags(envPrefix string, category string) 
[]cli.Flag { Category: category, }, &cli.BoolFlag{ - Name: DaServiceFlag, + Name: DaServiceFlagName, Usage: "Use DA service type where commitments are generated by Alt-DA server", Value: false, EnvVars: altDAEnvs(envPrefix, "DA_SERVICE"), Category: category, }, + &cli.DurationFlag{ + Name: PutTimeoutFlagName, + Usage: "Timeout for put requests. 0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "PUT_TIMEOUT"), + }, + &cli.DurationFlag{ + Name: GetTimeoutFlagName, + Usage: "Timeout for get requests. 0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "GET_TIMEOUT"), + }, + &cli.Uint64Flag{ + Name: MaxConcurrentRequestsFlagName, + Usage: "Maximum number of concurrent requests to the DA server", + Value: 1, + EnvVars: altDAEnvs(envPrefix, "MAX_CONCURRENT_DA_REQUESTS"), + }, } } type CLIConfig struct { - Enabled bool - DAServerURL string - VerifyOnRead bool - GenericDA bool + Enabled bool + DAServerURL string + VerifyOnRead bool + GenericDA bool + PutTimeout time.Duration + GetTimeout time.Duration + MaxConcurrentRequests uint64 } func (c CLIConfig) Check() error { @@ -75,14 +100,17 @@ func (c CLIConfig) Check() error { } func (c CLIConfig) NewDAClient() *DAClient { - return &DAClient{url: c.DAServerURL, verify: c.VerifyOnRead, precompute: !c.GenericDA} + return &DAClient{url: c.DAServerURL, verify: c.VerifyOnRead, precompute: !c.GenericDA, getTimeout: c.GetTimeout, putTimeout: c.PutTimeout} } func ReadCLIConfig(c *cli.Context) CLIConfig { return CLIConfig{ - Enabled: c.Bool(EnabledFlagName), - DAServerURL: c.String(DaServerAddressFlagName), - VerifyOnRead: c.Bool(VerifyOnReadFlagName), - GenericDA: c.Bool(DaServiceFlag), + Enabled: c.Bool(EnabledFlagName), + DAServerURL: c.String(DaServerAddressFlagName), + VerifyOnRead: c.Bool(VerifyOnReadFlagName), + GenericDA: c.Bool(DaServiceFlagName), + PutTimeout: c.Duration(PutTimeoutFlagName), + GetTimeout: c.Duration(GetTimeoutFlagName), + MaxConcurrentRequests: 
c.Uint64(MaxConcurrentRequestsFlagName), } } diff --git a/op-alt-da/daclient.go b/op-alt-da/daclient.go index db9c66ce5c21..269b71f3c104 100644 --- a/op-alt-da/daclient.go +++ b/op-alt-da/daclient.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "time" ) // ErrNotFound is returned when the server could not find the input. @@ -23,10 +24,16 @@ type DAClient struct { verify bool // whether commitment is precomputable (only applicable to keccak256) precompute bool + getTimeout time.Duration + putTimeout time.Duration } func NewDAClient(url string, verify bool, pc bool) *DAClient { - return &DAClient{url, verify, pc} + return &DAClient{ + url: url, + verify: verify, + precompute: pc, + } } // GetInput returns the input data for the given encoded commitment bytes. @@ -35,7 +42,8 @@ func (c *DAClient) GetInput(ctx context.Context, comm CommitmentData) ([]byte, e if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) } - resp, err := http.DefaultClient.Do(req) + client := &http.Client{Timeout: c.getTimeout} + resp, err := client.Do(req) if err != nil { return nil, err } @@ -91,7 +99,8 @@ func (c *DAClient) setInputWithCommit(ctx context.Context, comm CommitmentData, return fmt.Errorf("failed to create HTTP request: %w", err) } req.Header.Set("Content-Type", "application/octet-stream") - resp, err := http.DefaultClient.Do(req) + client := &http.Client{Timeout: c.putTimeout} + resp, err := client.Do(req) if err != nil { return err } @@ -116,7 +125,8 @@ func (c *DAClient) setInput(ctx context.Context, img []byte) (CommitmentData, er return nil, fmt.Errorf("failed to create HTTP request: %w", err) } req.Header.Set("Content-Type", "application/octet-stream") - resp, err := http.DefaultClient.Do(req) + client := &http.Client{Timeout: c.putTimeout} + resp, err := client.Do(req) if err != nil { return nil, err } diff --git a/op-alt-da/daclient_test.go b/op-alt-da/daclient_test.go index 02a9611ae276..d9f7902aadee 100644 --- a/op-alt-da/daclient_test.go 
+++ b/op-alt-da/daclient_test.go @@ -2,48 +2,14 @@ package altda import ( "context" - "fmt" "math/rand" - "sync" "testing" "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) -type MemStore struct { - db map[string][]byte - lock sync.RWMutex -} - -func NewMemStore() *MemStore { - return &MemStore{ - db: make(map[string][]byte), - } -} - -// Get retrieves the given key if it's present in the key-value store. -func (s *MemStore) Get(ctx context.Context, key []byte) ([]byte, error) { - s.lock.RLock() - defer s.lock.RUnlock() - - if entry, ok := s.db[string(key)]; ok { - return common.CopyBytes(entry), nil - } - return nil, ErrNotFound -} - -// Put inserts the given value into the key-value store. -func (s *MemStore) Put(ctx context.Context, key []byte, value []byte) error { - s.lock.Lock() - defer s.lock.Unlock() - - s.db[string(key)] = common.CopyBytes(value) - return nil -} - func TestDAClientPrecomputed(t *testing.T) { store := NewMemStore() logger := testlog.Logger(t, log.LevelDebug) @@ -56,7 +22,7 @@ func TestDAClientPrecomputed(t *testing.T) { cfg := CLIConfig{ Enabled: true, - DAServerURL: fmt.Sprintf("http://%s", server.Endpoint()), + DAServerURL: server.HttpEndpoint(), VerifyOnRead: true, } require.NoError(t, cfg.Check()) @@ -113,7 +79,7 @@ func TestDAClientService(t *testing.T) { cfg := CLIConfig{ Enabled: true, - DAServerURL: fmt.Sprintf("http://%s", server.Endpoint()), + DAServerURL: server.HttpEndpoint(), VerifyOnRead: false, GenericDA: false, } diff --git a/op-alt-da/damock.go b/op-alt-da/damock.go index b56b73fdfcc9..0db129171a82 100644 --- a/op-alt-da/damock.go +++ b/op-alt-da/damock.go @@ -4,8 +4,12 @@ import ( "context" "errors" "io" + "net/http" + "sync" + "time" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" 
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" @@ -99,3 +103,84 @@ func (d *AltDADisabled) OnFinalizedHeadSignal(f HeadSignalFn) { func (d *AltDADisabled) AdvanceL1Origin(ctx context.Context, l1 L1Fetcher, blockId eth.BlockID) error { return ErrNotEnabled } + +// FakeDAServer is a fake DA server for e2e tests. +// It is a small wrapper around DAServer that allows for setting request latencies, +// to mimic a DA service with slow responses (eg. eigenDA with 10 min batching interval). +type FakeDAServer struct { + *DAServer + putRequestLatency time.Duration + getRequestLatency time.Duration +} + +func NewFakeDAServer(host string, port int, log log.Logger) *FakeDAServer { + store := NewMemStore() + fakeDAServer := &FakeDAServer{ + DAServer: NewDAServer(host, port, store, log, true), + putRequestLatency: 0, + getRequestLatency: 0, + } + return fakeDAServer +} + +func (s *FakeDAServer) HandleGet(w http.ResponseWriter, r *http.Request) { + time.Sleep(s.getRequestLatency) + s.DAServer.HandleGet(w, r) +} + +func (s *FakeDAServer) HandlePut(w http.ResponseWriter, r *http.Request) { + time.Sleep(s.putRequestLatency) + s.DAServer.HandlePut(w, r) +} + +func (s *FakeDAServer) Start() error { + err := s.DAServer.Start() + if err != nil { + return err + } + // Override the HandleGet/Put method registrations + mux := http.NewServeMux() + mux.HandleFunc("/get/", s.HandleGet) + mux.HandleFunc("/put/", s.HandlePut) + s.httpServer.Handler = mux + return nil +} + +func (s *FakeDAServer) SetPutRequestLatency(latency time.Duration) { + s.putRequestLatency = latency +} + +func (s *FakeDAServer) SetGetRequestLatency(latency time.Duration) { + s.getRequestLatency = latency +} + +type MemStore struct { + db map[string][]byte + lock sync.RWMutex +} + +func NewMemStore() *MemStore { + return &MemStore{ + db: make(map[string][]byte), + } +} + +// Get retrieves the given key if it's present in the key-value store. 
+func (s *MemStore) Get(ctx context.Context, key []byte) ([]byte, error) { + s.lock.RLock() + defer s.lock.RUnlock() + + if entry, ok := s.db[string(key)]; ok { + return common.CopyBytes(entry), nil + } + return nil, ErrNotFound +} + +// Put inserts the given value into the key-value store. +func (s *MemStore) Put(ctx context.Context, key []byte, value []byte) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.db[string(key)] = common.CopyBytes(value) + return nil +} diff --git a/op-alt-da/daserver.go b/op-alt-da/daserver.go index ef43fd27fef3..94446944b543 100644 --- a/op-alt-da/daserver.go +++ b/op-alt-da/daserver.go @@ -187,8 +187,8 @@ func (d *DAServer) HandlePut(w http.ResponseWriter, r *http.Request) { } } -func (b *DAServer) Endpoint() string { - return b.listener.Addr().String() +func (b *DAServer) HttpEndpoint() string { + return fmt.Sprintf("http://%s", b.listener.Addr().String()) } func (b *DAServer) Stop() error { diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 0af4c86ca40a..48f60b619206 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "golang.org/x/sync/errgroup" ) var ( @@ -302,6 +303,12 @@ func (l *BatchSubmitter) loop() { receiptsCh := make(chan txmgr.TxReceipt[txRef]) queue := txmgr.NewQueue[txRef](l.killCtx, l.Txmgr, l.Config.MaxPendingTransactions) + daGroup := &errgroup.Group{} + // errgroup with limit of 0 means no goroutine is able to run concurrently, + // so we only set the limit if it is greater than 0. 
+ if l.Config.MaxConcurrentDARequests > 0 { + daGroup.SetLimit(int(l.Config.MaxConcurrentDARequests)) + } // start the receipt/result processing loop receiptLoopDone := make(chan struct{}) @@ -339,8 +346,11 @@ func (l *BatchSubmitter) loop() { defer ticker.Stop() publishAndWait := func() { - l.publishStateToL1(queue, receiptsCh) + l.publishStateToL1(queue, receiptsCh, daGroup) if !l.Txmgr.IsClosed() { + l.Log.Info("Wait for pure DA writes, not L1 txs") + _ = daGroup.Wait() + l.Log.Info("Wait for L1 writes (blobs or DA commitments)") queue.Wait() } else { l.Log.Info("Txmgr is closed, remaining channel data won't be sent") @@ -368,7 +378,7 @@ func (l *BatchSubmitter) loop() { l.clearState(l.shutdownCtx) continue } - l.publishStateToL1(queue, receiptsCh) + l.publishStateToL1(queue, receiptsCh, daGroup) case <-l.shutdownCtx.Done(): if l.Txmgr.IsClosed() { l.Log.Info("Txmgr is closed, remaining channel data won't be sent") @@ -425,7 +435,7 @@ func (l *BatchSubmitter) waitNodeSync() error { // publishStateToL1 queues up all pending TxData to be published to the L1, returning when there is // no more data to queue for publishing or if there was an error queing the data. 
-func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) { +func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) { for { // if the txmgr is closed, we stop the transaction sending if l.Txmgr.IsClosed() { @@ -436,7 +446,7 @@ func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txRef], receiptsCh l.Log.Info("txpool state is not good, aborting state publishing") return } - err := l.publishTxToL1(l.killCtx, queue, receiptsCh) + err := l.publishTxToL1(l.killCtx, queue, receiptsCh, daGroup) if err != nil { if err != io.EOF { @@ -487,7 +497,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) { } // publishTxToL1 submits a single state tx to the L1 -func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) error { +func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { // send all available transactions l1tip, err := l.l1Tip(ctx) if err != nil { @@ -496,7 +506,8 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t } l.recordL1Tip(l1tip) - // Collect next transaction data + // Collect next transaction data. This pulls data out of the channel, so we need to make sure + // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. 
txdata, err := l.state.TxData(l1tip.ID()) if err == io.EOF { @@ -507,7 +518,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t return err } - if err = l.sendTransaction(ctx, txdata, queue, receiptsCh); err != nil { + if err = l.sendTransaction(ctx, txdata, queue, receiptsCh, daGroup); err != nil { return fmt.Errorf("BatchSubmitter.sendTransaction failed: %w", err) } return nil @@ -555,9 +566,48 @@ func (l *BatchSubmitter) cancelBlockingTx(queue *txmgr.Queue[txRef], receiptsCh // sendTransaction creates & queues for sending a transaction to the batch inbox address with the given `txData`. // This call will block if the txmgr queue is at the max-pending limit. // The method will block if the queue's MaxPendingTransactions is exceeded. -func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) error { +func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { var err error - // Do the gas estimation offline. A value of 0 will cause the [txmgr] to estimate the gas limit. + + // if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment. + if l.Config.UseAltDA { + // sanity checks + if nf := len(txdata.frames); nf != 1 { + l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) + } + if txdata.asBlob { + l.Log.Crit("Unexpected blob txdata with AltDA enabled") + } + + // when posting txdata to an external DA Provider, we use a goroutine to avoid blocking the main loop + // since it may take a while for the request to return. 
+ goroutineSpawned := daGroup.TryGo(func() error { + // TODO: probably shouldn't be using the global shutdownCtx here, see https://go.dev/blog/context-and-structs + // but sendTransaction receives l.killCtx as an argument, which currently is only canceled after waiting for the main loop + // to exit, which would wait on this DA call to finish, which would take a long time. + // So we prefer to mimic the behavior of txmgr and cancel all pending DA/txmgr requests when the batcher is stopped. + comm, err := l.AltDA.SetInput(l.shutdownCtx, txdata.CallData()) + if err != nil { + l.Log.Error("Failed to post input to Alt DA", "error", err) + // requeue frame if we fail to post to the DA Provider so it can be retried + // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs + l.recordFailedDARequest(txdata.ID(), err) + return nil + } + l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) + candidate := l.calldataTxCandidate(comm.TxData()) + l.sendTx(txdata, false, candidate, queue, receiptsCh) + return nil + }) + if !goroutineSpawned { + // We couldn't start the goroutine because the errgroup.Group limit + // is already reached. Since we can't send the txdata, we have to + // return it for later processing. We use nil error to skip error logging. + l.recordFailedDARequest(txdata.ID(), nil) + } + // we return nil to allow publishStateToL1 to keep processing the next txdata + return nil + } var candidate *txmgr.TxCandidate if txdata.asBlob { @@ -573,21 +623,7 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que if nf := len(txdata.frames); nf != 1 { l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) } - data := txdata.CallData() - // if AltDA is enabled we post the txdata to the DA Provider and replace it with the commitment. 
- if l.Config.UseAltDA { - comm, err := l.AltDA.SetInput(ctx, data) - if err != nil { - l.Log.Error("Failed to post input to Alt DA", "error", err) - // requeue frame if we fail to post to the DA Provider so it can be retried - l.recordFailedTx(txdata.ID(), err) - return nil - } - l.Log.Info("Set AltDA input", "commitment", comm, "tx", txdata.ID()) - // signal AltDA commitment tx with TxDataVersion1 - data = comm.TxData() - } - candidate = l.calldataTxCandidate(data) + candidate = l.calldataTxCandidate(txdata.CallData()) } l.sendTx(txdata, false, candidate, queue, receiptsCh) @@ -649,6 +685,13 @@ func (l *BatchSubmitter) recordL1Tip(l1tip eth.L1BlockRef) { l.Metr.RecordLatestL1Block(l1tip) } +func (l *BatchSubmitter) recordFailedDARequest(id txID, err error) { + if err != nil { + l.Log.Warn("DA request failed", logFields(id, err)...) + } + l.state.TxFailed(id) +} + func (l *BatchSubmitter) recordFailedTx(id txID, err error) { l.Log.Warn("Transaction failed to send", logFields(id, err)...) l.state.TxFailed(id) diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go index 00d3d32071f7..1fe813f9913f 100644 --- a/op-batcher/batcher/service.go +++ b/op-batcher/batcher/service.go @@ -39,6 +39,8 @@ type BatcherConfig struct { // UseAltDA is true if the rollup config has a DA challenge address so the batcher // will post inputs to the DA server and post commitments to blobs or calldata. 
UseAltDA bool + // maximum number of concurrent blob put requests to the DA server + MaxConcurrentDARequests uint64 WaitNodeSync bool CheckRecentTxsDepth int diff --git a/op-e2e/setup.go b/op-e2e/setup.go index cd07e081d0dd..ab2218b097d4 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + altda "github.com/ethereum-optimism/optimism/op-alt-da" bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" @@ -164,18 +165,19 @@ func DefaultSystemConfig(t testing.TB) SystemConfig { }, }, Loggers: map[string]log.Logger{ - RoleVerif: testlog.Logger(t, log.LevelInfo).New("role", RoleVerif), - RoleSeq: testlog.Logger(t, log.LevelInfo).New("role", RoleSeq), - "batcher": testlog.Logger(t, log.LevelInfo).New("role", "batcher"), - "proposer": testlog.Logger(t, log.LevelInfo).New("role", "proposer"), + RoleVerif: testlog.Logger(t, log.LevelInfo).New("role", RoleVerif), + RoleSeq: testlog.Logger(t, log.LevelInfo).New("role", RoleSeq), + "batcher": testlog.Logger(t, log.LevelInfo).New("role", "batcher"), + "proposer": testlog.Logger(t, log.LevelInfo).New("role", "proposer"), + "da-server": testlog.Logger(t, log.LevelInfo).New("role", "da-server"), }, - GethOptions: map[string][]geth.GethOption{}, - P2PTopology: nil, // no P2P connectivity by default - NonFinalizedProposals: false, - ExternalL2Shim: config.ExternalL2Shim, - DataAvailabilityType: batcherFlags.CalldataType, - MaxPendingTransactions: 1, - BatcherTargetNumFrames: 1, + GethOptions: map[string][]geth.GethOption{}, + P2PTopology: nil, // no P2P connectivity by default + NonFinalizedProposals: false, + ExternalL2Shim: config.ExternalL2Shim, + DataAvailabilityType: batcherFlags.CalldataType, + BatcherMaxPendingTransactions: 1, + BatcherTargetNumFrames: 1, } } @@ -298,12 +300,16 @@ type 
SystemConfig struct { // If >0, limits the number of blocks per span batch BatcherMaxBlocksPerSpanBatch int + // BatcherMaxPendingTransactions determines how many transactions the batcher will try to send + // concurrently. 0 means unlimited. + BatcherMaxPendingTransactions uint64 + + // BatcherMaxConcurrentDARequest determines how many DAserver requests the batcher is allowed to + // make concurrently. 0 means unlimited. + BatcherMaxConcurrentDARequest uint64 + // SupportL1TimeTravel determines if the L1 node supports quickly skipping forward in time SupportL1TimeTravel bool - - // MaxPendingTransactions determines how many transactions the batcher will try to send - // concurrently. 0 means unlimited. - MaxPendingTransactions uint64 } type System struct { @@ -319,6 +325,7 @@ type System struct { L2OutputSubmitter *l2os.ProposerService BatchSubmitter *bss.BatcherService Mocknet mocknet.Mocknet + FakeAltDAServer *altda.FakeDAServer L1BeaconAPIAddr endpoint.RestHTTP @@ -543,6 +550,16 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste } } + var rollupAltDAConfig *rollup.AltDAConfig + if cfg.DeployConfig.UseAltDA { + rollupAltDAConfig = &rollup.AltDAConfig{ + DAChallengeAddress: cfg.L1Deployments.DataAvailabilityChallengeProxy, + DAChallengeWindow: cfg.DeployConfig.DAChallengeWindow, + DAResolveWindow: cfg.DeployConfig.DAResolveWindow, + CommitmentType: altda.GenericCommitmentString, + } + } + makeRollupConfig := func() rollup.Config { return rollup.Config{ Genesis: rollup.Genesis{ @@ -574,6 +591,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste GraniteTime: cfg.DeployConfig.GraniteTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy, + AltDAConfig: rollupAltDAConfig, } } defaultConfig := makeRollupConfig() @@ -819,11 +837,27 @@ func (cfg 
SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste compressionAlgo = derive.Brotli10 } + var batcherAltDACLIConfig altda.CLIConfig + if cfg.DeployConfig.UseAltDA { + fakeAltDAServer := altda.NewFakeDAServer("127.0.0.1", 0, sys.Cfg.Loggers["da-server"]) + if err := fakeAltDAServer.Start(); err != nil { + return nil, fmt.Errorf("failed to start fake altDA server: %w", err) + } + sys.FakeAltDAServer = fakeAltDAServer + + batcherAltDACLIConfig = altda.CLIConfig{ + Enabled: cfg.DeployConfig.UseAltDA, + DAServerURL: fakeAltDAServer.HttpEndpoint(), + VerifyOnRead: true, + GenericDA: true, + MaxConcurrentRequests: cfg.BatcherMaxConcurrentDARequest, + } + } batcherCLIConfig := &bss.CLIConfig{ L1EthRpc: sys.EthInstances[RoleL1].UserRPC().RPC(), L2EthRpc: sys.EthInstances[RoleSeq].UserRPC().RPC(), RollupRpc: sys.RollupNodes[RoleSeq].UserRPC().RPC(), - MaxPendingTransactions: cfg.MaxPendingTransactions, + MaxPendingTransactions: cfg.BatcherMaxPendingTransactions, MaxChannelDuration: 1, MaxL1TxSize: batcherMaxL1TxSizeBytes, TestUseMaxTxSizeForBlobs: cfg.BatcherUseMaxTxSizeForBlobs, @@ -841,6 +875,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste MaxBlocksPerSpanBatch: cfg.BatcherMaxBlocksPerSpanBatch, DataAvailabilityType: sys.Cfg.DataAvailabilityType, CompressionAlgo: compressionAlgo, + AltDA: batcherAltDACLIConfig, } // Batch Submitter batcher, err := bss.BatcherServiceFromCLIConfig(context.Background(), "0.0.1", batcherCLIConfig, sys.Cfg.Loggers["batcher"]) diff --git a/op-e2e/system_test.go b/op-e2e/system_test.go index a168a8c90df6..d75147ae2146 100644 --- a/op-e2e/system_test.go +++ b/op-e2e/system_test.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -1362,7 +1363,7 @@ 
func TestBatcherMultiTx(t *testing.T) { InitParallel(t) cfg := DefaultSystemConfig(t) - cfg.MaxPendingTransactions = 0 // no limit on parallel txs + cfg.BatcherMaxPendingTransactions = 0 // no limit on parallel txs // ensures that batcher txs are as small as possible cfg.BatcherMaxL1TxSizeBytes = derive.FrameV0OverHeadSize + 1 /*version bytes*/ + 1 cfg.DisableBatcher = true @@ -1402,6 +1403,60 @@ func TestBatcherMultiTx(t *testing.T) { t.Fatal("Expected at least 10 transactions from the batcher") } +func TestBatcherConcurrentAltDARequests(t *testing.T) { + InitParallel(t) + + cfg := DefaultSystemConfig(t) + cfg.DeployConfig.UseAltDA = true + cfg.BatcherMaxPendingTransactions = 0 // no limit on parallel txs + // ensures that batcher txs are as small as possible + cfg.BatcherMaxL1TxSizeBytes = derive.FrameV0OverHeadSize + 1 /*version bytes*/ + 1 + cfg.BatcherBatchType = 0 + cfg.DataAvailabilityType = flags.CalldataType + cfg.BatcherMaxConcurrentDARequest = 0 // no limit + + // disable batcher because we start it manually below + cfg.DisableBatcher = true + sys, err := cfg.Start(t) + require.NoError(t, err, "Error starting up system") + defer sys.Close() + + // make every request take 5 seconds, such that only concurrent requests will be able to make progress fast enough + sys.FakeAltDAServer.SetPutRequestLatency(5 * time.Second) + + l1Client := sys.NodeClient("l1") + l2Seq := sys.NodeClient("sequencer") + + // we wait for some L2 blocks to have been produced, just to make sure the sequencer is working properly + _, err = geth.WaitForBlock(big.NewInt(10), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*15)*time.Second) + require.NoError(t, err, "Waiting for L2 blocks") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + startingL1BlockNum, err := l1Client.BlockNumber(ctx) + require.NoError(t, err) + + // start batch submission + driver := sys.BatchSubmitter.TestDriver() + err = driver.StartBatchSubmitting() + 
require.NoError(t, err) + + totalTxCount := 0 + // wait for up to 10 L1 blocks, expecting 10 L2 batcher txs in them. + // usually only 3 is required, but it's possible additional L1 blocks will be created + // before the batcher starts, so we wait additional blocks. + for i := int64(0); i < 10; i++ { + block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) + require.NoError(t, err, "Waiting for l1 blocks") + totalTxCount += len(block.Transactions()) + + if totalTxCount >= 10 { + return + } + } + + t.Fatal("Expected at least 10 transactions from the batcher") +} + func latestBlock(t *testing.T, client *ethclient.Client) uint64 { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() From 2efafab0ad0e0208d7fe853fd840259f24c4e856 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Mon, 2 Sep 2024 19:41:00 -0700 Subject: [PATCH 13/19] chore(batcher): make altda wg wait + log only when useAltDa is true --- op-batcher/batcher/driver.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 48f60b619206..dd5ab4f0ff71 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -348,9 +348,11 @@ func (l *BatchSubmitter) loop() { publishAndWait := func() { l.publishStateToL1(queue, receiptsCh, daGroup) if !l.Txmgr.IsClosed() { - l.Log.Info("Wait for pure DA writes, not L1 txs") - _ = daGroup.Wait() - l.Log.Info("Wait for L1 writes (blobs or DA commitments)") + if l.Config.UseAltDA { + l.Log.Info("Waiting for altDA writes to complete...") + _ = daGroup.Wait() + } + l.Log.Info("Waiting for L1 txs to be confirmed...") queue.Wait() } else { l.Log.Info("Txmgr is closed, remaining channel data won't be sent") From f0e6886a0f33a3103c8c825f017793ceceb363e9 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Mon, 2 Sep 2024 19:54:22 -0700 Subject: [PATCH 
14/19] refactor: batcher altda submission code into its own function --- op-batcher/batcher/driver.go | 73 +++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index dd5ab4f0ff71..e470e5388b58 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -565,6 +565,44 @@ func (l *BatchSubmitter) cancelBlockingTx(queue *txmgr.Queue[txRef], receiptsCh l.sendTx(txData{}, true, candidate, queue, receiptsCh) } +// publishToAltDAAndL1 posts the txdata to the DA Provider and then sends the commitment to L1. +func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) { + // sanity checks + if nf := len(txdata.frames); nf != 1 { + l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) + } + if txdata.asBlob { + l.Log.Crit("Unexpected blob txdata with AltDA enabled") + } + + // when posting txdata to an external DA Provider, we use a goroutine to avoid blocking the main loop + // since it may take a while for the request to return. + goroutineSpawned := daGroup.TryGo(func() error { + // TODO: probably shouldn't be using the global shutdownCtx here, see https://go.dev/blog/context-and-structs + // but sendTransaction receives l.killCtx as an argument, which currently is only canceled after waiting for the main loop + // to exit, which would wait on this DA call to finish, which would take a long time. + // So we prefer to mimic the behavior of txmgr and cancel all pending DA/txmgr requests when the batcher is stopped. 
+ comm, err := l.AltDA.SetInput(l.shutdownCtx, txdata.CallData()) + if err != nil { + l.Log.Error("Failed to post input to Alt DA", "error", err) + // requeue frame if we fail to post to the DA Provider so it can be retried + // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs + l.recordFailedDARequest(txdata.ID(), err) + return nil + } + l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) + candidate := l.calldataTxCandidate(comm.TxData()) + l.sendTx(txdata, false, candidate, queue, receiptsCh) + return nil + }) + if !goroutineSpawned { + // We couldn't start the goroutine because the errgroup.Group limit + // is already reached. Since we can't send the txdata, we have to + // return it for later processing. We use nil error to skip error logging. + l.recordFailedDARequest(txdata.ID(), nil) + } +} + // sendTransaction creates & queues for sending a transaction to the batch inbox address with the given `txData`. // This call will block if the txmgr queue is at the max-pending limit. // The method will block if the queue's MaxPendingTransactions is exceeded. @@ -573,40 +611,7 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que // if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment. if l.Config.UseAltDA { - // sanity checks - if nf := len(txdata.frames); nf != 1 { - l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) - } - if txdata.asBlob { - l.Log.Crit("Unexpected blob txdata with AltDA enabled") - } - - // when posting txdata to an external DA Provider, we use a goroutine to avoid blocking the main loop - // since it may take a while for the request to return. 
- goroutineSpawned := daGroup.TryGo(func() error { - // TODO: probably shouldn't be using the global shutdownCtx here, see https://go.dev/blog/context-and-structs - // but sendTransaction receives l.killCtx as an argument, which currently is only canceled after waiting for the main loop - // to exit, which would wait on this DA call to finish, which would take a long time. - // So we prefer to mimic the behavior of txmgr and cancel all pending DA/txmgr requests when the batcher is stopped. - comm, err := l.AltDA.SetInput(l.shutdownCtx, txdata.CallData()) - if err != nil { - l.Log.Error("Failed to post input to Alt DA", "error", err) - // requeue frame if we fail to post to the DA Provider so it can be retried - // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs - l.recordFailedDARequest(txdata.ID(), err) - return nil - } - l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) - candidate := l.calldataTxCandidate(comm.TxData()) - l.sendTx(txdata, false, candidate, queue, receiptsCh) - return nil - }) - if !goroutineSpawned { - // We couldn't start the goroutine because the errgroup.Group limit - // is already reached. Since we can't send the txdata, we have to - // return it for later processing. We use nil error to skip error logging. 
- l.recordFailedDARequest(txdata.ID(), nil) - } + l.publishToAltDAAndL1(txdata, queue, receiptsCh, daGroup) // we return nil to allow publishStateToL1 to keep processing the next txdata return nil } From 1185bf87cfebab54f680984010275303cfbff8a2 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Mon, 2 Sep 2024 20:18:17 -0700 Subject: [PATCH 15/19] test: refactor batcher e2e test to only count batcher txs --- op-e2e/system_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/op-e2e/system_test.go b/op-e2e/system_test.go index d75147ae2146..66f1a8970960 100644 --- a/op-e2e/system_test.go +++ b/op-e2e/system_test.go @@ -1440,16 +1440,25 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { err = driver.StartBatchSubmitting() require.NoError(t, err) - totalTxCount := 0 + totalBatcherTxsCount := 0 // wait for up to 10 L1 blocks, expecting 10 L2 batcher txs in them. // usually only 3 is required, but it's possible additional L1 blocks will be created // before the batcher starts, so we wait additional blocks. 
for i := int64(0); i < 10; i++ { block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) require.NoError(t, err, "Waiting for l1 blocks") - totalTxCount += len(block.Transactions()) + for _, tx := range block.Transactions() { + signer := types.NewCancunSigner(tx.ChainId()) + sender, err := types.Sender(signer, tx) + require.NoError(t, err) + // there are possibly other services (proposer/challenger) in the background sending txs + // so we only count the batcher txs + if sender == cfg.DeployConfig.BatchSenderAddress { + totalBatcherTxsCount++ + } + } - if totalTxCount >= 10 { + if totalBatcherTxsCount >= 10 { return } } From 3d31b9a5d0fafc414d55252fe2823ee33dcf64ce Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Wed, 4 Sep 2024 17:15:48 -0700 Subject: [PATCH 16/19] chore: log errors from wait functions --- op-batcher/batcher/driver.go | 10 ++++++++-- op-service/txmgr/queue.go | 6 +++--- op-service/txmgr/queue_test.go | 2 +- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index e470e5388b58..856c546054e5 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -350,10 +350,16 @@ func (l *BatchSubmitter) loop() { if !l.Txmgr.IsClosed() { if l.Config.UseAltDA { l.Log.Info("Waiting for altDA writes to complete...") - _ = daGroup.Wait() + err := daGroup.Wait() + if err != nil { + l.Log.Error("Error returned by one of the altda goroutines waited on", "err", err) + } } l.Log.Info("Waiting for L1 txs to be confirmed...") - queue.Wait() + err := queue.Wait() + if err != nil { + l.Log.Error("Error returned by one of the txmgr goroutines waited on", "err", err) + } } else { l.Log.Info("Txmgr is closed, remaining channel data won't be sent") } diff --git a/op-service/txmgr/queue.go b/op-service/txmgr/queue.go index cf83b00d5f8a..ee7a03ffa928 100644 --- a/op-service/txmgr/queue.go +++ 
b/op-service/txmgr/queue.go @@ -44,11 +44,11 @@ func NewQueue[T any](ctx context.Context, txMgr TxManager, maxPending uint64) *Q } // Wait waits for all pending txs to complete (or fail). -func (q *Queue[T]) Wait() { +func (q *Queue[T]) Wait() error { if q.group == nil { - return + return nil } - _ = q.group.Wait() + return q.group.Wait() } // Send will wait until the number of pending txs is below the max pending, diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 678de8dbb28c..549142c8592a 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -222,7 +222,7 @@ func TestQueue_Send(t *testing.T) { require.Equal(t, c.queued, queued, msg) } // wait for the queue to drain (all txs complete or failed) - queue.Wait() + _ = queue.Wait() duration := time.Since(start) // expect the execution time within a certain window now := time.Now() From d4362f34111c878a7738419164c8cf54afbcc792 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Wed, 4 Sep 2024 18:08:45 -0700 Subject: [PATCH 17/19] chore: refactor and minimize time that e2e batcher system tests can run --- op-batcher/batcher/driver.go | 4 +-- op-e2e/e2eutils/transactions/count.go | 21 ++++++++++++ op-e2e/system_test.go | 47 ++++++++++++++------------- 3 files changed, 47 insertions(+), 25 deletions(-) create mode 100644 op-e2e/e2eutils/transactions/count.go diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 856c546054e5..9dbd9e802688 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -526,7 +526,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t return err } - if err = l.sendTransaction(ctx, txdata, queue, receiptsCh, daGroup); err != nil { + if err = l.sendTransaction(txdata, queue, receiptsCh, daGroup); err != nil { return fmt.Errorf("BatchSubmitter.sendTransaction failed: %w", err) } return nil @@ -612,7 +612,7 @@ func (l *BatchSubmitter) 
publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[t // sendTransaction creates & queues for sending a transaction to the batch inbox address with the given `txData`. // This call will block if the txmgr queue is at the max-pending limit. // The method will block if the queue's MaxPendingTransactions is exceeded. -func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { +func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { var err error // if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment. diff --git a/op-e2e/e2eutils/transactions/count.go b/op-e2e/e2eutils/transactions/count.go new file mode 100644 index 000000000000..0f4d41fe0478 --- /dev/null +++ b/op-e2e/e2eutils/transactions/count.go @@ -0,0 +1,21 @@ +package transactions + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +func TransactionsBySender(block *types.Block, sender common.Address) (int64, error) { + txCount := int64(0) + for _, tx := range block.Transactions() { + signer := types.NewCancunSigner(tx.ChainId()) + txSender, err := types.Sender(signer, tx) + if err != nil { + return 0, err + } + if txSender == sender { + txCount++ + } + } + return txCount, nil +} diff --git a/op-e2e/system_test.go b/op-e2e/system_test.go index 66f1a8970960..681e230b9e0d 100644 --- a/op-e2e/system_test.go +++ b/op-e2e/system_test.go @@ -1376,7 +1376,7 @@ func TestBatcherMultiTx(t *testing.T) { _, err = geth.WaitForBlock(big.NewInt(10), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*15)*time.Second) require.NoError(t, err, "Waiting for L2 blocks") - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer 
cancel() l1Number, err := l1Client.BlockNumber(ctx) require.NoError(t, err) @@ -1386,16 +1386,20 @@ func TestBatcherMultiTx(t *testing.T) { err = driver.StartBatchSubmitting() require.NoError(t, err) - totalTxCount := 0 - // wait for up to 10 L1 blocks, usually only 3 is required, but it's + totalBatcherTxsCount := int64(0) + // wait for up to 5 L1 blocks, usually only 3 is required, but it's // possible additional L1 blocks will be created before the batcher starts, // so we wait additional blocks. - for i := int64(0); i < 10; i++ { + for i := int64(0); i < 5; i++ { block, err := geth.WaitForBlock(big.NewInt(int64(l1Number)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) require.NoError(t, err, "Waiting for l1 blocks") - totalTxCount += len(block.Transactions()) + // there are possibly other services (proposer/challenger) in the background sending txs + // so we only count the batcher txs + batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress) + require.NoError(t, err) + totalBatcherTxsCount += int64(batcherTxCount) - if totalTxCount >= 10 { + if totalBatcherTxsCount >= 10 { return } } @@ -1406,6 +1410,8 @@ func TestBatcherMultiTx(t *testing.T) { func TestBatcherConcurrentAltDARequests(t *testing.T) { InitParallel(t) + numL1TxsExpected := int64(10) + cfg := DefaultSystemConfig(t) cfg.DeployConfig.UseAltDA = true cfg.BatcherMaxPendingTransactions = 0 // no limit on parallel txs @@ -1413,7 +1419,7 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { cfg.BatcherMaxL1TxSizeBytes = derive.FrameV0OverHeadSize + 1 /*version bytes*/ + 1 cfg.BatcherBatchType = 0 cfg.DataAvailabilityType = flags.CalldataType - cfg.BatcherMaxConcurrentDARequest = 0 // no limit + cfg.BatcherMaxConcurrentDARequest = uint64(numL1TxsExpected) // disable batcher because we start it manually below cfg.DisableBatcher = true @@ -1427,8 +1433,8 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { l1Client := 
sys.NodeClient("l1") l2Seq := sys.NodeClient("sequencer") - // we wait for some L2 blocks to have been produced, just to make sure the sequencer is working properly - _, err = geth.WaitForBlock(big.NewInt(10), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*15)*time.Second) + // we wait for numL1TxsExpected L2 blocks to have been produced, just to make sure the sequencer is working properly + _, err = geth.WaitForBlock(big.NewInt(numL1TxsExpected), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*uint64(numL1TxsExpected))*time.Second) require.NoError(t, err, "Waiting for L2 blocks") ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1440,25 +1446,20 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { err = driver.StartBatchSubmitting() require.NoError(t, err) - totalBatcherTxsCount := 0 - // wait for up to 10 L1 blocks, expecting 10 L2 batcher txs in them. + totalBatcherTxsCount := int64(0) + // wait for up to 5 L1 blocks, expecting 10 L2 batcher txs in them. // usually only 3 is required, but it's possible additional L1 blocks will be created // before the batcher starts, so we wait additional blocks. 
- for i := int64(0); i < 10; i++ { + for i := int64(0); i < 5; i++ { block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) require.NoError(t, err, "Waiting for l1 blocks") - for _, tx := range block.Transactions() { - signer := types.NewCancunSigner(tx.ChainId()) - sender, err := types.Sender(signer, tx) - require.NoError(t, err) - // there are possibly other services (proposer/challenger) in the background sending txs - // so we only count the batcher txs - if sender == cfg.DeployConfig.BatchSenderAddress { - totalBatcherTxsCount++ - } - } + // there are possibly other services (proposer/challenger) in the background sending txs + // so we only count the batcher txs + batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress) + require.NoError(t, err) + totalBatcherTxsCount += int64(batcherTxCount) - if totalBatcherTxsCount >= 10 { + if totalBatcherTxsCount >= numL1TxsExpected { return } } From 4a43bd04b5ce1d32cb3a48d8bf77602b4f426880 Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Wed, 4 Sep 2024 18:11:28 -0700 Subject: [PATCH 18/19] chore: lower timeout duration in test --- op-e2e/system_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/op-e2e/system_test.go b/op-e2e/system_test.go index 681e230b9e0d..37fbfe0e7ad8 100644 --- a/op-e2e/system_test.go +++ b/op-e2e/system_test.go @@ -1391,7 +1391,7 @@ func TestBatcherMultiTx(t *testing.T) { // possible additional L1 blocks will be created before the batcher starts, // so we wait additional blocks. 
for i := int64(0); i < 5; i++ { - block, err := geth.WaitForBlock(big.NewInt(int64(l1Number)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) + block, err := geth.WaitForBlock(big.NewInt(int64(l1Number)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*2)*time.Second) require.NoError(t, err, "Waiting for l1 blocks") // there are possibly other services (proposer/challenger) in the background sending txs // so we only count the batcher txs @@ -1451,7 +1451,7 @@ func TestBatcherConcurrentAltDARequests(t *testing.T) { // usually only 3 is required, but it's possible additional L1 blocks will be created // before the batcher starts, so we wait additional blocks. for i := int64(0); i < 5; i++ { - block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second) + block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*2)*time.Second) require.NoError(t, err, "Waiting for l1 blocks") // there are possibly other services (proposer/challenger) in the background sending txs // so we only count the batcher txs From 5e519ca9dc22bc9cea0469455b9dd1ed4a1f703a Mon Sep 17 00:00:00 2001 From: Samuel Laferriere Date: Mon, 9 Sep 2024 17:33:35 -0700 Subject: [PATCH 19/19] fix(batcher): maxConcurentDARequests was not being initialized --- op-batcher/batcher/service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go index 1fe813f9913f..667f75db1016 100644 --- a/op-batcher/batcher/service.go +++ b/op-batcher/batcher/service.go @@ -95,6 +95,7 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string, bs.PollInterval = cfg.PollInterval bs.MaxPendingTransactions = cfg.MaxPendingTransactions + bs.MaxConcurrentDARequests = cfg.AltDA.MaxConcurrentRequests bs.NetworkTimeout = cfg.TxMgrConfig.NetworkTimeout bs.CheckRecentTxsDepth = 
cfg.CheckRecentTxsDepth bs.WaitNodeSync = cfg.WaitNodeSync