From a0f754dfc510f4badf41a35bcaa6df162a14aab3 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Tue, 22 Nov 2022 15:34:25 -0600 Subject: [PATCH 01/56] Adding in Mumbai/Mainnet precursor deb packaging for tests to use during upgrade (iterations to come) --- .github/workflows/packager.yml | 318 ++++++++++++++++++ packaging/deb/.README.md.swp | Bin 0 -> 12288 bytes packaging/deb/README.md | 23 ++ packaging/deb/bor/DEBIAN/changelog | 0 packaging/deb/bor/DEBIAN/control | 0 packaging/deb/bor/DEBIAN/postinst | 4 + packaging/deb/bor/DEBIAN/postrm | 6 + packaging/deb/bor/DEBIAN/prerm | 9 + packaging/requirements/README.md | 1 + packaging/rpm/TODO | 0 .../templates/mainnet-v1/archive/config.toml | 129 +++++++ .../mainnet-v1/sentry/sentry/bor/config.toml | 129 +++++++ .../sentry/validator/bor/config.toml | 131 ++++++++ .../mainnet-v1/without-sentry/bor/config.toml | 131 ++++++++ packaging/templates/package_scripts/changelog | 3 + .../package_scripts/changelog.profile | 3 + packaging/templates/package_scripts/control | 12 + .../templates/package_scripts/control.arm64 | 13 + .../package_scripts/control.profile.amd64 | 14 + .../package_scripts/control.profile.arm64 | 12 + .../package_scripts/control.validator | 12 + .../package_scripts/control.validator.arm64 | 13 + packaging/templates/package_scripts/postinst | 7 + .../package_scripts/postinst.profile | 6 + packaging/templates/package_scripts/postrm | 8 + packaging/templates/package_scripts/preinst | 7 + packaging/templates/package_scripts/prerm | 8 + packaging/templates/systemd/bor.service | 16 + packaging/templates/systemd/bord.service | 36 ++ .../templates/testnet-v4/archive/config.toml | 129 +++++++ .../testnet-v4/sentry/sentry/bor/config.toml | 129 +++++++ .../sentry/validator/bor/config.toml | 131 ++++++++ .../testnet-v4/without-sentry/bor/config.toml | 131 ++++++++ 33 files changed, 1571 insertions(+) create mode 100644 .github/workflows/packager.yml create mode 100644 packaging/deb/.README.md.swp create mode 100644 packaging/deb/README.md create mode 100644 packaging/deb/bor/DEBIAN/changelog create mode 100644 packaging/deb/bor/DEBIAN/control create mode 100755 packaging/deb/bor/DEBIAN/postinst create mode 100755 packaging/deb/bor/DEBIAN/postrm create mode 100755 packaging/deb/bor/DEBIAN/prerm create mode 100644 packaging/requirements/README.md create mode 100644 packaging/rpm/TODO create mode 100644 packaging/templates/mainnet-v1/archive/config.toml create mode 100644 packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml create mode 100644 packaging/templates/mainnet-v1/sentry/validator/bor/config.toml create mode 100644 packaging/templates/mainnet-v1/without-sentry/bor/config.toml create mode 100644 packaging/templates/package_scripts/changelog create mode 100644 packaging/templates/package_scripts/changelog.profile create mode 100644 packaging/templates/package_scripts/control create mode 100644 packaging/templates/package_scripts/control.arm64 create mode 100644 packaging/templates/package_scripts/control.profile.amd64 create mode 100644 packaging/templates/package_scripts/control.profile.arm64 create mode 100644 packaging/templates/package_scripts/control.validator create mode 100644 packaging/templates/package_scripts/control.validator.arm64 create mode 100755 packaging/templates/package_scripts/postinst create mode 100755 packaging/templates/package_scripts/postinst.profile create mode 100755 packaging/templates/package_scripts/postrm create mode 100755 packaging/templates/package_scripts/preinst create mode 100755
packaging/templates/package_scripts/prerm create mode 100644 packaging/templates/systemd/bor.service create mode 100644 packaging/templates/systemd/bord.service create mode 100644 packaging/templates/testnet-v4/archive/config.toml create mode 100644 packaging/templates/testnet-v4/sentry/sentry/bor/config.toml create mode 100644 packaging/templates/testnet-v4/sentry/validator/bor/config.toml create mode 100644 packaging/templates/testnet-v4/without-sentry/bor/config.toml diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml new file mode 100644 index 0000000000..4b6c52f601 --- /dev/null +++ b/.github/workflows/packager.yml @@ -0,0 +1,318 @@ +name: packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + runs-on: ubuntu-18.04 + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.19 + - name: Generate release tag + id: tag + run: echo "::set-output name=release_tag::bor_$(date +"%Y.%m.%d_%H-%M")" + + - name: Cleaning repo + run: make clean + - name: Building for amd64 + run: make bor + + - name: Making directory structure + run: mkdir -p packaging/deb/bor/usr/bin + - name: Making directory structure for toml + run: mkdir -p packaging/deb/bor/var/lib/bor + - name: Copying necessary files + run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ + - name: Copying control file + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor/DEBIAN/control + - name: Removing systemd file for binary + run: rm -rf lib/systemd/system/bor.service + + - name: Creating package for binary only bor + run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-amd64 + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-amd64 + + - name: Removing the bor binary + run: rm -rf packaging/deb/bor/usr/bin/bor + + - name: Making directory structure for systemd + run: mkdir -p packaging/deb/bor/lib/systemd/system + - name: Copying systemd file + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service + + - name: Prepping Mumbai Sentry Node Profile for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64 + - name: Putting toml + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst + - name: Copying the postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying systemd file + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/lib/systemd/system/ + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Running package build for sentry on amd64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64
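+ + # Illustration only, not a workflow step: every profile package below repeats the same three moves. + # 1) cp -rp packaging/deb/bor packaging/deb/bor-<network>-<profile>-config_<version>-<arch> + # 2) overlay that profile's config.toml and DEBIAN maintainer scripts + # 3) run dpkg-deb --build --root-owner-group on the resulting tree + # (<network>, <profile>, <version>, and <arch> are placeholders introduced here, not files in the repo)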
+ + - name: Setting up Mainnet Sentry Config for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 + - name: Copying control file + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Putting toml + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst + - name: Copying the postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Building Sentry for amd64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 + + - name: Prepping Bor Validator Profile for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 + - name: Copying control file for validator on amd64 + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Copying Postinstall script + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying Prerm script + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying Postrm script + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying config.toml for validator for bor + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Building bor validator on amd64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 + + - name: Prepping Bor Mainnet Validator for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 + - name: Copying control file for validator on amd64 + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/preinst + - name: Copying Postinstall script + run: cp -rp packaging/templates/package_scripts/postinst.profile
packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying config.toml for validator for bor + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Building bor validator on amd64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 + + - name: Creating mumbai archive node profile for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Copying profile preinst file + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst + - name: Copying the profile postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying profile prerm file + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying profile postrm file + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying the toml + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Building profile package + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 + + - name: Creating mainnet archive node profile for amd64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/control + - name: Copying profile preinst file + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst + - name: Copying the profile postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst + - name: Copying profile prerm file + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm + - name: Copying profile postrm file + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm + - name: Copying the toml + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/var/lib/bor/ + - name: Building profile package + run: dpkg-deb --build --root-owner-group
packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 + + - name: Cleaning build directory for arm64 build + run: make clean + + - name: Removing systemd file + run: rm -rf packaging/deb/bor/lib/systemd/system/bor.service + + - name: Adding requirements for cross compile + run: sudo apt-get install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu + + - name: Removing systemd file for binary + run: rm -rf lib/systemd/system/bor.service + + - name: Building bor for arm64 + run: GOARCH=arm64 GOOS=linux CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ CGO_ENABLED=1 go build -o build/bin/bor ./cmd/cli/main.go + + - name: Copying bor arm64 for use with packaging + run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ + + - name: Creating package for binary only bor + run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-arm64 + - name: Copying control file + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-v0.3.0-beta-arm64/DEBIAN/control + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-arm64 + + - name: Removing the bor binary + run: rm -rf packaging/deb/bor/usr/bin/bor + + - name: Copying systemd file + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service + + - name: Updating the control file to use with the arm64 profile + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control + + - name: Setting up Mumbai Sentry Config for arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64 + - name: Copying control file + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Putting toml + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/var/lib/bor/ + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst
packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm + - name: Copying the postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postinst + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Building Sentry for arm64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64 + + - name: Prepping Bor Mumbai Validator for arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 + - name: Copying control file for validator on arm64 + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/preinst + - name: Copying Postinstall script + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/postrm + - name: Copying config.toml for validator for bor + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/var/lib/bor/ + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Building bor validator on arm64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 + + - name: Prepping Bor Mainnet Validator for arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 + - name: Copying control file for validator on arm64 + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Copying the preinst + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/preinst + - name: Copying Postinstall script + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst + - name: Copying the prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/prerm + - name: Copying the postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postrm + - name: Copying config.toml for validator for bor + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/var/lib/bor/ + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Building bor validator on arm64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 + + - name: Updating the control file to use with the arm64 profile + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control + + - name: Creating mumbai archive node profile for arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Copying over profile postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst + - name: Copying prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm + - name: Copying postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm + - name: Copying the toml + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/var/lib/bor/ + - name: Building profile package + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 + + - name: Creating mainnet archive node profile for arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 + - name: Copying profile control file + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/control + - name: Copying over profile postinst + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst + - name: Copying prerm + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm + - name: Copying postrm + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm + - name: Copying the toml + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/var/lib/bor/ + - name: Building profile package + run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 + + - name: Confirming package built + run: ls -ltr packaging/deb/ | grep bor + + - name: Release bor Packages + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ steps.tag.outputs.release_tag }} + files: | + packaging/deb/bor**.deb + binary/bo** \ No newline at end of file diff --git a/packaging/deb/.README.md.swp b/packaging/deb/.README.md.swp new file mode 100644 index 0000000000000000000000000000000000000000..765002d189fd7864fe06952777e7df26258b51ff GIT binary patch literal 12288 zcmeI2KW`H;6u@2f0)m0{K}VEa2n%$9Rtcy>8iWc9F!=7AI}^w6Wczw#fgQdA3x7tw z4HE2#k5FI(o-bFa1u=ChdRBh1&VGLP?x!r#y>#$!X973+LqX%b5FZb&W;ee~#o(0?
zxwKlDr)ZUryZ;=cHqQ4ZGTTO+_wG6D4f_T&X^x$Fek5UXSBJ7rlN4Q2$!snQZHpvV zX>xzOHJXh3i~QJGzF-4v;G_oD#r5H}LCmso<~18jf|umLu}22P;?Nw3+z zt_$(_EEP}h{}2EF|2QYacj6myK)fg35pRh%#B1Uu(GV$do47>?;uq!mNqijP?PVQqq@#M%p0urlaX5IJLliX_BgTg#xsP3s8G%A&0!07eee} zP!LM(fj&NHV?aWKZca5o<*;zd`%O~47h{^ZpVD2NtSXg5Ub~nolnT;Xn;cdh>MEYh z=-_IVLPUHHqtnDMeNcZd79dtmQ@mf;pZL2&9T4KE_+bSxUbjh41DtLy*U=zzXl7fQ&>uF-j4H&fv F#cwS6$qoPj literal 0 HcmV?d00001 diff --git a/packaging/deb/README.md b/packaging/deb/README.md new file mode 100644 index 0000000000..7e84275f38 --- /dev/null +++ b/packaging/deb/README.md @@ -0,0 +1,23 @@ +# Debian + + + +For Debian packages, you will need to add the following layout during the build: + + + +bor/ + DEBIAN/control + DEBIAN/postinst + usr/bin/bor + lib/systemd/system/bor.service + +This layout is wrapped up during the package build process. + + +Note: this is still a work in progress. + +TODO: removal/purge on removal using dpkg + cleanup of control files to list what we want + copyright inclusion + diff --git a/packaging/deb/bor/DEBIAN/changelog b/packaging/deb/bor/DEBIAN/changelog new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/deb/bor/DEBIAN/control b/packaging/deb/bor/DEBIAN/control new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/deb/bor/DEBIAN/postinst b/packaging/deb/bor/DEBIAN/postinst new file mode 100755 index 0000000000..e23f4d6897 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/postinst @@ -0,0 +1,4 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor diff --git a/packaging/deb/bor/DEBIAN/postrm b/packaging/deb/bor/DEBIAN/postrm new file mode 100755 index 0000000000..7602789a01 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/postrm @@ -0,0 +1,6 @@ +#!/bin/bash +# +############### +# Remove bor installs +############## +sudo rm -rf /usr/bin/bor diff --git a/packaging/deb/bor/DEBIAN/prerm b/packaging/deb/bor/DEBIAN/prerm new file mode 100755 index 0000000000..e40aed2c80 --- /dev/null +++ b/packaging/deb/bor/DEBIAN/prerm @@ -0,0 +1,9 @@ +#!/bin/bash +# +# +############## +# Stop bor before removal +############## +#sudo systemctl stop bor.service +############# + diff --git a/packaging/requirements/README.md b/packaging/requirements/README.md new file mode 100644 index 0000000000..48cdce8528 --- /dev/null +++ b/packaging/requirements/README.md @@ -0,0 +1 @@ +placeholder diff --git a/packaging/rpm/TODO b/packaging/rpm/TODO new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml new file mode 100644 index 0000000000..a44a2ed0b5 --- /dev/null +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -0,0 +1,129 @@ +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "" +syncmode = "full" +gcmode = "archive" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true +
pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + gaslimit = 20000000 + gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + [jsonrpc.ws] + enabled = true + port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml new file mode 100644 index 0000000000..5bb226b913 --- /dev/null +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -0,0 +1,129 @@ +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + gaslimit = 20000000 + gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 
60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml new file mode 100644 index 0000000000..5a7688f9e8 --- /dev/null +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -0,0 +1,131 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. + +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 1 + port = 30303 + nodiscover = true + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + mine = true + gaslimit = 20000000 + gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# 
[developer] + # dev = false + # period = 0 diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml new file mode 100644 index 0000000000..74094bfa80 --- /dev/null +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -0,0 +1,131 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. + +chain = "mainnet" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + pricelimit = 30000000000 + accountslots = 16 + globalslots = 32768 + accountqueue = 16 + globalqueue = 32768 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricebump = 10 + +[miner] + mine = true + gaslimit = 20000000 + gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "127.0.0.1" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] +# blocks = 20 +# percentile = 60 +# maxprice = "5000000000000" +# ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] +# cache = 1024 +# gc = 25 +# snapshot = 10 +# database = 50 +# trie = 15 +# journal = "triecache" +# rejournal = "1h0m0s" +# noprefetch = false +# preimages = false +# txlookuplimit = 2350000 + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/package_scripts/changelog b/packaging/templates/package_scripts/changelog new file mode 100644 index 0000000000..2395bcaef1 --- /dev/null +++ b/packaging/templates/package_scripts/changelog @@ -0,0 +1,3 @@ +bor (2.10.11) unstable; urgency=low + +-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100 \ No newline at end of file diff --git a/packaging/templates/package_scripts/changelog.profile b/packaging/templates/package_scripts/changelog.profile new file mode 100644 index 0000000000..b84fa22646 --- /dev/null +++ b/packaging/templates/package_scripts/changelog.profile @@ -0,0 +1,3 @@ +bor-profile (2.10.11) unstable; urgency=low + +-- Polygon Team Mon, 10 Nov 2022 00:37:31 +0100 \ No newline at end of file diff --git 
a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control new file mode 100644 index 0000000000..ed0ff46c06 --- /dev/null +++ b/packaging/templates/package_scripts/control @@ -0,0 +1,12 @@ +Source: bor +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Package: bor +Rules-Requires-Root: yes +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 new file mode 100644 index 0000000000..2c624a4c45 --- /dev/null +++ b/packaging/templates/package_scripts/control.arm64 @@ -0,0 +1,13 @@ +Source: bor +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. + diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 new file mode 100644 index 0000000000..087dabb1f6 --- /dev/null +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -0,0 +1,14 @@ +Source: bor-profile +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. + + diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 new file mode 100644 index 0000000000..9de0c50253 --- /dev/null +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -0,0 +1,12 @@ +Source: bor-profile +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator new file mode 100644 index 0000000000..f3f5652a31 --- /dev/null +++ b/packaging/templates/package_scripts/control.validator @@ -0,0 +1,12 @@ +Source: bor-profile +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Package: bor-profile +Rules-Requires-Root: yes +Architecture: amd64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 new file mode 100644 index 0000000000..97712830ff --- /dev/null +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -0,0 +1,13 @@ +Source: bor-profile +Version: 0.3.0 +Section: develop +Priority: standard +Maintainer: Polygon +Build-Depends: debhelper-compat (= 13) +Rules-Requires-Root: yes +Package: bor-profile +Architecture: arm64 +Multi-Arch: foreign +Depends: +Description: This is the bor package from Polygon Technology. 
+ diff --git a/packaging/templates/package_scripts/postinst b/packaging/templates/package_scripts/postinst new file mode 100755 index 0000000000..761ee29f26 --- /dev/null +++ b/packaging/templates/package_scripts/postinst @@ -0,0 +1,7 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor +sudo mkdir -p /var/lib/bor +sudo chown -R bor /var/lib/bor +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postinst.profile b/packaging/templates/package_scripts/postinst.profile new file mode 100755 index 0000000000..80b8381203 --- /dev/null +++ b/packaging/templates/package_scripts/postinst.profile @@ -0,0 +1,6 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +sudo mkdir -p /var/lib/bor +sudo chown -R bor /var/lib/bor +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postrm b/packaging/templates/package_scripts/postrm new file mode 100755 index 0000000000..55bbb87a4f --- /dev/null +++ b/packaging/templates/package_scripts/postrm @@ -0,0 +1,8 @@ +#!/bin/bash +# +############### +# Remove bor installs +############## +sudo rm -rf /var/lib/bor/config.toml +sudo rm -rf /lib/systemd/system/bor.service +sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/preinst b/packaging/templates/package_scripts/preinst new file mode 100755 index 0000000000..b9efb0091d --- /dev/null +++ b/packaging/templates/package_scripts/preinst @@ -0,0 +1,7 @@ +#!/bin/bash +# +################# +# Stop existing bor in case of upgrade +################ +#sudo systemctl stop bor.service +###################### diff --git a/packaging/templates/package_scripts/prerm b/packaging/templates/package_scripts/prerm new file mode 100755 index 0000000000..b2b2b4fce9 --- /dev/null +++ b/packaging/templates/package_scripts/prerm @@ -0,0 +1,8 @@ +#!/bin/bash +# +# +############## +# Stop bor before removal +############## +#sudo systemctl stop bor.service +############# \ No newline at end of file diff --git a/packaging/templates/systemd/bor.service b/packaging/templates/systemd/bor.service new file mode 100644 index 0000000000..aa63871d83 --- /dev/null +++ b/packaging/templates/systemd/bor.service @@ -0,0 +1,16 @@ +[Unit] + Description=bor + StartLimitIntervalSec=500 + StartLimitBurst=5 + +[Service] + Restart=on-failure + RestartSec=5s + ExecStart=/usr/bin/bor server -config="/var/lib/bor/config.toml" + Type=simple + KillSignal=SIGINT + User=bor + TimeoutStopSec=120 + +[Install] + WantedBy=multi-user.target diff --git a/packaging/templates/systemd/bord.service b/packaging/templates/systemd/bord.service new file mode 100644 index 0000000000..a2f3c0498a --- /dev/null +++ b/packaging/templates/systemd/bord.service @@ -0,0 +1,36 @@ +[Unit] + Description=bor + StartLimitIntervalSec=500 + StartLimitBurst=5 + +[Service] + Restart=on-failure + RestartSec=5s + ExecStart=/usr/bin/bor server \ + -chain=mumbai \ + -port=30303 \ + -datadir /var/lib/bor/data \ + -http \ + -http.addr '0.0.0.0' \ + -http.port 8545 \ + -http.api='eth,net,web3,txpool,bor' \ + -ethstats matic-sentry-01:testnet@bor-mumbai.vitwit.com:3000 \ + -maxpeers 200 \ + -txpool.nolocals \ + -txpool.accountslots '128' \ + -txpool.lifetime '0h16m0s' \ + -txpool.globalslots '20000' \ + -http.vhosts '*' \ + -http.corsdomain '*' \ + -ipcpath 
/var/lib/bor/data/bor.ipc \ + -miner.gaslimit '20000000' \ + -metrics \ + -metrics.expensive \ + -metrics.prometheus-addr="127.0.0.1:7071" \ + -metrics.opencollector-endpoint "0.0.0.0:4317" + Type=simple + KillSignal=SIGINT + TimeoutStopSec=120 + +[Install] + WantedBy=multi-user.target diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml new file mode 100644 index 0000000000..0438ffe3ae --- /dev/null +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -0,0 +1,129 @@ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 30000000000 + # pricebump = 10 + +[miner] + gaslimit = 20000000 + # gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + [jsonrpc.ws] + enabled = true + port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml new file mode 100644 index 0000000000..155a33373b --- /dev/null +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -0,0 +1,129 @@ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = 
false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 30000000000 + # pricebump = 10 + +[miner] + gaslimit = 20000000 + # gasprice = "30000000000" + # mine = false + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +# [accounts] + # unlock = [] + # password = "" + # allow-insecure-unlock = false + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml new file mode 100644 index 0000000000..19b9a4452c --- /dev/null +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -0,0 +1,131 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. 
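+# For illustration only, with hypothetical values to adjust for your node: +# keystore = "/var/lib/bor/keystore" +# password = "/var/lib/bor/password.txt" +# unlock = ["0xYourValidatorAddress"]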
+ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 1 + port = 30303 + nodiscover = true + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 30000000000 + # pricebump = 10 + +[miner] + mine = true + gaslimit = 20000000 + # gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] + # blocks = 20 + # percentile = 60 + # maxprice = "5000000000000" + # ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] + # cache = 1024 + # gc = 25 + # snapshot = 10 + # database = 50 + # trie = 15 + # journal = "triecache" + # rejournal = "1h0m0s" + # noprefetch = false + # preimages = false + # txlookuplimit = 2350000 + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml new file mode 100644 index 0000000000..c4de3e87c4 --- /dev/null +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -0,0 +1,131 @@ +# NOTE: Update and uncomment: `keystore`, `password`, and `unlock` fields. 
+ +chain = "mumbai" +# identity = "node_name" +# log-level = "INFO" +datadir = "/var/lib/bor/data" +# keystore = "$BOR_DIR/keystore" +syncmode = "full" +# gcmode = "full" +# snapshot = true +# ethstats = "" + +# ["eth.requiredblocks"] + +[p2p] + maxpeers = 50 + port = 30303 + # maxpendpeers = 50 + # bind = "0.0.0.0" + # nodiscover = false + # nat = "any" + # [p2p.discovery] + # v5disc = false + # bootnodes = [] + # bootnodesv4 = [] + # bootnodesv5 = [] + # static-nodes = [] + # trusted-nodes = [] + # dns = [] + +# [heimdall] + # url = "http://localhost:1317" + # "bor.without" = false + # grpc-address = "" + +[txpool] + nolocals = true + accountslots = 16 + globalslots = 131072 + accountqueue = 64 + globalqueue = 131072 + lifetime = "1h30m0s" + # locals = [] + # journal = "" + # rejournal = "1h0m0s" + # pricelimit = 30000000000 + # pricebump = 10 + +[miner] + mine = true + gaslimit = 20000000 + # gasprice = "30000000000" + # etherbase = "" + # extradata = "" + +[jsonrpc] + ipcpath = "/var/lib/bor/bor.ipc" + # ipcdisable = false + # gascap = 50000000 + # txfeecap = 5.0 + [jsonrpc.http] + enabled = true + port = 8545 + host = "0.0.0.0" + api = ["eth", "net", "web3", "txpool", "bor"] + vhosts = ["*"] + corsdomain = ["*"] + # prefix = "" + # [jsonrpc.ws] + # enabled = false + # port = 8546 + # prefix = "" + # host = "localhost" + # api = ["web3", "net"] + # origins = ["*"] + # [jsonrpc.graphql] + # enabled = false + # port = 0 + # prefix = "" + # host = "" + # vhosts = ["*"] + # corsdomain = ["*"] + +# [gpo] +# blocks = 20 +# percentile = 60 +# maxprice = "5000000000000" +# ignoreprice = "2" + +[telemetry] + metrics = true + # expensive = false + # prometheus-addr = "" + # opencollector-endpoint = "" + # [telemetry.influx] + # influxdb = false + # endpoint = "" + # database = "" + # username = "" + # password = "" + # influxdbv2 = false + # token = "" + # bucket = "" + # organization = "" + # [telemetry.influx.tags] + +# [cache] +# cache = 1024 +# gc = 25 +# snapshot = 10 +# database = 50 +# trie = 15 +# journal = "triecache" +# rejournal = "1h0m0s" +# noprefetch = false +# preimages = false +# txlookuplimit = 2350000 + +[accounts] + allow-insecure-unlock = true + # password = "$BOR_DIR/password.txt" + # unlock = ["$ADDRESS"] + # lightkdf = false + # disable-bor-wallet = false + +# [grpc] + # addr = ":3131" + +# [developer] + # dev = false + # period = 0 From 5e92a7fe257e4ba950d3f73d99855196f5890c5b Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 23 Nov 2022 07:54:15 -0600 Subject: [PATCH 02/56] Added changes per discussion in PR, more changes may be necessary --- .github/workflows/packager.yml | 15 ++++------ packaging/templates/systemd/bord.service | 36 ------------------------ 2 files changed, 5 insertions(+), 46 deletions(-) delete mode 100644 packaging/templates/systemd/bord.service diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml index 4b6c52f601..016912301c 100644 --- a/.github/workflows/packager.yml +++ b/.github/workflows/packager.yml @@ -2,10 +2,6 @@ name: packager on: push: - branches: - - 'main' - paths: - - '**' tags: - 'v*.*.*' - 'v*.*.*-*' @@ -22,9 +18,8 @@ jobs: uses: actions/setup-go@master with: go-version: 1.19 - - name: Generate release tag - id: tag - run: echo "::set-output name=release_tag::bor_$(date +"%Y.%m.%d_%H-%M")" + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV - name: Cleaning repo run: make clean @@ -309,10 +304,10 @@ jobs: - name: Confirming package built run: ls -ltr 
packaging/deb/ | grep bor - - name: Release bor Packages + - name: Pre-Release bor Packages uses: softprops/action-gh-release@v1 with: - tag_name: ${{ steps.tag.outputs.release_tag }} + tag_name: ${{ env.GIT_TAG }} files: | packaging/deb/bor**.deb - binary/bo** \ No newline at end of file + binary/bo** diff --git a/packaging/templates/systemd/bord.service b/packaging/templates/systemd/bord.service deleted file mode 100644 index a2f3c0498a..0000000000 --- a/packaging/templates/systemd/bord.service +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] - Description=bor - StartLimitIntervalSec=500 - StartLimitBurst=5 - -[Service] - Restart=on-failure - RestartSec=5s - ExecStart=/usr/bin/bor server \ - -chain=mumbai \ - -port=30303 \ - -datadir /var/lib/bor/data \ - -http \ - -http.addr '0.0.0.0' \ - -http.port 8545 \ - -http.api='eth,net,web3,txpool,bor' \ - -ethstats matic-sentry-01:testnet@bor-mumbai.vitwit.com:3000 \ - -maxpeers 200 \ - -txpool.nolocals \ - -txpool.accountslots '128' \ - -txpool.lifetime '0h16m0s' \ - -txpool.globalslots '20000' \ - -http.vhosts '*' \ - -http.corsdomain '*' \ - -ipcpath /var/lib/bor/data/bor.ipc \ - -miner.gaslimit '20000000' \ - -metrics \ - -metrics.expensive \ - -metrics.prometheus-addr="127.0.0.1:7071" \ - -metrics.opencollector-endpoint "0.0.0.0:4317" - Type=simple - KillSignal=SIGINT - TimeoutStopSec=120 - -[Install] - WantedBy=multi-user.target From 1f0e49b0d8f85d37643be06a7698977abb2ec397 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 23 Nov 2022 09:07:41 -0600 Subject: [PATCH 03/56] Adding prerelease true --- .github/workflows/packager.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml index 016912301c..998008ede8 100644 --- a/.github/workflows/packager.yml +++ b/.github/workflows/packager.yml @@ -308,6 +308,7 @@ jobs: uses: softprops/action-gh-release@v1 with: tag_name: ${{ env.GIT_TAG }} + prerelease: true files: | packaging/deb/bor**.deb binary/bo** From ba56c98769eecfcbd0ecda736cde29d717daf1c4 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 23 Nov 2022 09:10:41 -0600 Subject: [PATCH 04/56] Disabling goreleaser --- .goreleaser.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 331d8de6b5..229d76a3f6 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,7 +1,7 @@ project_name: bor release: - disable: false + disable: true draft: true prerelease: auto From c1e84fd6945eb502ca336d00c35edd7bc2238386 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 23 Nov 2022 09:12:42 -0600 Subject: [PATCH 05/56] Removing README swap file --- packaging/deb/.README.md.swp | Bin 12288 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 packaging/deb/.README.md.swp diff --git a/packaging/deb/.README.md.swp b/packaging/deb/.README.md.swp deleted file mode 100644 index 765002d189fd7864fe06952777e7df26258b51ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2KW`H;6u@2f0)m0{K}VEa2n%$9Rtcy>8iWc9F!=7AI}^w6Wczw#fgQdA3x7tw z4HE2#k5FI(o-bFa1u=ChdRBh1&VGLP?x!r#y>#$!X973+LqX%b5FZb&W;ee~#o(0? 
zxwKlDr)ZUryZ;=cHqQ4ZGTTO+_wG6D4f_T&X^x$Fek5UXSBJ7rlN4Q2$!snQZHpvV zX>xzOHJXh3i~QJGzF-4v;G_oD#r5H}LCmso<~18jf|umLu}22P;?Nw3+z zt_$(_EEP}h{}2EF|2QYacj6myK)fg35pRh%#B1Uu(GV$do47>?;uq!mNqijP?PVQqq@#M%p0urlaX5IJLliX_BgTg#xsP3s8G%A&0!07eee} zP!LM(fj&NHV?aWKZca5o<*;zd`%O~47h{^ZpVD2NtSXg5Ub~nolnT;Xn;cdh>MEYh z=-_IVLPUHHqtnDMeNcZd79dtmQ@mf;pZL2&9T4KE_+bSxUbjh41DtLy*U=zzXl7fQ&>uF-j4H&fv F#cwS6$qoPj From 08ec50a846d5ea47ac19ea08c30f9bc20d5a14a0 Mon Sep 17 00:00:00 2001 From: Will Button Date: Wed, 23 Nov 2022 09:43:34 -0700 Subject: [PATCH 06/56] change bor_dir and add bor user for v0.3.0 release --- Dockerfile | 9 ++++++--- Dockerfile.alltools | 2 +- Dockerfile.release | 18 ++++++++++++++---- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7a2770ce9a..0e7a91e96c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,20 @@ FROM golang:latest -ARG BOR_DIR=/bor +ARG BOR_DIR=/var/lib/bor ENV BOR_DIR=$BOR_DIR RUN apt-get update -y && apt-get upgrade -y \ && apt install build-essential git -y \ - && mkdir -p /bor + && mkdir -p ${BOR_DIR} WORKDIR ${BOR_DIR} COPY . . RUN make bor -RUN cp build/bin/bor /usr/local/bin/ +RUN cp build/bin/bor /usr/bin/ +RUN groupadd -g 10137 bor \ + && useradd -u 10137 --no-log-init --create-home -r -g bor bor \ + && chown -R bor:bor ${BOR_DIR} ENV SHELL /bin/bash EXPOSE 8545 8546 8547 30303 30303/udp diff --git a/Dockerfile.alltools b/Dockerfile.alltools index a3f36d4a04..1c4437e251 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -13,6 +13,6 @@ RUN set -x \ && apk add --update --no-cache \ ca-certificates \ && rm -rf /var/cache/apk/* -COPY --from=builder /bor/build/bin/* /usr/local/bin/ +COPY --from=builder /bor/build/bin/* /usr/bin/ EXPOSE 8545 8546 30303 30303/udp diff --git a/Dockerfile.release b/Dockerfile.release index 66dd589e82..cd90450bc3 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,10 +1,20 @@ FROM alpine:3.14 +ARG BOR_DIR=/var/lib/bor +ENV BOR_DIR=$BOR_DIR + RUN apk add --no-cache ca-certificates && \ - mkdir -p /etc/bor -COPY bor /usr/local/bin/ -COPY builder/files/genesis-mainnet-v1.json /etc/bor/ -COPY builder/files/genesis-testnet-v4.json /etc/bor/ + mkdir -p ${BOR_DIR} + +WORKDIR ${BOR_DIR} +COPY bor /usr/bin/ +COPY builder/files/genesis-mainnet-v1.json ${BOR_DIR} +COPY builder/files/genesis-testnet-v4.json ${BOR_DIR} +RUN groupadd -g 10137 bor \ + && useradd -u 10137 --no-log-init --create-home -r -g bor bor \ + && chown -R bor:bor ${BOR_DIR} + +USER bor EXPOSE 8545 8546 8547 30303 30303/udp ENTRYPOINT ["bor"] From 5fe83d35cff09d5df40dbd4d4d0a3755e6f49cb7 Mon Sep 17 00:00:00 2001 From: Will Button Date: Wed, 23 Nov 2022 14:16:36 -0700 Subject: [PATCH 07/56] rollback bor user and use root --- Dockerfile | 3 --- Dockerfile.release | 5 ----- 2 files changed, 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0e7a91e96c..6c65faf12d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,9 +12,6 @@ COPY . . 
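+# NOTE: with the dedicated `bor` user rolled back, the image builds and runs
+# as root; `make bor` leaves the binary at build/bin/bor, which the next step
+# copies onto the PATH at /usr/bin.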
RUN make bor RUN cp build/bin/bor /usr/bin/ -RUN groupadd -g 10137 bor \ - && useradd -u 10137 --no-log-init --create-home -r -g bor bor \ - && chown -R bor:bor ${BOR_DIR} ENV SHELL /bin/bash EXPOSE 8545 8546 8547 30303 30303/udp diff --git a/Dockerfile.release b/Dockerfile.release index cd90450bc3..2a026566d7 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -10,11 +10,6 @@ WORKDIR ${BOR_DIR} COPY bor /usr/bin/ COPY builder/files/genesis-mainnet-v1.json ${BOR_DIR} COPY builder/files/genesis-testnet-v4.json ${BOR_DIR} -RUN groupadd -g 10137 bor \ - && useradd -u 10137 --no-log-init --create-home -r -g bor bor \ - && chown -R bor:bor ${BOR_DIR} - -USER bor EXPOSE 8545 8546 8547 30303 30303/udp ENTRYPOINT ["bor"] From 137fb19d2ad478a87475d792f3358b76b54f6228 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Thu, 24 Nov 2022 19:42:32 +0530 Subject: [PATCH 08/56] metrics: handle equal to separated config flag (#596) * metrics: handle based config path * internal/cli/server: add more context to logs * use space separated flag and value in bor.service --- builder/files/bor.service | 2 +- internal/cli/server/server.go | 8 +++++++- metrics/metrics.go | 3 +++ packaging/templates/systemd/bor.service | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/builder/files/bor.service b/builder/files/bor.service index 2deff3dbc9..758553299e 100644 --- a/builder/files/bor.service +++ b/builder/files/bor.service @@ -6,7 +6,7 @@ [Service] Restart=on-failure RestartSec=5s - ExecStart=/usr/local/bin/bor server -config="/var/lib/bor/config.toml" + ExecStart=/usr/local/bin/bor server -config "/var/lib/bor/config.toml" Type=simple User=bor KillSignal=SIGINT diff --git a/internal/cli/server/server.go b/internal/cli/server/server.go index 1346fe613a..70187d6985 100644 --- a/internal/cli/server/server.go +++ b/internal/cli/server/server.go @@ -235,7 +235,13 @@ func (s *Server) Stop() { func (s *Server) setupMetrics(config *TelemetryConfig, serviceName string) error { // Check the global metrics if they're matching with the provided config if metrics.Enabled != config.Enabled || metrics.EnabledExpensive != config.Expensive { - log.Warn("Metric misconfiguration, some of them might not be visible") + log.Warn( + "Metric misconfiguration, some of them might not be visible", + "metrics", metrics.Enabled, + "config.metrics", config.Enabled, + "expensive", metrics.EnabledExpensive, + "config.expensive", config.Expensive, + ) } // Update the values anyways (for services which don't need immediate attention) diff --git a/metrics/metrics.go b/metrics/metrics.go index 1d0133e850..1c0cf11ab4 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -49,6 +49,9 @@ func init() { // check for existence of `config` flag if flag == configFlag && i < len(os.Args)-1 { configFile = strings.TrimLeft(os.Args[i+1], "-") // find the value of flag + } else if len(flag) > 6 && flag[:6] == configFlag { + // Checks for `=` separated flag (e.g. 
config=path)
+			configFile = strings.TrimLeft(flag[6:], "=")
 		}
 
 		for _, enabler := range enablerFlags {
diff --git a/packaging/templates/systemd/bor.service b/packaging/templates/systemd/bor.service
index aa63871d83..b92bdd3cc5 100644
--- a/packaging/templates/systemd/bor.service
+++ b/packaging/templates/systemd/bor.service
@@ -6,7 +6,7 @@
 [Service]
   Restart=on-failure
   RestartSec=5s
-  ExecStart=/usr/bin/bor server -config="/var/lib/bor/config.toml"
+  ExecStart=/usr/bin/bor server -config "/var/lib/bor/config.toml"
   Type=simple
   KillSignal=SIGINT
   User=bor

From d69cb20ba93054ffc28038888017dae9412e0985 Mon Sep 17 00:00:00 2001
From: Pratik Patil
Date: Fri, 25 Nov 2022 17:44:05 +0530
Subject: [PATCH 09/56] fixed static-nodes related bug (OS independent) (#598)

* fixed static-nodes related bug (OS independent)

* taking static-nodes as input if the default is not present

---
 scripts/getconfig.go |  2 +-
 scripts/getconfig.sh | 13 +++++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/scripts/getconfig.go b/scripts/getconfig.go
index caf3f45a8e..7fbd39ac6d 100644
--- a/scripts/getconfig.go
+++ b/scripts/getconfig.go
@@ -375,7 +375,7 @@ func getStaticTrustedNodes(args []string) {
 			fmt.Println("only TOML config file is supported through CLI")
 		}
 	} else {
-		path := "~/.bor/data/bor/static-nodes.json"
+		path := "./static-nodes.json"
 		if !checkFileExists(path) {
 			return
 		}
diff --git a/scripts/getconfig.sh b/scripts/getconfig.sh
index a2971c4f12..472af08802 100755
--- a/scripts/getconfig.sh
+++ b/scripts/getconfig.sh
@@ -24,6 +24,14 @@ then
 fi
 read -p "* Your validator address (e.g. 0xca67a8D767e45056DC92384b488E9Af654d78DE2), or press Enter to skip if running a sentry node: " ADD
 
+if [[ -f $HOME/.bor/data/bor/static-nodes.json ]]
+then
+cp $HOME/.bor/data/bor/static-nodes.json ./static-nodes.json
+else
+read -p "* You don't have the '~/.bor/data/bor/static-nodes.json' file.
If you want to use static nodes, enter the path to 'static-nodes.json' here (press Enter to skip): " STAT +if [[ -f $STAT ]]; then cp $STAT ./static-nodes.json; fi +fi + printf "\nThank you, your inputs are:\n" echo "Path to start.sh: "$startPath echo "Address: "$ADD @@ -113,4 +121,9 @@ chmod +x $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh $ADD rm $tmpDir/3305fe263dd4a999d58f96deb064e21bb70123d9.sh +if [[ -f $HOME/.bor/data/bor/static-nodes.json ]] +then +rm ./static-nodes.json +fi + exit 0 From 095ce5e1dae6591625b1bd50e9f7c032f6bd8f70 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Fri, 25 Nov 2022 21:56:31 +0530 Subject: [PATCH 10/56] Update default flags (#600) * internal/cli/server: use geth's default for txpool.pricelimit and add comments * builder/files: update config.toml for mainnet * packaging/templates: update defaults for mainnet and mumbai * internal/cli/server: skip overriding cache * packaging/templates: update cache value for mainnet * packaging/templates: update gcmode for archive mumbai node --- builder/files/config.toml | 6 +++--- internal/cli/server/config.go | 15 ++++----------- .../templates/mainnet-v1/archive/config.toml | 10 +++++----- .../mainnet-v1/sentry/sentry/bor/config.toml | 10 +++++----- .../mainnet-v1/sentry/validator/bor/config.toml | 10 +++++----- .../mainnet-v1/without-sentry/bor/config.toml | 10 +++++----- .../templates/testnet-v4/archive/config.toml | 8 ++++---- .../testnet-v4/sentry/sentry/bor/config.toml | 6 +++--- .../testnet-v4/sentry/validator/bor/config.toml | 6 +++--- .../testnet-v4/without-sentry/bor/config.toml | 6 +++--- 10 files changed, 40 insertions(+), 47 deletions(-) diff --git a/builder/files/config.toml b/builder/files/config.toml index 8f70f62a13..f79b892e7f 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -52,7 +52,7 @@ syncmode = "full" # pricebump = 10 [miner] - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # mine = true # etherbase = "VALIDATOR ADDRESS" @@ -87,11 +87,11 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" - # ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 7070afca24..2e0a8f21f5 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -428,7 +428,7 @@ func DefaultConfig() *Config { NoLocals: false, Journal: "transactions.rlp", Rejournal: 1 * time.Hour, - PriceLimit: 30000000000, + PriceLimit: 1, // geth's default PriceBump: 10, AccountSlots: 16, GlobalSlots: 32768, @@ -439,8 +439,8 @@ func DefaultConfig() *Config { Sealer: &SealerConfig{ Enabled: false, Etherbase: "", - GasCeil: 30_000_000, - GasPrice: big.NewInt(1 * params.GWei), + GasCeil: 30_000_000, // geth's default + GasPrice: big.NewInt(1 * params.GWei), // geth's default ExtraData: "", }, Gpo: &GpoConfig{ @@ -497,7 +497,7 @@ func DefaultConfig() *Config { }, }, Cache: &CacheConfig{ - Cache: 1024, + Cache: 1024, // geth's default (suitable for mumbai) PercDatabase: 50, PercTrie: 15, PercGc: 25, @@ -626,13 +626,6 @@ func (c *Config) loadChain() error { c.P2P.Discovery.DNS = c.chain.DNS } - // depending on the chain we have different cache values - if c.Chain == "mainnet" { - c.Cache.Cache = 4096 - } else { - c.Cache.Cache = 1024 - } - return nil } diff --git a/packaging/templates/mainnet-v1/archive/config.toml 
b/packaging/templates/mainnet-v1/archive/config.toml index a44a2ed0b5..8989a1b7b5 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -45,7 +45,7 @@ gcmode = "archive" # pricebump = 10 [miner] - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # mine = false # etherbase = "" @@ -79,11 +79,11 @@ gcmode = "archive" # vhosts = ["*"] # corsdomain = ["*"] -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" - # ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true @@ -102,8 +102,8 @@ gcmode = "archive" # organization = "" # [telemetry.influx.tags] -# [cache] - # cache = 1024 +[cache] + cache = 4096 # gc = 25 # snapshot = 10 # database = 50 diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 5bb226b913..d91e12b31e 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -45,7 +45,7 @@ syncmode = "full" # pricebump = 10 [miner] - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # mine = false # etherbase = "" @@ -79,11 +79,11 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" - # ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true @@ -102,8 +102,8 @@ syncmode = "full" # organization = "" # [telemetry.influx.tags] -# [cache] - # cache = 1024 +[cache] + cache = 4096 # gc = 25 # snapshot = 10 # database = 50 diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 5a7688f9e8..c9f583aae3 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -48,7 +48,7 @@ syncmode = "full" [miner] mine = true - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # etherbase = "" # extradata = "" @@ -81,11 +81,11 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" - # ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true @@ -104,8 +104,8 @@ syncmode = "full" # organization = "" # [telemetry.influx.tags] -# [cache] - # cache = 1024 +[cache] + cache = 4096 # gc = 25 # snapshot = 10 # database = 50 diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 74094bfa80..5c4a057b91 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -48,7 +48,7 @@ syncmode = "full" [miner] mine = true - gaslimit = 20000000 + gaslimit = 30000000 gasprice = "30000000000" # etherbase = "" # extradata = "" @@ -81,11 +81,11 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] -# [gpo] +[gpo] # blocks = 20 # percentile = 60 # maxprice = "5000000000000" -# ignoreprice = "2" + ignoreprice = "30000000000" [telemetry] metrics = true @@ -104,8 +104,8 @@ syncmode = "full" # organization = "" # [telemetry.influx.tags] -# [cache] -# cache = 1024 +[cache] + cache = 4096 # gc = 25 # snapshot = 10 # database = 50 diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 
0438ffe3ae..5b7cbdd78e 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -4,7 +4,7 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # keystore = "" syncmode = "full" -# gcmode = "full" +gcmode = "archive" # snapshot = true # ethstats = "" @@ -41,12 +41,12 @@ syncmode = "full" # locals = [] # journal = "" # rejournal = "1h0m0s" - # pricelimit = 30000000000 + # pricelimit = 1 # pricebump = 10 [miner] - gaslimit = 20000000 - # gasprice = "30000000000" + gaslimit = 30000000 + # gasprice = "1000000000" # mine = false # etherbase = "" # extradata = "" diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 155a33373b..f98e04ff42 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -41,12 +41,12 @@ syncmode = "full" # locals = [] # journal = "" # rejournal = "1h0m0s" - # pricelimit = 30000000000 + # pricelimit = 1 # pricebump = 10 [miner] - gaslimit = 20000000 - # gasprice = "30000000000" + gaslimit = 30000000 + # gasprice = "1000000000" # mine = false # etherbase = "" # extradata = "" diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 19b9a4452c..e2a404263d 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -43,13 +43,13 @@ syncmode = "full" # locals = [] # journal = "" # rejournal = "1h0m0s" - # pricelimit = 30000000000 + # pricelimit = 1 # pricebump = 10 [miner] mine = true - gaslimit = 20000000 - # gasprice = "30000000000" + gaslimit = 30000000 + # gasprice = "1000000000" # etherbase = "" # extradata = "" diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index c4de3e87c4..567c9a5b0a 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -43,13 +43,13 @@ syncmode = "full" # locals = [] # journal = "" # rejournal = "1h0m0s" - # pricelimit = 30000000000 + # pricelimit = 1 # pricebump = 10 [miner] mine = true - gaslimit = 20000000 - # gasprice = "30000000000" + gaslimit = 30000000 + # gasprice = "1000000000" # etherbase = "" # extradata = "" From 3e7160997a79c890b1f271b2e1c03a0c2eeeb303 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Sat, 26 Nov 2022 01:54:50 +0530 Subject: [PATCH 11/56] metrics: handle nil telemetry config (#601) --- metrics/metrics.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/metrics/metrics.go b/metrics/metrics.go index 1c0cf11ab4..a22f99feaa 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -93,7 +93,8 @@ func updateMetricsFromConfig(path string) { conf := &CliConfig{} - if _, err := toml.Decode(tomlData, &conf); err != nil || conf == nil { + _, err = toml.Decode(tomlData, &conf) + if err != nil || conf == nil || conf.Telemetry == nil { return } From 59dccd37c3795402729308e0a19b23d4a3d37a51 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Tue, 29 Nov 2022 13:03:49 +0530 Subject: [PATCH 12/56] resolve merge conflicts --- internal/cli/server/config.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 62c90ae263..e58838cf59 100644 --- 
a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -435,11 +435,7 @@ func DefaultConfig() *Config { NoLocals: false, Journal: "transactions.rlp", Rejournal: 1 * time.Hour, -<<<<<<< HEAD - PriceLimit: 1, -======= PriceLimit: 1, // geth's default ->>>>>>> 3e7160997a79c890b1f271b2e1c03a0c2eeeb303 PriceBump: 10, AccountSlots: 16, GlobalSlots: 32768, From d96662ab63381f17cb5e34c84e91750d5d200f55 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Wed, 30 Nov 2022 00:21:26 +0530 Subject: [PATCH 13/56] update go version in release.yml --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b615cf639e..92b960f5cd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@master with: - go-version: 1.17.x + go-version: 1.19.x - name: Prepare id: prepare From 742990cfcd5d0de8405d21bfa25c08fb275b8a69 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Wed, 30 Nov 2022 00:40:56 +0530 Subject: [PATCH 14/56] update goversion in makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index e51d2d99eb..bb5c6df0cf 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ protoc: generate-mocks: go generate mockgen -destination=./tests/bor/mocks/IHeimdallClient.go -package=mocks ./consensus/bor IHeimdallClient go generate mockgen -destination=./eth/filters/IBackend.go -package=filters ./eth/filters Backend - + geth: $(GORUN) build/ci.go install ./cmd/geth @echo "Done building." @@ -196,7 +196,7 @@ geth-windows-amd64: @ls -ld $(GOBIN)/geth-windows-* | grep amd64 PACKAGE_NAME := github.com/maticnetwork/bor -GOLANG_CROSS_VERSION ?= v1.18.1 +GOLANG_CROSS_VERSION ?= v1.19.1 .PHONY: release-dry-run release-dry-run: From 4d19cf5342a439d98cca21b03c63a0bc075769cf Mon Sep 17 00:00:00 2001 From: Will Button Date: Wed, 30 Nov 2022 08:00:31 -0800 Subject: [PATCH 15/56] update Docker login for goreleaser-cross v1.19 --- .github/workflows/release.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b615cf639e..0d45f0bc98 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,6 +29,12 @@ jobs: TAG=${GITHUB_REF#refs/tags/} echo ::set-output name=tag_name::${TAG} + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB }} + password: ${{ secrets.DOCKERHUB_KEY }} + - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -39,5 +45,3 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VERSION: ${{ steps.prepare.outputs.tag_name }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} From 1cf78fd9c936cad37196a3f3cd752bb7864db244 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 30 Nov 2022 16:38:48 -0600 Subject: [PATCH 16/56] Cleanup for the packager to use git tag in the package profile naming. 
Added conditional check for directory structure in prep for v0.3.1, since
 already-existing files would otherwise break the package upgrade path
---
 .github/workflows/packager.yml               | 839 +++++++++++++-----
 packaging/templates/package_scripts/postinst |   9 +-
 .../package_scripts/postinst.profile         |   9 +-
 3 files changed, 643 insertions(+), 214 deletions(-)

diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml
index 998008ede8..5c59b1751a 100644
--- a/.github/workflows/packager.yml
+++ b/.github/workflows/packager.yml
@@ -2,6 +2,10 @@ name: packager
 
 on:
   push:
+    branches:
+      - 'main'
+    paths:
+      - '**'
     tags:
       - 'v*.*.*'
       - 'v*.*.*-*'
@@ -37,10 +41,15 @@ jobs:
     - name: removing systemd file for binary
      run: rm -rf lib/systemd/system/bor.service
 
-    - name: Creating package for binary only bor
-      run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-amd64
+    - name: Creating package for binary for bor ${{ env.ARCH }}
+      run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+      env:
+        ARCH: amd64
+
     - name: Running package build
-      run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-amd64
+      run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}
+      env:
+        ARCH: amd64
 
     - name: Removing the bor binary
       run: rm -rf packaging/deb/bor/usr/bin/bor
@@ -50,113 +59,317 @@ jobs:
     - name: Copying systemd file
       run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service
 
-    - name: Prepping Mumbai Sentry Node Profile for amd64
-      run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64
-    - name: Putting toml
-      run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/var/lib/bor/
-    - name: Copying the preinst
-      run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst
-    - name: Copying the postinst
-      run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst
-    - name: Copying the prerm
-      run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm
-    - name: Copying the postrm
-      run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm
-    - name: Copying systemd file
-      run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/lib/systemd/system/
-    - name: Copying profile control file
-      run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/control
-    - name: Running package build for sentry on amd64
-      run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64
+    - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+      run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+      env:
+        ARCH: amd64
+        NODE: sentry
+        NETWORK: mumbai
+    - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+      run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+      env:
+        ARCH: amd64
+        NODE: sentry
+        NETWORK: mumbai
+    - 
name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai - - name: Setting up Mainnet Sentry Config for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building Sentry for amd64 - run: dpkg-deb --build 
--root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 + - name: Setting up ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Building ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet - - name: Prepping Bor Validator Profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 - - name: Copying control file for validator on amd64 - run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying Prerm script - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying Postrm 
script - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building bor validator on amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Prerm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postrm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai - - name: Prepping Bor Mainnet Validator for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying the preinst - 
run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building bor validator on amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 
packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet - - name: Creating mumbai archive node profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying profile preinst file - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying profile prerm file - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying profile postrm file - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Building profile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ 
env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai - - name: Creating mainnet archive node profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying profile preinst file - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying profile prerm file - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying profile postrm file - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Building porfile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + 
NODE: archive + NETWORK: mainnet + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet - name: Cleaning build directory for arm64 build run: make clean @@ -177,11 +390,17 @@ jobs: run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ - name: Creating package for binary only bor - run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-v0.3.0-beta-arm64/DEBIAN/control + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 - name: Running package build - run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-arm64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 - name: Removing the bor binary run: rm -rf packaging/deb/bor/usr/bin/bor @@ -192,123 +411,323 @@ jobs: - name: Updating the control file to use with the arm64 profile run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control - - name: Setting up Mumbai Sentry Config for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building Sentry for arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64 + - 
name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai - - name: Setting up Mainnet Sentry Config for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm 
packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building Sentry for arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64 + - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet - - name: Prepping Bor Mumbai Validator for 
arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building bor validator on arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 + - name: Prepping Bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ 
env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai - - name: Prepping Bor Mainnet Validator for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building bor validator on arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + 
NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet - name: Updating the control file to use with the arm64 profile run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control - - name: Creating mumbai archive node profile for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying over profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Building profile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ 
env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai - - name: Creating mainnet archive node profile for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying over profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Building porfile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying 
prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet - name: Confirming package built run: ls -ltr packaging/deb/ | grep bor - - name: Pre-Release bor Packages + - name: Release bor Packages uses: softprops/action-gh-release@v1 with: tag_name: ${{ env.GIT_TAG }} prerelease: true files: | packaging/deb/bor**.deb - binary/bo** + binary/bo** \ No newline at end of file diff --git a/packaging/templates/package_scripts/postinst b/packaging/templates/package_scripts/postinst index 761ee29f26..7272b4b1aa 100755 --- a/packaging/templates/package_scripts/postinst +++ b/packaging/templates/package_scripts/postinst @@ -2,6 +2,11 @@ # This is a postinstallation script so the service can be configured and started when requested # sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor -sudo mkdir -p /var/lib/bor -sudo chown -R bor /var/lib/bor +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." +else + mkdir -p /var/lib/bor + sudo chown -R bor /var/lib/bor +fi sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postinst.profile b/packaging/templates/package_scripts/postinst.profile index 80b8381203..e9a497906d 100755 --- a/packaging/templates/package_scripts/postinst.profile +++ b/packaging/templates/package_scripts/postinst.profile @@ -1,6 +1,11 @@ #!/bin/bash # This is a postinstallation script so the service can be configured and started when requested # -sudo mkdir -p /var/lib/bor -sudo chown -R bor /var/lib/bor +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." 
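+  # Directory already present; its contents and ownership are left unchanged here,
+  # since the chown below runs only when the directory is first created.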
+else
+  mkdir -p /var/lib/bor
+  sudo chown -R bor /var/lib/bor
+fi
 sudo systemctl daemon-reload

From 4d06349d954c10f4207695d3ca358a0a0ad1f177 Mon Sep 17 00:00:00 2001
From: Pratik Patil
Date: Thu, 1 Dec 2022 09:20:17 +0530
Subject: [PATCH 17/56] added a toml configuration file with comments
 describing each flag (#607)

* added a toml configuration file with comments describing each flag

* internal/cli/server: update flag description

* docs/cli: update example config and description of flags

* docs: update new-cli docs

Co-authored-by: Manav Darji
---
 docs/README.md               |  12 ++-
 docs/cli/example_config.toml | 141 +++++++++++++++++++++++++++++
 docs/cli/server.md           |  14 ++--
 docs/config.md               | 146 -----------------------------
 internal/cli/server/flags.go |  10 +--
 5 files changed, 159 insertions(+), 164 deletions(-)
 create mode 100644 docs/cli/example_config.toml
 delete mode 100644 docs/config.md

diff --git a/docs/README.md b/docs/README.md
index 5ebdbd7e26..45021e8c7f 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,9 +1,7 @@
 # Documentation
 
-- [Command-line-interface](./cli)
-
-- [Configuration file](./config.md)
+- [The new command line interface](./cli)
 
 ## Additional notes
 
@@ -13,8 +11,14 @@
 $ bor server
 ```
 
-- Toml files used earlier just to configure static/trusted nodes are being deprecated. Instead, a toml file now can be used instead of flags and can contain all configuration for the node to run. The link to a sample config file is given above. To simply run bor with a configuration file, the following command can be used.
+- The `bor dumpconfig` sub-command prints the default configuration, in TOML format, to the terminal. One can pipe (`>`) this output to a file (say `config.toml`) and use that file to start bor.
+
+- A TOML file can now be used instead of flags and can contain all the configuration for the node to run. To simply run bor with a configuration file, the following command can be used.
 
 ```
 $ bor server --config <path_to_config.toml>
 ```
+
+- You can find an example config file [here](./cli/example_config.toml) to learn what each flag is used for, along with its default value and the recommended values for different networks.
+
+- Toml files used earlier (with the `--config` flag) to configure additional fields (like static and trusted nodes) are being deprecated and have been converted to flags.
diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml
new file mode 100644
index 0000000000..bc48b752d5
--- /dev/null
+++ b/docs/cli/example_config.toml
@@ -0,0 +1,141 @@
+# This configuration file is for reference and learning purposes only.
+# The default values of the flags are provided below (except for a few flags whose custom defaults are explicitly mentioned).
+# Recommended values for mainnet and/or mumbai are also provided.
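+#
+# A quick way to generate and use a file like this one (illustrative commands; the
+# file name is an assumption): `bor dumpconfig > config.toml` writes the defaults
+# to config.toml, and `bor server --config config.toml` starts bor with that file.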
+
+chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") or path to a genesis file
+identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname)
+log-level = "INFO" # Set log level for the server
+datadir = "var/lib/bor" # Path of the data directory to store information
+keystore = "" # Path of the directory where keystores are located
+syncmode = "full" # Blockchain sync mode (only "full" sync supported)
+gcmode = "full" # Blockchain garbage collection mode ("full", "archive")
+snapshot = true # Enables the snapshot-database mode
+"bor.logs" = false # Enables bor log retrieval
+ethstats = "" # Reporting URL of an ethstats service (nodename:secret@host:port)
+
+["eth.requiredblocks"] # Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)
+  "31000000" = "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e"
+  "32000000" = "0x875500011e5eecc0c554f95d07b31cf59df4ca2505f4dbbfffa7d4e4da917c68"
+
+[p2p]
+  maxpeers = 50 # Maximum number of network peers (network disabled if set to 0)
+  maxpendpeers = 50 # Maximum number of pending connection attempts
+  bind = "0.0.0.0" # Network binding address
+  port = 30303 # Network listening port
+  nodiscover = false # Disables the peer discovery mechanism (manual peer addition)
+  nat = "any" # NAT port mapping mechanism (any|none|upnp|pmp|extip:<IP>)
+  [p2p.discovery]
+    v5disc = false # Enables the experimental RLPx V5 (Topic Discovery) mechanism
+    bootnodes = [] # Comma separated enode URLs for P2P discovery bootstrap
+    bootnodesv4 = [] # List of initial v4 bootnodes
+    bootnodesv5 = [] # List of initial v5 bootnodes
+    static-nodes = [] # List of static nodes
+    trusted-nodes = [] # List of trusted nodes
+    dns = [] # List of enrtree:// URLs which will be queried for nodes to connect to
+
+[heimdall]
+  url = "http://localhost:1317" # URL of Heimdall service
+  "bor.without" = false # Run without Heimdall service (for testing purposes)
+  grpc-address = "" # Address of Heimdall gRPC service
+
+[txpool]
+  locals = [] # Comma separated accounts to treat as locals (no flush, priority inclusion)
+  nolocals = false # Disables price exemptions for locally submitted transactions
+  journal = "transactions.rlp" # Disk journal for local transactions to survive node restarts
+  rejournal = "1h0m0s" # Time interval to regenerate the local transaction journal
+  pricelimit = 1 # Minimum gas price limit to enforce for acceptance into the pool (mainnet = 30000000000)
+  pricebump = 10 # Price bump percentage to replace an already existing transaction
+  accountslots = 16 # Minimum number of executable transaction slots guaranteed per account
+  globalslots = 32768 # Maximum number of executable transaction slots for all accounts
+  accountqueue = 16 # Maximum number of non-executable transaction slots permitted per account
+  globalqueue = 32768 # Maximum number of non-executable transaction slots for all accounts
+  lifetime = "3h0m0s" # Maximum amount of time non-executable transactions are queued
+
+[miner]
+  mine = false # Enable mining
+  etherbase = "" # Public address for block mining rewards
+  extradata = "" # Block extra data set by the miner (default = client version)
+  gaslimit = 30000000 # Target gas ceiling for mined blocks
+  gasprice = "1000000000" # Minimum gas price for mining a transaction (recommended for mainnet = 30000000000, default suitable for mumbai/devnet)
+
+[jsonrpc]
+  ipcdisable = false # Disable the IPC-RPC server
+  ipcpath = "" # Filename for IPC socket/pipe within the datadir (explicit paths escape it)
+  gascap = 50000000 # Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)
+  txfeecap = 5.0 # Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)
+  [jsonrpc.http]
+    enabled = false # Enable the HTTP-RPC server
+    port = 8545 # http.port
+    prefix = "" # http.rpcprefix
+    host = "localhost" # HTTP-RPC server listening interface
+    api = ["eth", "net", "web3", "txpool", "bor"] # APIs offered over the HTTP-RPC interface
+    vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
+    corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced)
+  [jsonrpc.ws]
+    enabled = false # Enable the WS-RPC server
+    port = 8546 # WS-RPC server listening port
+    prefix = "" # HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.
+    host = "localhost" # ws.addr
+    api = ["net", "web3"] # APIs offered over the WS-RPC interface
+    origins = ["localhost"] # Origins from which to accept websocket requests
+  [jsonrpc.graphql]
+    enabled = false # Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.
+    port = 0 #
+    prefix = "" #
+    host = "" #
+    vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
+    corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced)
+
+[gpo]
+  blocks = 20 # Number of recent blocks to check for gas prices
+  percentile = 60 # Suggested gas price is the given percentile of a set of recent transaction gas prices
+  maxprice = "5000000000000" # Maximum gas price that will be recommended by gpo
+  ignoreprice = "2" # Gas price below which gpo will ignore transactions (recommended for mainnet = 30000000000, default suitable for mumbai/devnet)
+
+[telemetry]
+  metrics = false # Enable metrics collection and reporting
+  expensive = false # Enable expensive metrics collection and reporting
+  prometheus-addr = "127.0.0.1:7071" # Address for Prometheus Server
+  opencollector-endpoint = "127.0.0.1:4317" # OpenCollector Endpoint (host:port)
+  [telemetry.influx]
+    influxdb = false # Enable metrics export/push to an external InfluxDB database (v1)
+    endpoint = "" # InfluxDB API endpoint to report metrics to
+    database = "" # InfluxDB database name to push reported metrics to
+    username = "" # Username to authorize access to the database
+    password = "" # Password to authorize access to the database
+    influxdbv2 = false # Enable metrics export/push to an external InfluxDB v2 database
+    token = "" # Token to authorize access to the database (v2 only)
+    bucket = "" # InfluxDB bucket name to push reported metrics to (v2 only)
+    organization = "" # InfluxDB organization name (v2 only)
+    [telemetry.influx.tags] # Comma-separated InfluxDB tags (key/values) attached to all measurements
+      cloud = "aws"
+      host = "annon-host"
+      ip = "99.911.221.66"
+      region = "us-north-1"
+
+[cache]
+  cache = 1024 # Megabytes of memory allocated to internal caching (recommended for mainnet = 4096, default suitable for mumbai/devnet)
+  gc = 25 # Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)
+  snapshot = 10 # Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)
+  database = 50 # Percentage of cache memory allowance to use for database IO
+  trie = 15 # Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)
+  journal = "triecache" # Disk journal directory for trie cache to survive node restarts
+  rejournal = "1h0m0s" # Time interval to regenerate the trie cache journal
+  noprefetch = false # Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)
+  preimages = false # Enable recording the SHA3/keccak preimages of trie keys
+  txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)
+  triesinmemory = 128 # Number of block states (tries) to keep in memory
+
+[accounts]
+  unlock = [] # Comma separated list of accounts to unlock
+  password = "" # Password file to use for non-interactive password input
+  allow-insecure-unlock = false # Allow insecure account unlocking when account-related RPCs are exposed by http
+  lightkdf = false # Reduce key-derivation RAM & CPU usage at some expense of KDF strength
+  disable-bor-wallet = true # Disable the personal wallet endpoints
+
+[grpc]
+  addr = ":3131" # Address and port to bind the GRPC server
+
+[developer]
+  dev = false # Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled
+  period = 0 # Block period to use in developer mode (0 = mine only if transaction pending)
diff --git a/docs/cli/server.md b/docs/cli/server.md
index d52b135fa3..4c291a74b9 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -4,7 +4,7 @@ The ```bor server``` command runs the Bor client.
 
 ## Options
 
-- ```chain```: Name of the chain to sync
+- ```chain```: Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file
 
 - ```identity```: Name/Identity of the node
 
@@ -12,7 +12,7 @@ The ```bor server``` command runs the Bor client.
 
 - ```datadir```: Path of the data directory to store information
 
-- ```keystore```: Path of the directory to store keystores
+- ```keystore```: Path of the directory where keystores are located
 
 - ```config```: File for the config file
 
@@ -30,8 +30,6 @@ The ```bor server``` command runs the Bor client.
 
 - ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose)
 
-- ```bor.heimdallgRPC```: Address of Heimdall gRPC service
-
 - ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port)
 
 - ```gpo.blocks```: Number of recent blocks to check for gas prices
 
@@ -80,8 +78,6 @@ The ```bor server``` command runs the Bor client.
 
 - ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys
 
-- ```cache.triesinmemory```: Number of block states (tries) to keep in memory (default = 128)
-
 - ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)
 
 ### JsonRPC Options
 
@@ -136,7 +132,7 @@ The ```bor server``` command runs the Bor client.
 
 - ```maxpeers```: Maximum number of network peers (network disabled if set to 0)
 
-- ```maxpendpeers```: Maximum number of pending connection attempts (defaults used if set to 0)
+- ```maxpendpeers```: Maximum number of pending connection attempts
 
 - ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:<IP>)
 
@@ -148,11 +144,11 @@
- ```mine```: Enable mining -- ```miner.etherbase```: Public address for block mining rewards (default = first account) +- ```miner.etherbase```: Public address for block mining rewards - ```miner.extradata```: Block extra data set by the miner (default = client version) -- ```miner.gaslimit```: Target gas ceiling for mined blocks +- ```miner.gaslimit```: Target gas ceiling (gas limit) for mined blocks - ```miner.gasprice```: Minimum gas price for mining a transaction diff --git a/docs/config.md b/docs/config.md deleted file mode 100644 index 57f4c25fef..0000000000 --- a/docs/config.md +++ /dev/null @@ -1,146 +0,0 @@ - -# Config - -- The `bor dumpconfig` command prints the default configurations, in the TOML format, on the terminal. - - One can `pipe (>)` this to a file (say `config.toml`) and use it to start bor. - - Command to provide a config file: `bor server -config config.toml` -- Bor uses TOML, HCL, and JSON format config files. -- This is the format of the config file in TOML: - - **NOTE: The values of these following flags are just for reference** - - `config.toml` file: -``` -chain = "mainnet" -identity = "myIdentity" -log-level = "INFO" -datadir = "/var/lib/bor/data" -keystore = "path/to/keystore" -syncmode = "full" -gcmode = "full" -snapshot = true -ethstats = "" - -["eth.requiredblocks"] - -[p2p] -maxpeers = 50 -maxpendpeers = 50 -bind = "0.0.0.0" -port = 30303 -nodiscover = false -nat = "any" - -[p2p.discovery] -v5disc = false -bootnodes = ["enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"] -bootnodesv4 = [] -bootnodesv5 = ["enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA"] -static-nodes = ["enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303"] -trusted-nodes = ["enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303"] -dns = [] - -[heimdall] -url = "http://localhost:1317" -"bor.without" = false - -[txpool] -locals = ["$ADDRESS1", "$ADDRESS2"] -nolocals = false -journal = "" -rejournal = "1h0m0s" -pricelimit = 30000000000 -pricebump = 10 -accountslots = 16 -globalslots = 32768 -accountqueue = 16 -globalqueue = 32768 -lifetime = "3h0m0s" - -[miner] -mine = false -etherbase = "" -extradata = "" -gaslimit = 20000000 -gasprice = "30000000000" - -[jsonrpc] -ipcdisable = false -ipcpath = "/var/lib/bor/bor.ipc" -gascap = 50000000 -txfeecap = 5e+00 - -[jsonrpc.http] -enabled = false -port = 8545 -prefix = "" -host = "localhost" -api = ["eth", "net", "web3", "txpool", "bor"] -vhosts = ["*"] -corsdomain = ["*"] - -[jsonrpc.ws] -enabled = false -port = 8546 -prefix = "" -host = "localhost" -api = ["web3", "net"] -vhosts = ["*"] -corsdomain = ["*"] - -[jsonrpc.graphql] -enabled = false -port = 0 -prefix = "" -host = "" -api = [] 
-vhosts = ["*"] -corsdomain = ["*"] - -[gpo] -blocks = 20 -percentile = 60 -maxprice = "5000000000000" -ignoreprice = "2" - -[telemetry] -metrics = false -expensive = false -prometheus-addr = "" -opencollector-endpoint = "" - -[telemetry.influx] -influxdb = false -endpoint = "" -database = "" -username = "" -password = "" -influxdbv2 = false -token = "" -bucket = "" -organization = "" - -[cache] -cache = 1024 -gc = 25 -snapshot = 10 -database = 50 -trie = 15 -journal = "triecache" -rejournal = "1h0m0s" -noprefetch = false -preimages = false -txlookuplimit = 2350000 - -[accounts] -unlock = ["$ADDRESS1", "$ADDRESS2"] -password = "path/to/password.txt" -allow-insecure-unlock = false -lightkdf = false -disable-bor-wallet = false - -[grpc] -addr = ":3131" - -[developer] -dev = false -period = 0 -``` diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 79aec3157a..550d76942b 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -11,7 +11,7 @@ func (c *Command) Flags() *flagset.Flagset { f.StringFlag(&flagset.StringFlag{ Name: "chain", - Usage: "Name of the chain to sync", + Usage: "Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file", Value: &c.cliConfig.Chain, Default: c.cliConfig.Chain, }) @@ -35,7 +35,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.StringFlag(&flagset.StringFlag{ Name: "keystore", - Usage: "Path of the directory to store keystores", + Usage: "Path of the directory where keystores are located", Value: &c.cliConfig.KeyStoreDir, }) f.StringFlag(&flagset.StringFlag{ @@ -176,7 +176,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.StringFlag(&flagset.StringFlag{ Name: "miner.etherbase", - Usage: "Public address for block mining rewards (default = first account)", + Usage: "Public address for block mining rewards", Value: &c.cliConfig.Sealer.Etherbase, Default: c.cliConfig.Sealer.Etherbase, Group: "Sealer", @@ -190,7 +190,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "miner.gaslimit", - Usage: "Target gas ceiling for mined blocks", + Usage: "Target gas ceiling (gas limit) for mined blocks", Value: &c.cliConfig.Sealer.GasCeil, Default: c.cliConfig.Sealer.GasCeil, Group: "Sealer", @@ -485,7 +485,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "maxpendpeers", - Usage: "Maximum number of pending connection attempts (defaults used if set to 0)", + Usage: "Maximum number of pending connection attempts", Value: &c.cliConfig.P2P.MaxPendPeers, Default: c.cliConfig.P2P.MaxPendPeers, Group: "P2P", From dab31611b8bf94005d5ba5ca900df2a8137c2b9d Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Thu, 1 Dec 2022 15:50:00 -0600 Subject: [PATCH 18/56] Adding of 0.3.0 package changes, control file updates, postinst changes, and packager update --- .github/workflows/packager.yml | 839 +++++++++++++----- packaging/templates/package_scripts/control | 2 +- .../templates/package_scripts/control.arm64 | 2 +- .../package_scripts/control.profile.amd64 | 2 +- .../package_scripts/control.profile.arm64 | 2 +- .../package_scripts/control.validator | 2 +- .../package_scripts/control.validator.arm64 | 2 +- packaging/templates/package_scripts/postinst | 9 +- .../package_scripts/postinst.profile | 9 +- 9 files changed, 649 insertions(+), 220 deletions(-) diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml index 998008ede8..5c59b1751a 100644 --- a/.github/workflows/packager.yml +++ b/.github/workflows/packager.yml @@ 
-2,6 +2,10 @@ name: packager on: push: + branches: + - 'main' + paths: + - '**' tags: - 'v*.*.*' - 'v*.*.*-*' @@ -37,10 +41,15 @@ jobs: - name: removing systemd file for binary run: rm -rf lib/systemd/system/bor.service - - name: Creating package for binary only bor - run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-amd64 + - name: Creating package for binary for bor ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + - name: Running package build - run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-amd64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 - name: Removing the bor binary run: rm -rf packaging/deb/bor/usr/bin/bor @@ -50,113 +59,317 @@ jobs: - name: Copying systemd file run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service - - name: Prepping Mumbai Sentry Node Profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64 - - name: Putting toml - run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying systemd file - run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/lib/systemd/system/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Running package build for sentry on amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-amd64 + - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ 
env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai + - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mumbai - - name: Setting up Mainnet Sentry Config for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building Sentry for amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-amd64 + - name: Setting up ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ 
env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet + - name: Building ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: sentry + NETWORK: mainnet - - name: Prepping Bor Validator Profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 - - name: Copying control file for validator on amd64 - run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying Prerm script - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying Postrm script - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building bor 
validator on amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-amd64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Prerm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying Postrm script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mumbai - - name: Prepping Bor Mainnet Validator for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp 
packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Building bor validator on amd64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-amd64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: validator + NETWORK: mainnet - - name: Creating mumbai archive node profile for amd64 - run: cp -rp 
packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying profile preinst file - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying profile prerm file - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying profile postrm file - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Building profile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb 
--build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mumbai - - name: Creating mainnet archive node profile for amd64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-amd64/DEBIAN/control - - name: Copying profile preinst file - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/preinst - - name: Copying the profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postinst - - name: Copying profile prerm file - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/prerm - - name: Copying profile postrm file - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64/var/lib/bor/ - - name: Building porfile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-amd64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile preinst file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile prerm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying profile postrm file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml 
packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + NODE: archive + NETWORK: mainnet - name: Cleaning build directory for arm64 build run: make clean @@ -177,11 +390,17 @@ jobs: run: cp -rp build/bin/bor packaging/deb/bor/usr/bin/ - name: Creating package for binary only bor - run: cp -rp packaging/deb/bor packaging/deb/bor-v0.3.0-beta-arm64 + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-v0.3.0-beta-arm64/DEBIAN/control + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 - name: Running package build - run: dpkg-deb --build --root-owner-group packaging/deb/bor-v0.3.0-beta-arm64 + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 - name: Removing the bor binary run: rm -rf packaging/deb/bor/usr/bin/bor @@ -192,123 +411,323 @@ jobs: - name: Updating the control file to use with the arm64 profile run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control - - name: Setting up Mumbai Sentry Config for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building Sentry for arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64 + - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ 
env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mumbai - - name: Setting up Mainnet Sentry Config for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64 - - name: Copying control file - run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Putting toml - run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 
packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building Sentry for arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-sentry-config_v0.3.0-beta-arm64 + - name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: sentry + NETWORK: mainnet - - name: Prepping Bor Mumbai Validator for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp 
packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-sentry-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building bor validator on arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-validator-config_v0.3.0-beta-arm64 + - name: Prepping Bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai + - name: Building bor ${{ 
env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mumbai - - name: Prepping Bor Mainnet Validator for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 - - name: Copying control file for validator on arm64 - run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying the preinst - run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/preinst - - name: Copying Postinstall script - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying the prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying the postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying config.toml for validator for bor - run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Building bor validator on arm64 - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-validator-config_v0.3.0-beta-arm64 + - name: Prepping Bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.validator.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying Postinstall script for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ 
env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying config.toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/sentry/validator/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: validator + NETWORK: mainnet - name: Updating the control file to use with the arm64 profile run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control - - name: Creating mumbai archive node profile for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying over profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Building profile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mumbai-archive-config_v0.3.0-beta-arm64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + 
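Note on naming: every profile step in this workflow derives the package directory the same way from the four per-step env values. As a reference, a minimal bash sketch of the convention (GIT_TAG is an assumption here; the workflow is expected to export it elsewhere, and v0.3.1 is only an illustrative value):

  #!/bin/bash
  # Hypothetical illustration of the deb naming convention used by these steps.
  GIT_TAG="v0.3.1"
  for NETWORK in mumbai mainnet; do
    for NODE in sentry validator archive; do
      for ARCH in amd64 arm64; do
        echo "packaging/deb/bor-${NETWORK}-${NODE}-config_${GIT_TAG}-${ARCH}"
      done
    done
  done

Each such directory is a standard dpkg staging tree (DEBIAN/control plus the filesystem payload under usr/ and var/lib/bor/), which dpkg-deb --build --root-owner-group then turns into a .deb.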
env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/testnet-v4/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mumbai - - name: Creating mainnet archive node profile for arm64 - run: cp -rp packaging/deb/bor packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 - - name: Copying profile control file - run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/control - - name: Copying over profile postinst - run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postinst - - name: Copying prerm - run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/prerm - - name: Copying postrm - run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/DEBIAN/postrm - - name: Copying the toml - run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64/var/lib/bor/ - - name: Building porfile package - run: dpkg-deb --build --root-owner-group packaging/deb/bor-mainnet-archive-config_v0.3.0-beta-arm64 + - name: Creating bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying over profile postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ 
env.ARCH }}/DEBIAN/postrm + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Copying the toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} + run: cp -rp packaging/templates/mainnet-v1/archive/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/ + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet + - name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile + run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + NODE: archive + NETWORK: mainnet - name: Confirming package built run: ls -ltr packaging/deb/ | grep bor - - name: Pre-Release bor Packages + - name: Release bor Packages uses: softprops/action-gh-release@v1 with: tag_name: ${{ env.GIT_TAG }} prerelease: true files: | packaging/deb/bor**.deb - binary/bo** + binary/bo** \ No newline at end of file diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index ed0ff46c06..faa6df2712 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 2c624a4c45..e0590d7c6a 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 087dabb1f6..0be61b7427 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 9de0c50253..29d8328b8a 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index f3f5652a31..51b4d86ef5 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 97712830ff..6813fba631 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.0 +Version: 0.3.1 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/postinst b/packaging/templates/package_scripts/postinst index 761ee29f26..7272b4b1aa 100755 --- 
a/packaging/templates/package_scripts/postinst +++ b/packaging/templates/package_scripts/postinst @@ -2,6 +2,11 @@ # This is a postinstallation script so the service can be configured and started when requested # sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent bor -sudo mkdir -p /var/lib/bor -sudo chown -R bor /var/lib/bor +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." +else + mkdir -p /var/lib/bor + sudo chown -R bor /var/lib/bor +fi sudo systemctl daemon-reload diff --git a/packaging/templates/package_scripts/postinst.profile b/packaging/templates/package_scripts/postinst.profile index 80b8381203..e9a497906d 100755 --- a/packaging/templates/package_scripts/postinst.profile +++ b/packaging/templates/package_scripts/postinst.profile @@ -1,6 +1,11 @@ #!/bin/bash # This is a postinstallation script so the service can be configured and started when requested # -sudo mkdir -p /var/lib/bor -sudo chown -R bor /var/lib/bor +if [ -d "/var/lib/bor" ] +then + echo "Directory /var/lib/bor exists." +else + mkdir -p /var/lib/bor + sudo chown -R bor /var/lib/bor +fi sudo systemctl daemon-reload From e3303294b4d14f31df742a3ed277de13303bf210 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Sun, 4 Dec 2022 10:02:55 +0530 Subject: [PATCH 19/56] added ancient datadir flag and toml field, need to decide on default value and update the conversion script --- internal/cli/server/config.go | 8 ++++++++ internal/cli/server/flags.go | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 2e0a8f21f5..6bbe3d781a 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -53,6 +53,9 @@ type Config struct { // DataDir is the directory to store the state in DataDir string `hcl:"datadir,optional" toml:"datadir,optional"` + // Ancient is the directory to store ancient chain segments in (default = inside chaindata) + Ancient string `hcl:"ancient,optional" toml:"ancient,optional"` + // KeyStoreDir is the directory to store keystores KeyStoreDir string `hcl:"keystore,optional" toml:"keystore,optional"` @@ -398,6 +401,7 @@ func DefaultConfig() *Config { RequiredBlocks: map[string]string{}, LogLevel: "INFO", DataDir: DefaultDataDir(), + Ancient: "", P2P: &P2PConfig{ MaxPeers: 50, MaxPendPeers: 50, @@ -879,6 +883,10 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.BorLogs = c.BorLogs n.DatabaseHandles = dbHandles + if c.Ancient != "" { + n.DatabaseFreezer = c.Ancient + } + return &n, nil } diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 550d76942b..a53e1c3e46 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -33,6 +33,12 @@ func (c *Command) Flags() *flagset.Flagset { Value: &c.cliConfig.DataDir, Default: c.cliConfig.DataDir, }) + f.StringFlag(&flagset.StringFlag{ + Name: "datadir.ancient", + Usage: "Data directory for ancient chain segments (default = inside chaindata)", + Value: &c.cliConfig.Ancient, + Default: c.cliConfig.Ancient, + }) f.StringFlag(&flagset.StringFlag{ Name: "keystore", Usage: "Path of the directory where keystores are located", From 531d67a53757fb5ab6776861eaa4f1e933cf098a Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Mon, 5 Dec 2022 07:52:26 +0530 Subject: [PATCH 20/56] updated toml files with ancient field --- builder/files/config.toml | 1 + docs/cli/example_config.toml | 1 + packaging/templates/mainnet-v1/archive/config.toml | 1 +
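The postinst/postinst.profile rework above only assigns ownership when it creates /var/lib/bor; a pre-existing directory keeps its current owner. A sketch of the resulting behaviour, with the sudo usage made consistent (the patched script itself calls plain mkdir inside the else branch):

  #!/bin/bash
  # Sketch of the post-patch postinst logic. dpkg runs maintainer scripts as
  # root, so the sudo prefixes are defensive rather than strictly required.
  if [ -d "/var/lib/bor" ]; then
      echo "Directory /var/lib/bor exists."   # existing ownership untouched
  else
      sudo mkdir -p /var/lib/bor
      sudo chown -R bor /var/lib/bor          # chown only on first creation
  fi
  sudo systemctl daemon-reload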
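Patch 19 threads the new ancient directory through to the freezer: buildEth only sets n.DatabaseFreezer when the field is non-empty, so an empty value keeps ancient data inside chaindata. A hypothetical invocation of the resulting flag (this assumes the standard `bor server` entry point; /mnt/ancient is an illustrative mount point, not a default):

  #!/bin/bash
  # Omitting --datadir.ancient keeps the freezer inside chaindata.
  bor server \
      --datadir /var/lib/bor/data \
      --datadir.ancient /mnt/ancient

The TOML equivalent is the commented-out ancient = "" key that patch 20 adds to each of the templates listed here.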
packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml | 1 + packaging/templates/mainnet-v1/sentry/validator/bor/config.toml | 1 + packaging/templates/mainnet-v1/without-sentry/bor/config.toml | 1 + packaging/templates/testnet-v4/archive/config.toml | 1 + packaging/templates/testnet-v4/sentry/sentry/bor/config.toml | 1 + packaging/templates/testnet-v4/sentry/validator/bor/config.toml | 1 + packaging/templates/testnet-v4/without-sentry/bor/config.toml | 1 + 10 files changed, 10 insertions(+) diff --git a/builder/files/config.toml b/builder/files/config.toml index f79b892e7f..e2267469bb 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -6,6 +6,7 @@ chain = "mainnet" # identity = "Pratiks-MacBook-Pro.local" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "/var/lib/bor/keystore" syncmode = "full" # gcmode = "full" diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index bc48b752d5..cdf2a989d7 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -6,6 +6,7 @@ chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") o identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname) log-level = "INFO" # Set log level for the server datadir = "var/lib/bor" # Path of the data directory to store information +ancient = "" # Data directory for ancient chain segments (default = inside chaindata) keystore = "" # Path of the directory where keystores are located syncmode = "full" # Blockchain sync mode (only "full" sync supported) gcmode = "full" # Blockchain garbage collection mode ("full", "archive") diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 8989a1b7b5..a0eda44c15 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -2,6 +2,7 @@ chain = "mainnet" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "" syncmode = "full" gcmode = "archive" diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index d91e12b31e..b6d6b34f4d 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -2,6 +2,7 @@ chain = "mainnet" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "" syncmode = "full" # gcmode = "full" diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index c9f583aae3..e0a9be320f 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -4,6 +4,7 @@ chain = "mainnet" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "$BOR_DIR/keystore" syncmode = "full" # gcmode = "full" diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 5c4a057b91..b316ce4e8f 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -4,6 +4,7 @@ chain = "mainnet" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = 
"$BOR_DIR/keystore" syncmode = "full" # gcmode = "full" diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 5b7cbdd78e..ce0d0ff983 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -2,6 +2,7 @@ chain = "mumbai" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "" syncmode = "full" gcmode = "archive" diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index f98e04ff42..3655478d45 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -2,6 +2,7 @@ chain = "mumbai" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "" syncmode = "full" # gcmode = "full" diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index e2a404263d..9d30428205 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -4,6 +4,7 @@ chain = "mumbai" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "$BOR_DIR/keystore" syncmode = "full" # gcmode = "full" diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 567c9a5b0a..0e0aeae3a1 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -4,6 +4,7 @@ chain = "mumbai" # identity = "node_name" # log-level = "INFO" datadir = "/var/lib/bor/data" +# ancient = "" # keystore = "$BOR_DIR/keystore" syncmode = "full" # gcmode = "full" From 57075d000d1a6396c33d42e5da97be19bfcfb6f1 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Mon, 5 Dec 2022 19:21:33 +0530 Subject: [PATCH 21/56] Add support for new flags in new config.toml, which were present in old config.toml (#612) * added HTTPTimeouts, and TrieTimeout flag in new tol, from old toml * added RAW fields for these time.Duration flags * updated the conversion script to support these extra 4 flags * removed hcl and json config tests as we are only supporting toml config files * updated toml files with cache.timeout field * updated toml files with jsonrpc.timeouts field * tests/bor: expect a call for latest checkpoint * tests/bor: expect a call for latest checkpoint * packaging/templates: update cache values for archive nodes Co-authored-by: Manav Darji --- builder/files/config.toml | 57 ++++++++++--------- docs/cli/example_config.toml | 5 ++ internal/cli/dumpconfig.go | 4 ++ internal/cli/server/config.go | 50 ++++++++++++++++ internal/cli/server/config_test.go | 33 ----------- internal/cli/server/testdata/test.hcl | 13 ----- internal/cli/server/testdata/test.json | 12 ---- .../templates/mainnet-v1/archive/config.toml | 11 +++- .../mainnet-v1/sentry/sentry/bor/config.toml | 5 ++ .../sentry/validator/bor/config.toml | 5 ++ .../mainnet-v1/without-sentry/bor/config.toml | 5 ++ .../templates/testnet-v4/archive/config.toml | 13 +++-- .../testnet-v4/sentry/sentry/bor/config.toml | 5 ++ .../sentry/validator/bor/config.toml | 5 ++ .../testnet-v4/without-sentry/bor/config.toml | 5 ++ scripts/getconfig.go | 30 
+++++++++- scripts/getconfig.sh | 48 ++++++++++++++++ tests/bor/bor_test.go | 26 ++++++--- 18 files changed, 233 insertions(+), 99 deletions(-) delete mode 100644 internal/cli/server/testdata/test.hcl delete mode 100644 internal/cli/server/testdata/test.json diff --git a/builder/files/config.toml b/builder/files/config.toml index e2267469bb..fc95cd1a64 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -61,32 +61,36 @@ syncmode = "full" # [jsonrpc] - # ipcdisable = false - # ipcpath = "" - # gascap = 50000000 - # txfeecap = 5.0 - # [jsonrpc.http] - # enabled = false - # port = 8545 - # prefix = "" - # host = "localhost" - # api = ["eth", "net", "web3", "txpool", "bor"] - # vhosts = ["*"] - # corsdomain = ["*"] - # [jsonrpc.ws] - # enabled = false - # port = 8546 - # prefix = "" - # host = "localhost" - # api = ["web3", "net"] - # origins = ["*"] - # [jsonrpc.graphql] - # enabled = false - # port = 0 - # prefix = "" - # host = "" - # vhosts = ["*"] - # corsdomain = ["*"] +# ipcdisable = false +# ipcpath = "" +# gascap = 50000000 +# txfeecap = 5.0 +# [jsonrpc.http] +# enabled = false +# port = 8545 +# prefix = "" +# host = "localhost" +# api = ["eth", "net", "web3", "txpool", "bor"] +# vhosts = ["*"] +# corsdomain = ["*"] +# [jsonrpc.ws] +# enabled = false +# port = 8546 +# prefix = "" +# host = "localhost" +# api = ["web3", "net"] +# origins = ["*"] +# [jsonrpc.graphql] +# enabled = false +# port = 0 +# prefix = "" +# host = "" +# vhosts = ["*"] +# corsdomain = ["*"] +# [jsonrpc.timeouts] +# read = "30s" +# write = "30s" +# idle = "2m0s" [gpo] # blocks = 20 @@ -122,6 +126,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" [accounts] # allow-insecure-unlock = true diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index cdf2a989d7..64ef60ae12 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -86,6 +86,10 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec host = "" # vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. 
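The [jsonrpc.timeouts] table documented just below accepts Go duration strings such as "30s" or "2m0s". A sketch of enabling it on an existing node, assuming the packaged config lives at /var/lib/bor/config.toml (the path and the values, which mirror the documented defaults, are assumptions):

  #!/bin/bash
  # Append the new timeout table; TOML nests it under [jsonrpc] by its name.
  cat >> /var/lib/bor/config.toml <<'EOF'
  [jsonrpc.timeouts]
  read = "30s"
  write = "30s"
  idle = "2m0s"
  EOF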
corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) + [jsonrpc.timeouts] + read = "30s" + write = "30s" + idle = "2m0s" [gpo] blocks = 20 # Number of recent blocks to check for gas prices @@ -126,6 +130,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec preimages = false # Enable recording the SHA3/keccak preimages of trie keys txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) triesinmemory = 128 # Number of block states (tries) to keep in memory + timeout = "1h0m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory [accounts] unlock = [] # Comma separated list of accounts to unlock diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go index dad0be923d..a748af3357 100644 --- a/internal/cli/dumpconfig.go +++ b/internal/cli/dumpconfig.go @@ -52,12 +52,16 @@ func (c *DumpconfigCommand) Run(args []string) int { userConfig := command.GetConfig() // convert the big.Int and time.Duration fields to their corresponding Raw fields + userConfig.JsonRPC.HttpTimeout.ReadTimeoutRaw = userConfig.JsonRPC.HttpTimeout.ReadTimeout.String() + userConfig.JsonRPC.HttpTimeout.WriteTimeoutRaw = userConfig.JsonRPC.HttpTimeout.WriteTimeout.String() + userConfig.JsonRPC.HttpTimeout.IdleTimeoutRaw = userConfig.JsonRPC.HttpTimeout.IdleTimeout.String() userConfig.TxPool.RejournalRaw = userConfig.TxPool.Rejournal.String() userConfig.TxPool.LifeTimeRaw = userConfig.TxPool.LifeTime.String() userConfig.Sealer.GasPriceRaw = userConfig.Sealer.GasPrice.String() userConfig.Gpo.MaxPriceRaw = userConfig.Gpo.MaxPrice.String() userConfig.Gpo.IgnorePriceRaw = userConfig.Gpo.IgnorePrice.String() userConfig.Cache.RejournalRaw = userConfig.Cache.Rejournal.String() + userConfig.Cache.TrieTimeoutRaw = userConfig.Cache.TrieTimeout.String() if err := toml.NewEncoder(os.Stdout).Encode(userConfig); err != nil { c.UI.Error(err.Error()) diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 6bbe3d781a..5657e85b20 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" ) type Config struct { @@ -238,6 +239,8 @@ type JsonRPCConfig struct { // Graphql has the json-rpc graphql related settings Graphql *APIConfig `hcl:"graphql,block" toml:"graphql,block"` + + HttpTimeout *HttpTimeouts `hcl:"timeouts,block" toml:"timeouts,block"` } type GRPCConfig struct { @@ -271,6 +274,33 @@ type APIConfig struct { Origins []string `hcl:"origins,optional" toml:"origins,optional"` } +// Used from rpc.HTTPTimeouts +type HttpTimeouts struct { + // ReadTimeout is the maximum duration for reading the entire + // request, including the body. + // + // Because ReadTimeout does not let Handlers make per-request + // decisions on each request body's acceptable deadline or + // upload rate, most users will prefer to use + // ReadHeaderTimeout. It is valid to use them both. + ReadTimeout time.Duration `hcl:"-,optional" toml:"-"` + ReadTimeoutRaw string `hcl:"read,optional" toml:"read,optional"` + + // WriteTimeout is the maximum duration before timing out + // writes of the response. It is reset whenever a new + // request's header is read. Like ReadTimeout, it does not + // let Handlers make decisions on a per-request basis. 
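+ // Note: each duration in this struct is paired with a raw string twin + // (ReadTimeoutRaw, WriteTimeoutRaw, IdleTimeoutRaw); the TOML/HCL file + // carries the human-readable string and fillTimeDurations parses it + // into the time.Duration when the config is loaded.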
+ WriteTimeout time.Duration `hcl:"-,optional" toml:"-"` + WriteTimeoutRaw string `hcl:"write,optional" toml:"write,optional"` + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. If IdleTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, ReadHeaderTimeout is used. + IdleTimeout time.Duration `hcl:"-,optional" toml:"-"` + IdleTimeoutRaw string `hcl:"idle,optional" toml:"idle,optional"` +} + type GpoConfig struct { // Blocks is the number of blocks to track to compute the price oracle Blocks uint64 `hcl:"blocks,optional" toml:"blocks,optional"` @@ -367,6 +397,10 @@ type CacheConfig struct { // TxLookupLimit sets the maximum number of blocks from head whose tx indices are reserved. TxLookupLimit uint64 `hcl:"txlookuplimit,optional" toml:"txlookuplimit,optional"` + + // Time after which the Merkle Patricia Trie is stored to disc from memory + TrieTimeout time.Duration `hcl:"-,optional" toml:"-"` + TrieTimeoutRaw string `hcl:"timeout,optional" toml:"timeout,optional"` } type AccountsConfig struct { @@ -480,6 +514,11 @@ func DefaultConfig() *Config { Cors: []string{"localhost"}, VHost: []string{"localhost"}, }, + HttpTimeout: &HttpTimeouts{ + ReadTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 120 * time.Second, + }, }, Ethstats: "", Telemetry: &TelemetryConfig{ @@ -511,6 +550,7 @@ func DefaultConfig() *Config { NoPrefetch: false, Preimages: false, TxLookupLimit: 2350000, + TrieTimeout: 60 * time.Minute, }, Accounts: &AccountsConfig{ Unlock: []string{}, @@ -570,9 +610,13 @@ func (c *Config) fillTimeDurations() error { td *time.Duration str *string }{ + {"jsonrpc.timeouts.read", &c.JsonRPC.HttpTimeout.ReadTimeout, &c.JsonRPC.HttpTimeout.ReadTimeoutRaw}, + {"jsonrpc.timeouts.write", &c.JsonRPC.HttpTimeout.WriteTimeout, &c.JsonRPC.HttpTimeout.WriteTimeoutRaw}, + {"jsonrpc.timeouts.idle", &c.JsonRPC.HttpTimeout.IdleTimeout, &c.JsonRPC.HttpTimeout.IdleTimeoutRaw}, {"txpool.lifetime", &c.TxPool.LifeTime, &c.TxPool.LifeTimeRaw}, {"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw}, {"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw}, + {"cache.timeout", &c.Cache.TrieTimeout, &c.Cache.TrieTimeoutRaw}, } for _, x := range tds { @@ -830,6 +874,7 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.NoPrefetch = c.Cache.NoPrefetch n.Preimages = c.Cache.Preimages n.TxLookupLimit = c.Cache.TxLookupLimit + n.TrieTimeout = c.Cache.TrieTimeout } n.RPCGasCap = c.JsonRPC.GasCap @@ -928,6 +973,11 @@ func (c *Config) buildNode() (*node.Config, error) { WSPathPrefix: c.JsonRPC.Ws.Prefix, GraphQLCors: c.JsonRPC.Graphql.Cors, GraphQLVirtualHosts: c.JsonRPC.Graphql.VHost, + HTTPTimeouts: rpc.HTTPTimeouts{ + ReadTimeout: c.JsonRPC.HttpTimeout.ReadTimeout, + WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout, + IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout, + }, } // dev mode diff --git a/internal/cli/server/config_test.go b/internal/cli/server/config_test.go index 5f3118996b..3e6bb76b59 100644 --- a/internal/cli/server/config_test.go +++ b/internal/cli/server/config_test.go @@ -1,7 +1,6 @@ package server import ( - "math/big" "testing" "time" @@ -101,38 +100,6 @@ func TestDefaultDatatypeOverride(t *testing.T) { assert.Equal(t, c0, expected) } -func TestConfigLoadFile(t *testing.T) { - readFile := func(path string) { - config, err := readConfigFile(path) - assert.NoError(t, err) - - assert.Equal(t, config, &Config{ - DataDir: "./data", - P2P: 
&P2PConfig{ - MaxPeers: 30, - }, - TxPool: &TxPoolConfig{ - LifeTime: 1 * time.Second, - }, - Gpo: &GpoConfig{ - MaxPrice: big.NewInt(100), - }, - Sealer: &SealerConfig{}, - Cache: &CacheConfig{}, - }) - } - - // read file in hcl format - t.Run("hcl", func(t *testing.T) { - readFile("./testdata/test.hcl") - }) - - // read file in json format - t.Run("json", func(t *testing.T) { - readFile("./testdata/test.json") - }) -} - var dummyEnodeAddr = "enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303" func TestConfigBootnodesDefault(t *testing.T) { diff --git a/internal/cli/server/testdata/test.hcl b/internal/cli/server/testdata/test.hcl deleted file mode 100644 index 44138970fc..0000000000 --- a/internal/cli/server/testdata/test.hcl +++ /dev/null @@ -1,13 +0,0 @@ -datadir = "./data" - -p2p { - maxpeers = 30 -} - -txpool { - lifetime = "1s" -} - -gpo { - maxprice = "100" -} \ No newline at end of file diff --git a/internal/cli/server/testdata/test.json b/internal/cli/server/testdata/test.json deleted file mode 100644 index a08e5aceb1..0000000000 --- a/internal/cli/server/testdata/test.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "datadir": "./data", - "p2p": { - "maxpeers": 30 - }, - "txpool": { - "lifetime": "1s" - }, - "gpo": { - "maxprice": "100" - } -} \ No newline at end of file diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index a0eda44c15..9eaafd3bee 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -79,6 +79,10 @@ gcmode = "archive" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" [gpo] # blocks = 20 @@ -105,15 +109,16 @@ gcmode = "archive" [cache] cache = 4096 - # gc = 25 - # snapshot = 10 + gc = 0 + snapshot = 20 # database = 50 - # trie = 15 + trie = 30 # journal = "triecache" # rejournal = "1h0m0s" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index b6d6b34f4d..94dd6634f0 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -79,6 +79,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" [gpo] # blocks = 20 @@ -114,6 +118,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index e0a9be320f..9c55683c96 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -81,6 +81,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" [gpo] # blocks = 20 @@ -116,6 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git 
a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index b316ce4e8f..573f1f3be8 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -81,6 +81,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" [gpo] # blocks = 20 @@ -116,6 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 +# timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index ce0d0ff983..1762fdf117 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -79,6 +79,10 @@ gcmode = "archive" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" # [gpo] # blocks = 20 @@ -103,17 +107,18 @@ gcmode = "archive" # organization = "" # [telemetry.influx.tags] -# [cache] +[cache] # cache = 1024 - # gc = 25 - # snapshot = 10 + gc = 0 + snapshot = 20 # database = 50 - # trie = 15 + trie = 30 # journal = "triecache" # rejournal = "1h0m0s" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 3655478d45..ae191cec2c 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -79,6 +79,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" # [gpo] # blocks = 20 @@ -114,6 +118,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 9d30428205..b441cc137d 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -81,6 +81,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" # [gpo] # blocks = 20 @@ -116,6 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 + # timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 0e0aeae3a1..05a254e184 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -81,6 +81,10 @@ syncmode = "full" # host = "" # vhosts = ["*"] # corsdomain = ["*"] + # [jsonrpc.timeouts] + # read = "30s" + # write = "30s" + # idle = "2m0s" # [gpo] # blocks = 20 @@ -116,6 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 +# timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/scripts/getconfig.go b/scripts/getconfig.go index 7fbd39ac6d..59e3ff749d 
100644 --- a/scripts/getconfig.go +++ b/scripts/getconfig.go @@ -357,6 +357,34 @@ func writeTempStaticTrustedTOML(path string) { log.Fatal(err) } } + + if data.Has("Node.HTTPTimeouts.ReadTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsReadTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.ReadTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Node.HTTPTimeouts.WriteTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsWriteTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.WriteTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Node.HTTPTimeouts.IdleTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsIdleTimeout.toml", []byte(data.Get("Node.HTTPTimeouts.IdleTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } + + if data.Has("Eth.TrieTimeout") { + err = os.WriteFile("./tempHTTPTimeoutsTrieTimeout.toml", []byte(data.Get("Eth.TrieTimeout").(string)), 0600) + if err != nil { + log.Fatal(err) + } + } } func getStaticTrustedNodes(args []string) { @@ -574,7 +602,7 @@ func commentFlags(path string, updatedArgs []string) { flag = strconv.Itoa(passwordFlag) + "-" + flag } - if flag != "static-nodes" && flag != "trusted-nodes" { + if flag != "static-nodes" && flag != "trusted-nodes" && flag != "read" && flag != "write" && flag != "idle" && flag != "timeout" { flag = nameTagMap[flag] tempFlag := false diff --git a/scripts/getconfig.sh b/scripts/getconfig.sh index 472af08802..d00bf35ec8 100755 --- a/scripts/getconfig.sh +++ b/scripts/getconfig.sh @@ -112,6 +112,54 @@ else echo "neither JSON nor TOML TrustedNodes found" fi +if [[ -f ./tempHTTPTimeoutsReadTimeout.toml ]] +then + echo "HTTPTimeouts.ReadTimeout found" + read=$(head -1 ./tempHTTPTimeoutsReadTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%read = \"30s\"%read = \"${read}\"%" $confPath + else + sed -i "s%read = \"30s\"%read = \"${read}\"%" $confPath + fi + rm ./tempHTTPTimeoutsReadTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsWriteTimeout.toml ]] +then + echo "HTTPTimeouts.WriteTimeout found" + write=$(head -1 ./tempHTTPTimeoutsWriteTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%write = \"30s\"%write = \"${write}\"%" $confPath + else + sed -i "s%write = \"30s\"%write = \"${write}\"%" $confPath + fi + rm ./tempHTTPTimeoutsWriteTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsIdleTimeout.toml ]] +then + echo "HTTPTimeouts.IdleTimeout found" + idle=$(head -1 ./tempHTTPTimeoutsIdleTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath + else + sed -i "s%idle = \"2m0s\"%idle = \"${idle}\"%" $confPath + fi + rm ./tempHTTPTimeoutsIdleTimeout.toml +fi + +if [[ -f ./tempHTTPTimeoutsTrieTimeout.toml ]] +then + echo "Eth.TrieTimeout found" + timeout=$(head -1 ./tempHTTPTimeoutsTrieTimeout.toml) + shopt -s nocasematch; if [[ "$OS" == "darwin"* ]]; then + sed -i '' "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath + else + sed -i "s%timeout = \"1h0m0s\"%timeout = \"${timeout}\"%" $confPath + fi + rm ./tempHTTPTimeoutsTrieTimeout.toml +fi + printf "\n" # comment flags in $configPath that were not passed through $startPath diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 36d515c557..243b0182bb 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -101,6 +101,11 @@ func TestFetchStateSyncEvents(t *testing.T) { h := mocks.NewMockIHeimdallClient(ctrl) h.EXPECT().Close().AnyTimes() 
h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes() + h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{ + StartBlock: big.NewInt(1), + EndBlock: big.NewInt(2), + RootHash: common.Hash{}, + }, nil).AnyTimes() // B.2 Mock State Sync events fromID := uint64(1) @@ -136,6 +141,11 @@ func TestFetchStateSyncEvents_2(t *testing.T) { h := mocks.NewMockIHeimdallClient(ctrl) h.EXPECT().Close().AnyTimes() h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes() + h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{ + StartBlock: big.NewInt(1), + EndBlock: big.NewInt(2), + RootHash: common.Hash{}, + }, nil).AnyTimes() // Mock State Sync events // at # sprintSize, events are fetched for [fromID, (block-sprint).Time) @@ -287,6 +297,8 @@ func getMockedHeimdallClient(t *testing.T) (*mocks.MockIHeimdallClient, *span.He h.EXPECT().StateSyncEvents(gomock.Any(), gomock.Any()). Return([]*clerk.EventRecordWithTime{getSampleEventRecord(t)}, nil).AnyTimes() + // h.EXPECT().FetchLatestCheckpoint().Return([]*clerk.EventRecordWithTime{getSampleEventRecord(t)}, nil).AnyTimes() + return h, heimdallSpan, ctrl } @@ -324,13 +336,13 @@ func getEventRecords(t *testing.T) []*clerk.EventRecordWithTime { // TestEIP1559Transition tests the following: // -// 1. A transaction whose gasFeeCap is greater than the baseFee is valid. -// 2. Gas accounting for access lists on EIP-1559 transactions is correct. -// 3. Only the transaction's tip will be received by the coinbase. -// 4. The transaction sender pays for both the tip and baseFee. -// 5. The coinbase receives only the partially realized tip when -// gasFeeCap - gasTipCap < baseFee. -// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). +// 1. A transaction whose gasFeeCap is greater than the baseFee is valid. +// 2. Gas accounting for access lists on EIP-1559 transactions is correct. +// 3. Only the transaction's tip will be received by the coinbase. +// 4. The transaction sender pays for both the tip and baseFee. +// 5. The coinbase receives only the partially realized tip when +// gasFeeCap - gasTipCap < baseFee. +// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). 
 func TestEIP1559Transition(t *testing.T) {
 	var (
 		aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")

From 180d4444df4484749fd38c5177e041dee26badd6 Mon Sep 17 00:00:00 2001
From: Arpit Temani
Date: Mon, 5 Dec 2022 22:05:34 +0530
Subject: [PATCH 22/56] remove unwanted code

---
 tests/bor/bor_test.go | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go
index 6db3d90030..d059956e6a 100644
--- a/tests/bor/bor_test.go
+++ b/tests/bor/bor_test.go
@@ -480,16 +480,7 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
 	h := mocks.NewMockIHeimdallClient(ctrl)
 	h.EXPECT().Close().AnyTimes()
-<<<<<<< HEAD
 	h.EXPECT().Span(gomock.Any(), uint64(1)).Return(&res.Result, nil).AnyTimes()
-=======
-	h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes()
-	h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{
-		StartBlock: big.NewInt(1),
-		EndBlock:   big.NewInt(2),
-		RootHash:   common.Hash{},
-	}, nil).AnyTimes()
->>>>>>> 57075d000d1a6396c33d42e5da97be19bfcfb6f1

 	// Mock State Sync events
 	// at # sprintSize, events are fetched for [fromID, (block-sprint).Time)

From e68d2d1a8768dc896981ed0992d33bd745f8d2e7 Mon Sep 17 00:00:00 2001
From: Jerry
Date: Mon, 5 Dec 2022 12:18:06 -0800
Subject: [PATCH 23/56] Fix docker publish authentication issue

In goreleaser-cross 1.19+, Docker Hub authentication requires a docker login
action followed by mounting the docker config file. See
https://github.com/goreleaser/goreleaser-cross#github-actions.

---
 Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Makefile b/Makefile
index bb5c6df0cf..242435df76 100644
--- a/Makefile
+++ b/Makefile
@@ -224,6 +224,7 @@ release:
 		-e DOCKER_PASSWORD \
 		-e SLACK_WEBHOOK \
 		-v /var/run/docker.sock:/var/run/docker.sock \
+		-v $(HOME)/.docker/config.json:/root/.docker/config.json \
 		-v `pwd`:/go/src/$(PACKAGE_NAME) \
 		-w /go/src/$(PACKAGE_NAME) \
 		goreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \

From 31da9729fb6cffa4404a88d208ba1b582e0ce4f8 Mon Sep 17 00:00:00 2001
From: Jerry
Date: Mon, 5 Dec 2022 22:35:27 -0800
Subject: [PATCH 24/56] Revert "update Docker login for goreleaser-cross v1.19"

This reverts commit 4d19cf5342a439d98cca21b03c63a0bc075769cf.
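To make the mechanism in PATCH 23 concrete: `docker login` writes credentials
to the invoking user's docker config, and bind-mounting that file into the
goreleaser-cross container is what lets the in-container `docker push`
authenticate. A rough shell equivalent of the release invocation follows; the
image tag, module path, and `--rm-dist` flag are illustrative assumptions, not
the repo's actual `make release` recipe:

    # Assumed sketch: log in first so $HOME/.docker/config.json holds the token,
    # then mount both the docker socket and that config file into the container.
    echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
    docker run --rm \
        -v /var/run/docker.sock:/var/run/docker.sock \
        -v "$HOME/.docker/config.json":/root/.docker/config.json \
        -v "$PWD":/go/src/github.com/maticnetwork/bor \
        -w /go/src/github.com/maticnetwork/bor \
        goreleaser/goreleaser-cross:v1.19.1 release --rm-dist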
--- .github/workflows/release.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0d45f0bc98..b615cf639e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,12 +29,6 @@ jobs: TAG=${GITHUB_REF#refs/tags/} echo ::set-output name=tag_name::${TAG} - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB }} - password: ${{ secrets.DOCKERHUB_KEY }} - - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -45,3 +39,5 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VERSION: ${{ steps.prepare.outputs.tag_name }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} From 43d894c44e1e5a544e915f4c950ce6981cd57536 Mon Sep 17 00:00:00 2001 From: Jerry Date: Tue, 6 Dec 2022 11:01:22 -0800 Subject: [PATCH 25/56] Bump version to stable --- params/version.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/params/version.go b/params/version.go index 83e04a66f8..d2746b37ee 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release - VersionMeta = "beta" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. @@ -43,7 +43,8 @@ var VersionWithMeta = func() string { // ArchiveVersion holds the textual version string used for Geth archives. // e.g. "1.8.11-dea1ce05" for stable releases, or -// "1.8.13-unstable-21c059b6" for unstable releases +// +// "1.8.13-unstable-21c059b6" for unstable releases func ArchiveVersion(gitCommit string) string { vsn := Version if VersionMeta != "stable" { From 6d11117edc687716549e69cb64147c182e116a3f Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Wed, 14 Dec 2022 00:44:01 +0530 Subject: [PATCH 26/56] Revert "Merge pull request #435 from maticnetwork/POS-553" This reverts commit 657d262defc9c94e9513b3d45230492d8b20eac7, reversing changes made to 88dbfa1c13c15464d3c1a3085a9f12d0ffb9b218. 
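The change reverted below had overridden the `miner` field of RPC block
responses with the signer recovered via the consensus engine's `Author()`. A
quick way to observe the revert, sketched as a hypothetical JSON-RPC query
(the local endpoint and `jq` filter are assumptions, not part of this patch):

    # Assumed sketch: fetch the latest block and print its "miner" field.
    # Pre-revert: the block producer recovered by bor's Author().
    # Post-revert: the raw header coinbase, which Bor leaves as the zero address.
    curl -s -X POST -H 'Content-Type: application/json' \
        --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["latest",false],"id":1}' \
        http://localhost:8545 | jq -r '.result.miner'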
--- internal/ethapi/api.go | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 082dfea66f..2fd148c7c6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -819,20 +819,6 @@ func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.H return nil } -// getAuthor: returns the author of the Block -func (s *PublicBlockChainAPI) getAuthor(head *types.Header) *common.Address { - // get author using Author() function from: /consensus/clique/clique.go - // In Production: get author using Author() function from: /consensus/bor/bor.go - author, err := s.b.Engine().Author(head) - // make sure we don't send error to the user, return 0x0 instead - if err != nil { - add := common.HexToAddress("0x0000000000000000000000000000000000000000") - return &add - } - // change the coinbase (0x0) with the miner address - return &author -} - // GetBlockByNumber returns the requested canonical block. // - When blockNr is -1 the chain head is returned. // - When blockNr is -2 the pending chain head is returned. @@ -841,7 +827,6 @@ func (s *PublicBlockChainAPI) getAuthor(head *types.Header) *common.Address { func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, number) if block != nil && err == nil { - response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) if err == nil && number == rpc.PendingBlockNumber { // Pending blocks need to nil out a few fields @@ -850,12 +835,6 @@ func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.B } } - if err == nil && number != rpc.PendingBlockNumber { - author := s.getAuthor(block.Header()) - - response["miner"] = author - } - // append marshalled bor transaction if err == nil && response != nil { response = s.appendRPCMarshalBorTransaction(ctx, block, response, fullTx) @@ -874,10 +853,6 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Ha response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) // append marshalled bor transaction if err == nil && response != nil { - author := s.getAuthor(block.Header()) - - response["miner"] = author - return s.appendRPCMarshalBorTransaction(ctx, block, response, fullTx), err } return response, err From 7364f940ba418e32507f7095fb583f641d06e710 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Wed, 14 Dec 2022 00:59:54 +0530 Subject: [PATCH 27/56] revert change for release for go1.19 --- .github/workflows/release.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 92b960f5cd..2ceda3d2ee 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,6 +29,12 @@ jobs: TAG=${GITHUB_REF#refs/tags/} echo ::set-output name=tag_name::${TAG} + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB }} + password: ${{ secrets.DOCKERHUB_KEY }} + - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -39,5 +45,3 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VERSION: ${{ steps.prepare.outputs.tag_name }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} From a37af35c361eb7b61be6962402460909010f409e Mon Sep 17 00:00:00 2001 From: Jerry Date: Tue, 13 Dec 2022 15:17:29 -0800 Subject: [PATCH 28/56] Add default values to 
CLI helper and docs

This commit adds default values to CLI helper and docs. When the default
value of a string flag, slice string flag, or map string flag is empty, its
helper message won't show any default value.

---
 docs/README.md | 2 +
 docs/cli/bootnode.md | 10 +-
 docs/cli/chain_sethead.md | 4 +-
 docs/cli/debug_block.md | 2 +-
 docs/cli/debug_pprof.md | 4 +-
 docs/cli/peers_add.md | 4 +-
 docs/cli/peers_list.md | 2 +-
 docs/cli/peers_remove.md | 4 +-
 docs/cli/peers_status.md | 2 +-
 docs/cli/removedb.md | 2 +-
 docs/cli/server.md | 144 ++++++++++++++--------------
 internal/cli/flagset/flagset.go | 160 +++++++++++++++++++++-----------
 internal/cli/server/flags.go | 71 +++++++-------
 13 files changed, 237 insertions(+), 174 deletions(-)

diff --git a/docs/README.md b/docs/README.md
index 45021e8c7f..4622bb8f00 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -11,6 +11,8 @@ $ bor server
   ```

+ See [here](./cli/server.md) for more flag details.
+
 - The `bor dumpconfig` sub-command prints the default configurations, in the TOML format, on the terminal. One can `pipe (>)` this to a file (say `config.toml`) and use it to start bor.

 - A toml file now can be used instead of flags and can contain all configuration for the node to run. To simply run bor with a configuration file, the following command can be used.

diff --git a/docs/cli/bootnode.md b/docs/cli/bootnode.md
index 48e933a934..064de39014 100644
--- a/docs/cli/bootnode.md
+++ b/docs/cli/bootnode.md
@@ -2,16 +2,16 @@

 ## Options

-- ```listen-addr```: listening address of bootnode (<ip>:<port>)
+- ```listen-addr```: listening address of bootnode (<ip>:<port>) (default: 0.0.0.0:30303)

-- ```v5```: Enable UDP v5
+- ```v5```: Enable UDP v5 (default: false)

-- ```log-level```: Log level (trace|debug|info|warn|error|crit)
+- ```log-level```: Log level (trace|debug|info|warn|error|crit) (default: info)

-- ```nat```: port mapping mechanism (any|none|upnp|pmp|extip:<IP>)
+- ```nat```: port mapping mechanism (any|none|upnp|pmp|extip:<IP>) (default: none)

 - ```node-key```: file or hex node key

 - ```save-key```: path to save the ecdsa private key

-- ```dry-run```: validates parameters and prints bootnode configurations, but does not start bootnode
\ No newline at end of file
+- ```dry-run```: validates parameters and prints bootnode configurations, but does not start bootnode (default: false)
\ No newline at end of file

diff --git a/docs/cli/chain_sethead.md b/docs/cli/chain_sethead.md
index bf97990e62..09cd37baa1 100644
--- a/docs/cli/chain_sethead.md
+++ b/docs/cli/chain_sethead.md
@@ -8,6 +8,6 @@ The ```chain sethead <number>``` command sets the current chain to a certain blo

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

-- ```yes```: Force set head
\ No newline at end of file
+- ```yes```: Force set head (default: false)
\ No newline at end of file

diff --git a/docs/cli/debug_block.md b/docs/cli/debug_block.md
index ced7e482ee..efcead2626 100644
--- a/docs/cli/debug_block.md
+++ b/docs/cli/debug_block.md
@@ -4,6 +4,6 @@ The ```bor debug block <number>``` command will create an archive containing tra

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

 - ```output```: Output directory
\ No newline at end of file

diff --git a/docs/cli/debug_pprof.md b/docs/cli/debug_pprof.md
index 86a84b6065..2e7e40b677 100644
--- a/docs/cli/debug_pprof.md
+++ b/docs/cli/debug_pprof.md
@@ -4,8 +4,8 @@ The ```debug pprof <enode>``` command will create an archive containing bor ppro

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

-- ```seconds```: seconds to trace
+- ```seconds```: seconds to trace (default: 2)

 - ```output```: Output directory
\ No newline at end of file

diff --git a/docs/cli/peers_add.md b/docs/cli/peers_add.md
index 5bc4ed1448..7b879cdf0d 100644
--- a/docs/cli/peers_add.md
+++ b/docs/cli/peers_add.md
@@ -4,6 +4,6 @@ The ```peers add <enode>``` command joins the local client to another remote pee

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

-- ```trusted```: Add the peer as a trusted
\ No newline at end of file
+- ```trusted```: Add the peer as a trusted (default: false)
\ No newline at end of file

diff --git a/docs/cli/peers_list.md b/docs/cli/peers_list.md
index 41f398b764..5d30d1d32e 100644
--- a/docs/cli/peers_list.md
+++ b/docs/cli/peers_list.md
@@ -4,4 +4,4 @@ The ```peers list``` command lists the connected peers.

 ## Options

-- ```address```: Address of the grpc endpoint
\ No newline at end of file
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)
\ No newline at end of file

diff --git a/docs/cli/peers_remove.md b/docs/cli/peers_remove.md
index 2cac1e7656..f731f12f6f 100644
--- a/docs/cli/peers_remove.md
+++ b/docs/cli/peers_remove.md
@@ -4,6 +4,6 @@ The ```peers remove <enode>``` command disconnects the local client from a conne

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

-- ```trusted```: Add the peer as a trusted
\ No newline at end of file
+- ```trusted```: Add the peer as a trusted (default: false)
\ No newline at end of file

diff --git a/docs/cli/peers_status.md b/docs/cli/peers_status.md
index 65a0fe9d8f..9806bfb638 100644
--- a/docs/cli/peers_status.md
+++ b/docs/cli/peers_status.md
@@ -4,4 +4,4 @@ The ```peers status <peer id>``` command displays the status of a peer by its id

 ## Options

-- ```address```: Address of the grpc endpoint
\ No newline at end of file
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)
\ No newline at end of file

diff --git a/docs/cli/removedb.md b/docs/cli/removedb.md
index 473d47ecef..7ee09568b9 100644
--- a/docs/cli/removedb.md
+++ b/docs/cli/removedb.md
@@ -4,6 +4,6 @@ The ```bor removedb``` command will remove the blockchain and state databases at

 ## Options

-- ```address```: Address of the grpc endpoint
+- ```address```: Address of the grpc endpoint (default: 127.0.0.1:3131)

 - ```datadir```: Path of the data directory to store information
\ No newline at end of file

diff --git a/docs/cli/server.md b/docs/cli/server.md
index 4c291a74b9..777856de34 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -4,49 +4,51 @@ The ```bor server``` command runs the Bor client.
## Options -- ```chain```: Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file +- ```chain```: Name of the chain to sync ('mumbai', 'mainnet') or path to a genesis file (default: mainnet) - ```identity```: Name/Identity of the node -- ```log-level```: Set log level for the server +- ```log-level```: Set log level for the server (default: INFO) - ```datadir```: Path of the data directory to store information +- ```datadir.ancient```: Data directory for ancient chain segments (default = inside chaindata) + - ```keystore```: Path of the directory where keystores are located - ```config```: File for the config file -- ```syncmode```: Blockchain sync mode (only "full" sync supported) +- ```syncmode```: Blockchain sync mode (only "full" sync supported) (default: full) -- ```gcmode```: Blockchain garbage collection mode ("full", "archive") +- ```gcmode```: Blockchain garbage collection mode ("full", "archive") (default: full) - ```eth.requiredblocks```: Comma separated block number-to-hash mappings to require for peering (=) -- ```snapshot```: Enables the snapshot-database mode (default = true) +- ```snapshot```: Enables the snapshot-database mode (default: true) -- ```bor.logs```: Enables bor log retrieval (default = false) +- ```bor.logs```: Enables bor log retrieval (default: false) -- ```bor.heimdall```: URL of Heimdall service +- ```bor.heimdall```: URL of Heimdall service (default: http://localhost:1317) -- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose) +- ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose) (default: false) - ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port) -- ```gpo.blocks```: Number of recent blocks to check for gas prices +- ```gpo.blocks```: Number of recent blocks to check for gas prices (default: 20) -- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices +- ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices (default: 60) -- ```gpo.maxprice```: Maximum gas price will be recommended by gpo +- ```gpo.maxprice```: Maximum gas price will be recommended by gpo (default: 5000000000000) -- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions +- ```gpo.ignoreprice```: Gas price below which gpo will ignore transactions (default: 2) -- ```disable-bor-wallet```: Disable the personal wallet endpoints +- ```disable-bor-wallet```: Disable the personal wallet endpoints (default: true) -- ```grpc.addr```: Address and port to bind the GRPC server +- ```grpc.addr```: Address and port to bind the GRPC server (default: :3131) -- ```dev```: Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled +- ```dev```: Enable developer mode with ephemeral proof-of-authority network and a pre-funded developer account, mining enabled (default: false) -- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending) +- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending) (default: 0) ### Account Management Options @@ -54,111 +56,111 @@ The ```bor server``` command runs the Bor client. 
- ```password```: Password file to use for non-interactive password input -- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http +- ```allow-insecure-unlock```: Allow insecure account unlocking when account-related RPCs are exposed by http (default: false) -- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength +- ```lightkdf```: Reduce key-derivation RAM & CPU usage at some expense of KDF strength (default: false) ### Cache Options -- ```cache```: Megabytes of memory allocated to internal caching (default = 4096 mainnet full node) +- ```cache```: Megabytes of memory allocated to internal caching (default: 1024) -- ```cache.database```: Percentage of cache memory allowance to use for database io +- ```cache.database```: Percentage of cache memory allowance to use for database io (default: 50) -- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode) +- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default: 15) -- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts +- ```cache.trie.journal```: Disk journal directory for trie cache to survive node restarts (default: triecache) -- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal +- ```cache.trie.rejournal```: Time interval to regenerate the trie cache journal (default: 1h0m0s) -- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode) +- ```cache.gc```: Percentage of cache memory allowance to use for trie pruning (default: 25) -- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode) +- ```cache.snapshot```: Percentage of cache memory allowance to use for snapshot caching (default: 10) -- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data) +- ```cache.noprefetch```: Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data) (default: false) -- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys +- ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys (default: false) -- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) +- ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default: 2350000) ### JsonRPC Options -- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) +- ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) (default: 50000000) -- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) +- ```rpc.txfeecap```: Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) (default: 5) -- ```ipcdisable```: Disable the IPC-RPC server +- ```ipcdisable```: Disable the IPC-RPC server (default: false) - ```ipcpath```: Filename for IPC socket/pipe within the datadir (explicit paths escape it) -- ```http.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) +- ```http.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) 
(default: localhost) -- ```http.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. +- ```http.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: localhost) -- ```ws.origins```: Origins from which to accept websockets requests +- ```ws.origins```: Origins from which to accept websockets requests (default: localhost) -- ```graphql.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) +- ```graphql.corsdomain```: Comma separated list of domains from which to accept cross origin requests (browser enforced) (default: localhost) -- ```graphql.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. +- ```graphql.vhosts```: Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: localhost) -- ```http```: Enable the HTTP-RPC server +- ```http```: Enable the HTTP-RPC server (default: false) -- ```http.addr```: HTTP-RPC server listening interface +- ```http.addr```: HTTP-RPC server listening interface (default: localhost) -- ```http.port```: HTTP-RPC server listening port +- ```http.port```: HTTP-RPC server listening port (default: 8545) - ```http.rpcprefix```: HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths. -- ```http.api```: API's offered over the HTTP-RPC interface +- ```http.api```: API's offered over the HTTP-RPC interface (default: eth,net,web3,txpool,bor) -- ```ws```: Enable the WS-RPC server +- ```ws```: Enable the WS-RPC server (default: false) -- ```ws.addr```: WS-RPC server listening interface +- ```ws.addr```: WS-RPC server listening interface (default: localhost) -- ```ws.port```: WS-RPC server listening port +- ```ws.port```: WS-RPC server listening port (default: 8546) - ```ws.rpcprefix```: HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths. -- ```ws.api```: API's offered over the WS-RPC interface +- ```ws.api```: API's offered over the WS-RPC interface (default: net,web3) -- ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. +- ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. 
(default: false) ### P2P Options -- ```bind```: Network binding address +- ```bind```: Network binding address (default: 0.0.0.0) -- ```port```: Network listening port +- ```port```: Network listening port (default: 30303) - ```bootnodes```: Comma separated enode URLs for P2P discovery bootstrap -- ```maxpeers```: Maximum number of network peers (network disabled if set to 0) +- ```maxpeers```: Maximum number of network peers (network disabled if set to 0) (default: 50) -- ```maxpendpeers```: Maximum number of pending connection attempts +- ```maxpendpeers```: Maximum number of pending connection attempts (default: 50) -- ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:) +- ```nat```: NAT port mapping mechanism (any|none|upnp|pmp|extip:) (default: any) -- ```nodiscover```: Disables the peer discovery mechanism (manual peer addition) +- ```nodiscover```: Disables the peer discovery mechanism (manual peer addition) (default: false) -- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism +- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism (default: false) ### Sealer Options -- ```mine```: Enable mining +- ```mine```: Enable mining (default: false) - ```miner.etherbase```: Public address for block mining rewards - ```miner.extradata```: Block extra data set by the miner (default = client version) -- ```miner.gaslimit```: Target gas ceiling (gas limit) for mined blocks +- ```miner.gaslimit```: Target gas ceiling (gas limit) for mined blocks (default: 30000000) -- ```miner.gasprice```: Minimum gas price for mining a transaction +- ```miner.gasprice```: Minimum gas price for mining a transaction (default: 1000000000) ### Telemetry Options -- ```metrics```: Enable metrics collection and reporting +- ```metrics```: Enable metrics collection and reporting (default: false) -- ```metrics.expensive```: Enable expensive metrics collection and reporting +- ```metrics.expensive```: Enable expensive metrics collection and reporting (default: false) -- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1) +- ```metrics.influxdb```: Enable metrics export/push to an external InfluxDB database (v1) (default: false) - ```metrics.influxdb.endpoint```: InfluxDB API endpoint to report metrics to @@ -170,11 +172,11 @@ The ```bor server``` command runs the Bor client. - ```metrics.influxdb.tags```: Comma-separated InfluxDB tags (key/values) attached to all measurements -- ```metrics.prometheus-addr```: Address for Prometheus Server +- ```metrics.prometheus-addr```: Address for Prometheus Server (default: 127.0.0.1:7071) -- ```metrics.opencollector-endpoint```: OpenCollector Endpoint (host:port) +- ```metrics.opencollector-endpoint```: OpenCollector Endpoint (host:port) (default: 127.0.0.1:4317) -- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database +- ```metrics.influxdbv2```: Enable metrics export/push to an external InfluxDB v2 database (default: false) - ```metrics.influxdb.token```: Token to authorize access to the database (v2 only) @@ -186,22 +188,22 @@ The ```bor server``` command runs the Bor client. 
- ```txpool.locals```: Comma separated accounts to treat as locals (no flush, priority inclusion) -- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions +- ```txpool.nolocals```: Disables price exemptions for locally submitted transactions (default: false) -- ```txpool.journal```: Disk journal for local transaction to survive node restarts +- ```txpool.journal```: Disk journal for local transaction to survive node restarts (default: transactions.rlp) -- ```txpool.rejournal```: Time interval to regenerate the local transaction journal +- ```txpool.rejournal```: Time interval to regenerate the local transaction journal (default: 1h0m0s) -- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool +- ```txpool.pricelimit```: Minimum gas price limit to enforce for acceptance into the pool (default: 1) -- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction +- ```txpool.pricebump```: Price bump percentage to replace an already existing transaction (default: 10) -- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account +- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account (default: 16) -- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts +- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts (default: 32768) -- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account +- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account (default: 16) -- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts +- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts (default: 32768) -- ```txpool.lifetime```: Maximum amount of time non-executable transaction are queued \ No newline at end of file +- ```txpool.lifetime```: Maximum amount of time non-executable transaction are queued (default: 3h0m0s) \ No newline at end of file diff --git a/internal/cli/flagset/flagset.go b/internal/cli/flagset/flagset.go index 933fe59060..74249df395 100644 --- a/internal/cli/flagset/flagset.go +++ b/internal/cli/flagset/flagset.go @@ -24,9 +24,10 @@ func NewFlagSet(name string) *Flagset { } type FlagVar struct { - Name string - Usage string - Group string + Name string + Usage string + Group string + Default any } func (f *Flagset) addFlag(fl *FlagVar) { @@ -38,7 +39,11 @@ func (f *Flagset) Help() string { items := []string{} for _, item := range f.flags { - items = append(items, fmt.Sprintf(" -%s\n %s", item.Name, item.Usage)) + if item.Default != nil { + items = append(items, fmt.Sprintf(" -%s\n %s (default: %v)", item.Name, item.Usage, item.Default)) + } else { + items = append(items, fmt.Sprintf(" -%s\n %s", item.Name, item.Usage)) + } } return str + strings.Join(items, "\n\n") @@ -85,7 +90,11 @@ func (f *Flagset) MarkDown() string { } for _, item := range groups[k] { - items = append(items, fmt.Sprintf("- ```%s```: %s", item.Name, item.Usage)) + if item.Default != nil { + items = append(items, fmt.Sprintf("- ```%s```: %s (default: %v)", item.Name, item.Usage, item.Default)) + } else { + items = append(items, fmt.Sprintf("- ```%s```: %s", item.Name, item.Usage)) + } } } @@ -110,27 +119,39 @@ type BoolFlag struct { func (f *Flagset) BoolFlag(b *BoolFlag) { f.addFlag(&FlagVar{ - Name: b.Name, - Usage: 
b.Usage, - Group: b.Group, + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, }) f.set.BoolVar(b.Value, b.Name, b.Default, b.Usage) } type StringFlag struct { - Name string - Usage string - Default string - Value *string - Group string + Name string + Usage string + Default string + Value *string + Group string + HideDefaultFromDoc bool } func (f *Flagset) StringFlag(b *StringFlag) { - f.addFlag(&FlagVar{ - Name: b.Name, - Usage: b.Usage, - Group: b.Group, - }) + if b.Default == "" || b.HideDefaultFromDoc { + f.addFlag(&FlagVar{ + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, + }) + } f.set.StringVar(b.Value, b.Name, b.Default, b.Usage) } @@ -144,9 +165,10 @@ type IntFlag struct { func (f *Flagset) IntFlag(i *IntFlag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: i.Default, }) f.set.IntVar(i.Value, i.Name, i.Default, i.Usage) } @@ -161,18 +183,20 @@ type Uint64Flag struct { func (f *Flagset) Uint64Flag(i *Uint64Flag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: fmt.Sprintf("%d", i.Default), }) f.set.Uint64Var(i.Value, i.Name, i.Default, i.Usage) } type BigIntFlag struct { - Name string - Usage string - Value *big.Int - Group string + Name string + Usage string + Value *big.Int + Group string + Default *big.Int } func (b *BigIntFlag) String() string { @@ -204,9 +228,10 @@ func (b *BigIntFlag) Set(value string) error { func (f *Flagset) BigIntFlag(b *BigIntFlag) { f.addFlag(&FlagVar{ - Name: b.Name, - Usage: b.Usage, - Group: b.Group, + Name: b.Name, + Usage: b.Usage, + Group: b.Group, + Default: b.Default, }) f.set.Var(b, b.Name, b.Usage) } @@ -247,11 +272,21 @@ func (i *SliceStringFlag) Set(value string) error { } func (f *Flagset) SliceStringFlag(s *SliceStringFlag) { - f.addFlag(&FlagVar{ - Name: s.Name, - Usage: s.Usage, - Group: s.Group, - }) + if s.Default == nil || len(s.Default) == 0 { + f.addFlag(&FlagVar{ + Name: s.Name, + Usage: s.Usage, + Group: s.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: s.Name, + Usage: s.Usage, + Group: s.Group, + Default: strings.Join(s.Default, ","), + }) + } f.set.Var(s, s.Name, s.Usage) } @@ -265,33 +300,39 @@ type DurationFlag struct { func (f *Flagset) DurationFlag(d *DurationFlag) { f.addFlag(&FlagVar{ - Name: d.Name, - Usage: d.Usage, - Group: d.Group, + Name: d.Name, + Usage: d.Usage, + Group: d.Group, + Default: d.Default, }) f.set.DurationVar(d.Value, d.Name, d.Default, "") } type MapStringFlag struct { - Name string - Usage string - Value *map[string]string - Group string + Name string + Usage string + Value *map[string]string + Group string + Default map[string]string } -func (m *MapStringFlag) String() string { - if m.Value == nil { +func formatMapString(m map[string]string) string { + if len(m) == 0 { return "" } ls := []string{} - for k, v := range *m.Value { + for k, v := range m { ls = append(ls, k+"="+v) } return strings.Join(ls, ",") } +func (m *MapStringFlag) String() string { + return formatMapString(*m.Value) +} + func (m *MapStringFlag) Set(value string) error { if m.Value == nil { m.Value = &map[string]string{} @@ -311,11 +352,21 @@ func (m *MapStringFlag) Set(value string) error { } func (f *Flagset) MapStringFlag(m *MapStringFlag) { - f.addFlag(&FlagVar{ - Name: m.Name, - 
Usage: m.Usage, - Group: m.Group, - }) + if m.Default == nil || len(m.Default) == 0 { + f.addFlag(&FlagVar{ + Name: m.Name, + Usage: m.Usage, + Group: m.Group, + Default: nil, + }) + } else { + f.addFlag(&FlagVar{ + Name: m.Name, + Usage: m.Usage, + Group: m.Group, + Default: formatMapString(m.Default), + }) + } f.set.Var(m, m.Name, m.Usage) } @@ -329,9 +380,10 @@ type Float64Flag struct { func (f *Flagset) Float64Flag(i *Float64Flag) { f.addFlag(&FlagVar{ - Name: i.Name, - Usage: i.Usage, - Group: i.Group, + Name: i.Name, + Usage: i.Usage, + Group: i.Group, + Default: i.Default, }) f.set.Float64Var(i.Value, i.Name, i.Default, "") } diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index a53e1c3e46..c1560d9347 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -16,10 +16,11 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.Chain, }) f.StringFlag(&flagset.StringFlag{ - Name: "identity", - Usage: "Name/Identity of the node", - Value: &c.cliConfig.Identity, - Default: c.cliConfig.Identity, + Name: "identity", + Usage: "Name/Identity of the node", + Value: &c.cliConfig.Identity, + Default: c.cliConfig.Identity, + HideDefaultFromDoc: true, }) f.StringFlag(&flagset.StringFlag{ Name: "log-level", @@ -28,10 +29,11 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.LogLevel, }) f.StringFlag(&flagset.StringFlag{ - Name: "datadir", - Usage: "Path of the data directory to store information", - Value: &c.cliConfig.DataDir, - Default: c.cliConfig.DataDir, + Name: "datadir", + Usage: "Path of the data directory to store information", + Value: &c.cliConfig.DataDir, + Default: c.cliConfig.DataDir, + HideDefaultFromDoc: true, }) f.StringFlag(&flagset.StringFlag{ Name: "datadir.ancient", @@ -62,19 +64,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.GcMode, }) f.MapStringFlag(&flagset.MapStringFlag{ - Name: "eth.requiredblocks", - Usage: "Comma separated block number-to-hash mappings to require for peering (=)", - Value: &c.cliConfig.RequiredBlocks, + Name: "eth.requiredblocks", + Usage: "Comma separated block number-to-hash mappings to require for peering (=)", + Value: &c.cliConfig.RequiredBlocks, + Default: c.cliConfig.RequiredBlocks, }) f.BoolFlag(&flagset.BoolFlag{ Name: "snapshot", - Usage: `Enables the snapshot-database mode (default = true)`, + Usage: `Enables the snapshot-database mode`, Value: &c.cliConfig.Snapshot, Default: c.cliConfig.Snapshot, }) f.BoolFlag(&flagset.BoolFlag{ Name: "bor.logs", - Usage: `Enables bor log retrieval (default = false)`, + Usage: `Enables bor log retrieval`, Value: &c.cliConfig.BorLogs, Default: c.cliConfig.BorLogs, }) @@ -202,10 +205,11 @@ func (c *Command) Flags() *flagset.Flagset { Group: "Sealer", }) f.BigIntFlag(&flagset.BigIntFlag{ - Name: "miner.gasprice", - Usage: "Minimum gas price for mining a transaction", - Value: c.cliConfig.Sealer.GasPrice, - Group: "Sealer", + Name: "miner.gasprice", + Usage: "Minimum gas price for mining a transaction", + Value: c.cliConfig.Sealer.GasPrice, + Group: "Sealer", + Default: c.cliConfig.Sealer.GasPrice, }) // ethstats @@ -230,20 +234,22 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.Gpo.Percentile, }) f.BigIntFlag(&flagset.BigIntFlag{ - Name: "gpo.maxprice", - Usage: "Maximum gas price will be recommended by gpo", - Value: c.cliConfig.Gpo.MaxPrice, + Name: "gpo.maxprice", + Usage: "Maximum gas price will be recommended by gpo", + Value: c.cliConfig.Gpo.MaxPrice, + Default: 
c.cliConfig.Gpo.MaxPrice, }) f.BigIntFlag(&flagset.BigIntFlag{ - Name: "gpo.ignoreprice", - Usage: "Gas price below which gpo will ignore transactions", - Value: c.cliConfig.Gpo.IgnorePrice, + Name: "gpo.ignoreprice", + Usage: "Gas price below which gpo will ignore transactions", + Value: c.cliConfig.Gpo.IgnorePrice, + Default: c.cliConfig.Gpo.IgnorePrice, }) // cache options f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache", - Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)", + Usage: "Megabytes of memory allocated to internal caching", Value: &c.cliConfig.Cache.Cache, Default: c.cliConfig.Cache.Cache, Group: "Cache", @@ -257,7 +263,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.trie", - Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)", + Usage: "Percentage of cache memory allowance to use for trie caching", Value: &c.cliConfig.Cache.PercTrie, Default: c.cliConfig.Cache.PercTrie, Group: "Cache", @@ -278,14 +284,14 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.gc", - Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)", + Usage: "Percentage of cache memory allowance to use for trie pruning", Value: &c.cliConfig.Cache.PercGc, Default: c.cliConfig.Cache.PercGc, Group: "Cache", }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "cache.snapshot", - Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)", + Usage: "Percentage of cache memory allowance to use for snapshot caching", Value: &c.cliConfig.Cache.PercSnapshot, Default: c.cliConfig.Cache.PercSnapshot, Group: "Cache", @@ -306,7 +312,7 @@ func (c *Command) Flags() *flagset.Flagset { }) f.Uint64Flag(&flagset.Uint64Flag{ Name: "txlookuplimit", - Usage: "Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain)", + Usage: "Number of recent blocks to maintain transactions index for", Value: &c.cliConfig.Cache.TxLookupLimit, Default: c.cliConfig.Cache.TxLookupLimit, Group: "Cache", @@ -569,10 +575,11 @@ func (c *Command) Flags() *flagset.Flagset { Group: "Telemetry", }) f.MapStringFlag(&flagset.MapStringFlag{ - Name: "metrics.influxdb.tags", - Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements", - Value: &c.cliConfig.Telemetry.InfluxDB.Tags, - Group: "Telemetry", + Name: "metrics.influxdb.tags", + Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements", + Value: &c.cliConfig.Telemetry.InfluxDB.Tags, + Group: "Telemetry", + Default: c.cliConfig.Telemetry.InfluxDB.Tags, }) f.StringFlag(&flagset.StringFlag{ Name: "metrics.prometheus-addr", From ddf1a4db8ca805705ecc4b20dc063b3ec5e9e875 Mon Sep 17 00:00:00 2001 From: Jerry Date: Tue, 13 Dec 2022 23:46:43 -0800 Subject: [PATCH 29/56] Add a summary of new CLI in docs --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 4622bb8f00..2f75b218e4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,7 +1,7 @@ # Documentation -- [The new command line interface](./cli) +[The new command line interface (CLI)](./cli) in this version of Bor aims to give users more control over the codebase when interacting with and starting a node. 
We have made every effort to keep most of the flags similar to the old CLI, except for a few notable changes. One major change is the use of the --config flag, which previously represented fields without available flags. It now represents all flags available to the user, and will overwrite any other flags if provided. As a node operator, you still have the flexibility to modify flags as needed. Please note that this change does not affect the internal functionality of the node, and it remains compatible with Geth and the Ethereum Virtual Machine (EVM). ## Additional notes From 2e45f3badc64bfb700ec35b466bebe4c30fc52d4 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Wed, 14 Dec 2022 11:06:17 -0600 Subject: [PATCH 30/56] Updating packager as binutils changed version so that apt-get installs current versions --- .github/workflows/packager.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml index 5c59b1751a..7485aca976 100644 --- a/.github/workflows/packager.yml +++ b/.github/workflows/packager.yml @@ -377,6 +377,9 @@ jobs: - name: Removing systemd file run: rm -rf packaging/deb/bor/lib/systemd/system/bor.service + - name: Updating the apt-get + run: sudo apt-get update -y + - name: Adding requirements for cross compile run: sudo apt-get install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu @@ -730,4 +733,4 @@ jobs: prerelease: true files: | packaging/deb/bor**.deb - binary/bo** \ No newline at end of file + binary/bo** From b7ed9de45b4339e09ce50fb8990623059766bb6f Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 14 Dec 2022 14:18:56 -0800 Subject: [PATCH 31/56] Add state pruning to new CLI --- docs/cli/README.md | 4 + docs/cli/account_import.md | 2 +- docs/cli/account_list.md | 2 +- docs/cli/account_new.md | 2 +- docs/cli/server.md | 4 + docs/cli/snapshot.md | 5 + docs/cli/snapshot_prune-state.md | 21 ++++ internal/cli/command.go | 12 +- internal/cli/removedb.go | 7 +- internal/cli/server/config.go | 4 +- internal/cli/snapshot.go | 183 +++++++++++++++++++++++++++++++ 11 files changed, 237 insertions(+), 9 deletions(-) create mode 100644 docs/cli/snapshot.md create mode 100644 docs/cli/snapshot_prune-state.md create mode 100644 internal/cli/snapshot.go diff --git a/docs/cli/README.md b/docs/cli/README.md index bf37d6ef56..d52a4fd836 100644 --- a/docs/cli/README.md +++ b/docs/cli/README.md @@ -44,6 +44,10 @@ - [```server```](./server.md) +- [```snapshot```](./snapshot.md) + +- [```snapshot prune-state```](./snapshot_prune-state.md) + - [```status```](./status.md) - [```version```](./version.md) \ No newline at end of file diff --git a/docs/cli/account_import.md b/docs/cli/account_import.md index d7b02195bc..697d951fd3 100644 --- a/docs/cli/account_import.md +++ b/docs/cli/account_import.md @@ -6,4 +6,4 @@ The ```account import``` command imports an account in Json format to the Bor da - ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/account_list.md b/docs/cli/account_list.md index 61ebf9e776..a11b4a05e7 100644 --- a/docs/cli/account_list.md +++ b/docs/cli/account_list.md @@ -6,4 +6,4 @@ The `account list` command lists all the accounts in the Bor data directory. 
- ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/account_new.md b/docs/cli/account_new.md index dd62061ba0..bd47ecb371 100644 --- a/docs/cli/account_new.md +++ b/docs/cli/account_new.md @@ -6,4 +6,4 @@ The `account new` command creates a new local account file on the Bor data direc - ```datadir```: Path of the data directory to store information -- ```keystore```: Path of the data directory to store information \ No newline at end of file +- ```keystore```: Path of the data directory to store keys \ No newline at end of file diff --git a/docs/cli/server.md b/docs/cli/server.md index 777856de34..5bc0ff1024 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -32,6 +32,8 @@ The ```bor server``` command runs the Bor client. - ```bor.withoutheimdall```: Run without Heimdall service (for testing purpose) (default: false) +- ```bor.heimdallgRPC```: Address of Heimdall gRPC service + - ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port) - ```gpo.blocks```: Number of recent blocks to check for gas prices (default: 20) @@ -80,6 +82,8 @@ The ```bor server``` command runs the Bor client. - ```cache.preimages```: Enable recording the SHA3/keccak preimages of trie keys (default: false) +- ```cache.triesinmemory```: Number of block states (tries) to keep in memory (default = 128) (default: 128) + - ```txlookuplimit```: Number of recent blocks to maintain transactions index for (default: 2350000) ### JsonRPC Options diff --git a/docs/cli/snapshot.md b/docs/cli/snapshot.md new file mode 100644 index 0000000000..8a7a6b96e5 --- /dev/null +++ b/docs/cli/snapshot.md @@ -0,0 +1,5 @@ +# snapshot + +The ```snapshot``` command groups snapshot related actions: + +- [```snapshot prune-state```](./snapshot_prune-state.md): Joins the local client to another remote peer. \ No newline at end of file diff --git a/docs/cli/snapshot_prune-state.md b/docs/cli/snapshot_prune-state.md new file mode 100644 index 0000000000..73742faeac --- /dev/null +++ b/docs/cli/snapshot_prune-state.md @@ -0,0 +1,21 @@ +# Prune state + +The ```bor snapshot prune-state``` command will prune historical state data with the help of the state snapshot. All trie nodes and contract codes that do not belong to the specified version state will be deleted from the database. After pruning, only two version states are available: genesis and the specific one. 
+ +## Options + +- ```datadir```: Path of the data directory to store information + +- ```keystore```: Path of the data directory to store keys + +- ```datadir.ancient```: Path of the ancient data directory to store information + +- ```bloomfilter.size```: Size of the bloom filter (default: 2048) + +### Cache Options + +- ```cache```: Megabytes of memory allocated to internal caching (default: 1024) + +- ```cache.trie```: Percentage of cache memory allowance to use for trie caching (default: 25) + +- ```cache.trie.journal```: Path of the trie journal directory to store information (default: triecache) \ No newline at end of file diff --git a/internal/cli/command.go b/internal/cli/command.go index 93dca4cb3e..95f7776df6 100644 --- a/internal/cli/command.go +++ b/internal/cli/command.go @@ -189,6 +189,16 @@ func Commands() map[string]MarkDownCommandFactory { Meta2: meta2, }, nil }, + "snapshot": func() (MarkDownCommand, error) { + return &SnapshotCommand{ + UI: ui, + }, nil + }, + "snapshot prune-state": func() (MarkDownCommand, error) { + return &PruneStateCommand{ + Meta: meta, + }, nil + }, } } @@ -248,7 +258,7 @@ func (m *Meta) NewFlagSet(n string) *flagset.Flagset { f.StringFlag(&flagset.StringFlag{ Name: "keystore", Value: &m.keyStoreDir, - Usage: "Path of the data directory to store information", + Usage: "Path of the data directory to store keys", }) return f diff --git a/internal/cli/removedb.go b/internal/cli/removedb.go index 4a604086ed..224dae95d5 100644 --- a/internal/cli/removedb.go +++ b/internal/cli/removedb.go @@ -24,9 +24,10 @@ type RemoveDBCommand struct { } const ( - chaindataPath string = "chaindata" - ancientPath string = "ancient" - lightchaindataPath string = "lightchaindata" + chaindataPath string = "chaindata" + ancientPath string = "ancient" + trieCacheJournalPath string = "triecache" + lightchaindataPath string = "lightchaindata" ) // MarkDown implements cli.MarkDown interface diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 1ab0a83712..35d7e19359 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -686,7 +686,7 @@ func (c *Config) loadChain() error { //nolint:gocognit func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*ethconfig.Config, error) { - dbHandles, err := makeDatabaseHandles() + dbHandles, err := MakeDatabaseHandles() if err != nil { return nil, err } @@ -1075,7 +1075,7 @@ func (c *Config) Merge(cc ...*Config) error { return nil } -func makeDatabaseHandles() (int, error) { +func MakeDatabaseHandles() (int, error) { limit, err := fdlimit.Maximum() if err != nil { return -1, err diff --git a/internal/cli/snapshot.go b/internal/cli/snapshot.go new file mode 100644 index 0000000000..b48712d35f --- /dev/null +++ b/internal/cli/snapshot.go @@ -0,0 +1,183 @@ +// Snapshot related commands + +package cli + +import ( + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/internal/cli/flagset" + "github.com/ethereum/go-ethereum/internal/cli/server" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + + "github.com/mitchellh/cli" +) + +// SnapshotCommand is the command to group the snapshot commands +type SnapshotCommand struct { + UI cli.Ui +} + +// MarkDown implements cli.MarkDown interface +func (a *SnapshotCommand) MarkDown() string { + items := []string{ + "# snapshot", + "The ```snapshot``` command groups snapshot related actions:", + "- [```snapshot 
prune-state```](./snapshot_prune-state.md): Joins the local client to another remote peer.", + } + + return strings.Join(items, "\n\n") +} + +// Help implements the cli.Command interface +func (c *SnapshotCommand) Help() string { + return `Usage: bor snapshot + + This command groups snapshot related actions. + + Prune the state trie: + + $ bor snapshot prune-state` +} + +// Synopsis implements the cli.Command interface +func (c *SnapshotCommand) Synopsis() string { + return "Snapshot related commands" +} + +// Run implements the cli.Command interface +func (c *SnapshotCommand) Run(args []string) int { + return cli.RunResultHelp +} + +type PruneStateCommand struct { + *Meta + + datadirAncient string + cache uint64 + cacheTrie uint64 + cacheTrieJournal string + bloomfilterSize uint64 +} + +// MarkDown implements cli.MarkDown interface +func (c *PruneStateCommand) MarkDown() string { + items := []string{ + "# Prune state", + "The ```bor snapshot prune-state``` command will prune historical state data with the help of the state snapshot. All trie nodes and contract codes that do not belong to the specified version state will be deleted from the database. After pruning, only two version states are available: genesis and the specific one.", + c.Flags().MarkDown(), + } + + return strings.Join(items, "\n\n") +} + +// Help implements the cli.Command interface +func (c *PruneStateCommand) Help() string { + return `Usage: bor snapshot prune-state + + This command will prune state databases at the given datadir location` + c.Flags().Help() +} + +// Synopsis implements the cli.Command interface +func (c *PruneStateCommand) Synopsis() string { + return "Prune state databases" +} + +// Flags: datadir, datadir.ancient, cache.trie.journal, bloomfilter.size +func (c *PruneStateCommand) Flags() *flagset.Flagset { + flags := c.NewFlagSet("prune-state") + + flags.StringFlag(&flagset.StringFlag{ + Name: "datadir.ancient", + Value: &c.datadirAncient, + Usage: "Path of the ancient data directory to store information", + Default: "", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "cache", + Usage: "Megabytes of memory allocated to internal caching", + Value: &c.cache, + Default: 1024.0, + Group: "Cache", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "cache.trie", + Usage: "Percentage of cache memory allowance to use for trie caching", + Value: &c.cacheTrie, + Default: 25, + Group: "Cache", + }) + + flags.StringFlag(&flagset.StringFlag{ + Name: "cache.trie.journal", + Value: &c.cacheTrieJournal, + Usage: "Path of the trie journal directory to store information", + Default: trieCacheJournalPath, + Group: "Cache", + }) + + flags.Uint64Flag(&flagset.Uint64Flag{ + Name: "bloomfilter.size", + Value: &c.bloomfilterSize, + Usage: "Size of the bloom filter", + Default: 2048, + }) + + return flags +} + +// Run implements the cli.Command interface +func (c *PruneStateCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + datadir := c.dataDir + if datadir == "" { + c.UI.Error("datadir is required") + return 1 + } + + // Create the node + node, err := node.New(&node.Config{ + DataDir: datadir, + }) + + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + dbHandles, err := server.MakeDatabaseHandles() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + chaindb, err := node.OpenDatabaseWithFreezer(chaindataPath, int(c.cache), dbHandles, c.datadirAncient, "", false) + + if err != nil { + 
c.UI.Error(err.Error()) + return 1 + } + + pruner, err := pruner.NewPruner(chaindb, node.ResolvePath(""), node.ResolvePath(c.cacheTrieJournal), c.bloomfilterSize) + if err != nil { + log.Error("Failed to open snapshot tree", "err", err) + return 1 + } + + if err = pruner.Prune(common.Hash{}); err != nil { + log.Error("Failed to prune state", "err", err) + return 1 + } + + return 0 +} From 9b2407a58f8b94f9101869f850171731144b014f Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 14 Dec 2022 21:48:54 -0800 Subject: [PATCH 32/56] Minor wording fix in prune state description --- docs/cli/snapshot.md | 2 +- internal/cli/snapshot.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/cli/snapshot.md b/docs/cli/snapshot.md index 8a7a6b96e5..376220749b 100644 --- a/docs/cli/snapshot.md +++ b/docs/cli/snapshot.md @@ -2,4 +2,4 @@ The ```snapshot``` command groups snapshot related actions: -- [```snapshot prune-state```](./snapshot_prune-state.md): Joins the local client to another remote peer. \ No newline at end of file +- [```snapshot prune-state```](./snapshot_prune-state.md): Prune state databases at the given datadir location. \ No newline at end of file diff --git a/internal/cli/snapshot.go b/internal/cli/snapshot.go index b48712d35f..3c8e4ec97d 100644 --- a/internal/cli/snapshot.go +++ b/internal/cli/snapshot.go @@ -25,7 +25,7 @@ func (a *SnapshotCommand) MarkDown() string { items := []string{ "# snapshot", "The ```snapshot``` command groups snapshot related actions:", - "- [```snapshot prune-state```](./snapshot_prune-state.md): Joins the local client to another remote peer.", + "- [```snapshot prune-state```](./snapshot_prune-state.md): Prune state databases at the given datadir location.", } return strings.Join(items, "\n\n") From b7b15456fb09d00004f12c5a386ba3943dc4b27b Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Sun, 18 Dec 2022 17:27:49 -0600 Subject: [PATCH 33/56] Bumping control file versions --- packaging/templates/package_scripts/control | 2 +- packaging/templates/package_scripts/control.arm64 | 2 +- packaging/templates/package_scripts/control.profile.amd64 | 2 +- packaging/templates/package_scripts/control.profile.arm64 | 2 +- packaging/templates/package_scripts/control.validator | 2 +- packaging/templates/package_scripts/control.validator.arm64 | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index faa6df2712..03f040dd63 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index e0590d7c6a..4070f5b9e9 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 0be61b7427..fa39632654 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon diff --git 
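
Editorial aside: the flag walkthrough and `Run` flow above condense into the short standalone sketch below. It reuses only the APIs the patch itself calls (`node.New`, `OpenDatabaseWithFreezer`, `node.ResolvePath`, `pruner.NewPruner`, `Prune`); the hard-coded datadir, handle count, and defaults are illustrative assumptions, not values the command fixes.

```go
// prune_sketch.go: condensed, hypothetical version of the Run() flow above.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/pruner"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Roughly equivalent to: bor snapshot prune-state --datadir /var/lib/bor/data
	stack, err := node.New(&node.Config{DataDir: "/var/lib/bor/data"})
	if err != nil {
		log.Fatal(err)
	}

	// cache=1024 MB and a fixed handle budget stand in for the values the
	// command derives from its flags and from server.MakeDatabaseHandles().
	chaindb, err := stack.OpenDatabaseWithFreezer("chaindata", 1024, 512, "", "", false)
	if err != nil {
		log.Fatal(err)
	}

	// "triecache" is the default cache.trie.journal path and 2048 the default
	// bloomfilter.size, matching the flag defaults registered above.
	p, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath("triecache"), 2048)
	if err != nil {
		log.Fatal("failed to open snapshot tree: ", err)
	}

	// The command passes an empty root, letting the pruner resolve the target
	// state root from the snapshot itself.
	if err := p.Prune(common.Hash{}); err != nil {
		log.Fatal("failed to prune state: ", err)
	}
}
```
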
a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 29d8328b8a..5bfc993dae 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index 51b4d86ef5..efb2ee1593 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 6813fba631..7173b2c30e 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.1 +Version: 0.3.2 Section: develop Priority: standard Maintainer: Polygon From 59bdbef42a2f6cd7fa8d849879be8f791c839764 Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 4 Jan 2023 12:11:49 -0800 Subject: [PATCH 34/56] Mainnet Delhi fork --- builder/files/genesis-mainnet-v1.json | 7 +++++-- internal/cli/server/chains/mainnet.go | 7 +++++-- params/config.go | 7 +++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/builder/files/genesis-mainnet-v1.json b/builder/files/genesis-mainnet-v1.json index d3f0d02206..ca443529dc 100644 --- a/builder/files/genesis-mainnet-v1.json +++ b/builder/files/genesis-mainnet-v1.json @@ -15,14 +15,17 @@ "londonBlock": 23850000, "bor": { "jaipurBlock": 23850000, + "delhiBlock": 38174376, "period": { "0": 2 }, "producerDelay": { - "0": 6 + "0": 6, + "38174376": 4 }, "sprint": { - "0": 64 + "0": 64, + "38174376": 16 }, "backupMultiplier": { "0": 2 diff --git a/internal/cli/server/chains/mainnet.go b/internal/cli/server/chains/mainnet.go index 7aee9cd606..b52883c0b3 100644 --- a/internal/cli/server/chains/mainnet.go +++ b/internal/cli/server/chains/mainnet.go @@ -30,14 +30,17 @@ var mainnetBor = &Chain{ LondonBlock: big.NewInt(23850000), Bor: ¶ms.BorConfig{ JaipurBlock: big.NewInt(23850000), + DelhiBlock: big.NewInt(38174376), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ - "0": 6, + "0": 6, + "38174376": 4, }, Sprint: map[string]uint64{ - "0": 64, + "0": 64, + "38174376": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, diff --git a/params/config.go b/params/config.go index d97d6957fa..c645b2dc76 100644 --- a/params/config.go +++ b/params/config.go @@ -404,14 +404,17 @@ var ( LondonBlock: big.NewInt(23850000), Bor: &BorConfig{ JaipurBlock: big.NewInt(23850000), + DelhiBlock: big.NewInt(38174376), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ - "0": 6, + "0": 6, + "38174376": 4, }, Sprint: map[string]uint64{ - "0": 64, + "0": 64, + "38174376": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, From c12e8f2aa88350533ec586a0962179ba626b9434 Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 4 Jan 2023 12:12:19 -0800 Subject: [PATCH 35/56] Set version to stable --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 24f5cf4966..a415e0a9b9 100644 --- a/params/version.go +++ 
b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "beta" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From db7eb29d886da0ead09a45a59114e5ae432d6eeb Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 5 Jan 2023 10:41:27 +0530 Subject: [PATCH 36/56] change delhi hardfork block number --- builder/files/genesis-mainnet-v1.json | 6 +++--- internal/cli/server/chains/mainnet.go | 6 +++--- params/config.go | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/builder/files/genesis-mainnet-v1.json b/builder/files/genesis-mainnet-v1.json index ca443529dc..b01313bd57 100644 --- a/builder/files/genesis-mainnet-v1.json +++ b/builder/files/genesis-mainnet-v1.json @@ -15,17 +15,17 @@ "londonBlock": 23850000, "bor": { "jaipurBlock": 23850000, - "delhiBlock": 38174376, + "delhiBlock": 38189056, "period": { "0": 2 }, "producerDelay": { "0": 6, - "38174376": 4 + "38189056": 4 }, "sprint": { "0": 64, - "38174376": 16 + "38189056": 16 }, "backupMultiplier": { "0": 2 diff --git a/internal/cli/server/chains/mainnet.go b/internal/cli/server/chains/mainnet.go index b52883c0b3..b2570e9b2f 100644 --- a/internal/cli/server/chains/mainnet.go +++ b/internal/cli/server/chains/mainnet.go @@ -30,17 +30,17 @@ var mainnetBor = &Chain{ LondonBlock: big.NewInt(23850000), Bor: ¶ms.BorConfig{ JaipurBlock: big.NewInt(23850000), - DelhiBlock: big.NewInt(38174376), + DelhiBlock: big.NewInt(38189056), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ "0": 6, - "38174376": 4, + "38189056": 4, }, Sprint: map[string]uint64{ "0": 64, - "38174376": 16, + "38189056": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, diff --git a/params/config.go b/params/config.go index c645b2dc76..94729224bb 100644 --- a/params/config.go +++ b/params/config.go @@ -404,17 +404,17 @@ var ( LondonBlock: big.NewInt(23850000), Bor: &BorConfig{ JaipurBlock: big.NewInt(23850000), - DelhiBlock: big.NewInt(38174376), + DelhiBlock: big.NewInt(38189056), Period: map[string]uint64{ "0": 2, }, ProducerDelay: map[string]uint64{ "0": 6, - "38174376": 4, + "38189056": 4, }, Sprint: map[string]uint64{ "0": 64, - "38174376": 16, + "38189056": 16, }, BackupMultiplier: map[string]uint64{ "0": 2, From b480db16e87eb86ae8c0ada538b288bec072d3b2 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Sat, 7 Jan 2023 11:58:03 +0530 Subject: [PATCH 37/56] handle future chain import and skip peer drop (#650) * handle future chain import and skip peer drop * add block import metric * params: bump version to v0.3.3-stable --- core/blockchain.go | 6 +++++ core/forkchoice.go | 4 +--- core/forkchoice_test.go | 30 ++++++++++++------------ eth/downloader/downloader.go | 11 +++++---- eth/downloader/downloader_test.go | 4 ++-- eth/downloader/whitelist/service.go | 15 ++++++------ eth/downloader/whitelist/service_test.go | 18 +++++++++----- interfaces.go | 2 +- params/version.go | 2 +- 9 files changed, 53 insertions(+), 39 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go 
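
A note on the Delhi fork values above: in `BorConfig`, maps such as `sprint` and `producerDelay` are keyed by the block number at which a value takes effect, so from block 38189056 the sprint length drops from 64 to 16 and the producer delay from 6 to 4. The helper below is a hypothetical illustration of that lookup pattern, not Bor's actual implementation:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

// configValueAt resolves a block-keyed config map: the value whose key is the
// highest fork block not greater than `number` wins. Hypothetical helper for
// illustration only.
func configValueAt(m map[string]uint64, number uint64) uint64 {
	keys := make([]uint64, 0, len(m))
	for k := range m {
		n, _ := strconv.ParseUint(k, 10, 64)
		keys = append(keys, n)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	value := uint64(0)
	for _, k := range keys {
		if number >= k {
			value = m[strconv.FormatUint(k, 10)]
		}
	}
	return value
}

func main() {
	sprint := map[string]uint64{"0": 64, "38189056": 16}
	fmt.Println(configValueAt(sprint, 38189055)) // 64 (pre-Delhi sprint length)
	fmt.Println(configValueAt(sprint, 38189056)) // 16 (Delhi onwards)
}
```
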
index 8103e4a05e..74fd4bfeda 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -68,6 +68,7 @@ var ( snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil) snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil) + blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil) blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) @@ -1518,6 +1519,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) it := newInsertIterator(chain, results, bc.validator) block, err := it.next() + // Update the block import meter; it will just record chains we've received + // from other peers. (Note that the actual chain which gets imported would be + // quite low). + blockImportTimer.Mark(int64(len(headers))) + // Check the validity of incoming chain isValid, err1 := bc.forker.ValidateReorg(bc.CurrentBlock().Header(), headers) if err1 != nil { diff --git a/core/forkchoice.go b/core/forkchoice.go index 018afdfac9..7dd1a86307 100644 --- a/core/forkchoice.go +++ b/core/forkchoice.go @@ -114,9 +114,7 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, header *types.Header) (b func (f *ForkChoice) ValidateReorg(current *types.Header, chain []*types.Header) (bool, error) { // Call the bor chain validator service if f.validator != nil { - if isValid := f.validator.IsValidChain(current, chain); !isValid { - return false, nil - } + return f.validator.IsValidChain(current, chain) } return true, nil diff --git a/core/forkchoice_test.go b/core/forkchoice_test.go index 2e7b40d8ff..2493d4701f 100644 --- a/core/forkchoice_test.go +++ b/core/forkchoice_test.go @@ -13,7 +13,7 @@ import ( // chainValidatorFake is a mock for the chain validator service type chainValidatorFake struct { - validate func(currentHeader *types.Header, chain []*types.Header) bool + validate func(currentHeader *types.Header, chain []*types.Header) (bool, error) } // chainReaderFake is a mock for the chain reader service @@ -21,7 +21,7 @@ type chainReaderFake struct { getTd func(hash common.Hash, number uint64) *big.Int } -func newChainValidatorFake(validate func(currentHeader *types.Header, chain []*types.Header) bool) *chainValidatorFake { +func newChainValidatorFake(validate func(currentHeader *types.Header, chain []*types.Header) (bool, error)) *chainValidatorFake { return &chainValidatorFake{validate: validate} } @@ -46,18 +46,18 @@ func TestPastChainInsert(t *testing.T) { getTd := func(hash common.Hash, number uint64) *big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If canonical chain is of len 64 and we're importing a past chain from 54-64, then accept it if currentHeader.Number.Uint64() == uint64(64) && chain[0].Number.Uint64() == 55 && len(chain) == 10 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -116,18 +116,18 @@ func TestFutureChainInsert(t *testing.T) { getTd := func(hash 
common.Hash, number uint64) *big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If length of future chains > some value, they should not be accepted if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 10 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -174,18 +174,18 @@ func TestOverlappingChainInsert(t *testing.T) { getTd := func(hash common.Hash, number uint64) *big.Int { return big.NewInt(int64(number)) } - validate := func(currentHeader *types.Header, chain []*types.Header) bool { + validate := func(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Put all explicit conditions here // If canonical chain is empty and we're importing a chain of 64 blocks if currentHeader.Number.Uint64() == uint64(0) && len(chain) == 64 { - return true + return true, nil } // If length of chain is > some fixed value then don't accept it if currentHeader.Number.Uint64() == uint64(64) && len(chain) <= 20 { - return true + return true, nil } - return false + return false, nil } mockChainReader := newChainReaderFake(getTd) mockChainValidator := newChainValidatorFake(validate) @@ -227,7 +227,7 @@ func (c *chainReaderFake) GetTd(hash common.Hash, number uint64) *big.Int { func (w *chainValidatorFake) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) { return true, nil } -func (w *chainValidatorFake) IsValidChain(current *types.Header, headers []*types.Header) bool { +func (w *chainValidatorFake) IsValidChain(current *types.Header, headers []*types.Header) (bool, error) { return w.validate(current, headers) } func (w *chainValidatorFake) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f92bc652a6..135defc0b9 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -357,10 +357,6 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m return err // This is an expected fault, don't keep printing it in a spin-loop } - if errors.Is(err, whitelist.ErrNoRemoteCheckoint) { - log.Warn("Doesn't have remote checkpoint yet", "peer", id, "err", err) - } - log.Warn("Synchronisation failed, retrying", "peer", id, "err", err) return err @@ -1581,6 +1577,13 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { // of the blocks delivered from the downloader, and the indexing will be off. log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err) } + + // If we've received too long future chain error (from whitelisting service), + // return that as the root error and `errInvalidChain` as context. 
+ if errors.Is(err, whitelist.ErrLongFutureChain) { + return fmt.Errorf("%v: %w", errInvalidChain, err) + } + return fmt.Errorf("%w: %v", errInvalidChain, err) } return nil diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index d8765ef077..a9242fba5b 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -1426,8 +1426,8 @@ func (w *whitelistFake) IsValidPeer(_ *types.Header, _ func(number uint64, amoun return w.validate(w.count) } -func (w *whitelistFake) IsValidChain(current *types.Header, headers []*types.Header) bool { - return true +func (w *whitelistFake) IsValidChain(current *types.Header, headers []*types.Header) (bool, error) { + return true, nil } func (w *whitelistFake) ProcessCheckpoint(_ uint64, _ common.Hash) {} diff --git a/eth/downloader/whitelist/service.go b/eth/downloader/whitelist/service.go index 0e905cce28..3cb402c442 100644 --- a/eth/downloader/whitelist/service.go +++ b/eth/downloader/whitelist/service.go @@ -30,6 +30,7 @@ func NewService(maxCapacity uint) *Service { var ( ErrCheckpointMismatch = errors.New("checkpoint mismatch") + ErrLongFutureChain = errors.New("received future chain of unacceptable length") ErrNoRemoteCheckoint = errors.New("remote peer doesn't have a checkoint") ) @@ -74,16 +75,16 @@ func (w *Service) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber f // IsValidChain checks the validity of chain by comparing it // against the local checkpoint entries -func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Header) bool { +func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Header) (bool, error) { // Check if we have checkpoints to validate incoming chain in memory if len(w.checkpointWhitelist) == 0 { // We don't have any entries, no additional validation will be possible - return true + return true, nil } // Return if we've received empty chain if len(chain) == 0 { - return false + return false, nil } var ( @@ -95,7 +96,7 @@ func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Heade if chain[len(chain)-1].Number.Uint64() < oldestCheckpointNumber { // We have future whitelisted entries, so no additional validation will be possible // This case will occur when bor is in middle of sync, but heimdall is ahead/fully synced. 
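
On the error-wrapping detail in `importBlockResults` above: only the operand of `%w` is matchable via `errors.Is`, which is why the whitelist case flips the usual `%w: %v` order, keeping `ErrLongFutureChain` matchable while `errInvalidChain` becomes plain context. A minimal self-contained demonstration:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidChain    = errors.New("retrieved hash chain is invalid")
	errLongFutureChain = errors.New("received future chain of unacceptable length")
)

func main() {
	// Default path: errInvalidChain is the matchable (%w) error.
	e1 := fmt.Errorf("%w: %v", errInvalidChain, errors.New("bad header"))
	fmt.Println(errors.Is(e1, errInvalidChain)) // true

	// Whitelist path: the order is flipped so callers can match the root
	// cause, errLongFutureChain, via errors.Is.
	e2 := fmt.Errorf("%v: %w", errInvalidChain, errLongFutureChain)
	fmt.Println(errors.Is(e2, errLongFutureChain)) // true
	fmt.Println(errors.Is(e2, errInvalidChain))    // false: context only now
}
```
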
- return true + return true, nil } // Split the chain into past and future chain @@ -109,18 +110,18 @@ func (w *Service) IsValidChain(currentHeader *types.Header, chain []*types.Heade // Don't accept future chain of unacceptable length (from current block) if len(futureChain)+offset > int(w.checkpointInterval) { - return false + return false, ErrLongFutureChain } // Iterate over the chain and validate against the last checkpoint // It will handle all cases where the incoming chain has atleast one checkpoint for i := len(pastChain) - 1; i >= 0; i-- { if _, ok := w.checkpointWhitelist[pastChain[i].Number.Uint64()]; ok { - return pastChain[i].Hash() == w.checkpointWhitelist[pastChain[i].Number.Uint64()] + return pastChain[i].Hash() == w.checkpointWhitelist[pastChain[i].Number.Uint64()], nil } } - return true + return true, nil } func splitChain(current uint64, chain []*types.Header) ([]*types.Header, []*types.Header) { diff --git a/eth/downloader/whitelist/service_test.go b/eth/downloader/whitelist/service_test.go index c21490d125..df23df2fc9 100644 --- a/eth/downloader/whitelist/service_test.go +++ b/eth/downloader/whitelist/service_test.go @@ -119,8 +119,9 @@ func TestIsValidChain(t *testing.T) { s := NewMockService(10, 10) chainA := createMockChain(1, 20) // A1->A2...A19->A20 // case1: no checkpoint whitelist, should consider the chain as valid - res := s.IsValidChain(nil, chainA) + res, err := s.IsValidChain(nil, chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") tempChain := createMockChain(21, 22) // A21->A22 @@ -132,8 +133,9 @@ func TestIsValidChain(t *testing.T) { // case2: We're behind the oldest whitelisted block entry, should consider // the chain as valid as we're still far behind the latest blocks - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // Clear checkpoint whitelist and add blocks A5 and A15 in whitelist s.PurgeCheckpointWhitelist() @@ -144,8 +146,9 @@ func TestIsValidChain(t *testing.T) { // case3: Try importing a past chain having valid checkpoint, should // consider the chain as valid - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // Clear checkpoint whitelist and mock blocks in whitelist tempChain = createMockChain(20, 20) // A20 @@ -156,22 +159,25 @@ func TestIsValidChain(t *testing.T) { require.Equal(t, s.length(), 1, "expected 1 items in whitelist") // case4: Try importing a past chain having invalid checkpoint - res = s.IsValidChain(chainA[len(chainA)-1], chainA) + res, _ = s.IsValidChain(chainA[len(chainA)-1], chainA) require.Equal(t, res, false, "expected chain to be invalid") + // Not checking error here because we return nil in case of checkpoint mismatch // create a future chain to be imported of length <= `checkpointInterval` chainB := createMockChain(21, 30) // B21->B22...B29->B30 // case5: Try importing a future chain of acceptable length - res = s.IsValidChain(chainA[len(chainA)-1], chainB) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainB) require.Equal(t, res, true, "expected chain to be valid") + require.Equal(t, err, nil, "expected error to be nil") // create a future chain to be imported of length > 
`checkpointInterval` chainB = createMockChain(21, 40) // C21->C22...C39->C40 // case5: Try importing a future chain of unacceptable length - res = s.IsValidChain(chainA[len(chainA)-1], chainB) + res, err = s.IsValidChain(chainA[len(chainA)-1], chainB) require.Equal(t, res, false, "expected chain to be invalid") + require.Equal(t, err, ErrLongFutureChain, "expected error") } func TestSplitChain(t *testing.T) { diff --git a/interfaces.go b/interfaces.go index ff6d80b1ec..88a173adea 100644 --- a/interfaces.go +++ b/interfaces.go @@ -242,7 +242,7 @@ type StateSyncFilter struct { // interface for whitelist service type ChainValidator interface { IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) - IsValidChain(currentHeader *types.Header, chain []*types.Header) bool + IsValidChain(currentHeader *types.Header, chain []*types.Header) (bool, error) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) GetCheckpointWhitelist() map[uint64]common.Hash PurgeCheckpointWhitelist() diff --git a/params/version.go b/params/version.go index a415e0a9b9..199e49095f 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release + VersionPatch = 3 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) From dcdac12c449c4fee6977ac544b724d6c53b21d98 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Mon, 9 Jan 2023 23:00:49 +0530 Subject: [PATCH 38/56] Bump bor version in control files for v0.3.3 mainnet release --- packaging/templates/package_scripts/control | 2 +- packaging/templates/package_scripts/control.arm64 | 2 +- packaging/templates/package_scripts/control.profile.amd64 | 2 +- packaging/templates/package_scripts/control.profile.arm64 | 2 +- packaging/templates/package_scripts/control.validator | 2 +- packaging/templates/package_scripts/control.validator.arm64 | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index 03f040dd63..cb62165a5e 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 4070f5b9e9..56276cb43a 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index fa39632654..4ddd8424ff 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 
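
To make the widened contract concrete, here is a minimal no-op implementation of the updated `ChainValidator` interface from `interfaces.go` above, a sketch in the spirit of the test fakes, useful as a stub now that `IsValidChain` reports `(bool, error)`:

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// noopValidator accepts every peer and chain; a hypothetical test stub.
type noopValidator struct{}

func (noopValidator) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) {
	return true, nil
}

func (noopValidator) IsValidChain(currentHeader *types.Header, chain []*types.Header) (bool, error) {
	// A real validator returns ErrLongFutureChain here when the future
	// chain exceeds the acceptable length.
	return true, nil
}

func (noopValidator) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {}
func (noopValidator) GetCheckpointWhitelist() map[uint64]common.Hash                 { return nil }
func (noopValidator) PurgeCheckpointWhitelist()                                      {}

func main() {}
```
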
5bfc993dae..9f9301c925 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index efb2ee1593..d43250c891 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 7173b2c30e..5a50f8cb39 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.2 +Version: 0.3.3 Section: develop Priority: standard Maintainer: Polygon From e4dd2ee1ac591c4363ef4c9bc19823e2653660a4 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Thu, 19 Jan 2023 14:40:24 +0530 Subject: [PATCH 39/56] Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs --- builder/files/config.toml | 2 ++ docs/cli/example_config.toml | 2 ++ docs/cli/server.md | 4 ++++ eth/api_backend.go | 4 ++++ eth/ethconfig/config.go | 14 +++++++---- eth/tracers/api_test.go | 4 ++++ internal/cli/server/config.go | 23 ++++++++++++++----- internal/cli/server/flags.go | 12 ++++++++++ internal/ethapi/api.go | 5 ++++ internal/ethapi/backend.go | 9 ++++---- les/api_backend.go | 4 ++++ node/config.go | 3 +++ node/node.go | 11 +++++---- node/rpcstack.go | 8 +++++-- node/rpcstack_test.go | 2 +- .../templates/mainnet-v1/archive/config.toml | 2 ++ .../mainnet-v1/sentry/sentry/bor/config.toml | 2 ++ .../sentry/validator/bor/config.toml | 2 ++ .../mainnet-v1/without-sentry/bor/config.toml | 2 ++ .../templates/testnet-v4/archive/config.toml | 2 ++ .../testnet-v4/sentry/sentry/bor/config.toml | 2 ++ .../sentry/validator/bor/config.toml | 2 ++ .../testnet-v4/without-sentry/bor/config.toml | 2 ++ rpc/server.go | 22 ++++++++++++++++-- 24 files changed, 121 insertions(+), 24 deletions(-) diff --git a/builder/files/config.toml b/builder/files/config.toml index 0f2919807f..f577706f7b 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -8,6 +8,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "/var/lib/bor/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index 64ef60ae12..6bf58a8361 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -8,6 +8,8 @@ log-level = "INFO" # Set log level for the server datadir = "var/lib/bor" # Path of the data directory to store information ancient = "" # Data directory for ancient chain segments (default = inside chaindata) keystore = "" # Path of the directory where keystores are located +"rpc.batchlimit" = 100 # Maximum number of messages in a batch 
(default=100, use 0 for no limits) +"rpc.returndatalimit" = 100000 # Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) syncmode = "full" # Blockchain sync mode (only "full" sync supported) gcmode = "full" # Blockchain garbage collection mode ("full", "archive") snapshot = true # Enables the snapshot-database mode diff --git a/docs/cli/server.md b/docs/cli/server.md index 5bc0ff1024..caf10070c0 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -16,6 +16,10 @@ The ```bor server``` command runs the Bor client. - ```keystore```: Path of the directory where keystores are located +- ```rpc.batchlimit```: Maximum number of messages in a batch (default=100, use 0 for no limits) (default: 100) + +- ```rpc.returndatalimit```: Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) (default: 100000) + - ```config```: File for the config file - ```syncmode```: Blockchain sync mode (only "full" sync supported) (default: full) diff --git a/eth/api_backend.go b/eth/api_backend.go index c33f3cf6f2..60aea7527e 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -317,6 +317,10 @@ func (b *EthAPIBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } +func (b *EthAPIBackend) RPCRpcReturnDataLimit() uint64 { + return b.eth.config.RPCReturnDataLimit +} + func (b *EthAPIBackend) RPCEVMTimeout() time.Duration { return b.eth.config.RPCEVMTimeout } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index c9272758ab..68cf733cc6 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -94,11 +94,12 @@ var Defaults = Config{ GasPrice: big.NewInt(params.GWei), Recommit: 125 * time.Second, }, - TxPool: core.DefaultTxPoolConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 5, // 5 matic + TxPool: core.DefaultTxPoolConfig, + RPCGasCap: 50000000, + RPCReturnDataLimit: 100000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 5, // 5 matic } func init() { @@ -199,6 +200,9 @@ type Config struct { // RPCGasCap is the global gas cap for eth-call variants. RPCGasCap uint64 + // Maximum size (in bytes) a result of an rpc request could have + RPCReturnDataLimit uint64 + // RPCEVMTimeout is the global timeout for eth-call. 
RPCEVMTimeout time.Duration diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 6dd94e4870..d394e4fbe3 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -126,6 +126,10 @@ func (b *testBackend) RPCGasCap() uint64 { return 25000000 } +func (b *testBackend) RPCRpcReturnDataLimit() uint64 { + return 100000 +} + func (b *testBackend) ChainConfig() *params.ChainConfig { return b.chainConfig } diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 35d7e19359..e9321bfa01 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -60,6 +60,12 @@ type Config struct { // KeyStoreDir is the directory to store keystores KeyStoreDir string `hcl:"keystore,optional" toml:"keystore,optional"` + // Maximum number of messages in a batch (default=100, use 0 for no limits) + RPCBatchLimit uint64 `hcl:"rpc.batchlimit,optional" toml:"rpc.batchlimit,optional"` + + // Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) + RPCReturnDataLimit uint64 `hcl:"rpc.returndatalimit,optional" toml:"rpc.returndatalimit,optional"` + // SyncMode selects the sync protocol SyncMode string `hcl:"syncmode,optional" toml:"syncmode,optional"` @@ -435,12 +441,14 @@ type DeveloperConfig struct { func DefaultConfig() *Config { return &Config{ - Chain: "mainnet", - Identity: Hostname(), - RequiredBlocks: map[string]string{}, - LogLevel: "INFO", - DataDir: DefaultDataDir(), - Ancient: "", + Chain: "mainnet", + Identity: Hostname(), + RequiredBlocks: map[string]string{}, + LogLevel: "INFO", + DataDir: DefaultDataDir(), + Ancient: "", + RPCBatchLimit: 100, + RPCReturnDataLimit: 100000, P2P: &P2PConfig{ MaxPeers: 50, MaxPendPeers: 50, @@ -936,6 +944,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.BorLogs = c.BorLogs n.DatabaseHandles = dbHandles + n.RPCReturnDataLimit = c.RPCReturnDataLimit + if c.Ancient != "" { n.DatabaseFreezer = c.Ancient } @@ -986,6 +996,7 @@ func (c *Config) buildNode() (*node.Config, error) { WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout, IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout, }, + RPCBatchLimit: c.RPCBatchLimit, } // dev mode diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 822bb81aef..22d5b73485 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -46,6 +46,18 @@ func (c *Command) Flags() *flagset.Flagset { Usage: "Path of the directory where keystores are located", Value: &c.cliConfig.KeyStoreDir, }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "rpc.batchlimit", + Usage: "Maximum number of messages in a batch (default=100, use 0 for no limits)", + Value: &c.cliConfig.RPCBatchLimit, + Default: c.cliConfig.RPCBatchLimit, + }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "rpc.returndatalimit", + Usage: "Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits)", + Value: &c.cliConfig.RPCReturnDataLimit, + Default: c.cliConfig.RPCReturnDataLimit, + }) f.StringFlag(&flagset.StringFlag{ Name: "config", Usage: "File for the config file", diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 2fd148c7c6..372d630c07 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1078,6 +1078,11 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, bl if err != nil { return nil, err } + + if int(s.b.RPCRpcReturnDataLimit()) > 0 && len(result.ReturnData) > int(s.b.RPCRpcReturnDataLimit()) { + return nil, 
fmt.Errorf("call returned result of length %d exceeding limit %d", len(result.ReturnData), int(s.b.RPCRpcReturnDataLimit())) + } + // If the result contains a revert reason, try to unpack and return it. if len(result.Revert()) > 0 { return nil, newRevertError(result) diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 1287640b83..14ddbba70e 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -48,10 +48,11 @@ type Backend interface { ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection - RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection - RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs - UnprotectedAllowed() bool // allows only for EIP155 transactions. + RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCRpcReturnDataLimit() uint64 // Maximum size (in bytes) a result of an rpc request could have + RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs + UnprotectedAllowed() bool // allows only for EIP155 transactions. // Blockchain API SetHead(number uint64) diff --git a/les/api_backend.go b/les/api_backend.go index c716a3967f..786e77ed46 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -294,6 +294,10 @@ func (b *LesApiBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } +func (b *LesApiBackend) RPCRpcReturnDataLimit() uint64 { + return b.eth.config.RPCReturnDataLimit +} + func (b *LesApiBackend) RPCEVMTimeout() time.Duration { return b.eth.config.RPCEVMTimeout } diff --git a/node/config.go b/node/config.go index 853190c95f..495e4c5fcb 100644 --- a/node/config.go +++ b/node/config.go @@ -204,6 +204,9 @@ type Config struct { // JWTSecret is the hex-encoded jwt secret. JWTSecret string `toml:",omitempty"` + + // Maximum number of messages in a batch + RPCBatchLimit uint64 `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/node.go b/node/node.go index e12bcf6675..94fcfb8cbf 100644 --- a/node/node.go +++ b/node/node.go @@ -113,6 +113,9 @@ func New(conf *Config) (*Node, error) { databases: make(map[*closeTrackingDB]struct{}), } + // set RPC batch limit + node.inprocHandler.SetRPCBatchLimit(conf.RPCBatchLimit) + // Register built-in APIs. node.rpcAPIs = append(node.rpcAPIs, node.apis()...) @@ -153,10 +156,10 @@ func New(conf *Config) (*Node, error) { } // Configure RPC servers. 
- node.http = newHTTPServer(node.log, conf.HTTPTimeouts) - node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts) - node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) - node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) + node.http = newHTTPServer(node.log, conf.HTTPTimeouts, conf.RPCBatchLimit) + node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts, conf.RPCBatchLimit) + node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts, conf.RPCBatchLimit) + node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts, conf.RPCBatchLimit) node.ipc = newIPCServer(node.log, conf.IPCEndpoint()) return node, nil diff --git a/node/rpcstack.go b/node/rpcstack.go index eabf1dcae7..f2c31ecb08 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -81,10 +81,12 @@ type httpServer struct { port int handlerNames map[string]string + + RPCBatchLimit uint64 } -func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer { - h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)} +func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts, rpcBatchLimit uint64) *httpServer { + h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string), RPCBatchLimit: rpcBatchLimit} h.httpHandler.Store((*rpcHandler)(nil)) h.wsHandler.Store((*rpcHandler)(nil)) @@ -283,6 +285,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error { // Create RPC server and handler. srv := rpc.NewServer() + srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err } @@ -314,6 +317,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error { } // Create RPC server and handler. srv := rpc.NewServer() + srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err } diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 60fcab5a90..49db8435ac 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -234,7 +234,7 @@ func Test_checkPath(t *testing.T) { func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsConfig) *httpServer { t.Helper() - srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts) + srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts, 100) assert.NoError(t, srv.enableRPC(nil, *conf)) if ws { assert.NoError(t, srv.enableWS(nil, *wsConf)) diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 9eaafd3bee..8e98736196 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -4,6 +4,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" gcmode = "archive" # snapshot = true diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 94dd6634f0..853b2ed313 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -4,6 +4,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml 
b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 9c55683c96..284445113c 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -6,6 +6,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 573f1f3be8..22361b64bc 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -6,6 +6,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 1762fdf117..992aff0c68 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -4,6 +4,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" gcmode = "archive" # snapshot = true diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index ae191cec2c..1a335a42a3 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -4,6 +4,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index b441cc137d..798375364e 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -6,6 +6,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 05a254e184..47a3053b58 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -6,6 +6,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true diff --git a/rpc/server.go b/rpc/server.go index babc5688e2..96c3861d66 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -18,6 +18,7 @@ package rpc import ( "context" + "fmt" "io" "sync/atomic" @@ -47,6 +48,8 @@ type Server struct { idgen func() ID run int32 codecs mapset.Set + + BatchLimit uint64 } // NewServer creates a new server instance with no registered handlers. 
@@ -59,6 +62,10 @@ func NewServer() *Server { return server } +func (s *Server) SetRPCBatchLimit(batchLimit uint64) { + s.BatchLimit = batchLimit +} + // RegisterName creates a service for the given receiver type under the given name. When no // methods on the given receiver match the criteria to be either a RPC method or a // subscription an error is returned. Otherwise a new service is created and added to the @@ -105,12 +112,23 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { reqs, batch, err := codec.readBatch() if err != nil { if err != io.EOF { - codec.writeJSON(ctx, errorMessage(&invalidMessageError{"parse error"})) + if err1 := codec.writeJSON(ctx, err); err1 != nil { + log.Warn("WARNING - error in reading batch", "err", err1) + return + } } return } + if batch { - h.handleBatch(reqs) + if s.BatchLimit > 0 && len(reqs) > int(s.BatchLimit) { + if err1 := codec.writeJSON(ctx, errorMessage(fmt.Errorf("batch limit %d exceeded: %d requests given", s.BatchLimit, len(reqs)))); err1 != nil { + log.Warn("WARNING - requests given exceeds the batch limit", "err", err1) + log.Debug("batch limit %d exceeded: %d requests given", s.BatchLimit, len(reqs)) + } + } else { + h.handleBatch(reqs) + } } else { h.handleMsg(reqs[0]) } From 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9 Mon Sep 17 00:00:00 2001 From: SHIVAM SHARMA Date: Thu, 19 Jan 2023 14:41:11 +0530 Subject: [PATCH 40/56] chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs --- builder/files/config.toml | 2 +- docs/cli/example_config.toml | 2 +- internal/cli/server/config.go | 2 +- packaging/templates/mainnet-v1/archive/config.toml | 2 +- packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml | 2 +- packaging/templates/mainnet-v1/sentry/validator/bor/config.toml | 2 +- packaging/templates/mainnet-v1/without-sentry/bor/config.toml | 2 +- packaging/templates/testnet-v4/archive/config.toml | 2 +- packaging/templates/testnet-v4/sentry/sentry/bor/config.toml | 2 +- packaging/templates/testnet-v4/sentry/validator/bor/config.toml | 2 +- packaging/templates/testnet-v4/without-sentry/bor/config.toml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/builder/files/config.toml b/builder/files/config.toml index f577706f7b..aa6ca0f208 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -129,7 +129,7 @@ syncmode = "full" # preimages = false # txlookuplimit = 2350000 # triesinmemory = 128 - # timeout = "1h0m0s" + # timeout = "10m0s" [accounts] # allow-insecure-unlock = true diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index 6bf58a8361..2a768e6bd2 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -132,7 +132,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec preimages = false # Enable recording the SHA3/keccak preimages of trie keys txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) triesinmemory = 128 # Number of block states (tries) to keep in memory - timeout = "1h0m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory + timeout = "10m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory [accounts] unlock = [] # Comma separated list of accounts to unlock diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index e9321bfa01..dedab303a6 100644 --- a/internal/cli/server/config.go 
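
To see the batch limit from a client's perspective: with the default `rpc.batchlimit = 100` wired through `newHTTPServer` above, a JSON-RPC batch of 101 requests should be answered with a single error object rather than per-request results. A hedged sketch, assuming a locally reachable bor HTTP-RPC endpoint:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Build a batch of 101 eth_blockNumber calls, one past the default limit.
	type req struct {
		JSONRPC string        `json:"jsonrpc"`
		ID      int           `json:"id"`
		Method  string        `json:"method"`
		Params  []interface{} `json:"params"`
	}

	batch := make([]req, 101)
	for i := range batch {
		batch[i] = req{JSONRPC: "2.0", ID: i + 1, Method: "eth_blockNumber", Params: []interface{}{}}
	}

	body, _ := json.Marshal(batch)

	// Assumes a bor node serving HTTP-RPC locally; adjust the URL as needed.
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("rpc endpoint not reachable:", err)
		return
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	// Expect a single error, e.g. "batch limit 100 exceeded: 101 requests given".
	fmt.Println(string(out))
}
```
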
+++ b/internal/cli/server/config.go @@ -565,7 +565,7 @@ func DefaultConfig() *Config { Preimages: false, TxLookupLimit: 2350000, TriesInMemory: 128, - TrieTimeout: 60 * time.Minute, + TrieTimeout: 10 * time.Minute, }, Accounts: &AccountsConfig{ Unlock: []string{}, diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 8e98736196..181502a92e 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -120,7 +120,7 @@ gcmode = "archive" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "1h0m0s" + # timeout = "10m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 853b2ed313..ecb38b4609 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -120,7 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "1h0m0s" + # timeout = "10m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 284445113c..508df5f6b9 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "1h0m0s" + # timeout = "10m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 22361b64bc..3fa20ef121 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 -# timeout = "1h0m0s" +# timeout = "10m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 992aff0c68..110be1a14c 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -120,7 +120,7 @@ gcmode = "archive" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "1h0m0s" + # timeout = "10m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 1a335a42a3..8814be618f 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -120,7 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "1h0m0s" + # timeout = "10m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 798375364e..69b6c1214e 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 
- # timeout = "1h0m0s" + # timeout = "10m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 47a3053b58..bc3b5c0723 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 -# timeout = "1h0m0s" +# timeout = "10m0s" [accounts] allow-insecure-unlock = true From 38145fae21acaf142f51786e4c33a253419edae6 Mon Sep 17 00:00:00 2001 From: SHIVAM SHARMA Date: Thu, 19 Jan 2023 14:41:38 +0530 Subject: [PATCH 41/56] internal/cli/server : fix : added triesInMemory in config (#691) --- internal/cli/server/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index dedab303a6..e61364b075 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -891,6 +891,7 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.Preimages = c.Cache.Preimages n.TxLookupLimit = c.Cache.TxLookupLimit n.TrieTimeout = c.Cache.TrieTimeout + n.TriesInMemory = c.Cache.TriesInMemory } n.RPCGasCap = c.JsonRPC.GasCap From 168ec6e8b0d68672bd823bc016d98a516393aed1 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Thu, 19 Jan 2023 14:51:20 +0530 Subject: [PATCH 42/56] changed version from 0.3.3 to 0.3.4-beta (#693) --- packaging/templates/package_scripts/control | 2 +- packaging/templates/package_scripts/control.arm64 | 2 +- packaging/templates/package_scripts/control.profile.amd64 | 2 +- packaging/templates/package_scripts/control.profile.arm64 | 2 +- packaging/templates/package_scripts/control.validator | 2 +- .../templates/package_scripts/control.validator.arm64 | 2 +- params/version.go | 8 ++++---- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index cb62165a5e..b487371358 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 56276cb43a..684088051f 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 4ddd8424ff..48ad7831d0 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 9f9301c925..6cc46bdbf5 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: 
standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index d43250c891..dd28aae21d 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 5a50f8cb39..80f62ed71d 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.4-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/params/version.go b/params/version.go index 199e49095f..5d59c13d71 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 4 // Patch version component of the current release + VersionMeta = "beta" // Version metadata to append to the version string ) // Version holds the textual version string. From cbbc27c27a957eff4276581dadc7d489b3f67806 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Wed, 25 Jan 2023 17:13:16 +0530 Subject: [PATCH 43/56] fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil --- consensus/bor/bor.go | 2 +- eth/filters/bor_api.go | 7 +++---- internal/cli/debug_pprof.go | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 1b4ddec45d..5b32263762 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -1161,7 +1161,7 @@ func (c *Bor) CommitStates( processStart := time.Now() totalGas := 0 /// limit on gas for state sync per block chainID := c.chainConfig.ChainID.String() - stateSyncs := make([]*types.StateSyncData, len(eventRecords)) + stateSyncs := make([]*types.StateSyncData, 0, len(eventRecords)) var gasUsed uint64 diff --git a/eth/filters/bor_api.go b/eth/filters/bor_api.go index db13c95959..aeb370d6be 100644 --- a/eth/filters/bor_api.go +++ b/eth/filters/bor_api.go @@ -1,7 +1,6 @@ package filters import ( - "bytes" "context" "errors" @@ -19,7 +18,7 @@ func (api *PublicFilterAPI) SetChainConfig(chainConfig *params.ChainConfig) { func (api *PublicFilterAPI) GetBorBlockLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { if api.chainConfig == nil { - return nil, errors.New("No chain config found. Proper PublicFilterAPI initialization required") + return nil, errors.New("no chain config found. 
Proper PublicFilterAPI initialization required") } // get sprint from bor config @@ -67,8 +66,8 @@ func (api *PublicFilterAPI) NewDeposits(ctx context.Context, crit ethereum.State for { select { case h := <-stateSyncData: - if crit.ID == h.ID || bytes.Compare(crit.Contract.Bytes(), h.Contract.Bytes()) == 0 || - (crit.ID == 0 && crit.Contract == common.Address{}) { + if h != nil && (crit.ID == h.ID || crit.Contract == h.Contract || + (crit.ID == 0 && crit.Contract == common.Address{})) { notifier.Notify(rpcSub.ID, h) } case <-rpcSub.Err(): diff --git a/internal/cli/debug_pprof.go b/internal/cli/debug_pprof.go index 01698719e5..4cbe989408 100644 --- a/internal/cli/debug_pprof.go +++ b/internal/cli/debug_pprof.go @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "google.golang.org/grpc" empty "google.golang.org/protobuf/types/known/emptypb" "github.com/ethereum/go-ethereum/internal/cli/flagset" @@ -103,7 +104,7 @@ func (d *DebugPprofCommand) Run(args []string) int { req.Profile = profile } - stream, err := clt.DebugPprof(ctx, req) + stream, err := clt.DebugPprof(ctx, req, grpc.MaxCallRecvMsgSize(1024*1024*1024)) if err != nil { return err From a533ffb2892c5296c61fd1bd4ebb1820b6326003 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Thu, 2 Feb 2023 14:21:45 +0530 Subject: [PATCH 44/56] core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus --- core/blockchain_reader.go | 4 ++++ tests/bor/bor_test.go | 13 +++++++++++++ tests/bor/helper.go | 2 +- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index f61f930496..8405d4a54c 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -422,6 +422,10 @@ func (bc *BlockChain) SetStateSync(stateData []*types.StateSyncData) { bc.stateSyncData = stateData } +func (bc *BlockChain) GetStateSync() []*types.StateSyncData { + return bc.stateSyncData +} + // SubscribeStateSyncEvent registers a subscription of StateSyncEvent. 
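Note on the state-sync slice fix: the GetStateSync accessor added here exists so the new test below can inspect what CommitStates stored, and what it stored used to be wrong. In PATCH 43 above, make([]*types.StateSyncData, len(eventRecords)) allocated a slice that already held len(eventRecords) nil pointers, so every append landed after a run of nils; make([]*types.StateSyncData, 0, len(eventRecords)) keeps the capacity hint but starts empty. A minimal, self-contained sketch of the pitfall (the one-field StateSyncData here is a stand-in for the real type):

    package main

    import "fmt"

    type StateSyncData struct{ ID uint64 }

    func main() {
        ids := []uint64{1, 2, 3}

        // Buggy: length 3 means append starts after three nil pointers.
        buggy := make([]*StateSyncData, len(ids))
        for _, id := range ids {
            buggy = append(buggy, &StateSyncData{ID: id})
        }
        fmt.Println(len(buggy), buggy[0] == nil) // prints: 6 true

        // Fixed: length 0 with capacity 3 fills from the front.
        fixed := make([]*StateSyncData, 0, len(ids))
        for _, id := range ids {
            fixed = append(fixed, &StateSyncData{ID: id})
        }
        fmt.Println(len(fixed), fixed[0] == nil) // prints: 3 false
    }

Those leading nil entries are also what the h != nil guard added to NewDeposits in eth/filters/bor_api.go defends against.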
func (bc *BlockChain) SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription { return bc.scope.Track(bc.stateSyncFeed.Subscribe(ch)) diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index d059956e6a..2dc20a915e 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -7,6 +7,7 @@ import ( "context" "crypto/ecdsa" "encoding/hex" + "fmt" "io" "math/big" "os" @@ -458,9 +459,21 @@ func TestFetchStateSyncEvents(t *testing.T) { _bor.SetHeimdallClient(h) block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) + + // Validate the state sync transactions set by consensus + validateStateSyncEvents(t, eventRecords, chain.GetStateSync()) + insertNewBlock(t, chain, block) } +func validateStateSyncEvents(t *testing.T, expected []*clerk.EventRecordWithTime, got []*types.StateSyncData) { + require.Equal(t, len(expected), len(got), "number of state sync events should be equal") + + for i := 0; i < len(expected); i++ { + require.Equal(t, expected[i].ID, got[i].ID, fmt.Sprintf("state sync ids should be equal - index: %d, expected: %d, got: %d", i, expected[i].ID, got[i].ID)) + } +} + func TestFetchStateSyncEvents_2(t *testing.T) { init := buildEthereumInstance(t, rawdb.NewMemoryDatabase()) chain := init.ethereum.BlockChain() diff --git a/tests/bor/helper.go b/tests/bor/helper.go index 64d5c299ac..e28076a3b1 100644 --- a/tests/bor/helper.go +++ b/tests/bor/helper.go @@ -360,7 +360,7 @@ func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) [ *events[0] = event for i := 1; i < count; i++ { - event.ID = uint64(i) + event.ID = uint64(i + 1) event.Time = event.Time.Add(1 * time.Second) events[i] = &clerk.EventRecordWithTime{} *events[i] = event From 2be6ae43a1eb828ce24cd0ddd5894f40b42fc9d4 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 3 Feb 2023 15:42:12 +0530 Subject: [PATCH 45/56] Arpit/temp bor sync (#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. 
* Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> --- core/blockchain.go | 2 +- core/bor_blockchain.go | 2 +- core/rawdb/bor_receipt.go | 64 ++++++++++++++----------------------- eth/filters/test_backend.go | 2 +- eth/tracers/api.go | 2 +- internal/ethapi/api.go | 24 +++++++++----- params/config.go | 4 +++ rpc/server.go | 3 ++ 8 files changed, 51 insertions(+), 52 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 74fd4bfeda..cbcf02fef4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2059,7 +2059,7 @@ func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) // Append bor receipt - borReceipt := rawdb.ReadBorReceipt(bc.db, hash, *number) + borReceipt := rawdb.ReadBorReceipt(bc.db, hash, *number, bc.chainConfig) if borReceipt != nil { receipts = append(receipts, borReceipt) } diff --git a/core/bor_blockchain.go b/core/bor_blockchain.go index ae2cdf3c6f..49973421bd 100644 --- a/core/bor_blockchain.go +++ b/core/bor_blockchain.go @@ -19,7 +19,7 @@ func (bc *BlockChain) GetBorReceiptByHash(hash common.Hash) *types.Receipt { } // read bor reciept by hash and number - receipt := rawdb.ReadBorReceipt(bc.db, hash, *number) + receipt := rawdb.ReadBorReceipt(bc.db, hash, *number, bc.chainConfig) if receipt == nil { return nil } diff --git a/core/rawdb/bor_receipt.go b/core/rawdb/bor_receipt.go index e225083741..0739c67a9f 100644 --- a/core/rawdb/bor_receipt.go +++ b/core/rawdb/bor_receipt.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -33,49 +34,28 @@ func borTxLookupKey(hash common.Hash) []byte { return append(borTxLookupPrefix, hash.Bytes()...) } -// HasBorReceipt verifies the existence of all block receipt belonging -// to a block. -func HasBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) bool { - if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { - return true - } - - if has, err := db.Has(borReceiptKey(number, hash)); !has || err != nil { - return false - } +func ReadBorReceiptRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + var data []byte - return true -} + err := db.ReadAncients(func(reader ethdb.AncientReader) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(freezerBorReceiptTable, number) -// ReadBorReceiptRLP retrieves the block receipt belonging to a block in RLP encoding. -func ReadBorReceiptRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { - // First try to look up the data in ancient database. Extra hash - // comparison is necessary since ancient database only maintains - // the canonical data. - data, _ := db.Ancient(freezerBorReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data - } - } - // Then try to look up the data in leveldb. - data, _ = db.Get(borReceiptKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. 
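The comment being deleted here described a real time-of-check race: the freezer moves receipts from leveldb into the ancient store in the background, so two separate lookups (ancients, then leveldb) can both miss data that migrated in between, which is why the old code retried the ancient read a second time. The rewrite performs both lookups inside a single db.ReadAncients callback, i.e. under the freezer lock. A simplified sketch of that pattern, with a toy store standing in for the ethdb interfaces (the real callback receives an ethdb.AncientReader and also checks canonicality via isCanon):

    package main

    import (
        "fmt"
        "sync"
    )

    type store struct {
        mu      sync.Mutex
        ancient map[uint64][]byte // frozen data
        kv      map[uint64][]byte // recent data still in leveldb
    }

    // readAncients runs fn while holding the freezer lock, so no migration
    // can happen between the ancient check and the leveldb fallback.
    func (s *store) readAncients(fn func()) {
        s.mu.Lock()
        defer s.mu.Unlock()
        fn()
    }

    func (s *store) readReceipt(number uint64) []byte {
        var data []byte
        s.readAncients(func() {
            if d, ok := s.ancient[number]; ok {
                data = d
                return
            }
            data = s.kv[number]
        })
        return data
    }

    func main() {
        s := &store{
            ancient: map[uint64][]byte{1: []byte("r1")},
            kv:      map[uint64][]byte{2: []byte("r2")},
        }
        fmt.Printf("%s %s\n", s.readReceipt(1), s.readReceipt(2)) // r1 r2
    }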
- // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerBorReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data + return nil } + + // If not, try reading from leveldb + data, _ = db.Get(borReceiptKey(number, hash)) + + return nil + }) + + if err != nil { + log.Warn("during ReadBorReceiptRLP", "number", number, "hash", hash, "err", err) } - return nil // Can't find the data anywhere. + + return data } // ReadRawBorReceipt retrieves the block receipt belonging to a block. @@ -101,7 +81,11 @@ func ReadRawBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) *types. // ReadBorReceipt retrieves all the bor block receipts belonging to a block, including // its correspoinding metadata fields. If it is unable to populate these metadata // fields then nil is returned. -func ReadBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) *types.Receipt { +func ReadBorReceipt(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) *types.Receipt { + if config != nil && config.Bor != nil && config.Bor.Sprint != nil && !config.Bor.IsSprintStart(number) { + return nil + } + // We're deriving many fields from the block body, retrieve beside the receipt borReceipt := ReadRawBorReceipt(db, hash, number) if borReceipt == nil { diff --git a/eth/filters/test_backend.go b/eth/filters/test_backend.go index 979ed3efb6..8b2ef4a7f2 100644 --- a/eth/filters/test_backend.go +++ b/eth/filters/test_backend.go @@ -38,7 +38,7 @@ func (b *TestBackend) GetBorBlockReceipt(ctx context.Context, hash common.Hash) return &types.Receipt{}, nil } - receipt := rawdb.ReadBorReceipt(b.DB, hash, *number) + receipt := rawdb.ReadBorReceipt(b.DB, hash, *number, nil) if receipt == nil { return &types.Receipt{}, nil } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 3fce91ac9c..13f5c627cd 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -177,7 +177,7 @@ func (api *API) getAllBlockTransactions(ctx context.Context, block *types.Block) stateSyncPresent := false - borReceipt := rawdb.ReadBorReceipt(api.backend.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(api.backend.ChainDb(), block.Hash(), block.NumberU64(), api.backend.ChainConfig()) if borReceipt != nil { txHash := types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) if txHash != (common.Hash{}) { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 372d630c07..6bb7c225be 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -636,7 +636,7 @@ func (s *PublicBlockChainAPI) GetTransactionReceiptsByBlock(ctx context.Context, var txHash common.Hash - borReceipt := rawdb.ReadBorReceipt(s.b.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(s.b.ChainDb(), block.Hash(), block.NumberU64(), s.b.ChainConfig()) if borReceipt != nil { receipts = append(receipts, borReceipt) txHash = types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) @@ -1453,15 +1453,23 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig, db ethdb.Database) *RPCTransaction { txs := b.Transactions() - borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), 
b.NumberU64()) - if borReceipt != nil { - tx, _, _, _ := rawdb.ReadBorTransaction(db, borReceipt.TxHash) + if index >= uint64(len(txs)+1) { + return nil + } - if tx != nil { - txs = append(txs, tx) + // If the index out of the range of transactions defined in block body, it means that the transaction is a bor state sync transaction, and we need to fetch it from the database + if index == uint64(len(txs)) { + borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) + if borReceipt != nil { + tx, _, _, _ := rawdb.ReadBorTransaction(db, borReceipt.TxHash) + + if tx != nil { + txs = append(txs, tx) + } } } + // If the index is still out of the range after checking bor state sync transaction, it means that the transaction index is invalid if index >= uint64(len(txs)) { return nil } @@ -1602,7 +1610,7 @@ func (api *PublicTransactionPoolAPI) getAllBlockTransactions(ctx context.Context stateSyncPresent := false - borReceipt := rawdb.ReadBorReceipt(api.b.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(api.b.ChainDb(), block.Hash(), block.NumberU64(), api.b.ChainConfig()) if borReceipt != nil { txHash := types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) if txHash != (common.Hash{}) { @@ -1772,7 +1780,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha if borTx { // Fetch bor block receipt - receipt = rawdb.ReadBorReceipt(s.b.ChainDb(), blockHash, blockNumber) + receipt = rawdb.ReadBorReceipt(s.b.ChainDb(), blockHash, blockNumber, s.b.ChainConfig()) } else { receipts, err := s.b.GetReceipts(ctx, blockHash) if err != nil { diff --git a/params/config.go b/params/config.go index 94729224bb..9833c9eac5 100644 --- a/params/config.go +++ b/params/config.go @@ -617,6 +617,10 @@ func (c *BorConfig) IsDelhi(number *big.Int) bool { return isForked(c.DelhiBlock, number) } +func (c *BorConfig) IsSprintStart(number uint64) bool { + return number%c.CalculateSprint(number) == 0 +} + func (c *BorConfig) calculateBorConfigHelper(field map[string]uint64, number uint64) uint64 { keys := make([]string, 0, len(field)) for k := range field { diff --git a/rpc/server.go b/rpc/server.go index 96c3861d66..dc8afa0b6e 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -23,6 +23,7 @@ import ( "sync/atomic" mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/log" ) @@ -127,9 +128,11 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { log.Debug("batch limit %d exceeded: %d requests given", s.BatchLimit, len(reqs)) } } else { + //nolint:contextcheck h.handleBatch(reqs) } } else { + //nolint:contextcheck h.handleMsg(reqs[0]) } } From fe1034e5e13051a91f13793bc25c20bfce9ac5d8 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Fri, 3 Feb 2023 15:42:38 +0530 Subject: [PATCH 46/56] Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. 
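The core optimization of this patch is easy to miss in the diff noise: a bor state-sync receipt is only ever attached to the first block of a sprint, so the new IsSprintStart gate lets ReadBorReceipt return nil for every other block without touching the database, and newRPCTransactionFromBlockIndex only consults it when the requested index is exactly one past the block body's transactions. A simplified sketch of the gate (a fixed sprint length of 64 is assumed here for illustration; the real BorConfig resolves the length per block via CalculateSprint):

    package main

    import "fmt"

    const sprint = 64 // assumed fixed sprint length, for illustration only

    // isSprintStart mirrors BorConfig.IsSprintStart from the diff above.
    func isSprintStart(number uint64) bool {
        return number%sprint == 0
    }

    func main() {
        // Only sprint-start blocks can carry a bor state-sync receipt, so
        // every other block number skips the receipt lookup entirely.
        for _, n := range []uint64{0, 63, 64, 100, 128} {
            fmt.Printf("block %d: sprint start = %v\n", n, isSprintStart(n))
        }
    }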
--- builder/files/config.toml | 2 +- docs/cli/example_config.toml | 2 +- internal/cli/server/config.go | 2 +- packaging/templates/mainnet-v1/archive/config.toml | 2 +- packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml | 2 +- packaging/templates/mainnet-v1/sentry/validator/bor/config.toml | 2 +- packaging/templates/mainnet-v1/without-sentry/bor/config.toml | 2 +- packaging/templates/testnet-v4/archive/config.toml | 2 +- packaging/templates/testnet-v4/sentry/sentry/bor/config.toml | 2 +- packaging/templates/testnet-v4/sentry/validator/bor/config.toml | 2 +- packaging/templates/testnet-v4/without-sentry/bor/config.toml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/builder/files/config.toml b/builder/files/config.toml index aa6ca0f208..f577706f7b 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -129,7 +129,7 @@ syncmode = "full" # preimages = false # txlookuplimit = 2350000 # triesinmemory = 128 - # timeout = "10m0s" + # timeout = "1h0m0s" [accounts] # allow-insecure-unlock = true diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index 2a768e6bd2..6bf58a8361 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -132,7 +132,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec preimages = false # Enable recording the SHA3/keccak preimages of trie keys txlookuplimit = 2350000 # Number of recent blocks to maintain transactions index for (default = about 56 days, 0 = entire chain) triesinmemory = 128 # Number of block states (tries) to keep in memory - timeout = "10m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory + timeout = "1h0m0s" # Time after which the Merkle Patricia Trie is stored to disc from memory [accounts] unlock = [] # Comma separated list of accounts to unlock diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index e61364b075..ac4e9c8d53 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -565,7 +565,7 @@ func DefaultConfig() *Config { Preimages: false, TxLookupLimit: 2350000, TriesInMemory: 128, - TrieTimeout: 10 * time.Minute, + TrieTimeout: 60 * time.Minute, }, Accounts: &AccountsConfig{ Unlock: []string{}, diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 181502a92e..8e98736196 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -120,7 +120,7 @@ gcmode = "archive" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index ecb38b4609..853b2ed313 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -120,7 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 508df5f6b9..284445113c 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # 
noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 3fa20ef121..22361b64bc 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 -# timeout = "10m0s" +# timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 110be1a14c..992aff0c68 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -120,7 +120,7 @@ gcmode = "archive" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 8814be618f..1a335a42a3 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -120,7 +120,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" # [accounts] # unlock = [] diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 69b6c1214e..798375364e 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 - # timeout = "10m0s" + # timeout = "1h0m0s" [accounts] allow-insecure-unlock = true diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index bc3b5c0723..47a3053b58 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -122,7 +122,7 @@ syncmode = "full" # noprefetch = false # preimages = false # txlookuplimit = 2350000 -# timeout = "10m0s" +# timeout = "1h0m0s" [accounts] allow-insecure-unlock = true From 9fa20a7da857e2cd08463759bd0afc09f3576b34 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 3 Feb 2023 18:22:57 +0530 Subject: [PATCH 47/56] Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of 
github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji --- builder/files/config.toml | 4 + cmd/clef/main.go | 2 +- docs/cli/example_config.toml | 23 +++-- docs/cli/server.md | 8 ++ go.mod | 3 + go.sum | 4 + internal/cli/dumpconfig.go | 2 + internal/cli/server/config.go | 45 ++++++--- internal/cli/server/flags.go | 28 ++++++ internal/web3ext/web3ext.go | 28 ++++++ node/api.go | 89 +++++++++++++++++ node/config.go | 6 ++ node/node.go | 20 ++-- node/rpcstack.go | 13 ++- .../templates/mainnet-v1/archive/config.toml | 4 + .../mainnet-v1/sentry/sentry/bor/config.toml | 4 + .../sentry/validator/bor/config.toml | 4 + .../mainnet-v1/without-sentry/bor/config.toml | 4 + .../templates/testnet-v4/archive/config.toml | 4 + .../testnet-v4/sentry/sentry/bor/config.toml | 4 + .../sentry/validator/bor/config.toml | 4 + .../testnet-v4/without-sentry/bor/config.toml | 4 + rpc/client.go | 2 +- rpc/client_test.go | 7 +- rpc/endpoints.go | 2 +- rpc/execution_pool.go | 99 +++++++++++++++++++ rpc/handler.go | 38 ++++--- rpc/http_test.go | 2 +- rpc/inproc.go | 8 +- rpc/ipc.go | 6 +- rpc/server.go | 33 ++++++- rpc/server_test.go | 2 +- rpc/subscription_test.go | 2 +- rpc/testservice_test.go | 2 +- rpc/websocket_test.go | 2 +- 35 files changed, 450 insertions(+), 62 deletions(-) create mode 100644 rpc/execution_pool.go diff --git a/builder/files/config.toml b/builder/files/config.toml index f577706f7b..1b8d915b7b 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -75,6 +75,8 @@ syncmode = "full" # api = ["eth", "net", "web3", "txpool", "bor"] # vhosts = ["*"] # corsdomain = ["*"] +# ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -82,6 +84,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] +# ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/cmd/clef/main.go b/cmd/clef/main.go index f7c3adebc4..1bfb2610e5 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -656,7 +656,7 @@ func signer(c *cli.Context) error { vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name)) cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) - srv := rpc.NewServer() + srv := rpc.NewServer(0, 0) err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) if err != nil { utils.Fatalf("Could not register API: %w", err) diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index 6bf58a8361..c32c40e2c6 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -74,18 +74,22 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec api = ["eth", "net", "web3", "txpool", "bor"] # API's offered over 
the HTTP-RPC interface vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) + ep-size = 40 # Maximum size of workers to run in rpc execution pool for HTTP requests (default: 40) + ep-requesttimeout = "0s" # Request Timeout for rpc execution pool for HTTP requests (default: 0s, 0s = disabled) [jsonrpc.ws] - enabled = false # Enable the WS-RPC server - port = 8546 # WS-RPC server listening port - prefix = "" # HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths. - host = "localhost" # ws.addr - api = ["net", "web3"] # API's offered over the WS-RPC interface - origins = ["localhost"] # Origins from which to accept websockets requests + enabled = false # Enable the WS-RPC server + port = 8546 # WS-RPC server listening port + prefix = "" # HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths. + host = "localhost" # ws.addr + api = ["net", "web3"] # API's offered over the WS-RPC interface + origins = ["localhost"] # Origins from which to accept websockets requests + ep-size = 40 # Maximum size of workers to run in rpc execution pool for WS requests (default: 40) + ep-requesttimeout = "0s" # Request Timeout for rpc execution pool for WS requests (default: 0s, 0s = disabled) [jsonrpc.graphql] enabled = false # Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. - port = 0 # - prefix = "" # - host = "" # + port = 0 # + prefix = "" # + host = "" # vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) [jsonrpc.timeouts] @@ -93,6 +97,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec write = "30s" idle = "2m0s" + [gpo] blocks = 20 # Number of recent blocks to check for gas prices percentile = 60 # Suggested gas price is the given percentile of a set of recent transaction gas prices diff --git a/docs/cli/server.md b/docs/cli/server.md index caf10070c0..b91b000eb6 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -120,6 +120,10 @@ The ```bor server``` command runs the Bor client. - ```http.api```: API's offered over the HTTP-RPC interface (default: eth,net,web3,txpool,bor) +- ```http.ep-size```: Maximum size of workers to run in rpc execution pool for HTTP requests (default: 40) + +- ```http.ep-requesttimeout```: Request Timeout for rpc execution pool for HTTP requests (default: 0s) + - ```ws```: Enable the WS-RPC server (default: false) - ```ws.addr```: WS-RPC server listening interface (default: localhost) @@ -130,6 +134,10 @@ The ```bor server``` command runs the Bor client. - ```ws.api```: API's offered over the WS-RPC interface (default: net,web3) +- ```ws.ep-size```: Maximum size of workers to run in rpc execution pool for WS requests (default: 40) + +- ```ws.ep-requesttimeout```: Request Timeout for rpc execution pool for WS requests (default: 0s) + - ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. 
(default: false) ### P2P Options diff --git a/go.mod b/go.mod index 36595ca307..f55b2f9aa7 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 github.com/BurntSushi/toml v1.1.0 github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d + github.com/JekaMas/workerpool v1.1.5 github.com/VictoriaMetrics/fastcache v1.6.0 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 @@ -84,6 +85,8 @@ require ( pgregory.net/rapid v0.4.8 ) +require github.com/gammazero/deque v0.2.1 // indirect + require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect diff --git a/go.sum b/go.sum index 96fa9d3f04..4b312ccfb1 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d h1:RO27lgfZF8s9lZ3pWyzc0gCE0RZC+6/PXbRjAa0CNp8= github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d/go.mod h1:romz7UPgSYhfJkKOalzEEyV6sWtt/eAEm0nX2aOrod0= +github.com/JekaMas/workerpool v1.1.5 h1:xmrx2Zyft95CEGiEqzDxiawptCIRZQ0zZDhTGDFOCaw= +github.com/JekaMas/workerpool v1.1.5/go.mod h1:IoDWPpwMcA27qbuugZKeBslDrgX09lVmksuh9sjzbhc= github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -157,6 +159,8 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go index a748af3357..787eab2d13 100644 --- a/internal/cli/dumpconfig.go +++ b/internal/cli/dumpconfig.go @@ -55,6 +55,8 @@ func (c *DumpconfigCommand) Run(args []string) int { userConfig.JsonRPC.HttpTimeout.ReadTimeoutRaw = userConfig.JsonRPC.HttpTimeout.ReadTimeout.String() userConfig.JsonRPC.HttpTimeout.WriteTimeoutRaw = userConfig.JsonRPC.HttpTimeout.WriteTimeout.String() userConfig.JsonRPC.HttpTimeout.IdleTimeoutRaw = userConfig.JsonRPC.HttpTimeout.IdleTimeout.String() + userConfig.JsonRPC.Http.ExecutionPoolRequestTimeoutRaw = userConfig.JsonRPC.Http.ExecutionPoolRequestTimeout.String() + userConfig.JsonRPC.Ws.ExecutionPoolRequestTimeoutRaw = userConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout.String() userConfig.TxPool.RejournalRaw = userConfig.TxPool.Rejournal.String() userConfig.TxPool.LifeTimeRaw = userConfig.TxPool.LifeTime.String() userConfig.Sealer.GasPriceRaw = 
userConfig.Sealer.GasPrice.String() diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index ac4e9c8d53..ca7a235ace 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -281,6 +281,13 @@ type APIConfig struct { // Origins is the list of endpoints to accept requests from (only consumed for websockets) Origins []string `hcl:"origins,optional" toml:"origins,optional"` + + // ExecutionPoolSize is max size of workers to be used for rpc execution + ExecutionPoolSize uint64 `hcl:"ep-size,optional" toml:"ep-size,optional"` + + // ExecutionPoolRequestTimeout is timeout used by execution pool for rpc execution + ExecutionPoolRequestTimeout time.Duration `hcl:"-,optional" toml:"-"` + ExecutionPoolRequestTimeoutRaw string `hcl:"ep-requesttimeout,optional" toml:"ep-requesttimeout,optional"` } // Used from rpc.HTTPTimeouts @@ -507,21 +514,25 @@ func DefaultConfig() *Config { GasCap: ethconfig.Defaults.RPCGasCap, TxFeeCap: ethconfig.Defaults.RPCTxFeeCap, Http: &APIConfig{ - Enabled: false, - Port: 8545, - Prefix: "", - Host: "localhost", - API: []string{"eth", "net", "web3", "txpool", "bor"}, - Cors: []string{"localhost"}, - VHost: []string{"localhost"}, + Enabled: false, + Port: 8545, + Prefix: "", + Host: "localhost", + API: []string{"eth", "net", "web3", "txpool", "bor"}, + Cors: []string{"localhost"}, + VHost: []string{"localhost"}, + ExecutionPoolSize: 40, + ExecutionPoolRequestTimeout: 0, }, Ws: &APIConfig{ - Enabled: false, - Port: 8546, - Prefix: "", - Host: "localhost", - API: []string{"net", "web3"}, - Origins: []string{"localhost"}, + Enabled: false, + Port: 8546, + Prefix: "", + Host: "localhost", + API: []string{"net", "web3"}, + Origins: []string{"localhost"}, + ExecutionPoolSize: 40, + ExecutionPoolRequestTimeout: 0, }, Graphql: &APIConfig{ Enabled: false, @@ -628,6 +639,8 @@ func (c *Config) fillTimeDurations() error { {"jsonrpc.timeouts.read", &c.JsonRPC.HttpTimeout.ReadTimeout, &c.JsonRPC.HttpTimeout.ReadTimeoutRaw}, {"jsonrpc.timeouts.write", &c.JsonRPC.HttpTimeout.WriteTimeout, &c.JsonRPC.HttpTimeout.WriteTimeoutRaw}, {"jsonrpc.timeouts.idle", &c.JsonRPC.HttpTimeout.IdleTimeout, &c.JsonRPC.HttpTimeout.IdleTimeoutRaw}, + {"jsonrpc.ws.ep-requesttimeout", &c.JsonRPC.Ws.ExecutionPoolRequestTimeout, &c.JsonRPC.Ws.ExecutionPoolRequestTimeoutRaw}, + {"jsonrpc.http.ep-requesttimeout", &c.JsonRPC.Http.ExecutionPoolRequestTimeout, &c.JsonRPC.Http.ExecutionPoolRequestTimeoutRaw}, {"txpool.lifetime", &c.TxPool.LifeTime, &c.TxPool.LifeTimeRaw}, {"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw}, {"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw}, @@ -997,7 +1010,11 @@ func (c *Config) buildNode() (*node.Config, error) { WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout, IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout, }, - RPCBatchLimit: c.RPCBatchLimit, + RPCBatchLimit: c.RPCBatchLimit, + WSJsonRPCExecutionPoolSize: c.JsonRPC.Ws.ExecutionPoolSize, + WSJsonRPCExecutionPoolRequestTimeout: c.JsonRPC.Ws.ExecutionPoolRequestTimeout, + HTTPJsonRPCExecutionPoolSize: c.JsonRPC.Http.ExecutionPoolSize, + HTTPJsonRPCExecutionPoolRequestTimeout: c.JsonRPC.Http.ExecutionPoolRequestTimeout, } // dev mode diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 22d5b73485..abf5fa3465 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -444,6 +444,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.JsonRPC.Http.API, Group: "JsonRPC", }) + 
f.Uint64Flag(&flagset.Uint64Flag{ + Name: "http.ep-size", + Usage: "Maximum size of workers to run in rpc execution pool for HTTP requests", + Value: &c.cliConfig.JsonRPC.Http.ExecutionPoolSize, + Default: c.cliConfig.JsonRPC.Http.ExecutionPoolSize, + Group: "JsonRPC", + }) + f.DurationFlag(&flagset.DurationFlag{ + Name: "http.ep-requesttimeout", + Usage: "Request Timeout for rpc execution pool for HTTP requests", + Value: &c.cliConfig.JsonRPC.Http.ExecutionPoolRequestTimeout, + Default: c.cliConfig.JsonRPC.Http.ExecutionPoolRequestTimeout, + Group: "JsonRPC", + }) // ws options f.BoolFlag(&flagset.BoolFlag{ @@ -481,6 +495,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.JsonRPC.Ws.API, Group: "JsonRPC", }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "ws.ep-size", + Usage: "Maximum size of workers to run in rpc execution pool for WS requests", + Value: &c.cliConfig.JsonRPC.Ws.ExecutionPoolSize, + Default: c.cliConfig.JsonRPC.Ws.ExecutionPoolSize, + Group: "JsonRPC", + }) + f.DurationFlag(&flagset.DurationFlag{ + Name: "ws.ep-requesttimeout", + Usage: "Request Timeout for rpc execution pool for WS requests", + Value: &c.cliConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout, + Default: c.cliConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout, + Group: "JsonRPC", + }) // graphql options f.BoolFlag(&flagset.BoolFlag{ diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index dcdd5baf23..c823f096d6 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -192,6 +192,34 @@ web3._extend({ name: 'stopWS', call: 'admin_stopWS' }), + new web3._extend.Method({ + name: 'getExecutionPoolSize', + call: 'admin_getExecutionPoolSize' + }), + new web3._extend.Method({ + name: 'getExecutionPoolRequestTimeout', + call: 'admin_getExecutionPoolRequestTimeout' + }), + // new web3._extend.Method({ + // name: 'setWSExecutionPoolRequestTimeout', + // call: 'admin_setWSExecutionPoolRequestTimeout', + // params: 1 + // }), + // new web3._extend.Method({ + // name: 'setHttpExecutionPoolRequestTimeout', + // call: 'admin_setHttpExecutionPoolRequestTimeout', + // params: 1 + // }), + new web3._extend.Method({ + name: 'setWSExecutionPoolSize', + call: 'admin_setWSExecutionPoolSize', + params: 1 + }), + new web3._extend.Method({ + name: 'setHttpExecutionPoolSize', + call: 'admin_setHttpExecutionPoolSize', + params: 1 + }), ], properties: [ new web3._extend.Property({ diff --git a/node/api.go b/node/api.go index 1b32399f63..f8e7f944a6 100644 --- a/node/api.go +++ b/node/api.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" @@ -342,3 +343,91 @@ func (s *publicWeb3API) ClientVersion() string { func (s *publicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } + +type ExecutionPoolSize struct { + HttpLimit int + WSLimit int +} + +type ExecutionPoolRequestTimeout struct { + HttpLimit time.Duration + WSLimit time.Duration +} + +func (api *privateAdminAPI) GetExecutionPoolSize() *ExecutionPoolSize { + var httpLimit int + if api.node.http.host != "" { + httpLimit = api.node.http.httpHandler.Load().(*rpcHandler).server.GetExecutionPoolSize() + } + + var wsLimit int + if api.node.ws.host != "" { + wsLimit = api.node.ws.wsHandler.Load().(*rpcHandler).server.GetExecutionPoolSize() + } + + executionPoolSize := &ExecutionPoolSize{ + HttpLimit: httpLimit, + WSLimit: wsLimit, + } + + return executionPoolSize +} + +func (api *privateAdminAPI) 
GetExecutionPoolRequestTimeout() *ExecutionPoolRequestTimeout { + var httpLimit time.Duration + if api.node.http.host != "" { + httpLimit = api.node.http.httpHandler.Load().(*rpcHandler).server.GetExecutionPoolRequestTimeout() + } + + var wsLimit time.Duration + if api.node.ws.host != "" { + wsLimit = api.node.ws.wsHandler.Load().(*rpcHandler).server.GetExecutionPoolRequestTimeout() + } + + executionPoolRequestTimeout := &ExecutionPoolRequestTimeout{ + HttpLimit: httpLimit, + WSLimit: wsLimit, + } + + return executionPoolRequestTimeout +} + +// func (api *privateAdminAPI) SetWSExecutionPoolRequestTimeout(n int) *ExecutionPoolRequestTimeout { +// if api.node.ws.host != "" { +// api.node.ws.wsConfig.executionPoolRequestTimeout = time.Duration(n) * time.Millisecond +// api.node.ws.wsHandler.Load().(*rpcHandler).server.SetExecutionPoolRequestTimeout(time.Duration(n) * time.Millisecond) +// log.Warn("updating ws execution pool request timeout", "timeout", n) +// } + +// return api.GetExecutionPoolRequestTimeout() +// } + +// func (api *privateAdminAPI) SetHttpExecutionPoolRequestTimeout(n int) *ExecutionPoolRequestTimeout { +// if api.node.http.host != "" { +// api.node.http.httpConfig.executionPoolRequestTimeout = time.Duration(n) * time.Millisecond +// api.node.http.httpHandler.Load().(*rpcHandler).server.SetExecutionPoolRequestTimeout(time.Duration(n) * time.Millisecond) +// log.Warn("updating http execution pool request timeout", "timeout", n) +// } + +// return api.GetExecutionPoolRequestTimeout() +// } + +func (api *privateAdminAPI) SetWSExecutionPoolSize(n int) *ExecutionPoolSize { + if api.node.ws.host != "" { + api.node.ws.wsConfig.executionPoolSize = uint64(n) + api.node.ws.wsHandler.Load().(*rpcHandler).server.SetExecutionPoolSize(n) + log.Warn("updating ws execution pool size", "threads", n) + } + + return api.GetExecutionPoolSize() +} + +func (api *privateAdminAPI) SetHttpExecutionPoolSize(n int) *ExecutionPoolSize { + if api.node.http.host != "" { + api.node.http.httpConfig.executionPoolSize = uint64(n) + api.node.http.httpHandler.Load().(*rpcHandler).server.SetExecutionPoolSize(n) + log.Warn("updating http execution pool size", "threads", n) + } + + return api.GetExecutionPoolSize() +} diff --git a/node/config.go b/node/config.go index 495e4c5fcb..c8f40c1062 100644 --- a/node/config.go +++ b/node/config.go @@ -25,6 +25,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -207,6 +208,11 @@ type Config struct { // Maximum number of messages in a batch RPCBatchLimit uint64 `toml:",omitempty"` + // Configs for RPC execution pool + WSJsonRPCExecutionPoolSize uint64 `toml:",omitempty"` + WSJsonRPCExecutionPoolRequestTimeout time.Duration `toml:",omitempty"` + HTTPJsonRPCExecutionPoolSize uint64 `toml:",omitempty"` + HTTPJsonRPCExecutionPoolRequestTimeout time.Duration `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/node.go b/node/node.go index 94fcfb8cbf..5cf233d17a 100644 --- a/node/node.go +++ b/node/node.go @@ -105,7 +105,7 @@ func New(conf *Config) (*Node, error) { node := &Node{ config: conf, - inprocHandler: rpc.NewServer(), + inprocHandler: rpc.NewServer(0, 0), eventmux: new(event.TypeMux), log: conf.Logger, stop: make(chan struct{}), @@ -405,10 +405,12 @@ func (n *Node) startRPC() error { return err } if err := server.enableRPC(apis, httpConfig{ - CorsAllowedOrigins: n.config.HTTPCors, - Vhosts: n.config.HTTPVirtualHosts, - 
Modules: n.config.HTTPModules, - prefix: n.config.HTTPPathPrefix, + CorsAllowedOrigins: n.config.HTTPCors, + Vhosts: n.config.HTTPVirtualHosts, + Modules: n.config.HTTPModules, + prefix: n.config.HTTPPathPrefix, + executionPoolSize: n.config.HTTPJsonRPCExecutionPoolSize, + executionPoolRequestTimeout: n.config.HTTPJsonRPCExecutionPoolRequestTimeout, }); err != nil { return err } @@ -422,9 +424,11 @@ func (n *Node) startRPC() error { return err } if err := server.enableWS(n.rpcAPIs, wsConfig{ - Modules: n.config.WSModules, - Origins: n.config.WSOrigins, - prefix: n.config.WSPathPrefix, + Modules: n.config.WSModules, + Origins: n.config.WSOrigins, + prefix: n.config.WSPathPrefix, + executionPoolSize: n.config.WSJsonRPCExecutionPoolSize, + executionPoolRequestTimeout: n.config.WSJsonRPCExecutionPoolRequestTimeout, }); err != nil { return err } diff --git a/node/rpcstack.go b/node/rpcstack.go index f2c31ecb08..cba9a22f6f 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -28,6 +28,7 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/rs/cors" @@ -42,6 +43,10 @@ type httpConfig struct { Vhosts []string prefix string // path prefix on which to mount http handler jwtSecret []byte // optional JWT secret + + // Execution pool config + executionPoolSize uint64 + executionPoolRequestTimeout time.Duration } // wsConfig is the JSON-RPC/Websocket configuration @@ -50,6 +55,10 @@ type wsConfig struct { Modules []string prefix string // path prefix on which to mount ws handler jwtSecret []byte // optional JWT secret + + // Execution pool config + executionPoolSize uint64 + executionPoolRequestTimeout time.Duration } type rpcHandler struct { @@ -284,7 +293,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error { } // Create RPC server and handler. - srv := rpc.NewServer() + srv := rpc.NewServer(config.executionPoolSize, config.executionPoolRequestTimeout) srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err @@ -316,7 +325,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error { return fmt.Errorf("JSON-RPC over WebSocket is already enabled") } // Create RPC server and handler. 
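This hunk is where the new [jsonrpc.http] and [jsonrpc.ws] knobs finally reach the server: ep-size and ep-requesttimeout travel from the TOML config through httpConfig/wsConfig into rpc.NewServer. A toy sketch of the wiring and of the size-0 fast path (newServer below is a stand-in for the real constructor, not its implementation):

    package main

    import (
        "fmt"
        "time"
    )

    // Mirrors the fields added to httpConfig in node/rpcstack.go.
    type httpConfig struct {
        executionPoolSize           uint64
        executionPoolRequestTimeout time.Duration
    }

    // Stand-in for rpc.NewServer(size, timeout) from the diff above.
    func newServer(size uint64, timeout time.Duration) string {
        if size == 0 {
            return "fast path: one goroutine per task, pool disabled"
        }
        return fmt.Sprintf("worker pool: %d workers, per-task timeout %s", size, timeout)
    }

    func main() {
        // Defaults from internal/cli/server/config.go: ep-size = 40, ep-requesttimeout = 0s.
        cfg := httpConfig{executionPoolSize: 40, executionPoolRequestTimeout: 0}
        fmt.Println(newServer(cfg.executionPoolSize, cfg.executionPoolRequestTimeout))

        // In-process, IPC and test servers pass NewServer(0, 0), keeping the old behaviour.
        fmt.Println(newServer(0, 0))
    }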
- srv := rpc.NewServer() + srv := rpc.NewServer(config.executionPoolSize, config.executionPoolRequestTimeout) srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 8e98736196..5491c784ef 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -67,6 +67,8 @@ gcmode = "archive" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" [jsonrpc.ws] enabled = true port = 8546 @@ -74,6 +76,8 @@ gcmode = "archive" # host = "localhost" # api = ["web3", "net"] origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 853b2ed313..90df84dc07 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -67,6 +67,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -74,6 +76,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 284445113c..9e2d80fd2a 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -69,6 +69,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -76,6 +78,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 22361b64bc..1e5fd67762 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -69,6 +69,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -76,6 +78,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index 992aff0c68..fb9ffd0a17 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -67,6 +67,8 @@ gcmode = "archive" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" [jsonrpc.ws] enabled = true port = 8546 @@ -74,6 +76,8 @@ gcmode = "archive" # host = "localhost" # api = ["web3", "net"] origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git 
a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 1a335a42a3..9884c0eccc 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -67,6 +67,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -74,6 +76,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 798375364e..49c47fedd4 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -69,6 +69,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -76,6 +78,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 47a3053b58..2fb83a6ae2 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -69,6 +69,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -76,6 +78,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 diff --git a/rpc/client.go b/rpc/client.go index d3ce029775..fc286fe8dc 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -112,7 +112,7 @@ func (c *Client) newClientConn(conn ServerCodec) *clientConn { ctx := context.Background() ctx = context.WithValue(ctx, clientContextKey{}, c) ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services) + handler := newHandler(ctx, conn, c.idgen, c.services, NewExecutionPool(100, 0)) return &clientConn{conn, handler} } diff --git a/rpc/client_test.go b/rpc/client_test.go index fa6010bb19..1bebd27677 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -33,12 +33,14 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/log" ) func TestClientRequest(t *testing.T) { server := newTestServer() defer server.Stop() + client := DialInProc(server) defer client.Close() @@ -46,6 +48,7 @@ func TestClientRequest(t *testing.T) { if err := client.Call(&resp, "test_echo", "hello", 10, &echoArgs{"world"}); err != nil { t.Fatal(err) } + if !reflect.DeepEqual(resp, echoResult{"hello", 10, &echoArgs{"world"}}) { t.Errorf("incorrect result %#v", resp) } @@ -407,7 +410,7 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) { t.Parallel() // Create the server. 
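All of these call sites now spell out the pool parameters, and the Submit contract they rely on is: an error channel plus an ok flag, with (nil, true) on the size-0 fast path. The timeout half of that contract can be sketched without the workerpool dependency (miniSubmit below is a stand-in, not the real implementation):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // miniSubmit runs fn and reports its result on a channel, failing the
    // task if it outlives the timeout (0 means no timeout).
    func miniSubmit(ctx context.Context, fn func() error, timeout time.Duration) <-chan error {
        out := make(chan error, 1)
        go func() {
            done := make(chan error, 1)
            go func() { done <- fn() }()
            if timeout == 0 {
                out <- <-done
                return
            }
            select {
            case err := <-done:
                out <- err
            case <-time.After(timeout):
                out <- errors.New("execution pool: task timed out")
            }
        }()
        return out
    }

    func main() {
        slow := func() error { time.Sleep(50 * time.Millisecond); return nil }
        err := <-miniSubmit(context.Background(), slow, 10*time.Millisecond)
        fmt.Println(err) // execution pool: task timed out
    }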
- srv := NewServer() + srv := NewServer(0, 0) srv.RegisterName("nftest", new(notificationTestService)) p1, p2 := net.Pipe() recorder := &unsubscribeRecorder{ServerCodec: NewCodec(p1)} @@ -443,7 +446,7 @@ func TestClientSubscriptionChannelClose(t *testing.T) { t.Parallel() var ( - srv = NewServer() + srv = NewServer(0, 0) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) diff --git a/rpc/endpoints.go b/rpc/endpoints.go index d78ebe2858..2a539d4fc5 100644 --- a/rpc/endpoints.go +++ b/rpc/endpoints.go @@ -27,7 +27,7 @@ import ( func StartIPCEndpoint(ipcEndpoint string, apis []API) (net.Listener, *Server, error) { // Register all the APIs exposed by the services. var ( - handler = NewServer() + handler = NewServer(0, 0) regMap = make(map[string]struct{}) registered []string ) diff --git a/rpc/execution_pool.go b/rpc/execution_pool.go new file mode 100644 index 0000000000..d0f5ab5daa --- /dev/null +++ b/rpc/execution_pool.go @@ -0,0 +1,99 @@ +package rpc + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/JekaMas/workerpool" +) + +type SafePool struct { + executionPool *atomic.Pointer[workerpool.WorkerPool] + + sync.RWMutex + + timeout time.Duration + size int + + // Skip sending task to execution pool + fastPath bool +} + +func NewExecutionPool(initialSize int, timeout time.Duration) *SafePool { + sp := &SafePool{ + size: initialSize, + timeout: timeout, + } + + if initialSize == 0 { + sp.fastPath = true + + return sp + } + + var ptr atomic.Pointer[workerpool.WorkerPool] + + p := workerpool.New(initialSize) + ptr.Store(p) + sp.executionPool = &ptr + + return sp +} + +func (s *SafePool) Submit(ctx context.Context, fn func() error) (<-chan error, bool) { + if s.fastPath { + go func() { + _ = fn() + }() + + return nil, true + } + + if s.executionPool == nil { + return nil, false + } + + pool := s.executionPool.Load() + if pool == nil { + return nil, false + } + + return pool.Submit(ctx, fn, s.Timeout()), true +} + +func (s *SafePool) ChangeSize(n int) { + oldPool := s.executionPool.Swap(workerpool.New(n)) + + if oldPool != nil { + go func() { + oldPool.StopWait() + }() + } + + s.Lock() + s.size = n + s.Unlock() +} + +func (s *SafePool) ChangeTimeout(n time.Duration) { + s.Lock() + defer s.Unlock() + + s.timeout = n +} + +func (s *SafePool) Timeout() time.Duration { + s.RLock() + defer s.RUnlock() + + return s.timeout +} + +func (s *SafePool) Size() int { + s.RLock() + defer s.RUnlock() + + return s.size +} diff --git a/rpc/handler.go b/rpc/handler.go index 488a29300a..f1fb555c00 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -34,21 +34,20 @@ import ( // // The entry points for incoming messages are: // -// h.handleMsg(message) -// h.handleBatch(message) +// h.handleMsg(message) +// h.handleBatch(message) // // Outgoing calls use the requestOp struct. Register the request before sending it // on the connection: // -// op := &requestOp{ids: ...} -// h.addRequestOp(op) +// op := &requestOp{ids: ...} +// h.addRequestOp(op) // // Now send the request, then wait for the reply to be delivered through handleMsg: // -// if err := op.wait(...); err != nil { -// h.removeRequestOp(op) // timeout, etc. -// } -// +// if err := op.wait(...); err != nil { +// h.removeRequestOp(op) // timeout, etc. 
+// } type handler struct { reg *serviceRegistry unsubscribeCb *callback @@ -64,6 +63,8 @@ type handler struct { subLock sync.Mutex serverSubs map[ID]*Subscription + + executionPool *SafePool } type callProc struct { @@ -71,7 +72,7 @@ type callProc struct { notifiers []*Notifier } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, pool *SafePool) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) h := &handler{ reg: reg, @@ -84,11 +85,13 @@ func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg * allowSubscribe: true, serverSubs: make(map[ID]*Subscription), log: log.Root(), + executionPool: pool, } if conn.remoteAddr() != "" { h.log = h.log.New("conn", conn.remoteAddr()) } h.unsubscribeCb = newCallback(reflect.Value{}, reflect.ValueOf(h.unsubscribe)) + return h } @@ -219,12 +222,16 @@ func (h *handler) cancelServerSubscriptions(err error) { // startCallProc runs fn in a new goroutine and starts tracking it in the h.calls wait group. func (h *handler) startCallProc(fn func(*callProc)) { h.callWG.Add(1) - go func() { - ctx, cancel := context.WithCancel(h.rootCtx) + + ctx, cancel := context.WithCancel(h.rootCtx) + + h.executionPool.Submit(context.Background(), func() error { defer h.callWG.Done() defer cancel() fn(&callProc{ctx: ctx}) - }() + + return nil + }) } // handleImmediate executes non-call messages. It returns false if the message is a @@ -261,6 +268,7 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) { // handleResponse processes method call responses. func (h *handler) handleResponse(msg *jsonrpcMessage) { + op := h.respWait[string(msg.ID)] if op == nil { h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) @@ -281,7 +289,11 @@ func (h *handler) handleResponse(msg *jsonrpcMessage) { return } if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.run() + h.executionPool.Submit(context.Background(), func() error { + op.sub.run() + return nil + }) + h.clientSubs[op.sub.subid] = op.sub } } diff --git a/rpc/http_test.go b/rpc/http_test.go index c84d7705f2..9737e64e91 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -103,7 +103,7 @@ func TestHTTPResponseWithEmptyGet(t *testing.T) { func TestHTTPRespBodyUnlimited(t *testing.T) { const respLength = maxRequestContentLength * 3 - s := NewServer() + s := NewServer(0, 0) defer s.Stop() s.RegisterName("test", largeRespService{respLength}) ts := httptest.NewServer(s) diff --git a/rpc/inproc.go b/rpc/inproc.go index fbe9a40cec..29af5507b9 100644 --- a/rpc/inproc.go +++ b/rpc/inproc.go @@ -26,7 +26,13 @@ func DialInProc(handler *Server) *Client { initctx := context.Background() c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) { p1, p2 := net.Pipe() - go handler.ServeCodec(NewCodec(p1), 0) + + //nolint:contextcheck + handler.executionPool.Submit(initctx, func() error { + handler.ServeCodec(NewCodec(p1), 0) + return nil + }) + return NewCodec(p2), nil }) return c diff --git a/rpc/ipc.go b/rpc/ipc.go index 07a211c627..76fbd13f92 100644 --- a/rpc/ipc.go +++ b/rpc/ipc.go @@ -35,7 +35,11 @@ func (s *Server) ServeListener(l net.Listener) error { return err } log.Trace("Accepted RPC connection", "conn", conn.RemoteAddr()) - go s.ServeCodec(NewCodec(conn), 0) + + s.executionPool.Submit(context.Background(), func() error { + s.ServeCodec(NewCodec(conn), 0) + return nil + }) } } diff 
--git a/rpc/server.go b/rpc/server.go index dc8afa0b6e..04ee2dc87b 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "sync/atomic" + "time" mapset "github.com/deckarep/golang-set" @@ -50,12 +51,19 @@ type Server struct { run int32 codecs mapset.Set - BatchLimit uint64 + BatchLimit uint64 + executionPool *SafePool } // NewServer creates a new server instance with no registered handlers. -func NewServer() *Server { - server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1} +func NewServer(executionPoolSize uint64, executionPoolRequesttimeout time.Duration) *Server { + server := &Server{ + idgen: randomIDGenerator(), + codecs: mapset.NewSet(), + run: 1, + executionPool: NewExecutionPool(int(executionPoolSize), executionPoolRequesttimeout), + } + // Register the default service providing meta information about the RPC service such // as the services and methods it offers. rpcService := &RPCService{server} @@ -67,6 +75,22 @@ func (s *Server) SetRPCBatchLimit(batchLimit uint64) { s.BatchLimit = batchLimit } +func (s *Server) SetExecutionPoolSize(n int) { + s.executionPool.ChangeSize(n) +} + +func (s *Server) SetExecutionPoolRequestTimeout(n time.Duration) { + s.executionPool.ChangeTimeout(n) +} + +func (s *Server) GetExecutionPoolRequestTimeout() time.Duration { + return s.executionPool.Timeout() +} + +func (s *Server) GetExecutionPoolSize() int { + return s.executionPool.Size() +} + // RegisterName creates a service for the given receiver type under the given name. When no // methods on the given receiver match the criteria to be either a RPC method or a // subscription an error is returned. Otherwise a new service is created and added to the @@ -106,7 +130,8 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services) + h := newHandler(ctx, codec, s.idgen, &s.services, s.executionPool) + h.allowSubscribe = false defer h.close(io.EOF, nil) diff --git a/rpc/server_test.go b/rpc/server_test.go index e67893710d..166956681b 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -29,7 +29,7 @@ import ( ) func TestServerRegisterName(t *testing.T) { - server := NewServer() + server := NewServer(0, 0) service := new(testService) if err := server.RegisterName("test", service); err != nil { diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go index 54a053dba8..cfca1b24b9 100644 --- a/rpc/subscription_test.go +++ b/rpc/subscription_test.go @@ -53,7 +53,7 @@ func TestSubscriptions(t *testing.T) { subCount = len(namespaces) notificationCount = 3 - server = NewServer() + server = NewServer(0, 0) clientConn, serverConn = net.Pipe() out = json.NewEncoder(clientConn) in = json.NewDecoder(clientConn) diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index 253e263289..2285821779 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -26,7 +26,7 @@ import ( ) func newTestServer() *Server { - server := NewServer() + server := NewServer(0, 0) server.idgen = sequentialIDGenerator() if err := server.RegisterName("test", new(testService)); err != nil { panic(err) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index f74b7fd08b..b805ed2023 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -203,7 +203,7 @@ func TestClientWebsocketPing(t *testing.T) { // This checks that the websocket transport can deal with large messages. 
func TestClientWebsocketLargeMessage(t *testing.T) { var ( - srv = NewServer() + srv = NewServer(0, 0) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) From 67843e17ecbd7e9acea16cab7c5a234602b85587 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 3 Feb 2023 18:30:08 +0530 Subject: [PATCH 48/56] version change (#721) --- packaging/templates/package_scripts/control | 2 +- packaging/templates/package_scripts/control.arm64 | 2 +- packaging/templates/package_scripts/control.profile.amd64 | 2 +- packaging/templates/package_scripts/control.profile.arm64 | 2 +- packaging/templates/package_scripts/control.validator | 2 +- packaging/templates/package_scripts/control.validator.arm64 | 2 +- params/version.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index b487371358..d3d295be30 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 684088051f..0900bdf1a1 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 48ad7831d0..6866b26802 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 6cc46bdbf5..3d6dd268d0 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index dd28aae21d..e57443f700 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 80f62ed71d..e504e4ebe1 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta +Version: 0.3.5-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/params/version.go b/params/version.go index 5d59c13d71..475a34f579 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // 
Minor version component of the current release - VersionPatch = 4 // Patch version component of the current release + VersionPatch = 5 // Patch version component of the current release VersionMeta = "beta" // Version metadata to append to the version string ) From 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e Mon Sep 17 00:00:00 2001 From: Evgeny Danilenko <6655321@bk.ru> Date: Thu, 9 Feb 2023 01:41:09 +0400 Subject: [PATCH 49/56] Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry --- common/context.go | 32 +++++++++ common/context_test.go | 107 ++++++++++++++++++++++++++++ common/set/slice.go | 17 +++++ eth/tracers/api.go | 2 +- internal/ethapi/api.go | 91 ++++++++++++++++++++++- internal/ethapi/transaction_args.go | 2 +- 6 files changed, 247 insertions(+), 4 deletions(-) create mode 100644 common/context.go create mode 100644 common/context_test.go diff --git a/common/context.go b/common/context.go new file mode 100644 index 0000000000..1f44cf97ae --- /dev/null +++ b/common/context.go @@ -0,0 +1,32 @@ +package common + +import ( + "context" + + unique "github.com/ethereum/go-ethereum/common/set" +) + +type key struct{} + +var ( + labelsKey key +) + +func WithLabels(ctx context.Context, labels ...string) context.Context { + if len(labels) == 0 { + return ctx + } + + labels = append(labels, Labels(ctx)...) + + return context.WithValue(ctx, labelsKey, unique.Deduplicate(labels)) +} + +func Labels(ctx context.Context) []string { + labels, ok := ctx.Value(labelsKey).([]string) + if !ok { + return nil + } + + return labels +} diff --git a/common/context_test.go b/common/context_test.go new file mode 100644 index 0000000000..bc093a3dca --- /dev/null +++ b/common/context_test.go @@ -0,0 +1,107 @@ +package common + +import ( + "context" + "reflect" + "sort" + "testing" +) + +func TestWithLabels(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + initial []string + new []string + expected []string + }{ + { + "nil-nil", + nil, + nil, + nil, + }, + + { + "nil-something", + nil, + []string{"one", "two"}, + []string{"one", "two"}, + }, + + { + "something-nil", + []string{"one", "two"}, + nil, + []string{"one", "two"}, + }, + + { + "something-something", + []string{"one", "two"}, + []string{"three", "four"}, + []string{"one", "two", "three", "four"}, + }, + + // deduplication + { + "with duplicates nil-something", + nil, + []string{"one", "two", "one"}, + []string{"one", "two"}, + }, + + { + "with duplicates something-nil", + []string{"one", "two", "one"}, + nil, + []string{"one", "two"}, + }, + + { + "with duplicates something-something", + []string{"one", "two"}, + []string{"three", "one"}, + []string{"one", "two", "three"}, + }, + + { + "with duplicates something-something", + []string{"one", "two", "three"}, + []string{"three", "four", "two"}, + []string{"one", "two", "three", "four"}, + }, + } + + for _, c := range cases { + c := c + + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + ctx = WithLabels(ctx, c.initial...) + ctx = WithLabels(ctx, c.new...) + + got := Labels(ctx) + + if len(got) != len(c.expected) { + t.Errorf("case %s. expected %v, got %v", c.name, c.expected, got) + + return + } + + gotSorted := sort.StringSlice(got) + gotSorted.Sort() + + expectedSorted := sort.StringSlice(c.expected) + expectedSorted.Sort() + + if !reflect.DeepEqual(gotSorted, expectedSorted) { + t.Errorf("case %s. 
expected %v, got %v", c.name, expectedSorted, gotSorted) + } + }) + } +} diff --git a/common/set/slice.go b/common/set/slice.go index 36f11e67fe..eda4dda23b 100644 --- a/common/set/slice.go +++ b/common/set/slice.go @@ -9,3 +9,20 @@ func New[T comparable](slice []T) map[T]struct{} { return m } + +func ToSlice[T comparable](m map[T]struct{}) []T { + slice := make([]T, len(m)) + + var i int + + for k := range m { + slice[i] = k + i++ + } + + return slice +} + +func Deduplicate[T comparable](slice []T) []T { + return ToSlice(New(slice)) +} diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 13f5c627cd..ce7b36b906 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1052,7 +1052,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc } } // Execute the trace - msg, err := args.ToMessage(api.backend.RPCGasCap(), block.BaseFee()) + msg, err := args.ToMessage(ctx, api.backend.RPCGasCap(), block.BaseFee()) if err != nil { return nil, err } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 6bb7c225be..dd3ea97f5b 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -21,7 +21,11 @@ import ( "errors" "fmt" "math/big" + "os" + "path/filepath" + "runtime/pprof" "strings" + "sync" "time" "github.com/davecgh/go-spew/spew" @@ -1005,7 +1009,7 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash defer cancel() // Get a new instance of the EVM. - msg, err := args.ToMessage(globalGasCap, header.BaseFee) + msg, err := args.ToMessage(ctx, globalGasCap, header.BaseFee) if err != nil { return nil, err } @@ -1028,15 +1032,83 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash } // If the timer caused an abort, return an appropriate error message + timeoutMu.Lock() if evm.Cancelled() { + timeoutErrors++ + + if timeoutErrors >= pprofThreshold { + timeoutNoErrors = 0 + + if !isRunning { + runProfile() + } + + log.Warn("[eth_call] timeout", + "timeoutErrors", timeoutErrors, + "timeoutNoErrors", timeoutNoErrors, + "args", args, + "blockNrOrHash", blockNrOrHash, + "overrides", overrides, + "timeout", timeout, + "globalGasCap", globalGasCap) + } + + timeoutMu.Unlock() + return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) + } else { + if timeoutErrors >= pprofStopThreshold { + timeoutErrors = 0 + timeoutNoErrors = 0 + + if isRunning { + pprof.StopCPUProfile() + isRunning = false + } + } + } + + if isRunning && time.Since(pprofTime) >= pprofDuration { + timeoutErrors = 0 + timeoutNoErrors = 0 + + pprof.StopCPUProfile() + + isRunning = false } + + timeoutMu.Unlock() + if err != nil { return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas()) } + return result, nil } +func runProfile() { + pprofTime = time.Now() + + name := fmt.Sprintf("profile_eth_call-count-%d-time-%s.prof", + number, pprofTime.Format("2006-01-02-15-04-05")) + + name = filepath.Join(os.TempDir(), name) + + f, err := os.Create(name) + if err != nil { + log.Error("[eth_call] can't create profile file", "name", name, "err", err) + return + } + + if err = pprof.StartCPUProfile(f); err != nil { + log.Error("[eth_call] can't start profiling", "name", name, "err", err) + return + } + + isRunning = true + number++ +} + func newRevertError(result *core.ExecutionResult) *revertError { reason, errUnpack := abi.UnpackRevert(result.Revert()) err := errors.New("execution reverted") @@ -1067,6 +1139,21 @@ func (e *revertError) ErrorData() interface{} { return e.reason } +var ( + number int + timeoutErrors int 
// count for timeout errors + timeoutNoErrors int + timeoutMu sync.Mutex + isRunning bool + pprofTime time.Time +) + +const ( + pprofThreshold = 3 + pprofStopThreshold = 3 + pprofDuration = time.Minute +) + // Call executes the given transaction on the state for the given block number. // // Additionally, the caller can specify a batch of contract for fields overriding. @@ -1573,7 +1660,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH statedb := db.Copy() // Set the accesslist to the last al args.AccessList = &accessList - msg, err := args.ToMessage(b.RPCGasCap(), header.BaseFee) + msg, err := args.ToMessage(ctx, b.RPCGasCap(), header.BaseFee) if err != nil { return nil, 0, nil, err } diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index aa2596fe81..a8f0b2cde9 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -197,7 +197,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { // ToMessage converts the transaction arguments to the Message type used by the // core evm. This method is used in calls and traces that do not require a real // live transaction. -func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) { +func (args *TransactionArgs) ToMessage(_ context.Context, globalGasCap uint64, baseFee *big.Int) (types.Message, error) { // Reject invalid combinations of pre- and post-1559 fee styles if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") From d9cc2187b2ceccbb8d3febe728e0e0b25412787e Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Tue, 14 Feb 2023 15:37:59 +0530 Subject: [PATCH 50/56] Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> --- internal/ethapi/api.go | 4 ++++ miner/worker.go | 6 +++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index dd3ea97f5b..f5953f59c3 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -631,6 +631,10 @@ func (s *PublicBlockChainAPI) GetTransactionReceiptsByBlock(ctx context.Context, return nil, err } + if block == nil { + return nil, errors.New("block not found") + } + receipts, err := s.b.GetReceipts(ctx, block.Hash()) if err != nil { return nil, err diff --git a/miner/worker.go b/miner/worker.go index 797e7ea980..30809cd558 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1314,9 +1314,9 @@ func (w *worker) commit(ctx context.Context, env *environment, interval func(), tracing.SetAttributes( span, - attribute.Int("number", int(block.Number().Uint64())), - attribute.String("hash", block.Hash().String()), - attribute.String("sealhash", w.engine.SealHash(block.Header()).String()), + attribute.Int("number", int(env.header.Number.Uint64())), + attribute.String("hash", env.header.Hash().String()), + attribute.String("sealhash", w.engine.SealHash(env.header).String()), attribute.Int("len of env.txs", len(env.txs)), attribute.Bool("error", err != nil), ) From 4916d757eb09b3e64612bf0fe33cf7a8da00fdb0 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Tue, 14 Feb 2023 18:48:36 +0530 Subject: [PATCH 51/56] add max code init size check in txpool (#739) --- core/error.go 
| 4 ++++ core/tx_pool.go | 6 ++++++ params/protocol_params.go | 3 ++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/core/error.go b/core/error.go index 51ebefc137..234620ee4b 100644 --- a/core/error.go +++ b/core/error.go @@ -63,6 +63,10 @@ var ( // have enough funds for transfer(topmost call only). ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer") + // ErrMaxInitCodeSizeExceeded is returned if creation transaction provides the init code bigger + // than init code size limit. + ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/tx_pool.go b/core/tx_pool.go index 7648668688..3d3f01eecb 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -18,6 +18,7 @@ package core import ( "errors" + "fmt" "math" "math/big" "sort" @@ -604,6 +605,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } + // Check whether the init code size has been exceeded. + // (TODO): Add a hardfork check here while pulling upstream changes. + if tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. if tx.Value().Sign() < 0 { diff --git a/params/protocol_params.go b/params/protocol_params.go index d468af5d3c..103266caff 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -125,7 +125,8 @@ const ( ElasticityMultiplier = 2 // Bounds the maximum gas limit an EIP-1559 block may have. InitialBaseFee = 1000000000 // Initial base fee for EIP-1559 blocks. - MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions // Precompiled contract gas prices From 79718d74455829b294778f43a98b3f53c8ea2b59 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Wed, 15 Feb 2023 14:07:26 +0530 Subject: [PATCH 52/56] Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. 
* params: update version to 0.3.4-beta3 * packaging/templates: update bor version --- common/context.go | 32 ------ common/context_test.go | 107 ------------------ common/set/slice.go | 17 --- eth/tracers/api.go | 2 +- internal/ethapi/api.go | 91 +-------------- internal/ethapi/transaction_args.go | 2 +- packaging/templates/package_scripts/control | 2 +- .../templates/package_scripts/control.arm64 | 2 +- .../package_scripts/control.profile.amd64 | 2 +- .../package_scripts/control.profile.arm64 | 2 +- .../package_scripts/control.validator | 2 +- .../package_scripts/control.validator.arm64 | 2 +- params/version.go | 8 +- 13 files changed, 14 insertions(+), 257 deletions(-) delete mode 100644 common/context.go delete mode 100644 common/context_test.go diff --git a/common/context.go b/common/context.go deleted file mode 100644 index 1f44cf97ae..0000000000 --- a/common/context.go +++ /dev/null @@ -1,32 +0,0 @@ -package common - -import ( - "context" - - unique "github.com/ethereum/go-ethereum/common/set" -) - -type key struct{} - -var ( - labelsKey key -) - -func WithLabels(ctx context.Context, labels ...string) context.Context { - if len(labels) == 0 { - return ctx - } - - labels = append(labels, Labels(ctx)...) - - return context.WithValue(ctx, labelsKey, unique.Deduplicate(labels)) -} - -func Labels(ctx context.Context) []string { - labels, ok := ctx.Value(labelsKey).([]string) - if !ok { - return nil - } - - return labels -} diff --git a/common/context_test.go b/common/context_test.go deleted file mode 100644 index bc093a3dca..0000000000 --- a/common/context_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package common - -import ( - "context" - "reflect" - "sort" - "testing" -) - -func TestWithLabels(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - initial []string - new []string - expected []string - }{ - { - "nil-nil", - nil, - nil, - nil, - }, - - { - "nil-something", - nil, - []string{"one", "two"}, - []string{"one", "two"}, - }, - - { - "something-nil", - []string{"one", "two"}, - nil, - []string{"one", "two"}, - }, - - { - "something-something", - []string{"one", "two"}, - []string{"three", "four"}, - []string{"one", "two", "three", "four"}, - }, - - // deduplication - { - "with duplicates nil-something", - nil, - []string{"one", "two", "one"}, - []string{"one", "two"}, - }, - - { - "with duplicates something-nil", - []string{"one", "two", "one"}, - nil, - []string{"one", "two"}, - }, - - { - "with duplicates something-something", - []string{"one", "two"}, - []string{"three", "one"}, - []string{"one", "two", "three"}, - }, - - { - "with duplicates something-something", - []string{"one", "two", "three"}, - []string{"three", "four", "two"}, - []string{"one", "two", "three", "four"}, - }, - } - - for _, c := range cases { - c := c - - t.Run(c.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - ctx = WithLabels(ctx, c.initial...) - ctx = WithLabels(ctx, c.new...) - - got := Labels(ctx) - - if len(got) != len(c.expected) { - t.Errorf("case %s. expected %v, got %v", c.name, c.expected, got) - - return - } - - gotSorted := sort.StringSlice(got) - gotSorted.Sort() - - expectedSorted := sort.StringSlice(c.expected) - expectedSorted.Sort() - - if !reflect.DeepEqual(gotSorted, expectedSorted) { - t.Errorf("case %s. 
expected %v, got %v", c.name, expectedSorted, gotSorted) - } - }) - } -} diff --git a/common/set/slice.go b/common/set/slice.go index eda4dda23b..36f11e67fe 100644 --- a/common/set/slice.go +++ b/common/set/slice.go @@ -9,20 +9,3 @@ func New[T comparable](slice []T) map[T]struct{} { return m } - -func ToSlice[T comparable](m map[T]struct{}) []T { - slice := make([]T, len(m)) - - var i int - - for k := range m { - slice[i] = k - i++ - } - - return slice -} - -func Deduplicate[T comparable](slice []T) []T { - return ToSlice(New(slice)) -} diff --git a/eth/tracers/api.go b/eth/tracers/api.go index ce7b36b906..13f5c627cd 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1052,7 +1052,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc } } // Execute the trace - msg, err := args.ToMessage(ctx, api.backend.RPCGasCap(), block.BaseFee()) + msg, err := args.ToMessage(api.backend.RPCGasCap(), block.BaseFee()) if err != nil { return nil, err } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index f5953f59c3..49b1610987 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -21,11 +21,7 @@ import ( "errors" "fmt" "math/big" - "os" - "path/filepath" - "runtime/pprof" "strings" - "sync" "time" "github.com/davecgh/go-spew/spew" @@ -1013,7 +1009,7 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash defer cancel() // Get a new instance of the EVM. - msg, err := args.ToMessage(ctx, globalGasCap, header.BaseFee) + msg, err := args.ToMessage(globalGasCap, header.BaseFee) if err != nil { return nil, err } @@ -1036,83 +1032,15 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash } // If the timer caused an abort, return an appropriate error message - timeoutMu.Lock() if evm.Cancelled() { - timeoutErrors++ - - if timeoutErrors >= pprofThreshold { - timeoutNoErrors = 0 - - if !isRunning { - runProfile() - } - - log.Warn("[eth_call] timeout", - "timeoutErrors", timeoutErrors, - "timeoutNoErrors", timeoutNoErrors, - "args", args, - "blockNrOrHash", blockNrOrHash, - "overrides", overrides, - "timeout", timeout, - "globalGasCap", globalGasCap) - } - - timeoutMu.Unlock() - return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) - } else { - if timeoutErrors >= pprofStopThreshold { - timeoutErrors = 0 - timeoutNoErrors = 0 - - if isRunning { - pprof.StopCPUProfile() - isRunning = false - } - } - } - - if isRunning && time.Since(pprofTime) >= pprofDuration { - timeoutErrors = 0 - timeoutNoErrors = 0 - - pprof.StopCPUProfile() - - isRunning = false } - - timeoutMu.Unlock() - if err != nil { return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas()) } - return result, nil } -func runProfile() { - pprofTime = time.Now() - - name := fmt.Sprintf("profile_eth_call-count-%d-time-%s.prof", - number, pprofTime.Format("2006-01-02-15-04-05")) - - name = filepath.Join(os.TempDir(), name) - - f, err := os.Create(name) - if err != nil { - log.Error("[eth_call] can't create profile file", "name", name, "err", err) - return - } - - if err = pprof.StartCPUProfile(f); err != nil { - log.Error("[eth_call] can't start profiling", "name", name, "err", err) - return - } - - isRunning = true - number++ -} - func newRevertError(result *core.ExecutionResult) *revertError { reason, errUnpack := abi.UnpackRevert(result.Revert()) err := errors.New("execution reverted") @@ -1143,21 +1071,6 @@ func (e *revertError) ErrorData() interface{} { return e.reason } -var ( - number int - timeoutErrors int 
// count for timeout errors - timeoutNoErrors int - timeoutMu sync.Mutex - isRunning bool - pprofTime time.Time -) - -const ( - pprofThreshold = 3 - pprofStopThreshold = 3 - pprofDuration = time.Minute -) - // Call executes the given transaction on the state for the given block number. // // Additionally, the caller can specify a batch of contract for fields overriding. @@ -1664,7 +1577,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH statedb := db.Copy() // Set the accesslist to the last al args.AccessList = &accessList - msg, err := args.ToMessage(ctx, b.RPCGasCap(), header.BaseFee) + msg, err := args.ToMessage(b.RPCGasCap(), header.BaseFee) if err != nil { return nil, 0, nil, err } diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index a8f0b2cde9..aa2596fe81 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -197,7 +197,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { // ToMessage converts the transaction arguments to the Message type used by the // core evm. This method is used in calls and traces that do not require a real // live transaction. -func (args *TransactionArgs) ToMessage(_ context.Context, globalGasCap uint64, baseFee *big.Int) (types.Message, error) { +func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) { // Reject invalid combinations of pre- and post-1559 fee styles if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index d3d295be30..df0427b322 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 0900bdf1a1..bcc8041a77 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 6866b26802..507d4328b2 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 3d6dd268d0..011dfa8b63 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index e57443f700..94ee786237 100644 --- a/packaging/templates/package_scripts/control.validator +++ 
b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index e504e4ebe1..96049a56d6 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.5-beta +Version: 0.3.4-beta3 Section: develop Priority: standard Maintainer: Polygon diff --git a/params/version.go b/params/version.go index 475a34f579..46fcbb6e1e 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 5 // Patch version component of the current release - VersionMeta = "beta" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 4 // Patch version component of the current release + VersionMeta = "beta3" // Version metadata to append to the version string ) // Version holds the textual version string. From a85370115ea3613baaeb1709a407ea06d152a4f8 Mon Sep 17 00:00:00 2001 From: SHIVAM SHARMA Date: Mon, 27 Feb 2023 15:38:34 +0530 Subject: [PATCH 53/56] internal/ethapi :: Fix : newRPCTransactionFromBlockIndex --- core/blockchain.go | 3 +++ internal/ethapi/api.go | 29 +++++++++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index cbcf02fef4..fed1d04268 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1363,6 +1363,7 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types // the chain mutex to be held. 
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { var stateSyncLogs []*types.Log + if stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state); err != nil { return NonStatTy, err } @@ -1371,6 +1372,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types if err != nil { return NonStatTy, err } + if reorg { // Reorganise the chain if the parent is not the head block if block.ParentHash() != currentBlock.Hash() { @@ -1378,6 +1380,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types return NonStatTy, err } } + status = CanonStatTy } else { status = SideStatTy diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 49b1610987..8ba6ea0b91 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1457,27 +1457,28 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig, db ethdb.Database) *RPCTransaction { txs := b.Transactions() - if index >= uint64(len(txs)+1) { - return nil - } - - // If the index out of the range of transactions defined in block body, it means that the transaction is a bor state sync transaction, and we need to fetch it from the database - if index == uint64(len(txs)) { - borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) - if borReceipt != nil { - tx, _, _, _ := rawdb.ReadBorTransaction(db, borReceipt.TxHash) - - if tx != nil { - txs = append(txs, tx) + borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) + if borReceipt != nil { + if borReceipt.TxHash != (common.Hash{}) { + borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash()) + if borTx != nil { + txs = append(txs, borTx) } } } - // If the index is still out of the range after checking bor state sync transaction, it means that the transaction index is invalid if index >= uint64(len(txs)) { return nil } - return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config) + + rpcTx := newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config) + + // If the transaction is a bor transaction, we need to set the hash to the derived bor tx hash. BorTx is always the last index. + if borReceipt != nil && int(index) == len(txs)-1 { + rpcTx.Hash = borReceipt.TxHash + } + + return rpcTx } // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. 
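The fix above makes the bor receipt lookup unconditional: rather than consulting the database only when the requested index is one past the block body, newRPCTransactionFromBlockIndex now always reads the bor receipt, appends the derived state-sync transaction when one exists, and rewrites the reported hash for the last index. A condensed sketch of that lookup, assuming bor's rawdb helpers with exactly the signatures used in the hunk (the function name and package placement here are illustrative, not part of the patch):

package ethapi

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

// stateSyncTx returns the block's state-sync transaction, or nil when the
// block carries none. Sketch only; signatures follow the hunk above.
func stateSyncTx(db ethdb.Database, b *types.Block, config *params.ChainConfig) *types.Transaction {
	borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config)
	if borReceipt == nil || borReceipt.TxHash == (common.Hash{}) {
		return nil
	}
	// State-sync transactions are keyed by the derived bor tx hash.
	borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash())
	return borTx
}

Because the state-sync transaction is appended after the body's transactions, it always occupies the last index, which is why the hash override in the hunk is gated on index == len(txs)-1.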
From ac353affa3e9a096bf086e4dd9dbb32541a3f20c Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Thu, 6 Apr 2023 15:36:19 +0530 Subject: [PATCH 54/56] Merge master to qa (#813) * Merge qa to master (#750) * Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs * chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs * internal/cli/server : fix : added triesInMemory in config (#691) * changed version from 0.3.0 to 0.3.4-beta (#693) * fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil * core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus * Arpit/temp bor sync (#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. * Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. 
* Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji * version change (#721) * Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry * Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * add max code init size check in txpool (#739) * Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. * params: update version to 0.3.4-beta3 * packaging/templates: update bor version * params, packaging/templates: update bor version --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * core, miner: add sub-spans for tracing (#753) * core, miner: add sub-spans for tracing * fix linters * core: add logs for debugging * core: add more logs to print tdd while reorg * fix linters * core: minor fix * core: remove debug logs * core: use different span for write block and set head * core: use internal context for sending traces (#755) * core: add : impossible reorg block dump (#754) * add : impossible reorg block dump * chg : 3 seperate files for impossoble reorg dump * add : use exportBlocks method and RLP blocks before writing * chg : small changes * bump : go version from 1.19 to 1.20.1 (#761) * Revert "bump : go version from 1.19 to 1.20.1 (#761)" This reverts commit 4561012af9a31d20c2715ce26e4b39ca93420b8b. 
* core/vm: use optimized bigint (#26021) * Add holiman/big * Fix linter * Bump version to v0.3.5 * fix lints from develop (few lints decided to appear from code that was untouched, weird) * upgrade crypto lib version (#770) * bump dep : github.com/Masterminds/goutils to v1.1.1 (#769) * mardizzone/pos-1313: bump crypto dependency (#772) * dev: chg: bumd net dependency * dev: chg: bump crypto dependency * dev: chg: bump crypto dependency * bump dep : golang.org/x/net to v0.8.0 (#771) * Verify validator set against local contract on receiving an end-of-sprint block (#768) * Verify validator set against local contract on receiving an end-of-sprint block * Fix tests * Respect error returned by ParseValidators * Keep going back until a parent block presents * core/txpool: implement DoS defenses from geth (#778) * Hotfixes and deps bump (#776) * dev: chg: bump deps * internal/cli/server, rpc: lower down http readtimeout to 10s * dev: chg: get p2p adapter * dev: chg: lower down jsonrpc readtimeout to 10s * cherry-pick txpool optimisation changes * add check for empty lists in txpool (#704) * add check * linters * core, miner: add empty instrumentation name for tracing --------- Co-authored-by: Raneet Debnath Co-authored-by: SHIVAM SHARMA Co-authored-by: Evgeny Danilenko <6655321@bk.ru> Co-authored-by: Manav Darji * packaging,params: bump to v0.3.6 (#782) * v0.3.6 fix (#787) * Fix get validator set in header verifier * chg : commit tx logs from info to debug (#673) * chg : commit tx logs from info to debug * fix : minor changes * chg : miner : commitTransactions-stats moved from info to debug * lint : fix linters * refactor logging * miner : chg : UnauthorizedSignerError to debug * lint : fix lint * fix : log.Logger interface compatibility --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Remove unnecessary sorting of valset from header in verification * dev: chg: version bump --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: marcello33 * core: improve locks in txpool (#807) * added a write lock to the txs.filter method and a read lock to the txs.reheap method - both of which are called by Filter during reorg adjustments to txpool * txpool reorg locks * more locks * locks * linters * params, packaging: update version for v0.3.8-beta release * core: add logs in reheap --------- Co-authored-by: Alex Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Merge qa to master (#808) * Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs * chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs * internal/cli/server : fix : added triesInMemory in config (#691) * changed version from 0.3.0 to 0.3.4-beta (#693) * fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil * core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus * Arpit/temp bor sync 
(#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. * Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. * Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji * version change (#721) * Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry * Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * add max code init size check in txpool (#739) * Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. 
* params: update version to 0.3.4-beta3 * packaging/templates: update bor version * internal/ethapi :: Fix : newRPCTransactionFromBlockIndex * fix: remove assignment for bor receipt --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * Setting up bor to use hosted 18.04 runner as ubuntu provided 18.04 runner is end of life --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> Co-authored-by: Martin Holst Swende Co-authored-by: marcello33 Co-authored-by: Raneet Debnath Co-authored-by: Raneet Debnath <35629432+Raneet10@users.noreply.github.com> Co-authored-by: Alex Co-authored-by: Daniel Jones --- .github/workflows/packager.yml | 4 +- Makefile | 7 +- builder/files/config.toml | 2 +- cmd/evm/internal/t8ntool/transaction.go | 3 +- common/debug/debug.go | 24 + common/math/big.go | 29 +- common/math/uint.go | 23 + common/time.go | 9 + common/tracing/context.go | 10 +- consensus/bor/bor.go | 49 +- consensus/bor/heimdall/span/spanner.go | 15 +- consensus/bor/span.go | 4 +- consensus/bor/span_mock.go | 46 +- consensus/misc/eip1559.go | 53 + core/blockchain.go | 148 +- core/tests/blockchain_repair_test.go | 2 +- core/tx_journal.go | 17 +- core/tx_list.go | 316 ++- core/tx_list_test.go | 9 +- core/tx_pool.go | 1152 ++++++++--- core/tx_pool_test.go | 1686 ++++++++++++++++- core/txpool2_test.go | 229 +++ core/types/access_list_tx.go | 65 +- core/types/dynamic_fee_tx.go | 71 +- core/types/legacy_tx.go | 60 +- core/types/transaction.go | 180 +- core/types/transaction_signing.go | 13 +- core/types/transaction_test.go | 32 +- core/vm/contracts.go | 28 +- docs/cli/example_config.toml | 2 +- eth/api_backend.go | 11 +- eth/bor_checkpoint_verifier.go | 1 + eth/handler.go | 3 +- eth/handler_test.go | 3 +- eth/sync.go | 7 +- go.mod | 23 +- go.sum | 44 +- internal/cli/server/config.go | 2 +- internal/cli/server/pprof/pprof.go | 22 + internal/ethapi/api.go | 40 +- internal/testlog/testlog.go | 32 + internal/web3ext/web3ext.go | 5 + les/handler_test.go | 2 +- les/server_requests.go | 22 +- log/logger.go | 41 + log/root.go | 32 + miner/fake_miner.go | 2 +- miner/worker.go | 227 ++- miner/worker_test.go | 4 +- p2p/dnsdisc/client_test.go | 8 +- .../templates/mainnet-v1/archive/config.toml | 2 +- .../mainnet-v1/sentry/sentry/bor/config.toml | 2 +- .../sentry/validator/bor/config.toml | 2 +- .../mainnet-v1/without-sentry/bor/config.toml | 2 +- packaging/templates/package_scripts/control | 2 +- .../templates/package_scripts/control.arm64 | 2 +- .../package_scripts/control.profile.amd64 | 2 +- .../package_scripts/control.profile.arm64 | 2 +- .../package_scripts/control.validator | 2 +- .../package_scripts/control.validator.arm64 | 2 +- .../templates/testnet-v4/archive/config.toml | 2 +- .../testnet-v4/sentry/sentry/bor/config.toml | 2 +- .../sentry/validator/bor/config.toml | 2 +- .../testnet-v4/without-sentry/bor/config.toml | 2 +- params/version.go | 8 +- rpc/http.go | 2 +- tests/bor/bor_test.go | 20 + tests/bor/helper.go | 11 + tests/init_test.go | 3 - 69 files changed, 4269 insertions(+), 622 deletions(-) create mode 100644 common/math/uint.go create mode 100644 common/time.go create mode 100644 core/txpool2_test.go 
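The execution pool this merge reintroduces (rpc/execution_pool.go, shown in full earlier in the series) is what the bullets above call "add execution pool": each JSON-RPC call is funneled through SafePool.Submit instead of a bare goroutine, and a pool size of 0 preserves the old one-goroutine-per-request behaviour via the fast path. A minimal usage sketch, assuming the semantics implied by the Submit signature (the returned channel is taken to carry fn's error or a timeout; the 30s budget below is an illustrative value, only the size of 40 is named in the commit description):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// 40 workers matches the default pool size mentioned above; the
	// per-call timeout is an assumed example value.
	pool := rpc.NewExecutionPool(40, 30*time.Second)

	errCh, ok := pool.Submit(context.Background(), func() error {
		// handle one decoded JSON-RPC call here
		return nil
	})

	switch {
	case !ok:
		fmt.Println("pool unavailable")
	case errCh == nil:
		// size-0 fast path: fn runs on a plain goroutine, fire-and-forget
	default:
		if err := <-errCh; err != nil {
			fmt.Println("call failed or timed out:", err)
		}
	}
}

The server exposes the same knobs at runtime through SetExecutionPoolSize and SetExecutionPoolRequestTimeout; ChangeSize swaps in a fresh workerpool atomically and drains the old one in a background goroutine, so the pool can be resized without dropping in-flight calls.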
diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml index 7485aca976..f2aef42485 100644 --- a/.github/workflows/packager.yml +++ b/.github/workflows/packager.yml @@ -12,7 +12,9 @@ on: jobs: build: - runs-on: ubuntu-18.04 + runs-on: + group: ubuntu-runners + labels: 18.04RunnerT2Large steps: - name: Checkout uses: actions/checkout@v2 diff --git a/Makefile b/Makefile index 242435df76..a8a4b66e8d 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,10 @@ ios: @echo "Import \"$(GOBIN)/Geth.framework\" to use the library." test: - $(GOTEST) --timeout 5m -shuffle=on -cover -coverprofile=cover.out $(TESTALL) + $(GOTEST) --timeout 5m -shuffle=on -cover -short -coverprofile=cover.out -covermode=atomic $(TESTALL) + +test-txpool-race: + $(GOTEST) -run=TestPoolMiningDataRaces --timeout 600m -race -v ./core/ test-race: $(GOTEST) --timeout 15m -race -shuffle=on $(TESTALL) @@ -75,7 +78,7 @@ lint: lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.48.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.50.1 goimports: goimports -local "$(PACKAGE)" -w . diff --git a/builder/files/config.toml b/builder/files/config.toml index 1b8d915b7b..cb790f371c 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -94,7 +94,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] -# read = "30s" +# read = "10s" # write = "30s" # idle = "2m0s" diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go index 6f1c964ada..cf2039b66c 100644 --- a/cmd/evm/internal/t8ntool/transaction.go +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -24,6 +24,8 @@ import ( "os" "strings" + "gopkg.in/urfave/cli.v1" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" @@ -32,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" - "gopkg.in/urfave/cli.v1" ) type result struct { diff --git a/common/debug/debug.go b/common/debug/debug.go index 6a677e495d..056ebe2fa7 100644 --- a/common/debug/debug.go +++ b/common/debug/debug.go @@ -1,6 +1,7 @@ package debug import ( + "fmt" "runtime" ) @@ -26,3 +27,26 @@ func Callers(show int) []string { return callers } + +func CodeLine() (string, string, int) { + pc, filename, line, _ := runtime.Caller(1) + return runtime.FuncForPC(pc).Name(), filename, line +} + +func CodeLineStr() string { + pc, filename, line, _ := runtime.Caller(1) + return fmt.Sprintf("%s:%d - %s", filename, line, runtime.FuncForPC(pc).Name()) +} + +func Stack(all bool) []byte { + buf := make([]byte, 4096) + + for { + n := runtime.Stack(buf, all) + if n < len(buf) { + return buf[:n] + } + + buf = make([]byte, 2*len(buf)) + } +} diff --git a/common/math/big.go b/common/math/big.go index 1af5b4d879..4ccf89e38c 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -20,6 +20,8 @@ package math import ( "fmt" "math/big" + + "github.com/holiman/uint256" ) // Various big integer limit values. @@ -132,6 +134,7 @@ func MustParseBig256(s string) *big.Int { // BigPow returns a ** b as a big integer. 
func BigPow(a, b int64) *big.Int { r := big.NewInt(a) + return r.Exp(r, big.NewInt(b), nil) } @@ -140,6 +143,15 @@ func BigMax(x, y *big.Int) *big.Int { if x.Cmp(y) < 0 { return y } + + return x +} + +func BigMaxUint(x, y *uint256.Int) *uint256.Int { + if x.Lt(y) { + return y + } + return x } @@ -148,6 +160,15 @@ func BigMin(x, y *big.Int) *big.Int { if x.Cmp(y) > 0 { return y } + + return x +} + +func BigMinUint256(x, y *uint256.Int) *uint256.Int { + if x.Gt(y) { + return y + } + return x } @@ -227,10 +248,10 @@ func U256Bytes(n *big.Int) []byte { // S256 interprets x as a two's complement number. // x must not exceed 256 bits (the result is undefined if it does) and is not modified. // -// S256(0) = 0 -// S256(1) = 1 -// S256(2**255) = -2**255 -// S256(2**256-1) = -1 +// S256(0) = 0 +// S256(1) = 1 +// S256(2**255) = -2**255 +// S256(2**256-1) = -1 func S256(x *big.Int) *big.Int { if x.Cmp(tt255) < 0 { return x diff --git a/common/math/uint.go b/common/math/uint.go new file mode 100644 index 0000000000..96b8261884 --- /dev/null +++ b/common/math/uint.go @@ -0,0 +1,23 @@ +package math + +import ( + "math/big" + + "github.com/holiman/uint256" +) + +var ( + U0 = uint256.NewInt(0) + U1 = uint256.NewInt(1) + U100 = uint256.NewInt(100) +) + +func U256LTE(a, b *uint256.Int) bool { + return a.Lt(b) || a.Eq(b) +} + +func FromBig(v *big.Int) *uint256.Int { + u, _ := uint256.FromBig(v) + + return u +} diff --git a/common/time.go b/common/time.go new file mode 100644 index 0000000000..6c7662e04c --- /dev/null +++ b/common/time.go @@ -0,0 +1,9 @@ +package common + +import "time" + +const TimeMilliseconds = "15:04:05.000" + +func NowMilliseconds() string { + return time.Now().Format(TimeMilliseconds) +} diff --git a/common/tracing/context.go b/common/tracing/context.go index 510e45d775..c3c6342502 100644 --- a/common/tracing/context.go +++ b/common/tracing/context.go @@ -4,6 +4,7 @@ import ( "context" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -51,11 +52,16 @@ func Trace(ctx context.Context, spanName string) (context.Context, trace.Span) { return tr.Start(ctx, spanName) } -func Exec(ctx context.Context, spanName string, opts ...Option) { +func Exec(ctx context.Context, instrumentationName, spanName string, opts ...Option) { var span trace.Span tr := FromContext(ctx) + if tr == nil && len(instrumentationName) != 0 { + tr = otel.GetTracerProvider().Tracer(instrumentationName) + ctx = WithTracer(ctx, tr) + } + if tr != nil { ctx, span = tr.Start(ctx, spanName) } @@ -85,7 +91,7 @@ func ElapsedTime(ctx context.Context, span trace.Span, msg string, fn func(conte fn(ctx, span) if span != nil { - span.SetAttributes(attribute.Int(msg, int(time.Since(now).Milliseconds()))) + span.SetAttributes(attribute.Int(msg, int(time.Since(now).Microseconds()))) } } diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 5b32263762..e01b26b688 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -298,6 +298,14 @@ func (c *Bor) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Head return c.verifyHeader(chain, header, nil) } +func (c *Bor) GetSpanner() Spanner { + return c.spanner +} + +func (c *Bor) SetSpanner(spanner Spanner) { + c.spanner = spanner +} + // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The // method returns a quit channel to abort the operations and a results channel to // retrieve the async verifications (the order is that of the input slice). 
@@ -454,6 +462,33 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t return err } + // Verify the validator list match the local contract + if IsSprintStart(number+1, c.config.CalculateSprint(number)) { + newValidators, err := c.spanner.GetCurrentValidatorsByBlockNrOrHash(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), number+1) + + if err != nil { + return err + } + + sort.Sort(valset.ValidatorsByAddress(newValidators)) + + headerVals, err := valset.ParseValidators(header.Extra[extraVanity : len(header.Extra)-extraSeal]) + + if err != nil { + return err + } + + if len(newValidators) != len(headerVals) { + return errInvalidSpanValidators + } + + for i, val := range newValidators { + if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) { + return errInvalidSpanValidators + } + } + } + // verify the validator list in the last sprint block if IsSprintStart(number, c.config.CalculateSprint(number)) { parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal] @@ -518,7 +553,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co hash := checkpoint.Hash() // get validators and current span - validators, err := c.spanner.GetCurrentValidators(context.Background(), hash, number+1) + validators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), hash, number+1) if err != nil { return nil, err } @@ -688,7 +723,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e // get validator set if number if IsSprintStart(number+1, c.config.CalculateSprint(number)) { - newValidators, err := c.spanner.GetCurrentValidators(context.Background(), header.ParentHash, number+1) + newValidators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), header.ParentHash, number+1) if err != nil { return errUnknownValidators } @@ -821,7 +856,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} - tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { // check and commit span err = c.checkAndCommitSpan(finalizeCtx, state, header, cx) }) @@ -832,7 +867,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } if c.HeimdallClient != nil { - tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { // commit states stateSyncData, err = c.CommitStates(finalizeCtx, state, header, cx) }) @@ -844,7 +879,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } } - tracing.Exec(finalizeCtx, "bor.changeContractCodeIfNeeded", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.changeContractCodeIfNeeded", func(ctx context.Context, span trace.Span) { err = c.changeContractCodeIfNeeded(headerNumber, state) }) @@ -854,7 +889,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } // No block rewards in PoA, so the state remains as it is - tracing.Exec(finalizeCtx, "bor.IntermediateRoot", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.IntermediateRoot", func(ctx 
context.Context, span trace.Span) { header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) }) @@ -1218,7 +1253,7 @@ func (c *Bor) SetHeimdallClient(h IHeimdallClient) { } func (c *Bor) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { - return c.spanner.GetCurrentValidators(ctx, headerHash, blockNumber) + return c.spanner.GetCurrentValidatorsByHash(ctx, headerHash, blockNumber) } // diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go index e0f2d66c6b..9307a0337e 100644 --- a/consensus/bor/heimdall/span/spanner.go +++ b/consensus/bor/heimdall/span/spanner.go @@ -89,7 +89,7 @@ func (c *ChainSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Has } // GetCurrentValidators get current validators -func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { +func (c *ChainSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -107,16 +107,13 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm toAddress := c.validatorContractAddress gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2)) - // block - blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false) - result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{ Gas: &gas, To: &toAddress, Data: &msgData, - }, blockNr, nil) + }, blockNrOrHash, nil) if err != nil { - panic(err) + return nil, err } var ( @@ -144,6 +141,12 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm return valz, nil } +func (c *ChainSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { + blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false) + + return c.GetCurrentValidatorsByBlockNrOrHash(ctx, blockNr, blockNumber) +} + const method = "commitSpan" func (c *ChainSpanner) CommitSpan(ctx context.Context, heimdallSpan HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error { diff --git a/consensus/bor/span.go b/consensus/bor/span.go index 86a58fa42e..179f92c79c 100644 --- a/consensus/bor/span.go +++ b/consensus/bor/span.go @@ -9,11 +9,13 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" ) //go:generate mockgen -destination=./span_mock.go -package=bor . 
Spanner type Spanner interface { GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) - GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) + GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) + GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error } diff --git a/consensus/bor/span_mock.go b/consensus/bor/span_mock.go index 6d5f62e25d..910e81716c 100644 --- a/consensus/bor/span_mock.go +++ b/consensus/bor/span_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: Spanner) +// Source: consensus/bor/span.go // Package bor is a generated GoMock package. package bor @@ -14,6 +14,7 @@ import ( core "github.com/ethereum/go-ethereum/core" state "github.com/ethereum/go-ethereum/core/state" types "github.com/ethereum/go-ethereum/core/types" + rpc "github.com/ethereum/go-ethereum/rpc" gomock "github.com/golang/mock/gomock" ) @@ -41,45 +42,60 @@ func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder { } // CommitSpan mocks base method. -func (m *MockSpanner) CommitSpan(arg0 context.Context, arg1 span.HeimdallSpan, arg2 *state.StateDB, arg3 *types.Header, arg4 core.ChainContext) error { +func (m *MockSpanner) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "CommitSpan", ctx, heimdallSpan, state, header, chainContext) ret0, _ := ret[0].(error) return ret0 } // CommitSpan indicates an expected call of CommitSpan. -func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSpannerMockRecorder) CommitSpan(ctx, heimdallSpan, state, header, chainContext interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), ctx, heimdallSpan, state, header, chainContext) } // GetCurrentSpan mocks base method. -func (m *MockSpanner) GetCurrentSpan(arg0 context.Context, arg1 common.Hash) (*span.Span, error) { +func (m *MockSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSpan", arg0, arg1) + ret := m.ctrl.Call(m, "GetCurrentSpan", ctx, headerHash) ret0, _ := ret[0].(*span.Span) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCurrentSpan indicates an expected call of GetCurrentSpan. 
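// (Sketch of intended test usage:
//	spanner.EXPECT().GetCurrentSpan(gomock.Any(), gomock.Any()).Return(&span.Span{}, nil).AnyTimes()
// stubs the current span for code under test.)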
-func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSpannerMockRecorder) GetCurrentSpan(ctx, headerHash interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), ctx, headerHash) } -// GetCurrentValidators mocks base method. -func (m *MockSpanner) GetCurrentValidators(arg0 context.Context, arg1 common.Hash, arg2 uint64) ([]*valset.Validator, error) { +// GetCurrentValidatorsByBlockNrOrHash mocks base method. +func (m *MockSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "GetCurrentValidatorsByBlockNrOrHash", ctx, blockNrOrHash, blockNumber) ret0, _ := ret[0].([]*valset.Validator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentValidators indicates an expected call of GetCurrentValidators. -func (mr *MockSpannerMockRecorder) GetCurrentValidators(arg0, arg1, arg2 interface{}) *gomock.Call { +// GetCurrentValidatorsByBlockNrOrHash indicates an expected call of GetCurrentValidatorsByBlockNrOrHash. +func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByBlockNrOrHash(ctx, blockNrOrHash, blockNumber interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidators", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidators), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByBlockNrOrHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByBlockNrOrHash), ctx, blockNrOrHash, blockNumber) +} + +// GetCurrentValidatorsByHash mocks base method. +func (m *MockSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidatorsByHash", ctx, headerHash, blockNumber) + ret0, _ := ret[0].([]*valset.Validator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidatorsByHash indicates an expected call of GetCurrentValidatorsByHash. +func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByHash(ctx, headerHash, blockNumber interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByHash), ctx, headerHash, blockNumber) } diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 193a5b84e2..00a8ab5b58 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -20,6 +20,8 @@ import ( "fmt" "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" @@ -92,3 +94,54 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { ) } } + +// CalcBaseFee calculates the basefee of the header. 
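+// It is the uint256 counterpart of CalcBaseFee above and is intended to
+// compute the same value without big.Int allocations, i.e.
+// CalcBaseFeeUint(config, parent).ToBig().Cmp(CalcBaseFee(config, parent)) == 0.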
+func CalcBaseFeeUint(config *params.ChainConfig, parent *types.Header) *uint256.Int { + var ( + initialBaseFeeUint = uint256.NewInt(params.InitialBaseFee) + baseFeeChangeDenominatorUint64 = params.BaseFeeChangeDenominator(config.Bor, parent.Number) + baseFeeChangeDenominatorUint = uint256.NewInt(baseFeeChangeDenominatorUint64) + ) + + // If the current block is the first EIP-1559 block, return the InitialBaseFee. + if !config.IsLondon(parent.Number) { + return initialBaseFeeUint.Clone() + } + + var ( + parentGasTarget = parent.GasLimit / params.ElasticityMultiplier + parentGasTargetBig = uint256.NewInt(parentGasTarget) + ) + + // If the parent gasUsed is the same as the target, the baseFee remains unchanged. + if parent.GasUsed == parentGasTarget { + return math.FromBig(parent.BaseFee) + } + + if parent.GasUsed > parentGasTarget { + // If the parent block used more gas than its target, the baseFee should increase. + gasUsedDelta := uint256.NewInt(parent.GasUsed - parentGasTarget) + + parentBaseFee := math.FromBig(parent.BaseFee) + x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := math.BigMaxUint( + x.Div(y, baseFeeChangeDenominatorUint), + math.U1, + ) + + return x.Add(parentBaseFee, baseFeeDelta) + } + + // Otherwise if the parent block used less gas than its target, the baseFee should decrease. + gasUsedDelta := uint256.NewInt(parentGasTarget - parent.GasUsed) + parentBaseFee := math.FromBig(parent.BaseFee) + x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := x.Div(y, baseFeeChangeDenominatorUint) + + return math.BigMaxUint( + x.Sub(parentBaseFee, baseFeeDelta), + math.U0.Clone(), + ) +} diff --git a/core/blockchain.go b/core/blockchain.go index fed1d04268..680cb7dce6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,21 +18,29 @@ package core import ( + "compress/gzip" + "context" "errors" "fmt" "io" "math/big" + "os" + "path/filepath" "sort" + "strings" "sync" "sync/atomic" "time" lru "github.com/hashicorp/golang-lru" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/common/tracing" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -1349,46 +1357,89 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } // WriteBlockWithState writes the block and all associated state to the database. -func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { +func (bc *BlockChain) WriteBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { if !bc.chainmu.TryLock() { return NonStatTy, errChainStopped } defer bc.chainmu.Unlock() - return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent) + return bc.writeBlockAndSetHead(ctx, block, receipts, logs, state, emitHeadEvent) } // writeBlockAndSetHead writes the block and all associated state to the database, // and also it applies the given block as the new chain head. This function expects // the chain mutex to be held. 
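// The context argument introduced below only feeds tracing spans; callers
// without a meaningful context can pass context.Background(), as insertChain
// now does.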
-func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { +func (bc *BlockChain) writeBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { + writeBlockAndSetHeadCtx, span := tracing.StartSpan(ctx, "blockchain.writeBlockAndSetHead") + defer tracing.EndSpan(span) + var stateSyncLogs []*types.Log - if stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state); err != nil { + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeBlockWithState", func(_ context.Context, span trace.Span) { + stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state) + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Bool("error", err != nil), + ) + }) + + if err != nil { return NonStatTy, err } + currentBlock := bc.CurrentBlock() - reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) + + var reorg bool + + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.ReorgNeeded", func(_ context.Context, span trace.Span) { + reorg, err = bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Int("current block", int(currentBlock.Number().Uint64())), + attribute.Bool("reorg needed", reorg), + attribute.Bool("error", err != nil), + ) + }) if err != nil { return NonStatTy, err } - if reorg { - // Reorganise the chain if the parent is not the head block - if block.ParentHash() != currentBlock.Hash() { - if err := bc.reorg(currentBlock, block); err != nil { - return NonStatTy, err + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.reorg", func(_ context.Context, span trace.Span) { + if reorg { + // Reorganise the chain if the parent is not the head block + if block.ParentHash() != currentBlock.Hash() { + if err = bc.reorg(currentBlock, block); err != nil { + status = NonStatTy + } } + status = CanonStatTy + } else { + status = SideStatTy } - status = CanonStatTy - } else { - status = SideStatTy + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Int("current block", int(currentBlock.Number().Uint64())), + attribute.Bool("reorg needed", reorg), + attribute.Bool("error", err != nil), + attribute.String("status", string(status)), + ) + }) + + if status == NonStatTy { + return } + // Set new head. if status == CanonStatTy { - bc.writeHeadBlock(block) + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeHeadBlock", func(_ context.Context, _ trace.Span) { + bc.writeHeadBlock(block) + }) } + bc.futureBlocks.Remove(block.Hash()) if status == CanonStatTy { @@ -1785,7 +1836,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) // Don't set the head, only insert the block _, err = bc.writeBlockWithState(block, receipts, logs, statedb) } else { - status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) + status, err = bc.writeBlockAndSetHead(context.Background(), block, receipts, logs, statedb, false) } atomic.StoreUint32(&followupInterrupt, 1) if err != nil { @@ -2194,6 +2245,35 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } else { // len(newChain) == 0 && len(oldChain) > 0 // rewind the canonical chain to a lower point. 
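+		// Dump both sides of the impossible reorg (old chain, old head and
+		// new head) as gzipped RLP under ~/impossible-reorgs/ so the failure
+		// can be reported and replayed offline.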
+ + home, err := os.UserHomeDir() + if err != nil { + fmt.Println("Impossible reorg : Unable to get user home dir", "Error", err) + } + outPath := filepath.Join(home, "impossible-reorgs", fmt.Sprintf("%v-impossibleReorg", time.Now().Format(time.RFC3339))) + + if _, err := os.Stat(outPath); errors.Is(err, os.ErrNotExist) { + err := os.MkdirAll(outPath, os.ModePerm) + if err != nil { + log.Error("Impossible reorg : Unable to create Dir", "Error", err) + } + } else { + err = ExportBlocks(oldChain, filepath.Join(outPath, "oldChain.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export oldChain", "Error", err) + } + + err = ExportBlocks([]*types.Block{oldBlock}, filepath.Join(outPath, "oldBlock.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export oldBlock", "Error", err) + } + + err = ExportBlocks([]*types.Block{newBlock}, filepath.Join(outPath, "newBlock.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export newBlock", "Error", err) + } + } + log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain)) } // Insert the new chain(except the head block(reverse order)), @@ -2246,6 +2326,44 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { return nil } +// ExportBlocks exports blocks into the specified file, truncating any data +// already present in the file. +func ExportBlocks(blocks []*types.Block, fn string) error { + log.Info("Exporting blockchain", "file", fn) + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Iterate over the blocks and export them + if err := ExportN(writer, blocks); err != nil { + return err + } + + log.Info("Exported blocks", "file", fn) + + return nil +} + +// ExportBlock writes a block to the given writer. +func ExportN(w io.Writer, blocks []*types.Block) error { + for _, block := range blocks { + if err := block.EncodeRLP(w); err != nil { + return err + } + } + + return nil +} + // InsertBlockWithoutSetHead executes the block, runs the necessary verification // upon it and then persist the block and the associate state into the database. 
// The key difference between the InsertChain is it won't do the canonical chain diff --git a/core/tests/blockchain_repair_test.go b/core/tests/blockchain_repair_test.go index 9b166b7165..d18418727b 100644 --- a/core/tests/blockchain_repair_test.go +++ b/core/tests/blockchain_repair_test.go @@ -1796,7 +1796,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: miner.TestBankAddress, diff --git a/core/tx_journal.go b/core/tx_journal.go index d282126a08..980bdb9864 100644 --- a/core/tx_journal.go +++ b/core/tx_journal.go @@ -61,11 +61,13 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { if _, err := os.Stat(journal.path); os.IsNotExist(err) { return nil } + // Open the journal for loading any past transactions input, err := os.Open(journal.path) if err != nil { return err } + defer input.Close() // Temporarily discard any journal additions (don't double add on load) @@ -80,29 +82,35 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { // appropriate progress counters. Then use this method to load all the // journaled transactions in small-ish batches. loadBatch := func(txs types.Transactions) { + errs := add(txs) + + dropped = len(errs) + for _, err := range add(txs) { - if err != nil { - log.Debug("Failed to add journaled transaction", "err", err) - dropped++ - } + log.Debug("Failed to add journaled transaction", "err", err) } } var ( failure error batch types.Transactions ) + for { // Parse the next transaction and terminate on error tx := new(types.Transaction) + if err = stream.Decode(tx); err != nil { if err != io.EOF { failure = err } + if batch.Len() > 0 { loadBatch(batch) } + break } + // New transaction parsed, queue up for later, import if threshold is reached total++ @@ -111,6 +119,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { batch = batch[:0] } } + log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) return failure diff --git a/core/tx_list.go b/core/tx_list.go index f141a03bbd..851f732905 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -25,8 +25,12 @@ import ( "sync/atomic" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for @@ -54,36 +58,67 @@ func (h *nonceHeap) Pop() interface{} { type txSortedMap struct { items map[uint64]*types.Transaction // Hash map storing the transaction data index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) - cache types.Transactions // Cache of the transactions already sorted + m sync.RWMutex + + cache types.Transactions // Cache of the transactions already sorted + isEmpty bool + cacheMu sync.RWMutex } // newTxSortedMap creates a new nonce-sorted transaction map. 
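// The map now carries two locks: m guards items and index, while cacheMu
// guards the sorted cache. isEmpty is best read as "cache is stale": it
// starts true and is reset to true whenever a mutation invalidates the cache.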
func newTxSortedMap() *txSortedMap { return &txSortedMap{ - items: make(map[uint64]*types.Transaction), - index: new(nonceHeap), + items: make(map[uint64]*types.Transaction), + index: new(nonceHeap), + isEmpty: true, } } // Get retrieves the current transactions associated with the given nonce. func (m *txSortedMap) Get(nonce uint64) *types.Transaction { + m.m.RLock() + defer m.m.RUnlock() + return m.items[nonce] } +func (m *txSortedMap) Has(nonce uint64) bool { + if m == nil { + return false + } + + m.m.RLock() + defer m.m.RUnlock() + + return m.items[nonce] != nil +} + // Put inserts a new transaction into the map, also updating the map's nonce // index. If a transaction already exists with the same nonce, it's overwritten. func (m *txSortedMap) Put(tx *types.Transaction) { + m.m.Lock() + defer m.m.Unlock() + nonce := tx.Nonce() if m.items[nonce] == nil { heap.Push(m.index, nonce) } - m.items[nonce], m.cache = tx, nil + + m.items[nonce] = tx + + m.cacheMu.Lock() + m.isEmpty = true + m.cache = nil + m.cacheMu.Unlock() } // Forward removes all transactions from the map with a nonce lower than the // provided threshold. Every removed transaction is returned for any post-removal // maintenance. func (m *txSortedMap) Forward(threshold uint64) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + var removed types.Transactions // Pop off heap items until the threshold is reached @@ -92,10 +127,15 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions { removed = append(removed, m.items[nonce]) delete(m.items, nonce) } + // If we had a cached order, shift the front + m.cacheMu.Lock() if m.cache != nil { + hitCacheCounter.Inc(1) m.cache = m.cache[len(removed):] } + m.cacheMu.Unlock() + return removed } @@ -105,21 +145,51 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions { // If you want to do several consecutive filterings, it's therefore better to first // do a .filter(func1) followed by .Filter(func2) or reheap() func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + removed := m.filter(filter) // If transactions were removed, the heap and cache are ruined if len(removed) > 0 { - m.reheap() + m.reheap(false) } return removed } -func (m *txSortedMap) reheap() { - *m.index = make([]uint64, 0, len(m.items)) +func (m *txSortedMap) reheap(withRlock bool) { + index := make(nonceHeap, 0, len(m.items)) + + if withRlock { + m.m.RLock() + log.Info("[DEBUG] Acquired lock over txpool map while performing reheap") + } + for nonce := range m.items { - *m.index = append(*m.index, nonce) + index = append(index, nonce) } - heap.Init(m.index) + + if withRlock { + m.m.RUnlock() + } + + heap.Init(&index) + + if withRlock { + m.m.Lock() + } + + m.index = &index + + if withRlock { + m.m.Unlock() + } + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) } // filter is identical to Filter, but **does not** regenerate the heap. This method @@ -135,7 +205,12 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac } } if len(removed) > 0 { + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) } return removed } @@ -143,45 +218,66 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. 
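// For example, Cap(0) drains the map entirely, returning the dropped
// transactions ordered from highest nonce to lowest.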
func (m *txSortedMap) Cap(threshold int) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if the number of items is under the limit if len(m.items) <= threshold { return nil } + // Otherwise gather and drop the highest nonce'd transactions var drops types.Transactions sort.Sort(*m.index) + for size := len(m.items); size > threshold; size-- { drops = append(drops, m.items[(*m.index)[size-1]]) delete(m.items, (*m.index)[size-1]) } + *m.index = (*m.index)[:threshold] heap.Init(m.index) // If we had a cache, shift the back + m.cacheMu.Lock() if m.cache != nil { m.cache = m.cache[:len(m.cache)-len(drops)] } + m.cacheMu.Unlock() + return drops } // Remove deletes a transaction from the maintained map, returning whether the // transaction was found. func (m *txSortedMap) Remove(nonce uint64) bool { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if no transaction is present _, ok := m.items[nonce] if !ok { return false } + // Otherwise delete the transaction and fix the heap index for i := 0; i < m.index.Len(); i++ { if (*m.index)[i] == nonce { heap.Remove(m.index, i) + break } } + delete(m.items, nonce) + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) return true } @@ -194,55 +290,129 @@ func (m *txSortedMap) Remove(nonce uint64) bool { // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! func (m *txSortedMap) Ready(start uint64) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if no transactions are available if m.index.Len() == 0 || (*m.index)[0] > start { return nil } + // Otherwise start accumulating incremental transactions var ready types.Transactions + for next := (*m.index)[0]; m.index.Len() > 0 && (*m.index)[0] == next; next++ { ready = append(ready, m.items[next]) delete(m.items, next) heap.Pop(m.index) } + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) return ready } // Len returns the length of the transaction map. 
func (m *txSortedMap) Len() int { + m.m.RLock() + defer m.m.RUnlock() + return len(m.items) } func (m *txSortedMap) flatten() types.Transactions { // If the sorting was not cached yet, create and cache it - if m.cache == nil { - m.cache = make(types.Transactions, 0, len(m.items)) + m.cacheMu.Lock() + defer m.cacheMu.Unlock() + + if m.isEmpty { + m.isEmpty = false // to simulate sync.Once + + m.cacheMu.Unlock() + + m.m.RLock() + + cache := make(types.Transactions, 0, len(m.items)) + for _, tx := range m.items { - m.cache = append(m.cache, tx) + cache = append(cache, tx) } - sort.Sort(types.TxByNonce(m.cache)) + + m.m.RUnlock() + + // exclude sorting from locks + sort.Sort(types.TxByNonce(cache)) + + m.cacheMu.Lock() + m.cache = cache + + reinitCacheGauge.Inc(1) + missCacheCounter.Inc(1) + } else { + hitCacheCounter.Inc(1) } + return m.cache } +func (m *txSortedMap) lastElement() *types.Transaction { + // If the sorting was not cached yet, create and cache it + m.cacheMu.Lock() + defer m.cacheMu.Unlock() + + cache := m.cache + + if m.isEmpty { + m.isEmpty = false // to simulate sync.Once + + m.cacheMu.Unlock() + + m.m.RLock() + cache = make(types.Transactions, 0, len(m.items)) + + for _, tx := range m.items { + cache = append(cache, tx) + } + + m.m.RUnlock() + + // exclude sorting from locks + sort.Sort(types.TxByNonce(cache)) + + m.cacheMu.Lock() + m.cache = cache + + reinitCacheGauge.Inc(1) + missCacheCounter.Inc(1) + } else { + hitCacheCounter.Inc(1) + } + + ln := len(cache) + if ln == 0 { + return nil + } + + return cache[len(cache)-1] +} + // Flatten creates a nonce-sorted slice of transactions based on the loosely // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. func (m *txSortedMap) Flatten() types.Transactions { // Copy the cache to prevent accidental modifications - cache := m.flatten() - txs := make(types.Transactions, len(cache)) - copy(txs, cache) - return txs + return m.flatten() } // LastElement returns the last element of a flattened list, thus, the // transaction with the highest nonce func (m *txSortedMap) LastElement() *types.Transaction { - cache := m.flatten() - return cache[len(cache)-1] + return m.lastElement() } // txList is a "list" of transactions belonging to an account, sorted by account @@ -253,17 +423,18 @@ type txList struct { strict bool // Whether nonces are strictly continuous or not txs *txSortedMap // Heap indexed sorted hash map of the transactions - costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance) - gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) + gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + totalcost *big.Int // Total cost of all transactions in the list } // newTxList create a new transaction list for maintaining nonce-indexable fast, // gapped, sortable transaction lists. 
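// The list additionally tracks totalcost, the running sum of Cost() over
// its transactions, which lets validateTx detect overdrafts in O(1).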
func newTxList(strict bool) *txList { return &txList{ - strict: strict, - txs: newTxSortedMap(), - costcap: new(big.Int), + strict: strict, + txs: newTxSortedMap(), + totalcost: new(big.Int), } } @@ -285,31 +456,41 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran if old.GasFeeCapCmp(tx) >= 0 || old.GasTipCapCmp(tx) >= 0 { return false, nil } + // thresholdFeeCap = oldFC * (100 + priceBump) / 100 - a := big.NewInt(100 + int64(priceBump)) - aFeeCap := new(big.Int).Mul(a, old.GasFeeCap()) - aTip := a.Mul(a, old.GasTipCap()) + a := uint256.NewInt(100 + priceBump) + aFeeCap := uint256.NewInt(0).Mul(a, old.GasFeeCapUint()) + aTip := a.Mul(a, old.GasTipCapUint()) // thresholdTip = oldTip * (100 + priceBump) / 100 - b := big.NewInt(100) + b := cmath.U100 thresholdFeeCap := aFeeCap.Div(aFeeCap, b) thresholdTip := aTip.Div(aTip, b) // We have to ensure that both the new fee cap and tip are higher than the // old ones as well as checking the percentage threshold to ensure that // this is accurate for low (Wei-level) gas price replacements. - if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 { + if tx.GasFeeCapUIntLt(thresholdFeeCap) || tx.GasTipCapUIntLt(thresholdTip) { return false, nil } + // Old is being replaced, subtract old cost + l.subTotalCost([]*types.Transaction{old}) } + + // Add new tx cost to totalcost + l.totalcost.Add(l.totalcost, tx.Cost()) + // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) - if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 { + + if cost := tx.CostUint(); l.costcap == nil || l.costcap.Lt(cost) { l.costcap = cost } + if gas := tx.Gas(); l.gascap < gas { l.gascap = gas } + return true, old } @@ -317,7 +498,10 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran // provided threshold. Every removed transaction is returned for any post-removal // maintenance. func (l *txList) Forward(threshold uint64) types.Transactions { - return l.txs.Forward(threshold) + txs := l.txs.Forward(threshold) + l.subTotalCost(txs) + + return txs } // Filter removes all transactions from the list with a cost or gas limit higher @@ -329,17 +513,20 @@ func (l *txList) Forward(threshold uint64) types.Transactions { // a point in calculating all the costs or if the balance covers all. If the threshold // is lower than the costgas cap, the caps will be reset to a new high after removing // the newly invalidated transactions. 
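// For example, after an account's balance drops, a sketch of the call is
//
//	removed, invalids := list.Filter(newBalance, blockGasLimit)
//
// where newBalance is a *uint256.Int; in strict mode, invalids holds the
// higher-nonce transactions stranded behind a removal gap.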
-func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) { +func (l *txList) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) { // If all transactions are below the threshold, short circuit - if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit { + if cmath.U256LTE(l.costcap, costLimit) && l.gascap <= gasLimit { return nil, nil } - l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds + + l.costcap = costLimit.Clone() // Lower the caps to the thresholds l.gascap = gasLimit // Filter out all the transactions above the account's funds + cost := uint256.NewInt(0) removed := l.txs.Filter(func(tx *types.Transaction) bool { - return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0 + cost.SetFromBig(tx.Cost()) + return tx.Gas() > gasLimit || cost.Gt(costLimit) }) if len(removed) == 0 { @@ -354,16 +541,27 @@ func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions lowest = nonce } } + + l.txs.m.Lock() invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest }) + l.txs.m.Unlock() } - l.txs.reheap() + // Reset total cost + l.subTotalCost(removed) + l.subTotalCost(invalids) + + l.txs.reheap(true) + return removed, invalids } // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. func (l *txList) Cap(threshold int) types.Transactions { - return l.txs.Cap(threshold) + txs := l.txs.Cap(threshold) + l.subTotalCost(txs) + + return txs } // Remove deletes a transaction from the maintained list, returning whether the @@ -375,9 +573,14 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { if removed := l.txs.Remove(nonce); !removed { return false, nil } + + l.subTotalCost([]*types.Transaction{tx}) // In strict mode, filter out non-executable transactions if l.strict { - return true, l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + txs := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + l.subTotalCost(txs) + + return true, txs } return true, nil } @@ -390,7 +593,10 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! func (l *txList) Ready(start uint64) types.Transactions { - return l.txs.Ready(start) + txs := l.txs.Ready(start) + l.subTotalCost(txs) + + return txs } // Len returns the length of the transaction list. @@ -416,13 +622,26 @@ func (l *txList) LastElement() *types.Transaction { return l.txs.LastElement() } +func (l *txList) Has(nonce uint64) bool { + return l != nil && l.txs.items[nonce] != nil +} + +// subTotalCost subtracts the cost of the given transactions from the +// total cost of all transactions. +func (l *txList) subTotalCost(txs []*types.Transaction) { + for _, tx := range txs { + l.totalcost.Sub(l.totalcost, tx.Cost()) + } +} + // priceHeap is a heap.Interface implementation over transactions for retrieving // price-sorted transactions to discard when the pool fills up. If baseFee is set // then the heap is sorted based on the effective tip based on the given base fee. // If baseFee is nil then the sorting is based on gasFeeCap. 
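// The baseFee field below becomes a *uint256.Int guarded by baseFeeMu,
// since SetBaseFee may update it concurrently with cmp.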
type priceHeap struct { - baseFee *big.Int // heap should always be re-sorted after baseFee is changed - list []*types.Transaction + baseFee *uint256.Int // heap should always be re-sorted after baseFee is changed + list []*types.Transaction + baseFeeMu sync.RWMutex } func (h *priceHeap) Len() int { return len(h.list) } @@ -440,16 +659,24 @@ func (h *priceHeap) Less(i, j int) bool { } func (h *priceHeap) cmp(a, b *types.Transaction) int { + h.baseFeeMu.RLock() + if h.baseFee != nil { // Compare effective tips if baseFee is specified - if c := a.EffectiveGasTipCmp(b, h.baseFee); c != 0 { + if c := a.EffectiveGasTipTxUintCmp(b, h.baseFee); c != 0 { + h.baseFeeMu.RUnlock() + return c } } + + h.baseFeeMu.RUnlock() + // Compare fee caps if baseFee is not specified or effective tips are equal if c := a.GasFeeCapCmp(b); c != 0 { return c } + // Compare tips if effective tips and fee caps are equal return a.GasTipCapCmp(b) } @@ -629,7 +856,10 @@ func (l *txPricedList) Reheap() { // SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not // necessary to call right before SetBaseFee when processing a new block. -func (l *txPricedList) SetBaseFee(baseFee *big.Int) { +func (l *txPricedList) SetBaseFee(baseFee *uint256.Int) { + l.urgent.baseFeeMu.Lock() l.urgent.baseFee = baseFee + l.urgent.baseFeeMu.Unlock() + l.Reheap() } diff --git a/core/tx_list_test.go b/core/tx_list_test.go index ef49cae1dd..80b8c1ef32 100644 --- a/core/tx_list_test.go +++ b/core/tx_list_test.go @@ -17,10 +17,11 @@ package core import ( - "math/big" "math/rand" "testing" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" ) @@ -59,11 +60,15 @@ func BenchmarkTxListAdd(b *testing.B) { for i := 0; i < len(txs); i++ { txs[i] = transaction(uint64(i), 0, key) } + // Insert the transactions in a random order - priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit)) + priceLimit := uint256.NewInt(DefaultTxPoolConfig.PriceLimit) b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { list := newTxList(true) + for _, v := range rand.Perm(len(txs)) { list.Add(txs[v], DefaultTxPoolConfig.PriceBump) list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) diff --git a/core/tx_pool.go b/core/tx_pool.go index 3d3f01eecb..ce73aa26ac 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -17,6 +17,8 @@ package core import ( + "container/heap" + "context" "errors" "fmt" "math" @@ -26,8 +28,12 @@ import ( "sync/atomic" "time" + "github.com/holiman/uint256" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/common/tracing" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -86,6 +92,14 @@ var ( // than some meaningful limit a user might use. This is not a consensus error // making the transaction invalid, rather a DOS protection. ErrOversizedData = errors.New("oversized data") + + // ErrFutureReplacePending is returned if a future transaction replaces a pending + // transaction. Future transactions should only be able to replace other future transactions. + ErrFutureReplacePending = errors.New("future transaction tries to replace pending") + + // ErrOverdraft is returned if a transaction would cause the senders balance to go negative + // thus invalidating a potential large number of transactions. 
+ ErrOverdraft = errors.New("transaction would cause overdraft") ) var ( @@ -127,6 +141,11 @@ var ( localGauge = metrics.NewRegisteredGauge("txpool/local", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) + resetCacheGauge = metrics.NewRegisteredGauge("txpool/resetcache", nil) + reinitCacheGauge = metrics.NewRegisteredGauge("txpool/reinittcache", nil) + hitCacheCounter = metrics.NewRegisteredCounter("txpool/cachehit", nil) + missCacheCounter = metrics.NewRegisteredCounter("txpool/cachemiss", nil) + reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) @@ -232,14 +251,17 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig { // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type TxPool struct { - config TxPoolConfig - chainconfig *params.ChainConfig - chain blockChain - gasPrice *big.Int - txFeed event.Feed - scope event.SubscriptionScope - signer types.Signer - mu sync.RWMutex + config TxPoolConfig + chainconfig *params.ChainConfig + chain blockChain + gasPrice *big.Int + gasPriceUint *uint256.Int + gasPriceMu sync.RWMutex + + txFeed event.Feed + scope event.SubscriptionScope + signer types.Signer + mu sync.RWMutex istanbul bool // Fork indicator whether we are in the istanbul stage. eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. @@ -252,11 +274,13 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + pendingCount int + pendingMu sync.RWMutex + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -301,6 +325,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block reorgShutdownCh: make(chan struct{}), initDoneCh: make(chan struct{}), gasPrice: new(big.Int).SetUint64(config.PriceLimit), + gasPriceUint: uint256.NewInt(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) @@ -377,9 +402,7 @@ func (pool *TxPool) loop() { // Handle stats reporting ticks case <-report.C: - pool.mu.RLock() pending, queued := pool.stats() - pool.mu.RUnlock() stales := int(atomic.LoadInt64(&pool.priced.stales)) if pending != prevPending || queued != prevQueued || stales != prevStales { @@ -389,22 +412,45 @@ func (pool *TxPool) loop() { // Handle inactive account transaction eviction case <-evict.C: - pool.mu.Lock() + now := time.Now() + + var ( + list types.Transactions + tx *types.Transaction + toRemove []common.Hash + ) + + pool.mu.RLock() for addr := range pool.queue { // Skip local transactions from the eviction mechanism if pool.locals.contains(addr) { continue } + // Any non-locals old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx 
:= range list { - pool.removeTx(tx.Hash(), true) + if now.Sub(pool.beats[addr]) > pool.config.Lifetime { + list = pool.queue[addr].Flatten() + for _, tx = range list { + toRemove = append(toRemove, tx.Hash()) } + queuedEvictionMeter.Mark(int64(len(list))) } } - pool.mu.Unlock() + + pool.mu.RUnlock() + + if len(toRemove) > 0 { + pool.mu.Lock() + + var hash common.Hash + + for _, hash = range toRemove { + pool.removeTx(hash, true) + } + + pool.mu.Unlock() + } // Handle local transaction journal rotation case <-journal.C: @@ -442,27 +488,45 @@ func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscripti // GasPrice returns the current gas price enforced by the transaction pool. func (pool *TxPool) GasPrice() *big.Int { - pool.mu.RLock() - defer pool.mu.RUnlock() + pool.gasPriceMu.RLock() + defer pool.gasPriceMu.RUnlock() return new(big.Int).Set(pool.gasPrice) } +func (pool *TxPool) GasPriceUint256() *uint256.Int { + pool.gasPriceMu.RLock() + defer pool.gasPriceMu.RUnlock() + + return pool.gasPriceUint.Clone() +} + // SetGasPrice updates the minimum price required by the transaction pool for a // new transaction, and drops all transactions below this threshold. func (pool *TxPool) SetGasPrice(price *big.Int) { - pool.mu.Lock() - defer pool.mu.Unlock() + pool.gasPriceMu.Lock() + defer pool.gasPriceMu.Unlock() old := pool.gasPrice pool.gasPrice = price + + if pool.gasPriceUint == nil { + pool.gasPriceUint, _ = uint256.FromBig(price) + } else { + pool.gasPriceUint.SetFromBig(price) + } + // if the min miner fee increased, remove transactions below the new threshold if price.Cmp(old) > 0 { + pool.mu.Lock() + defer pool.mu.Unlock() + // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead drop := pool.all.RemotesBelowTip(price) for _, tx := range drop { pool.removeTx(tx.Hash(), false) } + pool.priced.Removed(len(drop)) } @@ -481,9 +545,6 @@ func (pool *TxPool) Nonce(addr common.Address) uint64 { // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *TxPool) Stats() (int, int) { - pool.mu.RLock() - defer pool.mu.RUnlock() - return pool.stats() } @@ -491,47 +552,69 @@ func (pool *TxPool) Stats() (int, int) { // number of queued (non-executable) transactions. func (pool *TxPool) stats() (int, int) { pending := 0 + + pool.pendingMu.RLock() for _, list := range pool.pending { pending += list.Len() } + pool.pendingMu.RUnlock() + + pool.mu.RLock() + queued := 0 for _, list := range pool.queue { queued += list.Len() } + + pool.mu.RUnlock() + return pending, queued } // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and sorted by nonce. func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { - pool.mu.Lock() - defer pool.mu.Unlock() - pending := make(map[common.Address]types.Transactions) + + pool.pendingMu.RLock() for addr, list := range pool.pending { pending[addr] = list.Flatten() } + pool.pendingMu.RUnlock() + queued := make(map[common.Address]types.Transactions) + + pool.mu.RLock() + for addr, list := range pool.queue { queued[addr] = list.Flatten() } + + pool.mu.RUnlock() + return pending, queued } // ContentFrom retrieves the data content of the transaction pool, returning the // pending as well as queued transactions of this address, grouped by nonce. 
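// The two sides are snapshotted under different locks (pendingMu for
// pending, mu for the queue), so the combined view is not atomic.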
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { - pool.mu.RLock() - defer pool.mu.RUnlock() - var pending types.Transactions + + pool.pendingMu.RLock() if list, ok := pool.pending[addr]; ok { pending = list.Flatten() } + pool.pendingMu.RUnlock() + + pool.mu.RLock() + var queued types.Transactions if list, ok := pool.queue[addr]; ok { queued = list.Flatten() } + + pool.mu.RUnlock() + return pending, queued } @@ -542,35 +625,74 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types. // The enforceTips parameter can be used to do an extra filtering on the pending // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. -func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { - pool.mu.Lock() - defer pool.mu.Unlock() +// +//nolint:gocognit +func (pool *TxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions { + pending := make(map[common.Address]types.Transactions, 10) - pending := make(map[common.Address]types.Transactions) - for addr, list := range pool.pending { - txs := list.Flatten() + tracing.Exec(ctx, "TxpoolPending", "txpool.Pending()", func(ctx context.Context, span trace.Span) { + tracing.ElapsedTime(ctx, span, "txpool.Pending.RLock()", func(ctx context.Context, s trace.Span) { + pool.pendingMu.RLock() + }) - // If the miner requests tip enforcement, cap the lists now - if enforceTips && !pool.locals.contains(addr) { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { - txs = txs[:i] - break + defer pool.pendingMu.RUnlock() + + pendingAccounts := len(pool.pending) + + var pendingTxs int + + tracing.ElapsedTime(ctx, span, "Loop", func(ctx context.Context, s trace.Span) { + gasPriceUint := uint256.NewInt(0) + baseFee := uint256.NewInt(0) + + for addr, list := range pool.pending { + txs := list.Flatten() + + // If the miner requests tip enforcement, cap the lists now + if enforceTips && !pool.locals.contains(addr) { + for i, tx := range txs { + pool.pendingMu.RUnlock() + + pool.gasPriceMu.RLock() + if pool.gasPriceUint != nil { + gasPriceUint.Set(pool.gasPriceUint) + } + + pool.priced.urgent.baseFeeMu.Lock() + if pool.priced.urgent.baseFee != nil { + baseFee.Set(pool.priced.urgent.baseFee) + } + pool.priced.urgent.baseFeeMu.Unlock() + + pool.gasPriceMu.RUnlock() + + pool.pendingMu.RLock() + + if tx.EffectiveGasTipUintLt(gasPriceUint, baseFee) { + txs = txs[:i] + break + } + } + } + + if len(txs) > 0 { + pending[addr] = txs + pendingTxs += len(txs) } } - } - if len(txs) > 0 { - pending[addr] = txs - } - } + + tracing.SetAttributes(span, + attribute.Int("pending-transactions", pendingTxs), + attribute.Int("pending-accounts", pendingAccounts), + ) + }) + }) + return pending } // Locals retrieves the accounts currently considered local by the pool. func (pool *TxPool) Locals() []common.Address { - pool.mu.Lock() - defer pool.mu.Unlock() - return pool.locals.flatten() } @@ -579,14 +701,22 @@ func (pool *TxPool) Locals() []common.Address { // freely modified by calling code. func (pool *TxPool) local() map[common.Address]types.Transactions { txs := make(map[common.Address]types.Transactions) + + pool.locals.m.RLock() + defer pool.locals.m.RUnlock() + for addr := range pool.locals.accounts { + pool.pendingMu.RLock() if pending := pool.pending[addr]; pending != nil { txs[addr] = append(txs[addr], pending.Flatten()...) 
} + pool.pendingMu.RUnlock() + if queued := pool.queue[addr]; queued != nil { txs[addr] = append(txs[addr], queued.Flatten()...) } } + return txs } @@ -597,10 +727,12 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if !pool.eip2718 && tx.Type() != types.LegacyTxType { return ErrTxTypeNotSupported } + // Reject dynamic fee transactions until EIP-1559 activates. if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { return ErrTxTypeNotSupported } + // Reject transactions over defined size to prevent DOS attacks if uint64(tx.Size()) > txMaxSize { return ErrOversizedData @@ -615,47 +747,82 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if tx.Value().Sign() < 0 { return ErrNegativeValue } + // Ensure the transaction doesn't exceed the current block limit gas. if pool.currentMaxGas < tx.Gas() { return ErrGasLimit } + // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { + gasFeeCap := tx.GasFeeCapRef() + if gasFeeCap.BitLen() > 256 { return ErrFeeCapVeryHigh } - if tx.GasTipCap().BitLen() > 256 { + + // do NOT use uint256 here. results vs *big.Int are different + gasTipCap := tx.GasTipCapRef() + if gasTipCap.BitLen() > 256 { return ErrTipVeryHigh } + // Ensure gasFeeCap is greater than or equal to gasTipCap. - if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + gasTipCapU, _ := uint256.FromBig(gasTipCap) + if tx.GasFeeCapUIntLt(gasTipCapU) { return ErrTipAboveFeeCap } + // Make sure the transaction is signed properly. from, err := types.Sender(pool.signer, tx) if err != nil { return ErrInvalidSender } + // Drop non-local transactions under our own minimal accepted gas price or tip - if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { + pool.gasPriceMu.RLock() + + if !local && tx.GasTipCapUIntLt(pool.gasPriceUint) { + pool.gasPriceMu.RUnlock() + return ErrUnderpriced } + + pool.gasPriceMu.RUnlock() + // Ensure the transaction adheres to nonce ordering if pool.currentState.GetNonce(from) > tx.Nonce() { return ErrNonceTooLow } + // Transactor should have enough funds to cover the costs // cost == V + GP * GL - if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { + balance := pool.currentState.GetBalance(from) + if balance.Cmp(tx.Cost()) < 0 { return ErrInsufficientFunds } + // Verify that replacing transactions will not result in overdraft + list := pool.pending[from] + if list != nil { // Sender already has pending txs + sum := new(big.Int).Add(tx.Cost(), list.totalcost) + if repl := list.txs.Get(tx.Nonce()); repl != nil { + // Deduct the cost of a transaction replaced by this + sum.Sub(sum, repl.Cost()) + } + + if balance.Cmp(sum) < 0 { + log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum) + return ErrOverdraft + } + } // Ensure the transaction has more gas than the basic tx fee. 
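// (IntrinsicGas prices calldata bytes, access-list entries and contract
// creation; isHomestead is hard-wired to true here, Bor chains having
// activated Homestead from genesis.)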
 	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
 	if err != nil {
 		return err
 	}
+
 	if tx.Gas() < intrGas {
 		return ErrIntrinsicGas
 	}
+
 	return nil
 }
 
@@ -684,14 +851,19 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 		invalidTxMeter.Mark(1)
 		return false, err
 	}
+
+	// already validated by this point
+	from, _ := types.Sender(pool.signer, tx)
+
 	// If the transaction pool is full, discard underpriced transactions
 	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
 		// If the new transaction is underpriced, don't accept it
 		if !isLocal && pool.priced.Underpriced(tx) {
-			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
 			underpricedTxMeter.Mark(1)
 			return false, ErrUnderpriced
 		}
+
 		// We're about to replace a transaction. The reorg does a more thorough
 		// analysis of what to remove and how, but it runs async. We don't want to
 		// do too many replacements between reorg-runs, so we cap the number of
@@ -712,30 +884,61 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 			overflowedTxMeter.Mark(1)
 			return false, ErrTxPoolOverflow
 		}
-		// Bump the counter of rejections-since-reorg
-		pool.changesSinceReorg += len(drop)
+
+		// If the new transaction is a future transaction it should never churn pending transactions
+		if pool.isFuture(from, tx) {
+			var replacesPending bool
+
+			for _, dropTx := range drop {
+				dropSender, _ := types.Sender(pool.signer, dropTx)
+				if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
+					replacesPending = true
+					break
+				}
+			}
+
+			// Add all transactions back to the priced queue
+			if replacesPending {
+				for _, dropTx := range drop {
+					heap.Push(&pool.priced.urgent, dropTx)
+				}
+
+				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
+
+				return false, ErrFutureReplacePending
+			}
+		}
+
 		// Kick out the underpriced remote transactions.
 		for _, tx := range drop {
-			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
 			underpricedTxMeter.Mark(1)
-			pool.removeTx(tx.Hash(), false)
+
+			dropped := pool.removeTx(tx.Hash(), false)
+			pool.changesSinceReorg += dropped
 		}
 	}
+
 	// Try to replace an existing transaction in the pending pool
-	from, _ := types.Sender(pool.signer, tx) // already validated
-	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
+	pool.pendingMu.RLock()
+
+	list := pool.pending[from]
+
+	if list != nil && list.Overlaps(tx) {
 		// Nonce already pending, check if required price bump is met
 		inserted, old := list.Add(tx, pool.config.PriceBump)
+		pool.pendingCount++
+		pool.pendingMu.RUnlock()
+
 		if !inserted {
 			pendingDiscardMeter.Mark(1)
 			return false, ErrReplaceUnderpriced
 		}
+
 		// New transaction is better, replace old one
 		if old != nil {
 			pool.all.Remove(old.Hash())
 			pool.priced.Removed(1)
 			pendingReplaceMeter.Mark(1)
 		}
+
 		pool.all.Add(tx, isLocal)
 		pool.priced.Put(tx, isLocal)
 		pool.journalTx(from, tx)
@@ -744,8 +947,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 		// Successful promotion, bump the heartbeat
 		pool.beats[from] = time.Now()
+
 		return old != nil, nil
 	}
+
+	// Not unlocking an already-unlocked mutex: the branch above returns before reaching here.
+	pool.pendingMu.RUnlock()
+
 	// New transaction isn't replacing a pending one, push into queue
 	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
 	if err != nil {
@@ -766,6 +974,20 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 	return replaced, nil
 }
 
+// isFuture reports whether the given transaction is a future (not yet
+// executable) transaction: one that neither replaces a pending transaction
+// nor directly extends the pending run of nonces.
+func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
+	list := pool.pending[from]
+	if list == nil {
+		return pool.pendingNonces.get(from) != tx.Nonce()
+	}
+	// Sender has pending transactions.
+	if old := list.txs.Get(tx.Nonce()); old != nil {
+		return false // It replaces a pending transaction.
+	}
+	// Not replacing, check if parent nonce exists in pending.
+	return list.txs.Get(tx.Nonce()-1) == nil
+}
+
 // enqueueTx inserts a new transaction into the non-executable transaction queue.
 //
 // Note, this method assumes the pool lock is held!
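The isFuture guard introduced above can be read in isolation: a transaction is "future" when it neither replaces a pending nonce nor extends the contiguous pending run. A minimal sketch of that rule, with a plain map standing in for the pool's txList and nonce tracker (illustrative names, not bor's types):

package main

import "fmt"

// isFuture mirrors the patch's rule. pending models one sender's pending
// nonce set; nextNonce is the pool's next expected nonce for that sender.
func isFuture(pending map[uint64]bool, nextNonce uint64, nonce uint64) bool {
	if len(pending) == 0 {
		return nonce != nextNonce
	}
	if pending[nonce] {
		return false // replaces a pending transaction
	}
	return !pending[nonce-1] // executable only if it directly follows a pending nonce
}

func main() {
	pending := map[uint64]bool{3: true, 4: true}
	fmt.Println(isFuture(pending, 5, 4)) // false: replacement
	fmt.Println(isFuture(pending, 5, 5)) // false: extends the pending run
	fmt.Println(isFuture(pending, 5, 7)) // true: nonce gap, must stay queued
}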
@@ -835,19 +1057,25 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 	}()
 
 	// Try to insert the transaction into the pending queue
+	pool.pendingMu.Lock()
 	if pool.pending[addr] == nil {
 		pool.pending[addr] = newTxList(true)
 	}
 	list := pool.pending[addr]
 
 	inserted, old := list.Add(tx, pool.config.PriceBump)
+	pool.pendingCount++
+	pool.pendingMu.Unlock()
+
 	if !inserted {
 		// An older transaction was better, discard this
 		pool.all.Remove(hash)
 		pool.priced.Removed(1)
 		pendingDiscardMeter.Mark(1)
+
 		return false
 	}
+
 	// Otherwise discard any previous transaction and mark this
 	if old != nil {
 		pool.all.Remove(old.Hash())
@@ -857,11 +1085,13 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 		// Nothing was replaced, bump the pending counter
 		pendingGauge.Inc(1)
 	}
+
 	// Set the potentially new pending nonce and notify any subsystems of the new tx
 	pool.pendingNonces.set(addr, tx.Nonce()+1)
 
 	// Successful promotion, bump the heartbeat
 	pool.beats[addr] = time.Now()
+
 	return true
 }
 
@@ -877,8 +1107,7 @@ func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
 // AddLocal enqueues a single local transaction into the pool if it is valid. This is
 // a convenience wrapper around AddLocals.
 func (pool *TxPool) AddLocal(tx *types.Transaction) error {
-	errs := pool.AddLocals([]*types.Transaction{tx})
-	return errs[0]
+	return pool.addTx(tx, !pool.config.NoLocals, true)
 }
 
 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
@@ -895,108 +1124,216 @@ func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
 	return pool.addTxs(txs, false, true)
 }
 
+// AddRemoteSync enqueues a single remote transaction into the pool if it is
+// valid and waits for the internal pool reorganization to finish.
+func (pool *TxPool) AddRemoteSync(txs *types.Transaction) error {
+	return pool.addTx(txs, false, true)
+}
+
 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
-	errs := pool.AddRemotesSync([]*types.Transaction{tx})
-	return errs[0]
+	return pool.AddRemoteSync(tx)
 }
 
 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
 // wrapper around AddRemotes.
-//
-// Deprecated: use AddRemotes
 func (pool *TxPool) AddRemote(tx *types.Transaction) error {
-	errs := pool.AddRemotes([]*types.Transaction{tx})
-	return errs[0]
+	return pool.addTx(tx, false, false)
 }
 
 // addTxs attempts to queue a batch of transactions if they are valid.
 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
 	// Filter out known ones without obtaining the pool lock or recovering signatures
 	var (
-		errs = make([]error, len(txs))
+		errs []error
 		news = make([]*types.Transaction, 0, len(txs))
+
+		err error
+
+		hash common.Hash
 	)
-	for i, tx := range txs {
+
+	for _, tx := range txs {
 		// If the transaction is known, pre-set the error slot
-		if pool.all.Get(tx.Hash()) != nil {
-			errs[i] = ErrAlreadyKnown
+		hash = tx.Hash()
+
+		if pool.all.Get(hash) != nil {
+			errs = append(errs, ErrAlreadyKnown)
 			knownTxMeter.Mark(1)
+
 			continue
 		}
+
 		// Exclude transactions with invalid signatures as soon as
 		// possible and cache senders in transactions before
 		// obtaining lock
-		_, err := types.Sender(pool.signer, tx)
+		_, err = types.Sender(pool.signer, tx)
 		if err != nil {
-			errs[i] = ErrInvalidSender
+			errs = append(errs, ErrInvalidSender)
 			invalidTxMeter.Mark(1)
+
 			continue
 		}
+
 		// Accumulate all unknown transactions for deeper processing
 		news = append(news, tx)
 	}
+
 	if len(news) == 0 {
 		return errs
 	}
 
 	// Process all the new transactions and merge any errors into the original slice
 	pool.mu.Lock()
-	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+	errs, dirtyAddrs := pool.addTxsLocked(news, local)
 	pool.mu.Unlock()
 
-	var nilSlot = 0
-	for _, err := range newErrs {
-		for errs[nilSlot] != nil {
-			nilSlot++
-		}
-		errs[nilSlot] = err
-		nilSlot++
-	}
+	// Reorg the pool internals if needed and return
+	done := pool.requestPromoteExecutables(dirtyAddrs)
+	if sync {
+		<-done
+	}
+
+	return errs
+}
+
+// addTx attempts to queue a single transaction if it is valid.
+func (pool *TxPool) addTx(tx *types.Transaction, local, sync bool) error {
+	// Filter out known ones without obtaining the pool lock or recovering signatures
+	var (
+		err  error
+		hash common.Hash
+	)
+
+	func() {
+		// If the transaction is known, pre-set the error slot
+		hash = tx.Hash()
+
+		if pool.all.Get(hash) != nil {
+			err = ErrAlreadyKnown
+
+			knownTxMeter.Mark(1)
+
+			return
+		}
+
+		// Exclude transactions with invalid signatures as soon as
+		// possible and cache senders in transactions before
+		// obtaining lock
+		_, err = types.Sender(pool.signer, tx)
+		if err != nil {
+			invalidTxMeter.Mark(1)
+
+			return
+		}
+	}()
+
+	if err != nil {
+		return err
+	}
+
+	var dirtyAddrs *accountSet
+
+	// Process the new transaction and merge any errors into the original slice
+	pool.mu.Lock()
+	err, dirtyAddrs = pool.addTxLocked(tx, local)
+	pool.mu.Unlock()
+
 	// Reorg the pool internals if needed and return
 	done := pool.requestPromoteExecutables(dirtyAddrs)
 	if sync {
 		<-done
 	}
-	return errs
+
+	return err
 }
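The single-transaction path above keeps the same shape as the batch path: cheap duplicate and signature checks happen before pool.mu is taken, so the critical section only sees new, pre-validated items. A small self-contained sketch of that pre-filter pattern, with hypothetical types that are not the pool's:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errAlreadyKnown = errors.New("already known")

// pool is a toy stand-in: known plays the role of the lock-free lookup set,
// mu the role of the expensive global mutex.
type pool struct {
	known sync.Map // id -> struct{}
	mu    sync.Mutex
	items []string
}

func (p *pool) add(id string) error {
	if _, dup := p.known.LoadOrStore(id, struct{}{}); dup {
		return errAlreadyKnown // rejected without ever touching p.mu
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	p.items = append(p.items, id)
	return nil
}

func main() {
	p := &pool{}
	fmt.Println(p.add("tx1")) // <nil>
	fmt.Println(p.add("tx1")) // already known
}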
// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
 	dirty := newAccountSet(pool.signer)
-	errs := make([]error, len(txs))
-	for i, tx := range txs {
-		replaced, err := pool.add(tx, local)
-		errs[i] = err
+
+	var (
+		replaced bool
+		errs     []error
+	)
+
+	for _, tx := range txs {
+		var err error
+
+		replaced, err = pool.add(tx, local)
 		if err == nil && !replaced {
 			dirty.addTx(tx)
 		}
+
+		if err != nil {
+			errs = append(errs, err)
+		}
 	}
+
 	validTxMeter.Mark(int64(len(dirty.accounts)))
+
 	return errs, dirty
 }
 
+// addTxLocked attempts to queue a single transaction if it is valid.
+// The transaction pool lock must be held.
+func (pool *TxPool) addTxLocked(tx *types.Transaction, local bool) (error, *accountSet) {
+	dirty := newAccountSet(pool.signer)
+
+	var (
+		replaced bool
+		err      error
+	)
+
+	replaced, err = pool.add(tx, local)
+	if err == nil && !replaced {
+		dirty.addTx(tx)
+	}
+
+	validTxMeter.Mark(int64(len(dirty.accounts)))
+
+	return err, dirty
+}
+
 // Status returns the status (unknown/pending/queued) of a batch of transactions
 // identified by their hashes.
 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
 	status := make([]TxStatus, len(hashes))
+
+	var (
+		txList    *txList
+		isPending bool
+	)
+
 	for i, hash := range hashes {
 		tx := pool.Get(hash)
 		if tx == nil {
 			continue
 		}
+
 		from, _ := types.Sender(pool.signer, tx) // already validated
-		pool.mu.RLock()
-		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+
+		pool.pendingMu.RLock()
+
+		if txList = pool.pending[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
 			status[i] = TxStatusPending
-		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
-			status[i] = TxStatusQueued
+			isPending = true
+		} else {
+			isPending = false
 		}
+
+		pool.pendingMu.RUnlock()
+
+		if !isPending {
+			pool.mu.RLock()
+
+			if txList := pool.queue[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
+				status[i] = TxStatusQueued
+			}
+
+			pool.mu.RUnlock()
+		}
+
 		// implicit else: the tx may have been included into a block between
 		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
-		pool.mu.RUnlock()
 	}
+
 	return status
 }
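Status above now consults the two structures under their own locks, one after the other, instead of holding pool.mu across both. A compact sketch of that ordering and its documented trade-off (an entry can migrate between the two checks, yielding "unknown"); the names below are illustrative only:

package main

import (
	"fmt"
	"sync"
)

type index struct {
	mu sync.RWMutex
	m  map[string]bool
}

func (ix *index) has(k string) bool {
	ix.mu.RLock()
	defer ix.mu.RUnlock()
	return ix.m[k]
}

type status int

const (
	unknown status = iota
	pending
	queued
)

// lookup never holds both locks at once; a key may move (or be dropped)
// between the two checks, in which case unknown is the accepted answer.
func lookup(pendingIx, queuedIx *index, k string) status {
	if pendingIx.has(k) { // first lock, released before the second is taken
		return pending
	}
	if queuedIx.has(k) {
		return queued
	}
	return unknown
}

func main() {
	p := &index{m: map[string]bool{"a": true}}
	q := &index{m: map[string]bool{"b": true}}
	fmt.Println(lookup(p, q, "a"), lookup(p, q, "b"), lookup(p, q, "c")) // 1 2 0
}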
@@ -1013,12 +1350,14 @@ func (pool *TxPool) Has(hash common.Hash) bool {
 
 // removeTx removes a single transaction from the queue, moving all subsequent
 // transactions back to the future queue.
-func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
+// Returns the number of transactions removed from the pending queue.
+func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
 	// Fetch the transaction we wish to delete
 	tx := pool.all.Get(hash)
 	if tx == nil {
-		return
+		return 0
 	}
+
 	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
 
 	// Remove it from the list of known transactions
@@ -1026,39 +1365,59 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 	if outofbound {
 		pool.priced.Removed(1)
 	}
+
 	if pool.locals.contains(addr) {
 		localGauge.Dec(1)
 	}
+
 	// Remove the transaction from the pending lists and reset the account nonce
+	pool.pendingMu.Lock()
+
 	if pending := pool.pending[addr]; pending != nil {
 		if removed, invalids := pending.Remove(tx); removed {
+			pool.pendingCount--
+
 			// If no more pending transactions are left, remove the list
 			if pending.Empty() {
 				delete(pool.pending, addr)
 			}
+
+			pool.pendingMu.Unlock()
+
 			// Postpone any invalidated transactions
 			for _, tx := range invalids {
 				// Internal shuffle shouldn't touch the lookup set.
 				pool.enqueueTx(tx.Hash(), tx, false, false)
 			}
+
 			// Update the account nonce if needed
 			pool.pendingNonces.setIfLower(addr, tx.Nonce())
+
 			// Reduce the pending counter
 			pendingGauge.Dec(int64(1 + len(invalids)))
-			return
+
+			return 1 + len(invalids)
 		}
+
+		pool.pendingMu.TryLock()
 	}
+
+	pool.pendingMu.Unlock()
+
 	// Transaction is in the future queue
 	if future := pool.queue[addr]; future != nil {
 		if removed, _ := future.Remove(tx); removed {
 			// Reduce the queued counter
 			queuedGauge.Dec(1)
 		}
+
 		if future.Empty() {
 			delete(pool.queue, addr)
 			delete(pool.beats, addr)
 		}
 	}
+
+	return 0
 }
 
 // requestReset requests a pool reset to the new head block.
@@ -1109,8 +1468,10 @@ func (pool *TxPool) scheduleReorgLoop() {
 	for {
 		// Launch next background reorg if needed
 		if curDone == nil && launchNextRun {
+			ctx := context.Background()
+
 			// Run the background reorg and announcements
-			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
+			go pool.runReorg(ctx, nextDone, reset, dirtyAccounts, queuedEvents)
 
 			// Prepare everything for the next round of reorg
 			curDone, nextDone = nextDone, make(chan struct{})
@@ -1165,86 +1526,178 @@ func (pool *TxPool) scheduleReorgLoop() {
 }
 
 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
-func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
-	defer func(t0 time.Time) {
-		reorgDurationTimer.Update(time.Since(t0))
-	}(time.Now())
-	defer close(done)
-
-	var promoteAddrs []common.Address
-	if dirtyAccounts != nil && reset == nil {
-		// Only dirty accounts need to be promoted, unless we're resetting.
-		// For resets, all addresses in the tx queue will be promoted and
-		// the flatten operation can be avoided.
-		promoteAddrs = dirtyAccounts.flatten()
-	}
-	pool.mu.Lock()
-	if reset != nil {
-		// Reset from the old head to the new, rescheduling any reorged transactions
-		pool.reset(reset.oldHead, reset.newHead)
-
-		// Nonces were reset, discard any events that became stale
-		for addr := range events {
-			events[addr].Forward(pool.pendingNonces.get(addr))
-			if events[addr].Len() == 0 {
-				delete(events, addr)
+//
+//nolint:gocognit
+func (pool *TxPool) runReorg(ctx context.Context, done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
+	tracing.Exec(ctx, "TxPoolReorg", "txpool-reorg", func(ctx context.Context, span trace.Span) {
+		defer func(t0 time.Time) {
+			reorgDurationTimer.Update(time.Since(t0))
+		}(time.Now())
+
+		defer close(done)
+
+		var promoteAddrs []common.Address
+
+		tracing.ElapsedTime(ctx, span, "01 dirty accounts flattening", func(_ context.Context, innerSpan trace.Span) {
+			if dirtyAccounts != nil && reset == nil {
+				// Only dirty accounts need to be promoted, unless we're resetting.
+				// For resets, all addresses in the tx queue will be promoted and
+				// the flatten operation can be avoided.
+				promoteAddrs = dirtyAccounts.flatten()
 			}
+
+			tracing.SetAttributes(
+				innerSpan,
+				attribute.Int("promoteAddresses-flatten", len(promoteAddrs)),
+			)
+		})
+
+		tracing.ElapsedTime(ctx, span, "02 obtaining pool.WMutex", func(_ context.Context, _ trace.Span) {
+			pool.mu.Lock()
+		})
+
+		if reset != nil {
+			tracing.ElapsedTime(ctx, span, "03 reset-head reorg", func(_ context.Context, innerSpan trace.Span) {
+
+				// Reset from the old head to the new, rescheduling any reorged transactions
+				tracing.ElapsedTime(ctx, innerSpan, "04 reset-head-itself reorg", func(_ context.Context, innerSpan trace.Span) {
+					pool.reset(reset.oldHead, reset.newHead)
+				})
+
+				tracing.SetAttributes(
+					innerSpan,
+					attribute.Int("events-reset-head", len(events)),
+				)
+
+				// Nonces were reset, discard any events that became stale
+				for addr := range events {
+					events[addr].Forward(pool.pendingNonces.get(addr))
+
+					if events[addr].Len() == 0 {
+						delete(events, addr)
+					}
+				}
+
+				// Reset needs promote for all addresses
+				promoteAddrs = make([]common.Address, 0, len(pool.queue))
+				for addr := range pool.queue {
+					promoteAddrs = append(promoteAddrs, addr)
+				}
+
+				tracing.SetAttributes(
+					innerSpan,
+					attribute.Int("promoteAddresses-reset-head", len(promoteAddrs)),
+				)
+			})
 		}
-		// Reset needs promote for all addresses
-		promoteAddrs = make([]common.Address, 0, len(pool.queue))
-		for addr := range pool.queue {
-			promoteAddrs = append(promoteAddrs, addr)
+
+		// Check for pending transactions for every account that sent new ones
+		var promoted []*types.Transaction
+
+		tracing.ElapsedTime(ctx, span, "05 promoteExecutables", func(_ context.Context, _ trace.Span) {
+			promoted = pool.promoteExecutables(promoteAddrs)
+		})
+
+		tracing.SetAttributes(
+			span,
+			attribute.Int("count.promoteAddresses-reset-head", len(promoteAddrs)),
+			attribute.Int("count.all", pool.all.Count()),
+			attribute.Int("count.pending", len(pool.pending)),
+			attribute.Int("count.queue", len(pool.queue)),
+		)
+
+		// If a new block appeared, validate the pool of pending transactions. This will
+		// remove any transaction that has been included in the block or was invalidated
+		// because of another transaction (e.g. higher gas price).
+
+		//nolint:nestif
+		if reset != nil {
+			tracing.ElapsedTime(ctx, span, "new block", func(_ context.Context, innerSpan trace.Span) {
+
+				tracing.ElapsedTime(ctx, innerSpan, "06 demoteUnexecutables", func(_ context.Context, _ trace.Span) {
+					pool.demoteUnexecutables()
+				})
+
+				var nonces map[common.Address]uint64
+
+				tracing.ElapsedTime(ctx, innerSpan, "07 set_base_fee", func(_ context.Context, _ trace.Span) {
+					if reset.newHead != nil {
+						if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
+							// london fork enabled, reset given the base fee
+							pendingBaseFee := misc.CalcBaseFeeUint(pool.chainconfig, reset.newHead)
+							pool.priced.SetBaseFee(pendingBaseFee)
+						} else {
+							// london fork not enabled, reheap to "reset" the priced list
+							pool.priced.Reheap()
+						}
+					}
+
+					// Update all accounts to the latest known pending nonce
+					nonces = make(map[common.Address]uint64, len(pool.pending))
+				})
+
+				tracing.ElapsedTime(ctx, innerSpan, "08 obtaining pendingMu.RMutex", func(_ context.Context, _ trace.Span) {
+					pool.pendingMu.RLock()
+				})
+
+				var highestPending *types.Transaction
+
+				tracing.ElapsedTime(ctx, innerSpan, "09 fill nonces", func(_ context.Context, innerSpan trace.Span) {
+					for addr, list := range pool.pending {
+						highestPending = list.LastElement()
+						if highestPending != nil {
+							nonces[addr] = highestPending.Nonce() + 1
+						}
+					}
+				})
+
+				pool.pendingMu.RUnlock()
+
+				tracing.ElapsedTime(ctx, innerSpan, "10 reset nonces", func(_ context.Context, _ trace.Span) {
+					pool.pendingNonces.setAll(nonces)
+				})
+			})
 		}
-	}
-	// Check for pending transactions for every account that sent new ones
-	promoted := pool.promoteExecutables(promoteAddrs)
-
-	// If a new block appeared, validate the pool of pending transactions. This will
-	// remove any transaction that has been included in the block or was invalidated
-	// because of another transaction (e.g. higher gas price).
-	if reset != nil {
-		pool.demoteUnexecutables()
-		if reset.newHead != nil {
-			if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
-				// london fork enabled, reset given the base fee
-				pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
-				pool.priced.SetBaseFee(pendingBaseFee)
-			} else {
-				// london fork not enabled, reheap to "reset" the priced list
-				pool.priced.Reheap()
+
+		// Ensure pool.queue and pool.pending sizes stay within the configured limits.
+		tracing.ElapsedTime(ctx, span, "11 truncatePending", func(_ context.Context, _ trace.Span) {
+			pool.truncatePending()
+		})
+
+		tracing.ElapsedTime(ctx, span, "12 truncateQueue", func(_ context.Context, _ trace.Span) {
+			pool.truncateQueue()
+		})
+
+		dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+		pool.changesSinceReorg = 0 // Reset change counter
+
+		pool.mu.Unlock()
+
+		// Notify subsystems for newly added transactions
+		tracing.ElapsedTime(ctx, span, "13 notify about new transactions", func(_ context.Context, _ trace.Span) {
+			for _, tx := range promoted {
+				addr, _ := types.Sender(pool.signer, tx)
+
+				if _, ok := events[addr]; !ok {
+					events[addr] = newTxSortedMap()
+				}
+
+				events[addr].Put(tx)
 			}
-		}
-		// Update all accounts to the latest known pending nonce
-		nonces := make(map[common.Address]uint64, len(pool.pending))
-		for addr, list := range pool.pending {
-			highestPending := list.LastElement()
-			nonces[addr] = highestPending.Nonce() + 1
-		}
-		pool.pendingNonces.setAll(nonces)
-	}
-	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
-	pool.truncatePending()
-	pool.truncateQueue()
+		})
 
-	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
-	pool.changesSinceReorg = 0 // Reset change counter
-	pool.mu.Unlock()
+		if len(events) > 0 {
+			tracing.ElapsedTime(ctx, span, "14 txFeed", func(_ context.Context, _ trace.Span) {
+				var txs []*types.Transaction
 
-	// Notify subsystems for newly added transactions
-	for _, tx := range promoted {
-		addr, _ := types.Sender(pool.signer, tx)
-		if _, ok := events[addr]; !ok {
-			events[addr] = newTxSortedMap()
-		}
-		events[addr].Put(tx)
-	}
-	if len(events) > 0 {
-		var txs []*types.Transaction
-		for _, set := range events {
-			txs = append(txs, set.Flatten()...)
+				for _, set := range events {
+					txs = append(txs, set.Flatten()...)
+				}
+
+				pool.txFeed.Send(NewTxsEvent{txs})
+			})
 		}
-		pool.txFeed.Send(NewTxsEvent{txs})
-	}
+	})
 }
 
 // reset retrieves the current state of the blockchain and ensures the content
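The runReorg rewrite above wraps each numbered phase in tracing.Exec and tracing.ElapsedTime closures so per-step latency lands on a trace span. Those helpers are bor-internal; as an approximation, the sketch below times labeled steps the same way, with a plain duration map standing in for an OpenTelemetry span:

package main

import (
	"context"
	"fmt"
	"time"
)

// elapsed runs one step, records its duration under a label, and keeps the
// step body readable as a closure, mirroring the pattern in the patch.
func elapsed(ctx context.Context, spans map[string]time.Duration, label string, fn func(context.Context)) {
	start := time.Now()
	fn(ctx)
	spans[label] = time.Since(start)
}

func main() {
	spans := make(map[string]time.Duration)
	ctx := context.Background()

	elapsed(ctx, spans, "01 flatten", func(context.Context) { time.Sleep(time.Millisecond) })
	elapsed(ctx, spans, "02 lock", func(context.Context) {})

	for label, d := range spans {
		fmt.Println(label, d)
	}
}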
@@ -1343,64 +1796,100 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 // invalidated transactions (low nonce, low balance) are deleted.
 func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
 	// Track the promoted transactions to broadcast them at once
-	var promoted []*types.Transaction
+	var (
+		promoted    []*types.Transaction
+		promotedLen int
+		forwards    types.Transactions
+		forwardsLen int
+		caps        types.Transactions
+		capsLen     int
+		drops       types.Transactions
+		dropsLen    int
+		list        *txList
+		hash        common.Hash
+		readies     types.Transactions
+		readiesLen  int
+	)
+
+	balance := uint256.NewInt(0)
 
 	// Iterate over all accounts and promote any executable transactions
 	for _, addr := range accounts {
-		list := pool.queue[addr]
+		list = pool.queue[addr]
 		if list == nil {
 			continue // Just in case someone calls with a non existing account
 		}
+
 		// Drop all transactions that are deemed too old (low nonce)
-		forwards := list.Forward(pool.currentState.GetNonce(addr))
+		forwards = list.Forward(pool.currentState.GetNonce(addr))
+		forwardsLen = len(forwards)
+
 		for _, tx := range forwards {
-			hash := tx.Hash()
+			hash = tx.Hash()
 			pool.all.Remove(hash)
 		}
-		log.Trace("Removed old queued transactions", "count", len(forwards))
+
+		log.Trace("Removed old queued transactions", "count", forwardsLen)
+
 		// Drop all transactions that are too costly (low balance or out of gas)
-		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+		balance.SetFromBig(pool.currentState.GetBalance(addr))
+
+		drops, _ = list.Filter(balance, pool.currentMaxGas)
+		dropsLen = len(drops)
+
 		for _, tx := range drops {
-			hash := tx.Hash()
+			hash = tx.Hash()
 			pool.all.Remove(hash)
 		}
-		log.Trace("Removed unpayable queued transactions", "count", len(drops))
-		queuedNofundsMeter.Mark(int64(len(drops)))
+
+		log.Trace("Removed unpayable queued transactions", "count", dropsLen)
+		queuedNofundsMeter.Mark(int64(dropsLen))
 
 		// Gather all executable transactions and promote them
-		readies := list.Ready(pool.pendingNonces.get(addr))
+		readies = list.Ready(pool.pendingNonces.get(addr))
+		readiesLen = len(readies)
+
 		for _, tx := range readies {
-			hash := tx.Hash()
+			hash = tx.Hash()
 			if pool.promoteTx(addr, hash, tx) {
 				promoted = append(promoted, tx)
 			}
 		}
-		log.Trace("Promoted queued transactions", "count", len(promoted))
-		queuedGauge.Dec(int64(len(readies)))
+
+		promotedLen = len(promoted)
+
+		log.Trace("Promoted queued transactions", "count", promotedLen)
+		queuedGauge.Dec(int64(readiesLen))
 
 		// Drop all transactions over the allowed limit
-		var caps types.Transactions
 		if !pool.locals.contains(addr) {
 			caps = list.Cap(int(pool.config.AccountQueue))
+			capsLen = len(caps)
+
 			for _, tx := range caps {
-				hash := tx.Hash()
+				hash = tx.Hash()
 				pool.all.Remove(hash)
+
 				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
 			}
-			queuedRateLimitMeter.Mark(int64(len(caps)))
+
+			queuedRateLimitMeter.Mark(int64(capsLen))
 		}
+
 		// Mark all the items dropped as removed
-		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
-		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		pool.priced.Removed(forwardsLen + dropsLen + capsLen)
+
+		queuedGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
+
 		if pool.locals.contains(addr) {
-			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+			localGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
 		}
+
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {
 			delete(pool.queue, addr)
 			delete(pool.beats, addr)
 		}
 	}
+
 	return promoted
 }
 
@@ -1408,86 +1897,162 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
 // pending limit. The algorithm tries to reduce transaction counts by an approximately
 // equal number for all for accounts with many pending transactions.
 func (pool *TxPool) truncatePending() {
-	pending := uint64(0)
-	for _, list := range pool.pending {
-		pending += uint64(list.Len())
-	}
+	pending := uint64(pool.pendingCount)
 	if pending <= pool.config.GlobalSlots {
 		return
 	}
 
 	pendingBeforeCap := pending
+
+	var listLen int
+
+	type pair struct {
+		address common.Address
+		value   int64
+	}
+
 	// Assemble a spam order to penalize large transactors first
-	spammers := prque.New(nil)
+	spammers := make([]pair, 0, 8)
+	count := 0
+
+	var ok bool
+
+	pool.pendingMu.RLock()
 	for addr, list := range pool.pending {
 		// Only evict transactions from high rollers
-		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
-			spammers.Push(addr, int64(list.Len()))
+		listLen = len(list.txs.items)
+
+		pool.pendingMu.RUnlock()
+
+		pool.locals.m.RLock()
+
+		if uint64(listLen) > pool.config.AccountSlots {
+			if _, ok = pool.locals.accounts[addr]; ok {
+				pool.locals.m.RUnlock()
+
+				pool.pendingMu.RLock()
+
+				continue
+			}
+
+			count++
+
+			spammers = append(spammers, pair{addr, int64(listLen)})
 		}
+
+		pool.locals.m.RUnlock()
+
+		pool.pendingMu.RLock()
 	}
+
+	pool.pendingMu.RUnlock()
+
 	// Gradually drop transactions from offenders
-	offenders := []common.Address{}
-	for pending > pool.config.GlobalSlots && !spammers.Empty() {
+	offenders := make([]common.Address, 0, len(spammers))
+	sort.Slice(spammers, func(i, j int) bool {
+		return spammers[i].value < spammers[j].value
+	})
+
+	var (
+		offender common.Address
+		caps     types.Transactions
+		capsLen  int
+		list     *txList
+		hash     common.Hash
+	)
+
+	// todo: metrics: spammers, offenders, total loops
+	for len(spammers) != 0 && pending > pool.config.GlobalSlots {
 		// Retrieve the next offender if not local address
-		offender, _ := spammers.Pop()
-		offenders = append(offenders, offender.(common.Address))
+		offender, spammers = spammers[len(spammers)-1].address, spammers[:len(spammers)-1]
+		offenders = append(offenders, offender)
+
+		var threshold int
 
 		// Equalize balances until all the same or below threshold
 		if len(offenders) > 1 {
 			// Calculate the equalization threshold for all current offenders
-			threshold := pool.pending[offender.(common.Address)].Len()
+			pool.pendingMu.RLock()
+			threshold = len(pool.pending[offender].txs.items)
 
 			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
 				for i := 0; i < len(offenders)-1; i++ {
-					list := pool.pending[offenders[i]]
+					list = pool.pending[offenders[i]]
+
+					caps = list.Cap(len(list.txs.items) - 1)
+					capsLen = len(caps)
+
+					pool.pendingMu.RUnlock()
 
-					caps := list.Cap(list.Len() - 1)
 					for _, tx := range caps {
 						// Drop the transaction from the global pools too
-						hash := tx.Hash()
+						hash = tx.Hash()
 						pool.all.Remove(hash)
 
 						// Update the account nonce to the dropped transaction
 						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
 						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 					}
-					pool.priced.Removed(len(caps))
-					pendingGauge.Dec(int64(len(caps)))
+
+					pool.priced.Removed(capsLen)
+
+					pendingGauge.Dec(int64(capsLen))
 					if pool.locals.contains(offenders[i]) {
-						localGauge.Dec(int64(len(caps)))
+						localGauge.Dec(int64(capsLen))
 					}
+
 					pending--
+
+					pool.pendingMu.RLock()
 				}
 			}
+
+			pool.pendingMu.RUnlock()
 		}
 	}
 
 	// If still above threshold, reduce to limit or min allowance
 	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+
+		pool.pendingMu.RLock()
+
 		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
 			for _, addr := range offenders {
-				list := pool.pending[addr]
+				list = pool.pending[addr]
+
+				caps = list.Cap(len(list.txs.items) - 1)
+				capsLen = len(caps)
+
+				pool.pendingMu.RUnlock()
 
-				caps := list.Cap(list.Len() - 1)
 				for _, tx := range caps {
 					// Drop the transaction from the global pools too
-					hash := tx.Hash()
+					hash = tx.Hash()
 					pool.all.Remove(hash)
 
 					// Update the account nonce to the dropped transaction
 					pool.pendingNonces.setIfLower(addr, tx.Nonce())
 					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 				}
-				pool.priced.Removed(len(caps))
-				pendingGauge.Dec(int64(len(caps)))
-				if pool.locals.contains(addr) {
-					localGauge.Dec(int64(len(caps)))
+
+				pool.priced.Removed(capsLen)
+
+				pendingGauge.Dec(int64(capsLen))
+
+				if _, ok = pool.locals.accounts[addr]; ok {
+					localGauge.Dec(int64(capsLen))
 				}
+
 				pending--
+
+				pool.pendingMu.RLock()
 			}
 		}
+
+		pool.pendingMu.RUnlock()
 	}
+
 	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
 }
 
@@ -1510,27 +2075,52 @@ func (pool *TxPool) truncateQueue() {
 	}
 	sort.Sort(addresses)
 
+	var (
+		tx   *types.Transaction
+		txs  types.Transactions
+		list *txList
+		addr addressByHeartbeat
+		size uint64
+	)
+
 	// Drop transactions until the total is below the limit or only locals remain
 	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
-		addr := addresses[len(addresses)-1]
-		list := pool.queue[addr.address]
+		addr = addresses[len(addresses)-1]
+		list = pool.queue[addr.address]
 
 		addresses = addresses[:len(addresses)-1]
 
+		var (
+			listFlatten types.Transactions
+			isSet       bool
+		)
+
 		// Drop all transactions if they are less than the overflow
-		if size := uint64(list.Len()); size <= drop {
-			for _, tx := range list.Flatten() {
+		if size = uint64(list.Len()); size <= drop {
+			listFlatten = list.Flatten()
+			isSet = true
+
+			for _, tx = range listFlatten {
 				pool.removeTx(tx.Hash(), true)
 			}
+
 			drop -= size
 			queuedRateLimitMeter.Mark(int64(size))
+
 			continue
 		}
+
 		// Otherwise drop only last few transactions
-		txs := list.Flatten()
+		if !isSet {
			listFlatten = list.Flatten()
		}
+
+		txs = listFlatten
 		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
 			pool.removeTx(txs[i].Hash(), true)
+
 			drop--
+
 			queuedRateLimitMeter.Mark(1)
 		}
 	}
@@ -1544,56 +2134,98 @@ func (pool *TxPool) truncateQueue() {
 // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
 // to trigger a re-heap in this function.
 func (pool *TxPool) demoteUnexecutables() {
+	balance := uint256.NewInt(0)
+
+	var (
+		olds        types.Transactions
+		oldsLen     int
+		hash        common.Hash
+		drops       types.Transactions
+		dropsLen    int
+		invalids    types.Transactions
+		invalidsLen int
+		gapped      types.Transactions
+		gappedLen   int
+	)
+
 	// Iterate over all accounts and demote any non-executable transactions
+	pool.pendingMu.RLock()
+
 	for addr, list := range pool.pending {
 		nonce := pool.currentState.GetNonce(addr)
 
 		// Drop all transactions that are deemed too old (low nonce)
-		olds := list.Forward(nonce)
+		olds = list.Forward(nonce)
+		oldsLen = len(olds)
+
 		for _, tx := range olds {
-			hash := tx.Hash()
+			hash = tx.Hash()
 			pool.all.Remove(hash)
 			log.Trace("Removed old pending transaction", "hash", hash)
 		}
+
 		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
-		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+		balance.SetFromBig(pool.currentState.GetBalance(addr))
+		drops, invalids = list.Filter(balance, pool.currentMaxGas)
+		dropsLen = len(drops)
+		invalidsLen = len(invalids)
+
 		for _, tx := range drops {
-			hash := tx.Hash()
+			hash = tx.Hash()
+
 			log.Trace("Removed unpayable pending transaction", "hash", hash)
+
 			pool.all.Remove(hash)
 		}
-		pendingNofundsMeter.Mark(int64(len(drops)))
+
+		pendingNofundsMeter.Mark(int64(dropsLen))
 
 		for _, tx := range invalids {
-			hash := tx.Hash()
+			hash = tx.Hash()
+
 			log.Trace("Demoting pending transaction", "hash", hash)
 
 			// Internal shuffle shouldn't touch the lookup set.
 			pool.enqueueTx(hash, tx, false, false)
 		}
-		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+
+		pendingGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
+
 		if pool.locals.contains(addr) {
-			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+			localGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
 		}
 		// If there's a gap in front, alert (should never happen) and postpone all transactions
 		if list.Len() > 0 && list.txs.Get(nonce) == nil {
-			gapped := list.Cap(0)
+			gapped = list.Cap(0)
+			gappedLen = len(gapped)
+
 			for _, tx := range gapped {
-				hash := tx.Hash()
+				hash = tx.Hash()
 				log.Error("Demoting invalidated transaction", "hash", hash)
 
 				// Internal shuffle shouldn't touch the lookup set.
 				pool.enqueueTx(hash, tx, false, false)
 			}
-			pendingGauge.Dec(int64(len(gapped)))
+
+			pendingGauge.Dec(int64(gappedLen))
 			// This might happen in a reorg, so log it to the metering
-			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
+			blockReorgInvalidatedTx.Mark(int64(gappedLen))
 		}
+
 		// Delete the entire pending entry if it became empty.
 		if list.Empty() {
+			pool.pendingMu.RUnlock()
+			pool.pendingMu.Lock()
+
+			pool.pendingCount -= pool.pending[addr].Len()
 			delete(pool.pending, addr)
+
+			pool.pendingMu.Unlock()
+			pool.pendingMu.RLock()
 		}
 	}
+
+	pool.pendingMu.RUnlock()
 }
 
 // addressByHeartbeat is an account address tagged with its last activity timestamp.
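Deleting an emptied pending list above requires upgrading from the read lock to the write lock. Go's sync.RWMutex has no atomic upgrade, so the pattern is RUnlock, Lock, mutate, Unlock, and re-acquire the read lock, and the state must be re-checked because another goroutine can run inside the window. A standalone sketch with illustrative names only:

package main

import (
	"fmt"
	"sync"
)

type table struct {
	mu sync.RWMutex
	m  map[string][]int
}

// dropEmpty removes the entry for key if its list is empty, upgrading from
// the read lock to the write lock for the deletion.
func (t *table) dropEmpty(key string) {
	t.mu.RLock()
	empty := len(t.m[key]) == 0
	t.mu.RUnlock()

	if !empty {
		return
	}

	t.mu.Lock()
	// Re-check under the write lock: the state may have changed in between.
	if len(t.m[key]) == 0 {
		delete(t.m, key)
	}
	t.mu.Unlock()
}

func main() {
	t := &table{m: map[string][]int{"a": {}, "b": {1}}}
	t.dropEmpty("a")
	t.dropEmpty("b")
	fmt.Println(len(t.m)) // 1
}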
@@ -1611,9 +2243,10 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 
 // accountSet is simply a set of addresses to check for existence, and a signer
 // capable of deriving addresses from transactions.
 type accountSet struct {
-	accounts map[common.Address]struct{}
-	signer   types.Signer
-	cache    *[]common.Address
+	accounts        map[common.Address]struct{}
+	accountsFlatted []common.Address
+	signer          types.Signer
+	m               sync.RWMutex
 }
 
 // newAccountSet creates a new address set with an associated signer for sender
@@ -1631,17 +2264,26 @@ func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
 
 // contains checks if a given address is contained within the set.
 func (as *accountSet) contains(addr common.Address) bool {
+	as.m.RLock()
+	defer as.m.RUnlock()
+
 	_, exist := as.accounts[addr]
 	return exist
 }
 
 func (as *accountSet) empty() bool {
+	as.m.RLock()
+	defer as.m.RUnlock()
+
 	return len(as.accounts) == 0
 }
 
 // containsTx checks if the sender of a given tx is within the set. If the sender
 // cannot be derived, this method returns false.
 func (as *accountSet) containsTx(tx *types.Transaction) bool {
+	as.m.RLock()
+	defer as.m.RUnlock()
+
 	if addr, err := types.Sender(as.signer, tx); err == nil {
 		return as.contains(addr)
 	}
@@ -1650,8 +2292,14 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool {
 
 // add inserts a new address into the set to track.
 func (as *accountSet) add(addr common.Address) {
+	as.m.Lock()
+	defer as.m.Unlock()
+
+	if _, ok := as.accounts[addr]; !ok {
+		as.accountsFlatted = append(as.accountsFlatted, addr)
+	}
+
 	as.accounts[addr] = struct{}{}
-	as.cache = nil
 }
 
 // addTx adds the sender of tx into the set.
@@ -1664,22 +2312,25 @@ func (as *accountSet) addTx(tx *types.Transaction) {
 // flatten returns the list of addresses within this set, also caching it for later
 // reuse. The returned slice should not be changed!
 func (as *accountSet) flatten() []common.Address {
-	if as.cache == nil {
-		accounts := make([]common.Address, 0, len(as.accounts))
-		for account := range as.accounts {
-			accounts = append(accounts, account)
-		}
-		as.cache = &accounts
-	}
-	return *as.cache
+	as.m.RLock()
+	defer as.m.RUnlock()
+
+	return as.accountsFlatted
 }
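The accountSet change above replaces the invalidate-on-write cache with a slice that is maintained on every insert, making flatten() a constant-time read. The same idea in miniature, with stand-in types:

package main

import (
	"fmt"
	"sync"
)

// set keeps an append-only slice alongside the membership map, so the
// flattened view never has to be rebuilt.
type set struct {
	mu    sync.RWMutex
	items map[string]struct{}
	flat  []string
}

func newSet() *set { return &set{items: make(map[string]struct{})} }

func (s *set) add(v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.items[v]; !ok {
		s.flat = append(s.flat, v) // keep the flattened view in sync
		s.items[v] = struct{}{}
	}
}

// flatten returns the shared slice; callers must treat it as read-only.
func (s *set) flatten() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.flat
}

func main() {
	s := newSet()
	s.add("a")
	s.add("b")
	s.add("a")
	fmt.Println(s.flatten()) // [a b]
}

The trade-off is that the slice only supports insertion; since the pool never removes addresses from an accountSet, append-only is sufficient and insertion order stays stable.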
 // merge adds all addresses from the 'other' set into 'as'.
 func (as *accountSet) merge(other *accountSet) {
+	var ok bool
+
+	as.m.Lock()
+	defer as.m.Unlock()
+
 	for addr := range other.accounts {
+		if _, ok = as.accounts[addr]; !ok {
+			as.accountsFlatted = append(as.accountsFlatted, addr)
+		}
 		as.accounts[addr] = struct{}{}
 	}
-	as.cache = nil
 }
 
 // txLookup is used internally by TxPool to track transactions while allowing
@@ -1835,7 +2486,10 @@ func (t *txLookup) RemoteToLocals(locals *accountSet) int {
 	var migrated int
 	for hash, tx := range t.remotes {
 		if locals.containsTx(tx) {
+			locals.m.Lock()
 			t.locals[hash] = tx
+			locals.m.Unlock()
+
 			delete(t.remotes, hash)
 			migrated += 1
 		}
 	}
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 664ca6c9d4..13fa4ff20d 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -21,6 +21,7 @@ import (
 	"crypto/ecdsa"
 	"errors"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"math/big"
 	"math/rand"
@@ -32,11 +33,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/holiman/uint256"
+	"go.uber.org/goleak"
 	"gonum.org/v1/gonum/floats"
 	"gonum.org/v1/gonum/stat"
 	"pgregory.net/rapid"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/debug"
+	"github.com/ethereum/go-ethereum/common/leak"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -98,7 +103,7 @@ func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Tr
 }
 
 func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
-	tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+	tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{0x01}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
 
 	return tx
 }
@@ -153,12 +158,17 @@ func validateTxPoolInternals(pool *TxPool) error {
 	if total := pool.all.Count(); total != pending+queued {
 		return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued)
 	}
+
 	pool.priced.Reheap()
 	priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.RemoteCount()
 	if priced != remote {
 		return fmt.Errorf("total priced transaction count %d != %d", priced, remote)
 	}
+
 	// Ensure the next nonce to assign is the correct one
+	pool.pendingMu.RLock()
+	defer pool.pendingMu.RUnlock()
+
 	for addr, txs := range pool.pending {
 		// Find the last transaction
 		var last uint64
@@ -167,10 +177,16 @@ func validateTxPoolInternals(pool *TxPool) error {
 				last = nonce
 			}
 		}
+
 		if nonce := pool.pendingNonces.get(addr); nonce != last+1 {
 			return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1)
 		}
+
+		if txs.totalcost.Cmp(common.Big0) < 0 {
+			return fmt.Errorf("totalcost went negative: %v", txs.totalcost)
+		}
 	}
+
 	return nil
 }
 
@@ -325,10 +341,18 @@ func TestInvalidTransactions(t *testing.T) {
 	}
 
 	tx = transaction(1, 100000, key)
+
+	pool.gasPriceMu.Lock()
+
 	pool.gasPrice = big.NewInt(1000)
-	if err := pool.AddRemote(tx); err != ErrUnderpriced {
+	pool.gasPriceUint = uint256.NewInt(1000)
+
+	pool.gasPriceMu.Unlock()
+
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
 		t.Error("expected", ErrUnderpriced, "got", err)
 	}
+
 	if err := pool.AddLocal(tx); err != nil {
 		t.Error("expected", nil, "got", err)
 	}
@@ -347,9 +371,12 @@ func TestTransactionQueue(t *testing.T) {
 	pool.enqueueTx(tx.Hash(), tx, false, true)
 
 	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+
+	pool.pendingMu.RLock()
 	if len(pool.pending) != 1 {
 		t.Error("expected valid txs to be 1 is", len(pool.pending))
 	}
+	pool.pendingMu.RUnlock()
 
 	tx = transaction(1, 100, key)
 	from, _ = deriveSender(tx)
@@ -357,9 +384,13 @@ func TestTransactionQueue(t *testing.T) {
 	pool.enqueueTx(tx.Hash(), tx, false, true)
 
 	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+
+	pool.pendingMu.RLock()
 	if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
 		t.Error("expected transaction to be in tx pool")
 	}
+	pool.pendingMu.RUnlock()
+
 	if len(pool.queue) > 0 {
 		t.Error("expected transaction queue to be empty. is", len(pool.queue))
 	}
@@ -383,9 +414,13 @@ func TestTransactionQueue2(t *testing.T) {
 	pool.enqueueTx(tx3.Hash(), tx3, false, true)
 
 	pool.promoteExecutables([]common.Address{from})
+
+	pool.pendingMu.RLock()
 	if len(pool.pending) != 1 {
 		t.Error("expected pending length to be 1, got", len(pool.pending))
 	}
+	pool.pendingMu.RUnlock()
+
 	if pool.queue[from].Len() != 2 {
 		t.Error("expected len(queue) == 2, got", pool.queue[from].Len())
 	}
@@ -399,8 +434,10 @@ func TestTransactionNegativeValue(t *testing.T) {
 
 	tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
 	from, _ := deriveSender(tx)
+
 	testAddBalance(pool, from, big.NewInt(1))
-	if err := pool.AddRemote(tx); err != ErrNegativeValue {
+
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrNegativeValue) {
 		t.Error("expected", ErrNegativeValue, "got", err)
 	}
 }
@@ -413,7 +450,7 @@ func TestTransactionTipAboveFeeCap(t *testing.T) {
 
 	tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
 
-	if err := pool.AddRemote(tx); err != ErrTipAboveFeeCap {
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrTipAboveFeeCap) {
 		t.Error("expected", ErrTipAboveFeeCap, "got", err)
 	}
 }
@@ -428,12 +465,12 @@ func TestTransactionVeryHighValues(t *testing.T) {
 	veryBigNumber.Lsh(veryBigNumber, 300)
 
 	tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key)
-	if err := pool.AddRemote(tx); err != ErrTipVeryHigh {
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrTipVeryHigh) {
 		t.Error("expected", ErrTipVeryHigh, "got", err)
 	}
 
 	tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key)
-	if err := pool.AddRemote(tx2); err != ErrFeeCapVeryHigh {
+	if err := pool.AddRemote(tx2); !errors.Is(err, ErrFeeCapVeryHigh) {
 		t.Error("expected", ErrFeeCapVeryHigh, "got", err)
 	}
 }
@@ -495,23 +532,32 @@ func TestTransactionDoubleNonce(t *testing.T) {
 	if replace, err := pool.add(tx2, false); err != nil || !replace {
 		t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
 	}
+
 	<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+
+	pool.pendingMu.RLock()
 	if pool.pending[addr].Len() != 1 {
 		t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
 	}
 	if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
 		t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
 	}
+	pool.pendingMu.RUnlock()
 
 	// Add the third transaction and ensure it's not saved (smaller price)
 	pool.add(tx3, false)
+
 	<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+
+	pool.pendingMu.RLock()
 	if pool.pending[addr].Len() != 1 {
 		t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
 	}
 	if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
 		t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
 	}
+	pool.pendingMu.RUnlock()
+
 	// Ensure the total transaction count is correct
 	if pool.all.Count() != 1 {
 		t.Error("expected 1 total transactions, got", pool.all.Count())
 	}
 }
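The test edits above consistently swap == comparisons on sentinel errors for errors.Is. A short sketch of why the distinction matters once an error is wrapped (errUnderpriced here is a local stand-in for the pool's exported sentinels):

package main

import (
	"errors"
	"fmt"
)

var errUnderpriced = errors.New("transaction underpriced")

// addTx returns the sentinel wrapped, as real call chains often do.
func addTx() error {
	return fmt.Errorf("validation failed: %w", errUnderpriced)
}

func main() {
	err := addTx()
	fmt.Println(err == errUnderpriced)          // false: the wrapper is a new value
	fmt.Println(errors.Is(err, errUnderpriced)) // true: unwraps to the sentinel
}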
@@ -530,9 +576,13 @@ func TestTransactionMissingNonce(t *testing.T) {
 	if _, err := pool.add(tx, false); err != nil {
 		t.Error("didn't expect error", err)
 	}
+
+	pool.pendingMu.RLock()
 	if len(pool.pending) != 0 {
 		t.Error("expected 0 pending transactions, got", len(pool.pending))
 	}
+	pool.pendingMu.RUnlock()
+
 	if pool.queue[addr].Len() != 1 {
 		t.Error("expected 1 queued transaction, got", pool.queue[addr].Len())
 	}
@@ -603,19 +653,27 @@ func TestTransactionDropping(t *testing.T) {
 	pool.enqueueTx(tx12.Hash(), tx12, false, true)
 
 	// Check that pre and post validations leave the pool as is
+	pool.pendingMu.RLock()
 	if pool.pending[account].Len() != 3 {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
 	}
+	pool.pendingMu.RUnlock()
+
 	if pool.queue[account].Len() != 3 {
 		t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
 	}
 	if pool.all.Count() != 6 {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
 	}
+
 	<-pool.requestReset(nil, nil)
+
+	pool.pendingMu.RLock()
 	if pool.pending[account].Len() != 3 {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
 	}
+	pool.pendingMu.RUnlock()
+
 	if pool.queue[account].Len() != 3 {
 		t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
 	}
@@ -626,6 +684,7 @@ func TestTransactionDropping(t *testing.T) {
 	testAddBalance(pool, account, big.NewInt(-650))
 	<-pool.requestReset(nil, nil)
 
+	pool.pendingMu.RLock()
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
 		t.Errorf("funded pending transaction missing: %v", tx0)
 	}
@@ -635,6 +694,8 @@ func TestTransactionDropping(t *testing.T) {
 	if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
 		t.Errorf("out-of-fund pending transaction present: %v", tx1)
 	}
+	pool.pendingMu.RUnlock()
+
 	if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
 		t.Errorf("funded queued transaction missing: %v", tx10)
 	}
@@ -651,12 +712,15 @@ func TestTransactionDropping(t *testing.T) {
 	atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100)
 	<-pool.requestReset(nil, nil)
 
+	pool.pendingMu.RLock()
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
 		t.Errorf("funded pending transaction missing: %v", tx0)
 	}
 	if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
 		t.Errorf("over-gased pending transaction present: %v", tx1)
 	}
+	pool.pendingMu.RUnlock()
+
 	if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
 		t.Errorf("funded queued transaction missing: %v", tx10)
 	}
@@ -711,19 +775,27 @@ func TestTransactionPostponing(t *testing.T) {
 		}
 	}
 	// Check that pre and post validations leave the pool as is
+	pool.pendingMu.RLock()
 	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
 	}
+	pool.pendingMu.RUnlock()
+
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
 	if pool.all.Count() != len(txs) {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
+
 	<-pool.requestReset(nil, nil)
+
+	pool.pendingMu.RLock()
 	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
 	}
+	pool.pendingMu.RUnlock()
+
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
%d", len(pool.queue), 0) } @@ -738,12 +810,17 @@ func TestTransactionPostponing(t *testing.T) { // The first account's first transaction remains valid, check that subsequent // ones are either filtered out, or queued up for later. + pool.pendingMu.RLock() if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) } + pool.pendingMu.RUnlock() + if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) } + + pool.pendingMu.RLock() for i, tx := range txs[1:100] { if i%2 == 1 { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { @@ -761,11 +838,16 @@ func TestTransactionPostponing(t *testing.T) { } } } + pool.pendingMu.RUnlock() + // The second account's first transaction got invalid, check that all transactions // are either filtered out, or queued up for later. + pool.pendingMu.RLock() if pool.pending[accs[1]] != nil { t.Errorf("invalidated account still has pending transactions") } + pool.pendingMu.RUnlock() + for i, tx := range txs[100:] { if i%2 == 1 { if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { @@ -854,9 +936,13 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } + + pool.pendingMu.RLock() if len(pool.pending) != 0 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) } + pool.pendingMu.RUnlock() + if i <= testTxPoolConfig.AccountQueue { if pool.queue[account].Len() != int(i) { t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) @@ -935,6 +1021,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { for i := uint64(0); i < 3*config.GlobalQueue; i++ { txs = append(txs, transaction(i+1, 100000, local)) } + pool.AddLocals(txs) // If locals are disabled, the previous eviction algorithm should apply here too @@ -1112,6 +1199,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1128,7 +1216,7 @@ func TestTransactionPendingLimiting(t *testing.T) { defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000)) // Keep track of transaction events to ensure all executables get announced events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) @@ -1140,9 +1228,13 @@ func TestTransactionPendingLimiting(t *testing.T) { if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } + + pool.pendingMu.RLock() if pool.pending[account].Len() != int(i)+1 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) } + pool.pendingMu.RUnlock() + if len(pool.queue) != 0 { t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) } @@ -1195,9 +1287,13 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { pool.AddRemotesSync(txs) pending := 0 + + pool.pendingMu.RLock() for _, list := range pool.pending { pending += list.Len() } + pool.pendingMu.RUnlock() + if pending > int(config.GlobalSlots) 
@@ -1330,11 +1426,14 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	// Import the batch and verify that limits have been enforced
 	pool.AddRemotesSync(txs)
 
+	pool.pendingMu.RLock()
 	for addr, list := range pool.pending {
 		if list.Len() != int(config.AccountSlots) {
 			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots)
 		}
 	}
+	pool.pendingMu.RUnlock()
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
@@ -1391,15 +1490,19 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	if pending != 7 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
 	}
+
 	if queued != 3 {
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
 	}
+
 	if err := validateEvents(events, 7); err != nil {
 		t.Fatalf("original event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// Reprice the pool and check that underpriced transactions get dropped
 	pool.SetGasPrice(big.NewInt(2))
 
@@ -1407,58 +1510,76 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	if pending != 2 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
 	}
+
 	if queued != 5 {
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
 	}
+
 	if err := validateEvents(events, 0); err != nil {
 		t.Fatalf("reprice event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// Check that we can't add the old transactions back
-	if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced {
+	if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
-	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
+
+	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
-	if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced {
+
+	if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
+
 	if err := validateEvents(events, 0); err != nil {
 		t.Fatalf("post-reprice event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// However we can add local underpriced transactions
 	tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
+
 	if err := pool.AddLocal(tx); err != nil {
 		t.Fatalf("failed to add underpriced local transaction: %v", err)
 	}
+
 	if pending, _ = pool.Stats(); pending != 3 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
 	}
+
 	if err := validateEvents(events, 1); err != nil {
 		t.Fatalf("post-reprice local event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// And we can fill gaps with properly priced transactions
 	if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
 		t.Fatalf("failed to add pending transaction: %v", err)
 	}
+
 	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
 		t.Fatalf("failed to add pending transaction: %v", err)
 	}
+
 	if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
 		t.Fatalf("failed to add queued transaction: %v", err)
 	}
+
 	if err := validateEvents(events, 5); err != nil {
 		t.Fatalf("post-reprice event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
@@ -1487,6 +1608,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
 		keys[i], _ = crypto.GenerateKey()
 		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
 	}
+
 	// Generate and queue a batch of transactions, both pending and queued
 	txs := types.Transactions{}
 
@@ -1512,15 +1634,19 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
 	if pending != 7 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
 	}
+
 	if queued != 3 {
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
 	}
+
 	if err := validateEvents(events, 7); err != nil {
 		t.Fatalf("original event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// Reprice the pool and check that underpriced transactions get dropped
 	pool.SetGasPrice(big.NewInt(2))
 
@@ -1528,64 +1654,87 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
 	if pending != 2 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
 	}
+
 	if queued != 5 {
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
 	}
+
 	if err := validateEvents(events, 0); err != nil {
 		t.Fatalf("reprice event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// Check that we can't add the old transactions back
 	tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
-	if err := pool.AddRemote(tx); err != ErrUnderpriced {
+
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
+
 	tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
-	if err := pool.AddRemote(tx); err != ErrUnderpriced {
+
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
+
 	tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
-	if err := pool.AddRemote(tx); err != ErrUnderpriced {
+	if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
 		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
 	}
+
 	if err := validateEvents(events, 0); err != nil {
 		t.Fatalf("post-reprice event firing failed: %v", err)
 	}
+
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
 	// However we can add local underpriced transactions
 	tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
+
 	if err := pool.AddLocal(tx); err != nil {
 		t.Fatalf("failed to add underpriced local transaction: %v", err)
 	}
+
 	if pending, _ = pool.Stats(); pending != 3 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
 	}
mismatched: have %d, want %d", pending, 3) } + if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } + if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1607,7 +1756,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { keys := make([]*ecdsa.PrivateKey, 3) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000*1000000)) + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) } // Create transaction (both pending and queued) with a linearly growing gasprice for i := uint64(0); i < 500; i++ { @@ -1686,7 +1835,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) + keys := make([]*ecdsa.PrivateKey, 5) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) } @@ -1719,9 +1868,13 @@ func TestTransactionPoolUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + // Replace a future transaction with a higher-priced future transaction + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("failed to add well priced transaction: %v", err) + } // Ensure that adding high priced transactions drops cheap ones, but not own if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) @@ -1732,6 +1885,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } + // Ensure that replacing a pending transaction with a future transaction fails + if err := pool.AddRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); !errors.Is(err, ErrFutureReplacePending) { + t.Fatalf("adding future replace transaction error mismatch: have %v, 
want %v", err, ErrFutureReplacePending) + } pending, queued = pool.Stats() if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) @@ -1739,7 +1896,8 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 1); err != nil { + + if err := validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } if err := validateTxPoolInternals(pool); err != nil { @@ -1891,7 +2049,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1 + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } @@ -1901,11 +2059,12 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { t.Fatalf("failed to add well priced transaction: %v", err) } - tx = pricedTransaction(2, 100000, big.NewInt(3), keys[1]) + tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) if err := pool.AddRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - tx = dynamicFeeTx(3, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + + tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) if err := pool.AddRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } @@ -1916,7 +2075,8 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 1); err != nil { + + if err := validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } if err := validateTxPoolInternals(pool); err != nil { @@ -1991,7 +2151,7 @@ func TestDualHeapEviction(t *testing.T) { add(false) for baseFee = 0; baseFee <= 1000; baseFee += 100 { - pool.priced.SetBaseFee(big.NewInt(int64(baseFee))) + pool.priced.SetBaseFee(uint256.NewInt(uint64(baseFee))) add(true) check(highCap, "fee cap") add(false) @@ -2020,49 +2180,65 @@ func TestTransactionDeduplication(t *testing.T) { // Create a batch of transactions and add a few of them txs := make([]*types.Transaction, 16) + for i := 0; i < len(txs); i++ { txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key) } + var firsts []*types.Transaction + for i := 0; i < len(txs); i += 2 { firsts = append(firsts, txs[i]) } + errs := pool.AddRemotesSync(firsts) - if len(errs) != len(firsts) { - t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) + + if len(errs) != 0 { + t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), 0) } + for i, err := range errs { if err != nil { t.Errorf("add %d failed: %v", i, err) } } + pending, queued := pool.Stats() + if pending != 1 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } + if queued != len(txs)/2-1 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) } + // Try to add all of them now and ensure previous ones error out as knowns errs = pool.AddRemotesSync(txs) - if len(errs) != 
len(txs) { - t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) + if len(errs) != 0 { + t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), 0) } + for i, err := range errs { if i%2 == 0 && err == nil { t.Errorf("add %d succeeded, should have failed as known", i) } + if i%2 == 1 && err != nil { t.Errorf("add %d failed: %v", i, err) } } + pending, queued = pool.Stats() + if pending != len(txs) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs)) } + if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -2096,12 +2272,15 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap pending transaction: %v", err) } + if err := validateEvents(events, 2); err != nil { t.Fatalf("cheap replacement event firing failed: %v", err) } @@ -2109,12 +2288,15 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper pending transaction: %v", err) } + if err := validateEvents(events, 2); err != nil { t.Fatalf("proper replacement event firing failed: %v", err) } @@ -2123,9 +2305,11 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap queued transaction: %v", err) } @@ -2133,9 +2317,11 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, 
big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper queued transaction: %v", err) } @@ -2143,6 +2329,7 @@ func TestTransactionReplacement(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("queued replacement event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -2197,7 +2384,7 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { } // 2. Don't bump tip or feecap => discard tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 3. Bump both more than min => accept @@ -2220,22 +2407,22 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { } // 6. Bump tip max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 7. Bump fee cap max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 8. Bump tip min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 9. Bump fee cap min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 10. Check events match expected (3 new executable txs during pending, 0 during queue) @@ -2465,6 +2652,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) { } // Benchmark the speed of pool validation b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { pool.demoteUnexecutables() } @@ -2496,37 +2684,161 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } // Benchmarks the speed of batched transaction insertion. 
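The hunk below replaces the six fixed-size benchmark functions with one table-driven benchmark that registers a b.Run sub-benchmark per case. A minimal, self-contained sketch of that pattern, assuming only the standard fmt and testing packages (the benchmark name and payload are illustrative, not part of this patch):

func BenchmarkBatchSizes(b *testing.B) {
	for _, size := range []int{100, 1000, 10000} {
		size := size // capture the loop variable for the sub-benchmark closure
		b.Run(fmt.Sprintf("size %d", size), func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_ = make([]byte, size) // stand-in payload for the real batch insert
			}
		})
	}
}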
-func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) } -func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) } -func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) } - -func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) } -func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) } -func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) } - -func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { +func BenchmarkPoolBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, key := setupTxPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000000000)) - batches := make([]types.Transactions, b.N) - for i := 0; i < b.N; i++ { - batches[i] = make(types.Transactions, size) - for j := 0; j < size; j++ { - batches[i][j] = transaction(uint64(size*i+j), 100000, key) - } + const format = "size %d, is local %t" + + cases := []struct { + name string + size int + isLocal bool + }{ + {size: 100, isLocal: false}, + {size: 1000, isLocal: false}, + {size: 10000, isLocal: false}, + + {size: 100, isLocal: true}, + {size: 1000, isLocal: true}, + {size: 10000, isLocal: true}, } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].isLocal) + } + // Benchmark importing the transactions into the queue - b.ResetTimer() - for _, batch := range batches { - if local { - pool.AddLocals(batch) - } else { - pool.AddRemotes(batch) - } + + for _, testCase := range cases { + singleCase := testCase + + b.Run(singleCase.name, func(b *testing.B) { + batches := make([]types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + batches[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batches[i][j] = transaction(uint64(singleCase.size*i+j), 100000, key) + } + } + + b.ResetTimer() + b.ReportAllocs() + + for _, batch := range batches { + if testCase.isLocal { + pool.AddLocals(batch) + } else { + pool.AddRemotes(batch) + } + } + }) + } +} + +func BenchmarkPoolMining(b *testing.B) { + const format = "size %d" + + cases := []struct { + name string + size int + }{ + {size: 1}, + {size: 5}, + {size: 10}, + {size: 20}, + } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size) + } + + const blockGasLimit = 30_000_000 + + // Benchmark importing the transactions into the queue + + for _, testCase := range cases { + singleCase := testCase + + b.Run(singleCase.name, func(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + localKeyPub := localKey.PublicKey + account := crypto.PubkeyToAddress(localKeyPub) + + const balanceStr = "1_000_000_000" + balance, ok := big.NewInt(0).SetString(balanceStr, 0) + if !ok { + b.Fatal("incorrect initial balance", balanceStr) + } + + testAddBalance(pool, account, balance) + + signer := types.NewEIP155Signer(big.NewInt(1)) + baseFee := uint256.NewInt(1) + + const batchesSize = 100 + + batches := make([]types.Transactions, batchesSize) + + for i := 0; i < batchesSize; i++ { + 
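// Nonces are laid out as size*i+j across batches, so all transactions signed by localKey form one gapless, executable nonce sequence when the batches are added in order. + 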
batches[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batches[i][j] = transaction(uint64(singleCase.size*i+j), 100_000, localKey) + } + } + + for _, batch := range batches { + pool.AddRemotes(batch) + } + + var promoted int + + for range pendingAddedCh { + promoted++ + + if promoted >= batchesSize*singleCase.size/2 { + break + } + } + + var total int + + b.ResetTimer() + b.ReportAllocs() + + pendingDurations := make([]time.Duration, b.N) + + var added int + + for i := 0; i < b.N; i++ { + added, pendingDurations[i], _ = mining(b, pool, signer, baseFee, blockGasLimit, i) + total += added + } + + b.StopTimer() + + pendingDurationsFloat := make([]float64, len(pendingDurations)) + + for i, v := range pendingDurations { + pendingDurationsFloat[i] = float64(v.Nanoseconds()) + } + + mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil) + b.Logf("[%s] pending mean %v, stddev %v, %v-%v", + common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat))) + }) } } @@ -2566,62 +2878,355 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { } // Benchmarks the speed of batch transaction insertion in case of multiple accounts. -func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) { +func BenchmarkPoolAccountMultiBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, _ := setupTxPool() defer pool.Stop() - b.ReportAllocs() + batches := make(types.Transactions, b.N) + for i := 0; i < b.N; i++ { key, _ := crypto.GenerateKey() account := crypto.PubkeyToAddress(key.PublicKey) + pool.currentState.AddBalance(account, big.NewInt(1000000)) + tx := transaction(uint64(0), 100000, key) + batches[i] = tx } + // Benchmark importing the transactions into the queue + b.ReportAllocs() b.ResetTimer() + for _, tx := range batches { pool.AddRemotesSync([]*types.Transaction{tx}) } } -type acc struct { - nonce uint64 - key *ecdsa.PrivateKey - account common.Address -} - -type testTx struct { - tx *types.Transaction - idx int - isLocal bool -} +func BenchmarkPoolAccountMultiBatchInsertRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() -const localIdx = 0 + batches := make(types.Transactions, b.N) -func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx { - idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int) + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) - var ( - isLocal bool - key *ecdsa.PrivateKey - ) + pool.currentState.AddBalance(account, big.NewInt(1000000)) - if idx == localIdx { - isLocal = true - key = localKey.key - } else { - key = keys[idx].key + batches[i] = tx } - nonces[idx]++ + done := make(chan struct{}) - gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64) - gasPrice := big.NewInt(0).SetUint64(gasPriceUint) - gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64) + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() - return &testTx{ + var pending map[common.Address]types.Transactions + + loop: + for { + select { + case <-t.C: + pending = pool.Pending(context.Background(), true) + case <-done: + break loop + } + } + + fmt.Fprint(io.Discard, 
pending) + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + pool.AddRemotesSync([]*types.Transaction{tx}) + } + + close(done) +} + +func BenchmarkPoolAccountMultiBatchInsertNoLockRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + _ = localKey + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending map[common.Address]types.Transactions + + for range t.C { + pending = pool.Pending(context.Background(), true) + + if len(pending) >= b.N/2 { + close(done) + + return + } + } + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + pool.AddRemotes([]*types.Transaction{tx}) + } + + <-done +} + +func BenchmarkPoolAccountsBatchInsert(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + tx := transaction(uint64(0), 100000, key) + + batches[i] = tx + } + + // Benchmark importing the transactions into the queue + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemoteSync(tx) + } +} + +func BenchmarkPoolAccountsBatchInsertRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending map[common.Address]types.Transactions + + loop: + for { + select { + case <-t.C: + pending = pool.Pending(context.Background(), true) + case <-done: + break loop + } + } + + fmt.Fprint(io.Discard, pending) + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemoteSync(tx) + } + + close(done) +} + +func BenchmarkPoolAccountsBatchInsertNoLockRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + _ = localKey + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending 
map[common.Address]types.Transactions + + for range t.C { + pending = pool.Pending(context.Background(), true) + + if len(pending) >= b.N/2 { + close(done) + + return + } + } + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemote(tx) + } + + <-done +} + +func TestPoolMultiAccountBatchInsertRace(t *testing.T) { + t.Parallel() + + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + const n = 5000 + + batches := make(types.Transactions, n) + batchesSecond := make(types.Transactions, n) + + for i := 0; i < n; i++ { + batches[i] = newTxs(pool) + batchesSecond[i] = newTxs(pool) + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var ( + pending map[common.Address]types.Transactions + total int + ) + + for range t.C { + pending = pool.Pending(context.Background(), true) + total = len(pending) + + _ = pool.Locals() + + if total >= n { + close(done) + + return + } + } + }() + + for _, tx := range batches { + pool.AddRemotesSync([]*types.Transaction{tx}) + } + + for _, tx := range batchesSecond { + pool.AddRemotes([]*types.Transaction{tx}) + } + + <-done +} + +func newTxs(pool *TxPool) *types.Transaction { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1_000_000_000)) + + return tx +} + +type acc struct { + nonce uint64 + key *ecdsa.PrivateKey + account common.Address +} + +type testTx struct { + tx *types.Transaction + idx int + isLocal bool +} + +const localIdx = 0 + +func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx { + idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int) + + var ( + isLocal bool + key *ecdsa.PrivateKey + ) + + if idx == localIdx { + isLocal = true + key = localKey.key + } else { + key = keys[idx].key + } + + nonces[idx]++ + + gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64) + gasPrice := big.NewInt(0).SetUint64(gasPriceUint) + gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64) + + return &testTx{ tx: pricedTransaction(nonces[idx]-1, gasLimit, gasPrice, key), idx: idx, isLocal: isLocal, @@ -2878,20 +3483,20 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { wg.Wait() var ( - addIntoTxPool func(tx []*types.Transaction) []error + addIntoTxPool func(tx *types.Transaction) error totalInBatch int ) for _, tx := range txs.txs { - addIntoTxPool = pool.AddRemotesSync + addIntoTxPool = pool.AddRemoteSync if tx.isLocal { - addIntoTxPool = pool.AddLocals + addIntoTxPool = pool.AddLocal } - err := addIntoTxPool([]*types.Transaction{tx.tx}) - if len(err) != 0 && err[0] != nil { - rt.Log("on adding a transaction to the tx pool", err[0], tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account)) + err := addIntoTxPool(tx.tx) + if err != nil { + rt.Log("on adding a transaction to the tx pool", err, tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account)) } } @@ -2930,7 +3535,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // check if txPool got stuck if currentTxPoolStats == lastTxPoolStats { - stuckBlocks++ //todo: переписать + stuckBlocks++ //todo: need something better than that } else { stuckBlocks = 0 lastTxPoolStats = 
currentTxPoolStats @@ -2938,7 +3543,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // copy-paste start := time.Now() - pending := pool.Pending(true) + pending := pool.Pending(context.Background(), true) locals := pool.Locals() // from fillTransactions @@ -2956,7 +3561,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // check for nonce gaps var lastNonce, currentNonce int - pending = pool.Pending(true) + pending = pool.Pending(context.Background(), true) for txAcc, pendingTxs := range pending { lastNonce = int(pool.Nonce(txAcc)) - len(pendingTxs) - 1 @@ -3026,7 +3631,7 @@ func fillTransactions(ctx context.Context, pool *TxPool, locals []common.Address signer := types.NewLondonSigner(big.NewInt(1)) // fake baseFee - baseFee := big.NewInt(1) + baseFee := uint256.NewInt(1) blockGasLimit := gasLimit @@ -3083,7 +3688,10 @@ func commitTransactions(pool *TxPool, txs *types.TransactionsByPriceAndNonce, bl if tx.Gas() <= blockGasLimit { blockGasLimit -= tx.Gas() + + pool.mu.Lock() pool.removeTx(tx.Hash(), false) + pool.mu.Unlock() txCount++ } else { @@ -3098,3 +3706,885 @@ func MakeWithPromoteTxCh(ch chan struct{}) func(*TxPool) { pool.promoteTxCh = ch } } + +//nolint:thelper +func mining(tb testing.TB, pool *TxPool, signer types.Signer, baseFee *uint256.Int, blockGasLimit uint64, totalBlocks int) (int, time.Duration, time.Duration) { + var ( + localTxsCount int + remoteTxsCount int + localTxs = make(map[common.Address]types.Transactions) + remoteTxs map[common.Address]types.Transactions + total int + ) + + start := time.Now() + + pending := pool.Pending(context.Background(), true) + + pendingDuration := time.Since(start) + + remoteTxs = pending + + locals := pool.Locals() + + pendingLen, queuedLen := pool.Stats() + + for _, account := range locals { + if txs := remoteTxs[account]; len(txs) > 0 { + delete(remoteTxs, account) + + localTxs[account] = txs + } + } + + localTxsCount = len(localTxs) + remoteTxsCount = len(remoteTxs) + + var txLocalCount int + + if localTxsCount > 0 { + txs := types.NewTransactionsByPriceAndNonce(signer, localTxs, baseFee) + + blockGasLimit, txLocalCount = commitTransactions(pool, txs, blockGasLimit) + + total += txLocalCount + } + + var txRemoteCount int + + if remoteTxsCount > 0 { + txs := types.NewTransactionsByPriceAndNonce(signer, remoteTxs, baseFee) + + _, txRemoteCount = commitTransactions(pool, txs, blockGasLimit) + + total += txRemoteCount + } + + miningDuration := time.Since(start) + + tb.Logf("[%s] mining block. block %d. 
total %d: pending %d(added %d), local %d(added %d), queued %d, localTxsCount %d, remoteTxsCount %d, pending %v, mining %v", + common.NowMilliseconds(), totalBlocks, total, pendingLen, txRemoteCount, localTxsCount, txLocalCount, queuedLen, localTxsCount, remoteTxsCount, pendingDuration, miningDuration) + + return total, pendingDuration, miningDuration +} + +//nolint:paralleltest +func TestPoolMiningDataRaces(t *testing.T) { + if testing.Short() { + t.Skip("only for data race testing") + } + + const format = "size %d, txs ticker %v, api ticker %v" + + cases := []struct { + name string + size int + txsTickerDuration time.Duration + apiTickerDuration time.Duration + }{ + { + size: 1, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 5, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 10, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 20, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 30, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].txsTickerDuration, cases[i].apiTickerDuration) + } + + //nolint:paralleltest + for _, testCase := range cases { + singleCase := testCase + + t.Run(singleCase.name, func(t *testing.T) { + defer goleak.VerifyNone(t, leak.IgnoreList()...) 
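goleak (go.uber.org/goleak) fails a test when goroutines it does not recognize are still running at the end of the test; leak.IgnoreList() supplies this repository's allow-list of known long-lived goroutines. A minimal sketch of the same check, with an illustrative ignore rule standing in for the repo's list:

func TestNoGoroutineLeak(t *testing.T) {
	defer goleak.VerifyNone(t,
		// illustrative allow-list entry; leak.IgnoreList() plays this role above
		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
	)

	done := make(chan struct{})
	go func() { close(done) }() // the worker must exit before the test returns
	<-done
}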
+ + const ( + blocks = 300 + blockGasLimit = 40_000_000 + blockPeriod = time.Second + threads = 10 + batchesSize = 10_000 + timeoutDuration = 10 * blockPeriod + + balanceStr = "1_000_000_000_000" + ) + + apiWithMining(t, balanceStr, batchesSize, singleCase, timeoutDuration, threads, blockPeriod, blocks, blockGasLimit) + }) + } +} + +//nolint:gocognit,thelper +func apiWithMining(tb testing.TB, balanceStr string, batchesSize int, singleCase struct { + name string + size int + txsTickerDuration time.Duration + apiTickerDuration time.Duration +}, timeoutDuration time.Duration, threads int, blockPeriod time.Duration, blocks int, blockGasLimit uint64) { + done := make(chan struct{}) + + var wg sync.WaitGroup + + defer func() { + close(done) + + tb.Logf("[%s] finishing apiWithMining", common.NowMilliseconds()) + + wg.Wait() + + tb.Logf("[%s] apiWithMining finished", common.NowMilliseconds()) + }() + + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + localKeyPub := localKey.PublicKey + account := crypto.PubkeyToAddress(localKeyPub) + + balance, ok := big.NewInt(0).SetString(balanceStr, 0) + if !ok { + tb.Fatal("incorrect initial balance", balanceStr) + } + + testAddBalance(pool, account, balance) + + signer := types.NewEIP155Signer(big.NewInt(1)) + baseFee := uint256.NewInt(1) + + batchesLocal := make([]types.Transactions, batchesSize) + batchesRemote := make([]types.Transactions, batchesSize) + batchesRemotes := make([]types.Transactions, batchesSize) + batchesRemoteSync := make([]types.Transactions, batchesSize) + batchesRemotesSync := make([]types.Transactions, batchesSize) + + for i := 0; i < batchesSize; i++ { + batchesLocal[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batchesLocal[i][j] = pricedTransaction(uint64(singleCase.size*i+j), 100_000, big.NewInt(int64(i+1)), localKey) + } + + batchesRemote[i] = make(types.Transactions, singleCase.size) + + remoteKey, _ := crypto.GenerateKey() + remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) + testAddBalance(pool, remoteAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemote[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteKey) + } + + batchesRemotes[i] = make(types.Transactions, singleCase.size) + + remotesKey, _ := crypto.GenerateKey() + remotesAddr := crypto.PubkeyToAddress(remotesKey.PublicKey) + testAddBalance(pool, remotesAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemotes[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remotesKey) + } + + batchesRemoteSync[i] = make(types.Transactions, singleCase.size) + + remoteSyncKey, _ := crypto.GenerateKey() + remoteSyncAddr := crypto.PubkeyToAddress(remoteSyncKey.PublicKey) + testAddBalance(pool, remoteSyncAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemoteSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteSyncKey) + } + + batchesRemotesSync[i] = make(types.Transactions, singleCase.size) + + remotesSyncKey, _ := crypto.GenerateKey() + remotesSyncAddr := crypto.PubkeyToAddress(remotesSyncKey.PublicKey) + testAddBalance(pool, remotesSyncAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemotesSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), 
remotesSyncKey) + } + } + + tb.Logf("[%s] starting goroutines", common.NowMilliseconds()) + + txsTickerDuration := singleCase.txsTickerDuration + apiTickerDuration := singleCase.apiTickerDuration + + // locals + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddLocal(s)", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddLocal(s)", common.NowMilliseconds()) + }() + + tb.Logf("[%s] starting AddLocal(s)", common.NowMilliseconds()) + + for _, batch := range batchesLocal { + batch := batch + + select { + case <-done: + return + default: + } + + if rand.Int()%2 == 0 { + runWithTimeout(tb, func(_ chan struct{}) { + errs := pool.AddLocals(batch) + if len(errs) != 0 { + tb.Logf("[%s] AddLocals error, %v", common.NowMilliseconds(), errs) + } + }, done, "AddLocals", timeoutDuration, 0, 0) + } else { + for _, tx := range batch { + tx := tx + + runWithTimeout(tb, func(_ chan struct{}) { + err := pool.AddLocal(tx) + if err != nil { + tb.Logf("[%s] AddLocal error %s", common.NowMilliseconds(), err) + } + }, done, "AddLocal", timeoutDuration, 0, 0) + + time.Sleep(txsTickerDuration) + } + } + + time.Sleep(txsTickerDuration) + } + }() + + // remotes + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemotes", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemotes", common.NowMilliseconds()) + }() + + addTransactionsBatches(tb, batchesRemotes, getFnForBatches(pool.AddRemotes), done, timeoutDuration, txsTickerDuration, "AddRemotes", 0) + }() + + // remote + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemote", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemote", common.NowMilliseconds()) + }() + + addTransactions(tb, batchesRemote, pool.AddRemote, done, timeoutDuration, txsTickerDuration, "AddRemote", 0) + }() + + // sync + // remotes + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemotesSync", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemotesSync", common.NowMilliseconds()) + }() + + addTransactionsBatches(tb, batchesRemotesSync, getFnForBatches(pool.AddRemotesSync), done, timeoutDuration, txsTickerDuration, "AddRemotesSync", 0) + }() + + // remote + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemoteSync", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemoteSync", common.NowMilliseconds()) + }() + + addTransactions(tb, batchesRemoteSync, pool.AddRemoteSync, done, timeoutDuration, txsTickerDuration, "AddRemoteSync", 0) + }() + + // tx pool API + for i := 0; i < threads; i++ { + i := i + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Pending-no-tips, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Pending-no-tips, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p := pool.Pending(context.Background(), false) + fmt.Fprint(io.Discard, p) + }, done, "Pending-no-tips", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Pending-with-tips, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Pending-with-tips, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p := pool.Pending(context.Background(), true) + fmt.Fprint(io.Discard, p) + }, done, "Pending-with-tips", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer 
func() { + tb.Logf("[%s] stopping Locals, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Locals, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + l := pool.Locals() + fmt.Fprint(io.Discard, l) + }, done, "Locals", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Content, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Content, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.Content() + fmt.Fprint(io.Discard, p, q) + }, done, "Content", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping GasPriceUint256, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped GasPriceUint256, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.GasPriceUint256() + fmt.Fprint(io.Discard, res) + }, done, "GasPriceUint256", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping GasPrice, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped GasPrice, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.GasPrice() + fmt.Fprint(io.Discard, res) + }, done, "GasPrice", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping SetGasPrice, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped SetGasPrice, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + pool.SetGasPrice(pool.GasPrice()) + }, done, "SetGasPrice", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping ContentFrom, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped ContentFrom, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.ContentFrom(account) + fmt.Fprint(io.Discard, p, q) + }, done, "ContentFrom", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Has, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Has, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.Has(batchesRemotes[0][0].Hash()) + fmt.Fprint(io.Discard, res) + }, done, "Has", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Get, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Get, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + tx := pool.Get(batchesRemotes[0][0].Hash()) + fmt.Fprint(io.Discard, tx == nil) + }, done, "Get", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Nonce, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Nonce, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.Nonce(account) + fmt.Fprint(io.Discard, res) + }, done, "Nonce", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + 
tb.Logf("[%s] stopping Stats, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Stats, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.Stats() + fmt.Fprint(io.Discard, p, q) + }, done, "Stats", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Status, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Status, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + st := pool.Status([]common.Hash{batchesRemotes[1][0].Hash()}) + fmt.Fprint(io.Discard, st) + }, done, "Status", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(c chan struct{}) { + ch := make(chan NewTxsEvent, 10) + sub := pool.SubscribeNewTxsEvent(ch) + + if sub == nil { + return + } + + defer sub.Unsubscribe() + + select { + case <-done: + return + case <-c: + case res := <-ch: + fmt.Fprint(io.Discard, res) + } + + }, done, "SubscribeNewTxsEvent", apiTickerDuration, timeoutDuration, i) + }() + } + + // wait for the start + tb.Logf("[%s] before the first propagated transaction", common.NowMilliseconds()) + <-pendingAddedCh + tb.Logf("[%s] after the first propagated transaction", common.NowMilliseconds()) + + var ( + totalTxs int + totalBlocks int + ) + + pendingDurations := make([]time.Duration, 0, blocks) + + var ( + added int + pendingDuration time.Duration + miningDuration time.Duration + diff time.Duration + ) + + for { + added, pendingDuration, miningDuration = mining(tb, pool, signer, baseFee, blockGasLimit, totalBlocks) + + totalTxs += added + + pendingDurations = append(pendingDurations, pendingDuration) + + totalBlocks++ + + if totalBlocks > blocks { + fmt.Fprint(io.Discard, totalTxs) + break + } + + diff = blockPeriod - miningDuration + if diff > 0 { + time.Sleep(diff) + } + } + + pendingDurationsFloat := make([]float64, len(pendingDurations)) + + for i, v := range pendingDurations { + pendingDurationsFloat[i] = float64(v.Nanoseconds()) + } + + mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil) + tb.Logf("[%s] pending mean %v, stddev %v, %v-%v", + common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat))) +} + +func addTransactionsBatches(tb testing.TB, batches []types.Transactions, fn func(types.Transactions) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, thread int) { + tb.Helper() + + tb.Logf("[%s] starting %s", common.NowMilliseconds(), name) + + defer func() { + tb.Logf("[%s] stop %s", common.NowMilliseconds(), name) + }() + + for _, batch := range batches { + batch := batch + + select { + case <-done: + return + default: + } + + runWithTimeout(tb, func(_ chan struct{}) { + err := fn(batch) + if err != nil { + tb.Logf("[%s] %s error: %s", common.NowMilliseconds(), name, err) + } + }, done, name, timeoutDuration, 0, thread) + + time.Sleep(tickerDuration) + } +} + +func addTransactions(tb testing.TB, batches []types.Transactions, fn func(*types.Transaction) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, 
thread int) { + tb.Helper() + + tb.Logf("[%s] starting %s", common.NowMilliseconds(), name) + + defer func() { + tb.Logf("[%s] stop %s", common.NowMilliseconds(), name) + }() + + for _, batch := range batches { + for _, tx := range batch { + tx := tx + + select { + case <-done: + return + default: + } + + runWithTimeout(tb, func(_ chan struct{}) { + err := fn(tx) + if err != nil { + tb.Logf("%s error: %s", name, err) + } + }, done, name, timeoutDuration, 0, thread) + + time.Sleep(tickerDuration) + } + + time.Sleep(tickerDuration) + } +} + +func getFnForBatches(fn func([]*types.Transaction) []error) func(types.Transactions) error { + return func(batch types.Transactions) error { + errs := fn(batch) + if len(errs) != 0 { + return errs[0] + } + + return nil + } +} + +//nolint:unparam +func runWithTicker(tb testing.TB, fn func(c chan struct{}), done chan struct{}, name string, tickerDuration, timeoutDuration time.Duration, thread int) { + tb.Helper() + + select { + case <-done: + tb.Logf("[%s] Short path. finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread) + + return + default: + } + + defer func() { + tb.Logf("[%s] finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread) + }() + + localTicker := time.NewTicker(tickerDuration) + defer localTicker.Stop() + + n := 0 + + for range localTicker.C { + select { + case <-done: + return + default: + } + + runWithTimeout(tb, fn, done, name, timeoutDuration, n, thread) + + n++ + } +} + +func runWithTimeout(tb testing.TB, fn func(chan struct{}), outerDone chan struct{}, name string, timeoutDuration time.Duration, n, thread int) { + tb.Helper() + + select { + case <-outerDone: + tb.Logf("[%s] Short path. exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + + return + default: + } + + timeout := time.NewTimer(timeoutDuration) + defer timeout.Stop() + + doneCh := make(chan struct{}) + + isError := new(int32) + *isError = 0 + + go func() { + defer close(doneCh) + + select { + case <-outerDone: + return + default: + fn(doneCh) + } + }() + + const isDebug = false + + var stack string + + select { + case <-outerDone: + tb.Logf("[%s] exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + case <-doneCh: + // only for debug + //tb.Logf("[%s] exiting inner runWithTimeout by successful call for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + case <-timeout.C: + atomic.StoreInt32(isError, 1) + + if isDebug { + stack = string(debug.Stack(true)) + } + + tb.Errorf("[%s] %s timed out, thread %d, iteration %d. Stack %s", common.NowMilliseconds(), name, thread, n, stack) + } +} diff --git a/core/txpool2_test.go b/core/txpool2_test.go new file mode 100644 index 0000000000..45f784f343 --- /dev/null +++ b/core/txpool2_test.go @@ -0,0 +1,229 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. +package core + +import ( + "crypto/ecdsa" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" +) + +func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(value), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + return tx +} + +func count(t *testing.T, pool *TxPool) (pending int, queued int) { + t.Helper() + + pending, queued = pool.stats() + + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + return pending, queued +} + +func fillPool(t *testing.T, pool *TxPool) { + t.Helper() + // Create a number of test accounts, fund them and make transactions + executableTxs := types.Transactions{} + nonExecutableTxs := types.Transactions{} + + for i := 0; i < 384; i++ { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(10000000000)) + // Add executable ones + for j := 0; j < int(pool.config.AccountSlots); j++ { + executableTxs = append(executableTxs, pricedTransaction(uint64(j), 100000, big.NewInt(300), key)) + } + } + // Import the batch and verify that limits have been enforced + pool.AddRemotesSync(executableTxs) + pool.AddRemotesSync(nonExecutableTxs) + pending, queued := pool.Stats() + slots := pool.all.Slots() + // sanity-check that the test prerequisites are ok (pending full) + if have, want := pending, slots; have != want { + t.Fatalf("have %d, want %d", have, want) + } + + if have, want := queued, 0; have != want { + t.Fatalf("have %d, want %d", have, want) + } + + t.Logf("pool.config: GlobalSlots=%d, GlobalQueue=%d\n", pool.config.GlobalSlots, pool.config.GlobalQueue) + t.Logf("pending: %d queued: %d, all: %d\n", pending, queued, slots) +} + +// Tests that if a batch of high-priced non-executables arrives, it does not kick out +// executable transactions +func TestTransactionFutureAttack(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + config := testTxPoolConfig + config.GlobalQueue = 100 + config.GlobalSlots = 100 + pool := NewTxPool(config, eip1559Config, blockchain) + + defer pool.Stop() + fillPool(t, pool) + pending, _ := pool.Stats() + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key)) + } + for i := 0; i < 5; i++ { + pool.AddRemotesSync(futureTxs) + newPending, newQueued := count(t, pool) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, 
newQueued, pool.all.Slots()) + } + } + + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have < want { + t.Errorf("wrong pending-count, have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch of high-priced non-executables arrives, it does not kick out +// executable transactions +func TestTransactionFuture1559(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + + defer pool.Stop() + + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + pending, _ := pool.Stats() + + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key)) + } + pool.AddRemotesSync(futureTxs) + } + + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have != want { + t.Errorf("Wrong pending-count, have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch of balance-overdraft txs arrive, they do not kick out +// executable transactions +func TestTransactionZAttack(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + + defer pool.Stop() + + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + + countInvalidPending := func() int { + t.Helper() + + var ivpendingNum int + + pendingtxs, _ := pool.Content() + + for account, txs := range pendingtxs { + curBalance := new(big.Int).Set(pool.currentState.GetBalance(account)) + for _, tx := range txs { + if curBalance.Cmp(tx.Value()) <= 0 { + ivpendingNum++ + } else { + curBalance.Sub(curBalance, tx.Value()) + } + } + } + + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + return ivpendingNum + } + ivPending := countInvalidPending() + t.Logf("invalid pending: %d\n", ivPending) + + // Now, DETER-Z attack starts, let's add a bunch of expensive non-executables (from N accounts) along with balance-overdraft txs (from one account), and see if the pending-count drops + for j := 0; j < int(pool.config.GlobalQueue); j++ { + futureTxs := types.Transactions{} + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key)) + pool.AddRemotesSync(futureTxs) + } + + overDraftTxs := types.Transactions{} + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + for j 
:= 0; j < int(pool.config.GlobalSlots); j++ { + overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 60000000000, 21000, big.NewInt(500), key)) + } + } + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + + newPending, newQueued := count(t, pool) + newIvPending := countInvalidPending() + + t.Logf("pool.all.Slots(): %d\n", pool.all.Slots()) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) + t.Logf("invalid pending: %d\n", newIvPending) + + // Pending should not have been touched + if newIvPending != ivPending { + t.Errorf("Wrong invalid pending-count, have %d, want %d (GlobalSlots: %d, queued: %d)", + newIvPending, ivPending, pool.config.GlobalSlots, newQueued) + } +} diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index 8ad5e739e9..509f86b622 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -19,6 +19,8 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) @@ -44,15 +46,16 @@ func (al AccessList) StorageKeys() int { // AccessListTx is the data of EIP-2930 access list transactions. type AccessListTx struct { - ChainID *big.Int // destination chain ID - Nonce uint64 // nonce of sender account - GasPrice *big.Int // wei per gas - Gas uint64 // gas limit - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int // wei amount - Data []byte // contract invocation input data - AccessList AccessList // EIP-2930 access list - V, R, S *big.Int // signature values + ChainID *big.Int // destination chain ID + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + gasPriceUint256 *uint256.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + AccessList AccessList // EIP-2930 access list + V, R, S *big.Int // signature values } // copy creates a deep copy of the transaction data and initializes all fields. 
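The scheme introduced here for AccessListTx, and repeated below for DynamicFeeTx and LegacyTx, keeps the canonical *big.Int fee field as the source of truth and lazily materializes an unexported *uint256.Int cache beside it on first use, so hot paths such as pool ordering and effective-tip math avoid repeated big.Int work. A minimal standalone sketch of the idea; the feeField type and its names are illustrative, not part of this patch:

package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

// feeField pairs a canonical big.Int fee with a lazily built uint256 cache,
// mirroring the GasPrice / gasPriceUint256 pairing in the hunks around here.
type feeField struct {
	price *big.Int     // source of truth, set when the tx is built or decoded
	cache *uint256.Int // filled on first use, reused afterwards
}

// u256 returns the cached uint256 view, converting from big.Int on first call.
func (f *feeField) u256() *uint256.Int {
	if f.cache != nil {
		return f.cache
	}
	// FromBig reports overflow in its second return value; fees are bounded
	// well below 2^256, so the flag is ignored here just as in the patch.
	f.cache, _ = uint256.FromBig(f.price)
	return f.cache
}

func main() {
	f := feeField{price: big.NewInt(1_000_000_000)}
	fmt.Println(f.u256().ToBig()) // converts once; later calls hit the cache
}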
@@ -80,6 +83,12 @@ func (tx *AccessListTx) copy() TxData { } if tx.GasPrice != nil { cpy.GasPrice.Set(tx.GasPrice) + + if cpy.gasPriceUint256 != nil { + cpy.gasPriceUint256.Set(tx.gasPriceUint256) + } else { + cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -100,11 +109,39 @@ func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } func (tx *AccessListTx) data() []byte { return tx.Data } func (tx *AccessListTx) gas() uint64 { return tx.Gas } func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) value() *big.Int { return tx.Value } -func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } -func (tx *AccessListTx) to() *common.Address { return tx.To } +func (tx *AccessListTx) gasPriceU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} + +func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasTipCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasFeeCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index 53f246ea1f..532544d54e 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -19,19 +19,23 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) type DynamicFeeTx struct { - ChainID *big.Int - Nonce uint64 - GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas - GasFeeCap *big.Int // a.k.a. maxFeePerGas - Gas uint64 - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int - Data []byte - AccessList AccessList + ChainID *big.Int + Nonce uint64 + GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas + gasTipCapUint256 *uint256.Int // a.k.a. maxPriorityFeePerGas + GasFeeCap *big.Int // a.k.a. maxFeePerGas + gasFeeCapUint256 *uint256.Int // a.k.a. 
maxFeePerGas + Gas uint64 + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int + Data []byte + AccessList AccessList // Signature values V *big.Int `json:"v" gencodec:"required"` @@ -65,9 +69,21 @@ func (tx *DynamicFeeTx) copy() TxData { } if tx.GasTipCap != nil { cpy.GasTipCap.Set(tx.GasTipCap) + + if cpy.gasTipCapUint256 != nil { + cpy.gasTipCapUint256.Set(tx.gasTipCapUint256) + } else { + cpy.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap) + } } if tx.GasFeeCap != nil { cpy.GasFeeCap.Set(tx.GasFeeCap) + + if cpy.gasFeeCapUint256 != nil { + cpy.gasFeeCapUint256.Set(tx.gasFeeCapUint256) + } else { + cpy.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -88,11 +104,38 @@ func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } func (tx *DynamicFeeTx) data() []byte { return tx.Data } func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } -func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } -func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } -func (tx *DynamicFeeTx) to() *common.Address { return tx.To } +func (tx *DynamicFeeTx) gasFeeCapU256() *uint256.Int { + if tx.gasFeeCapUint256 != nil { + return tx.gasFeeCapUint256 + } + + tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + + return tx.gasFeeCapUint256 +} +func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *DynamicFeeTx) gasTipCapU256() *uint256.Int { + if tx.gasTipCapUint256 != nil { + return tx.gasTipCapUint256 + } + + tx.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap) + + return tx.gasTipCapUint256 +} +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) gasPriceU256() *uint256.Int { + if tx.gasFeeCapUint256 != nil { + return tx.gasFeeCapUint256 + } + + tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + + return tx.gasFeeCapUint256 +} +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index cb86bed772..72fcd34fa5 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -19,18 +19,21 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) // LegacyTx is the transaction data of regular Ethereum transactions. type LegacyTx struct { - Nonce uint64 // nonce of sender account - GasPrice *big.Int // wei per gas - Gas uint64 // gas limit - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int // wei amount - Data []byte // contract invocation input data - V, R, S *big.Int // signature values + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + gasPriceUint256 *uint256.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + V, R, S *big.Int // signature values } // NewTransaction creates an unsigned legacy transaction.
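The copy() hunks above, and the LegacyTx one that follows, also have to keep this cache coherent across deep copies: when a uint256 value is already available it is Set into the copy, otherwise the copy rebuilds it from the big.Int. A hedged sketch of that invariant in isolation (copyFee is an illustrative helper, not a function in this patch); note that the hunks themselves test the freshly allocated copy's cache, which is still nil at that point, so in practice the FromBig rebuild path is the one that runs:

// copyFee deep-copies a fee and its optional uint256 cache so the copy never
// aliases the original's mutable values. Illustrative helper only.
func copyFee(srcBig *big.Int, srcCache *uint256.Int) (*big.Int, *uint256.Int) {
	if srcBig == nil {
		return nil, nil
	}

	dstBig := new(big.Int).Set(srcBig)
	if srcCache != nil {
		return dstBig, new(uint256.Int).Set(srcCache)
	}

	dstCache, _ := uint256.FromBig(srcBig)

	return dstBig, dstCache
}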
@@ -77,6 +80,12 @@ func (tx *LegacyTx) copy() TxData { } if tx.GasPrice != nil { cpy.GasPrice.Set(tx.GasPrice) + + if cpy.gasPriceUint256 != nil { + cpy.gasPriceUint256.Set(tx.gasPriceUint256) + } else { + cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -97,11 +106,38 @@ func (tx *LegacyTx) accessList() AccessList { return nil } func (tx *LegacyTx) data() []byte { return tx.Data } func (tx *LegacyTx) gas() uint64 { return tx.Gas } func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) value() *big.Int { return tx.Value } -func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } -func (tx *LegacyTx) to() *common.Address { return tx.To } +func (tx *LegacyTx) gasPriceU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasTipCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasFeeCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/transaction.go b/core/types/transaction.go index e0e52f25bc..9b89f12517 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -25,6 +25,8 @@ import ( "sync/atomic" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -53,9 +55,9 @@ type Transaction struct { time time.Time // Time first seen locally (spam avoidance) // caches - hash atomic.Value - size atomic.Value - from atomic.Value + hash atomic.Pointer[common.Hash] + size atomic.Pointer[common.StorageSize] + from atomic.Pointer[sigCache] } // NewTx creates a new transaction. @@ -77,8 +79,11 @@ type TxData interface { data() []byte gas() uint64 gasPrice() *big.Int + gasPriceU256() *uint256.Int gasTipCap() *big.Int + gasTipCapU256() *uint256.Int gasFeeCap() *big.Int + gasFeeCapU256() *uint256.Int value() *big.Int nonce() uint64 to() *common.Address @@ -194,7 +199,8 @@ func (tx *Transaction) setDecoded(inner TxData, size int) { tx.inner = inner tx.time = time.Now() if size > 0 { - tx.size.Store(common.StorageSize(size)) + v := float64(size) + tx.size.Store((*common.StorageSize)(&v)) } } @@ -265,16 +271,23 @@ func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() } func (tx *Transaction) Gas() uint64 { return tx.inner.gas() } // GasPrice returns the gas price of the transaction. 
-func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } +func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } +func (tx *Transaction) GasPriceRef() *big.Int { return tx.inner.gasPrice() } +func (tx *Transaction) GasPriceUint() *uint256.Int { return tx.inner.gasPriceU256() } // GasTipCap returns the gasTipCap per gas of the transaction. -func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) } +func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) } +func (tx *Transaction) GasTipCapRef() *big.Int { return tx.inner.gasTipCap() } +func (tx *Transaction) GasTipCapUint() *uint256.Int { return tx.inner.gasTipCapU256() } // GasFeeCap returns the fee cap per gas of the transaction. -func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } +func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } +func (tx *Transaction) GasFeeCapRef() *big.Int { return tx.inner.gasFeeCap() } +func (tx *Transaction) GasFeeCapUint() *uint256.Int { return tx.inner.gasFeeCapU256() } // Value returns the ether amount of the transaction. -func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } +func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } +func (tx *Transaction) ValueRef() *big.Int { return tx.inner.value() } // Nonce returns the sender account nonce of the transaction. func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } @@ -287,9 +300,19 @@ func (tx *Transaction) To() *common.Address { // Cost returns gas * gasPrice + value. func (tx *Transaction) Cost() *big.Int { - total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) - total.Add(total, tx.Value()) - return total + gasPrice, _ := uint256.FromBig(tx.GasPriceRef()) + gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas())) + value, _ := uint256.FromBig(tx.ValueRef()) + + return gasPrice.Add(gasPrice, value).ToBig() +} + +func (tx *Transaction) CostUint() *uint256.Int { + gasPrice, _ := uint256.FromBig(tx.GasPriceRef()) + gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas())) + value, _ := uint256.FromBig(tx.ValueRef()) + + return gasPrice.Add(gasPrice, value) } // RawSignatureValues returns the V, R, S signature values of the transaction. @@ -303,11 +326,18 @@ func (tx *Transaction) GasFeeCapCmp(other *Transaction) int { return tx.inner.gasFeeCap().Cmp(other.inner.gasFeeCap()) } -// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap. func (tx *Transaction) GasFeeCapIntCmp(other *big.Int) int { return tx.inner.gasFeeCap().Cmp(other) } +func (tx *Transaction) GasFeeCapUIntCmp(other *uint256.Int) int { + return tx.inner.gasFeeCapU256().Cmp(other) +} + +func (tx *Transaction) GasFeeCapUIntLt(other *uint256.Int) bool { + return tx.inner.gasFeeCapU256().Lt(other) +} + // GasTipCapCmp compares the gasTipCap of two transactions. 
func (tx *Transaction) GasTipCapCmp(other *Transaction) int { return tx.inner.gasTipCap().Cmp(other.inner.gasTipCap()) @@ -318,6 +348,14 @@ func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int { return tx.inner.gasTipCap().Cmp(other) } +func (tx *Transaction) GasTipCapUIntCmp(other *uint256.Int) int { + return tx.inner.gasTipCapU256().Cmp(other) +} + +func (tx *Transaction) GasTipCapUIntLt(other *uint256.Int) bool { + return tx.inner.gasTipCapU256().Lt(other) +} + // EffectiveGasTip returns the effective miner gasTipCap for the given base fee. // Note: if the effective gasTipCap is negative, this method returns both the // actual negative value and ErrGasFeeCapTooLow as the error @@ -356,10 +394,73 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i return tx.EffectiveGasTipValue(baseFee).Cmp(other) } +func (tx *Transaction) EffectiveGasTipUintCmp(other *uint256.Int, baseFee *uint256.Int) int { + if baseFee == nil { + return tx.GasTipCapUIntCmp(other) + } + + return tx.EffectiveGasTipValueUint(baseFee).Cmp(other) +} + +func (tx *Transaction) EffectiveGasTipUintLt(other *uint256.Int, baseFee *uint256.Int) bool { + if baseFee == nil { + return tx.GasTipCapUIntLt(other) + } + + return tx.EffectiveGasTipValueUint(baseFee).Lt(other) +} + +func (tx *Transaction) EffectiveGasTipTxUintCmp(other *Transaction, baseFee *uint256.Int) int { + if baseFee == nil { + return tx.inner.gasTipCapU256().Cmp(other.inner.gasTipCapU256()) + } + + return tx.EffectiveGasTipValueUint(baseFee).Cmp(other.EffectiveGasTipValueUint(baseFee)) +} + +func (tx *Transaction) EffectiveGasTipValueUint(baseFee *uint256.Int) *uint256.Int { + effectiveTip, _ := tx.EffectiveGasTipUnit(baseFee) + return effectiveTip +} + +func (tx *Transaction) EffectiveGasTipUnit(baseFee *uint256.Int) (*uint256.Int, error) { + if baseFee == nil { + return tx.GasTipCapUint(), nil + } + + var err error + + gasFeeCap := tx.GasFeeCapUint().Clone() + + if gasFeeCap.Lt(baseFee) { + err = ErrGasFeeCapTooLow + } + + gasTipCapUint := tx.GasTipCapUint() + + if gasFeeCap.Lt(gasTipCapUint) { + return gasFeeCap, err + } + + gasFeeCap.Sub(gasFeeCap, baseFee) + + if gasFeeCap.Gt(gasTipCapUint) || gasFeeCap.Eq(gasTipCapUint) { + return gasTipCapUint, err + } + + return gasFeeCap, err +} + // Hash returns the transaction hash. func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { - return hash.(common.Hash) + return *hash } var h common.Hash @@ -368,7 +469,9 @@ func (tx *Transaction) Hash() common.Hash { } else { h = prefixedRlpHash(tx.Type(), tx.inner) } - tx.hash.Store(h) + + tx.hash.Store(&h) + return h } @@ -376,11 +479,14 @@ func (tx *Transaction) Hash() common.Hash { // encoding and returning it, or returning a previously cached value.
func (tx *Transaction) Size() common.StorageSize { if size := tx.size.Load(); size != nil { - return size.(common.StorageSize) + return *size } + c := writeCounter(0) + rlp.Encode(&c, &tx.inner) - tx.size.Store(common.StorageSize(c)) + tx.size.Store((*common.StorageSize)(&c)) + return common.StorageSize(c) } @@ -444,14 +550,14 @@ func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap type TxWithMinerFee struct { tx *Transaction - minerFee *big.Int + minerFee *uint256.Int } // NewTxWithMinerFee creates a wrapped transaction, calculating the effective // miner gasTipCap if a base fee is provided. // Returns error in case of a negative effective miner gasTipCap. -func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { - minerFee, err := tx.EffectiveGasTip(baseFee) +func NewTxWithMinerFee(tx *Transaction, baseFee *uint256.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveGasTipUnit(baseFee) if err != nil { return nil, err } @@ -496,7 +602,7 @@ type TransactionsByPriceAndNonce struct { txs map[common.Address]Transactions // Per account nonce-sorted list of transactions heads TxByPriceAndTime // Next transaction for each unique account (price heap) signer Signer // Signer for the set of transactions - baseFee *big.Int // Current base fee + baseFee *uint256.Int // Current base fee } // NewTransactionsByPriceAndNonce creates a transaction set that can retrieve @@ -504,6 +610,7 @@ type TransactionsByPriceAndNonce struct { // // Note, the input map is reowned so the caller should not interact any more with // it after providing it to the constructor. +/* func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { // Initialize a price and received time based heap with the head transactions heads := make(TxByPriceAndTime, 0, len(txs)) for from, accTxs := range txs { if len(accTxs) == 0 { continue } acc, _ := Sender(signer, accTxs[0]) wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) // Remove transaction if sender doesn't match from, or if wrapping fails. if acc != from || err != nil { delete(txs, from) continue } heads = append(heads, wrapped) txs[from] = accTxs[1:] } heap.Init(&heads) + // Assemble and return the transaction set return &TransactionsByPriceAndNonce{ txs: txs, heads: heads, signer: signer, baseFee: baseFee, } +}*/ + +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *uint256.Int) *TransactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(TxByPriceAndTime, 0, len(txs)) + + for from, accTxs := range txs { + if len(accTxs) == 0 { + continue + } + + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { + delete(txs, from) + continue + } + + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + + heap.Init(&heads) + // Assemble and return the transaction set return &TransactionsByPriceAndNonce{ txs: txs, diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 1d0d2a4c75..959aba637a 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -130,12 +130,11 @@ func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction // not match the signer used in the current call.
func Sender(signer Signer, tx *Transaction) (common.Address, error) { if sc := tx.from.Load(); sc != nil { - sigCache := sc.(sigCache) // If the signer used to derive from in a previous // call is not the same as used current, invalidate // the cache. - if sigCache.signer.Equal(signer) { - return sigCache.from, nil + if sc.signer.Equal(signer) { + return sc.from, nil } } @@ -143,7 +142,9 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) { if err != nil { return common.Address{}, err } - tx.from.Store(sigCache{signer: signer, from: addr}) + + tx.from.Store(&sigCache{signer: signer, from: addr}) + return addr, nil } @@ -461,10 +462,10 @@ func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v * func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { return rlpHash([]interface{}{ tx.Nonce(), - tx.GasPrice(), + tx.GasPriceRef(), tx.Gas(), tx.To(), - tx.Value(), + tx.ValueRef(), tx.Data(), }) } diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index a4755675cd..255a7b76b4 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -27,7 +27,10 @@ import ( "testing" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) @@ -272,14 +275,22 @@ func TestTransactionPriceNonceSort1559(t *testing.T) { // Tests that transactions can be correctly sorted according to their price in // decreasing order, but at the same time with increasing nonces when issued by // the same account. -func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { +// +//nolint:gocognit,thelper +func testTransactionPriceNonceSort(t *testing.T, baseFeeBig *big.Int) { // Generate a batch of accounts to start with keys := make([]*ecdsa.PrivateKey, 25) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() } + signer := LatestSignerForChainID(common.Big1) + var baseFee *uint256.Int + if baseFeeBig != nil { + baseFee = cmath.FromBig(baseFeeBig) + } + // Generate a batch of transactions with overlapping values, but shifted nonces groups := map[common.Address]Transactions{} expectedCount := 0 @@ -308,7 +319,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), Data: nil, }) - if count == 25 && int64(gasFeeCap) < baseFee.Int64() { + if count == 25 && uint64(gasFeeCap) < baseFee.Uint64() { count = i } } @@ -341,12 +352,25 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) } } + // If the next tx has different from account, the price must be lower than the current one if i+1 < len(txs) { next := txs[i+1] fromNext, _ := Sender(signer, next) - tip, err := txi.EffectiveGasTip(baseFee) - nextTip, nextErr := next.EffectiveGasTip(baseFee) + tip, err := txi.EffectiveGasTipUnit(baseFee) + nextTip, nextErr := next.EffectiveGasTipUnit(baseFee) + + tipBig, _ := txi.EffectiveGasTip(baseFeeBig) + nextTipBig, _ := next.EffectiveGasTip(baseFeeBig) + + if tip.Cmp(cmath.FromBig(tipBig)) != 0 { + t.Fatalf("EffectiveGasTip incorrect. uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", tip.String(), tipBig.String(), baseFee.String(), baseFeeBig.String()) + } + + if nextTip.Cmp(cmath.FromBig(nextTipBig)) != 0 { + t.Fatalf("EffectiveGasTip next incorrect. 
uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", nextTip.String(), nextTipBig.String(), baseFee.String(), baseFeeBig.String()) + } + if err != nil || nextErr != nil { t.Errorf("error calculating effective tip") } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 9210f5486c..d7d9eeb525 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/bn256" "github.com/ethereum/go-ethereum/params" - //lint:ignore SA1019 Needed for precompile + big2 "github.com/holiman/big" "golang.org/x/crypto/ripemd160" ) @@ -266,9 +266,10 @@ var ( // modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 // // def mult_complexity(x): -// if x <= 64: return x ** 2 -// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 -// else: return x ** 2 // 16 + 480 * x - 199680 +// +// if x <= 64: return x ** 2 +// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 +// else: return x ** 2 // 16 + 480 * x - 199680 // // where x is max(length_of_MODULUS, length_of_BASE) func modexpMultComplexity(x *big.Int) *big.Int { @@ -379,15 +380,24 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + base = new(big2.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big2.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big2.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + v []byte ) - if mod.BitLen() == 0 { + + switch { + case mod.BitLen() == 0: // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil + case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). + // If base == 1, then we can just return base % mod (if mod >= 1, which it is) + v = base.Mod(base, mod).Bytes() + default: + v = base.Exp(base, exp, mod).Bytes() } - return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil + + return common.LeftPadBytes(v, int(modLen)), nil } // newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point, diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index c32c40e2c6..31c73965fc 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -93,7 +93,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) [jsonrpc.timeouts] - read = "30s" + read = "10s" write = "30s" idle = "2m0s" diff --git a/eth/api_backend.go b/eth/api_backend.go index 60aea7527e..c8825dc582 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -236,11 +236,18 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri } func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - return b.eth.txPool.AddLocal(signedTx) + err := b.eth.txPool.AddLocal(signedTx) + if err != nil { + if unwrapped := errors.Unwrap(err); unwrapped != nil { + return unwrapped + } + } + + return err } func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { - pending := b.eth.txPool.Pending(false) + pending := b.eth.txPool.Pending(context.Background(), false) var txs types.Transactions for _, batch := range pending { txs = append(txs, batch...) diff --git a/eth/bor_checkpoint_verifier.go b/eth/bor_checkpoint_verifier.go index 61e8c382e1..ad81eb6116 100644 --- a/eth/bor_checkpoint_verifier.go +++ b/eth/bor_checkpoint_verifier.go @@ -26,6 +26,7 @@ func newCheckpointVerifier(verifyFn func(ctx context.Context, handler *ethHandle ) // check if we have the checkpoint blocks + //nolint:contextcheck head := handler.ethAPI.BlockNumber() if head < hexutil.Uint64(endBlock) { log.Debug("Head block behind checkpoint block", "head", head, "checkpoint end block", endBlock) diff --git a/eth/handler.go b/eth/handler.go index 8e6d89f9ef..48bdf8eb15 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -17,6 +17,7 @@ package eth import ( + "context" "errors" "math" "math/big" @@ -69,7 +70,7 @@ type txPool interface { // Pending should return pending transactions. // The slice should be modifiable by the caller. - Pending(enforceTips bool) map[common.Address]types.Transactions + Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions // SubscribeNewTxsEvent should return an event subscription of // NewTxsEvent and send events to the given channel. diff --git a/eth/handler_test.go b/eth/handler_test.go index c6d7811d10..7a14619159 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -17,6 +17,7 @@ package eth import ( + "context" "math/big" "sort" "sync" @@ -92,7 +93,7 @@ func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { } // Pending returns all the transactions known to the pool -func (p *testTxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { +func (p *testTxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions { p.lock.RLock() defer p.lock.RUnlock() diff --git a/eth/sync.go b/eth/sync.go index aa79b6181c..377acff95c 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -17,6 +17,7 @@ package eth import ( + "context" "errors" "math/big" "sync/atomic" @@ -44,20 +45,24 @@ func (h *handler) syncTransactions(p *eth.Peer) { // // TODO(karalabe): Figure out if we could get away with random order somehow var txs types.Transactions - pending := h.txpool.Pending(false) + + pending := h.txpool.Pending(context.Background(), false) for _, batch := range pending { txs = append(txs, batch...) } + if len(txs) == 0 { return } // The eth/65 protocol introduces proper transaction announcements, so instead // of dripping transactions across multiple peers, just send the entire list as // an announcement and let the remote side decide what they need (likely nothing). 
+ hashes := make([]common.Hash, len(txs)) for i, tx := range txs { hashes[i] = tx.Hash() } + p.AsyncSendPooledTransactionHashes(hashes) } diff --git a/go.mod b/go.mod index f55b2f9aa7..69fa5990bd 100644 --- a/go.mod +++ b/go.mod @@ -12,13 +12,13 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 - github.com/btcsuite/btcd/btcec/v2 v2.1.2 + github.com/btcsuite/btcd/btcec/v2 v2.1.3 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf + github.com/docker/docker v1.6.1 github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 github.com/edsrzf/mmap-go v1.0.0 github.com/fatih/color v1.7.0 @@ -37,6 +37,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/hashicorp/hcl/v2 v2.10.1 + github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.2.0 github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204 @@ -69,12 +70,12 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0 go.opentelemetry.io/otel/sdk v1.2.0 go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 - golang.org/x/text v0.3.7 + golang.org/x/crypto v0.1.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.6.0 + golang.org/x/text v0.8.0 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - golang.org/x/tools v0.1.12 + golang.org/x/tools v0.6.0 gonum.org/v1/gonum v0.11.0 google.golang.org/grpc v1.48.0 google.golang.org/protobuf v1.28.0 @@ -90,7 +91,7 @@ require github.com/gammazero/deque v0.2.1 // indirect require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect - github.com/Masterminds/goutils v1.1.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect @@ -136,9 +137,9 @@ require ( go.opentelemetry.io/otel/trace v1.2.0 go.opentelemetry.io/proto/otlp v0.10.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220728030405-41545e8bf201 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/term v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 4b312ccfb1..61c9fd1ca5 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,9 @@ github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d h1:RO27lg github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d/go.mod h1:romz7UPgSYhfJkKOalzEEyV6sWtt/eAEm0nX2aOrod0= github.com/JekaMas/workerpool v1.1.5 h1:xmrx2Zyft95CEGiEqzDxiawptCIRZQ0zZDhTGDFOCaw= 
github.com/JekaMas/workerpool v1.1.5/go.mod h1:IoDWPpwMcA27qbuugZKeBslDrgX09lVmksuh9sjzbhc= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= @@ -84,8 +85,8 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/btcsuite/btcd/btcec/v2 v2.1.2 h1:YoYoC9J0jwfukodSBMzZYUVQ8PTiYg4BnOWiJVzTmLs= -github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0 h1:MSskdM4/xJYcFzy0altH/C/xHopifpWzHUi1JeVI34Q= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= @@ -136,8 +137,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.6.1 h1:4xYASHy5cScPkLD7PO0uTmnVc860m9NarPN1X8zeMe8= +github.com/docker/docker v1.6.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= @@ -275,6 +276,8 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl/v2 v2.10.1 h1:h4Xx4fsrRE26ohAk/1iGF/JBqRQbyUqu5Lvj60U54ys= github.com/hashicorp/hcl/v2 v2.10.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 
h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= @@ -535,8 +538,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -569,8 +572,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -600,8 +603,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220728030405-41545e8bf201 h1:bvOltf3SADAfG05iRml8lAB3qjoEX5RCyN4K6G5v3N0= -golang.org/x/net v0.0.0-20220728030405-41545e8bf201/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -615,8 +618,8 @@ golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -658,12 +661,12 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -671,8 +674,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -709,8 +713,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index ca7a235ace..b733f36988 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -540,7 +540,7 @@ func DefaultConfig() *Config { VHost: []string{"localhost"}, }, HttpTimeout: &HttpTimeouts{ - ReadTimeout: 30 * time.Second, + ReadTimeout: 10 * time.Second, WriteTimeout: 30 * time.Second, IdleTimeout: 120 * time.Second, }, diff --git a/internal/cli/server/pprof/pprof.go b/internal/cli/server/pprof/pprof.go index 44034f3bb8..69056bd0fb 100644 --- a/internal/cli/server/pprof/pprof.go +++ b/internal/cli/server/pprof/pprof.go @@ -61,6 +61,28 @@ func CPUProfile(ctx context.Context, sec int) ([]byte, map[string]string, error) }, nil } +// CPUProfileWithChannel generates a CPU profile until the done channel is signalled, or for at most 30 seconds +func CPUProfileWithChannel(done chan bool) ([]byte, map[string]string, error) { + var buf bytes.Buffer + if err := pprof.StartCPUProfile(&buf); err != nil { + return nil, nil, err + } + + select { + case <-done: + case <-time.After(30 * time.Second): + } + + pprof.StopCPUProfile() + + return buf.Bytes(), + map[string]string{ + "X-Content-Type-Options": "nosniff", + "Content-Type": "application/octet-stream", + "Content-Disposition": `attachment; filename="profile"`, + }, nil +} + // Trace runs a trace profile for a given duration func Trace(ctx context.Context, sec int) ([]byte, map[string]string, error) { if sec <= 0 { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 8ba6ea0b91..f130813234 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "math/big" + "runtime" "strings" "time" @@ -1457,16 +1458,26 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig, db ethdb.Database) *RPCTransaction { txs := b.Transactions() - borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) - if borReceipt != nil { - if borReceipt.TxHash != (common.Hash{}) { - borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash()) - if borTx != nil { - txs = append(txs, borTx) + if index >= uint64(len(txs)+1) { + return nil + } + + var borReceipt *types.Receipt + + // Read bor receipts if a state-sync transaction is requested + if index == uint64(len(txs)) { + borReceipt
= rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) + if borReceipt != nil { + if borReceipt.TxHash != (common.Hash{}) { + borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash()) + if borTx != nil { + txs = append(txs, borTx) + } + } + } + // If the index is still out of the range after checking bor state sync transaction, it means that the transaction index is invalid if index >= uint64(len(txs)) { return nil } @@ -1474,7 +1485,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *param rpcTx := newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config) // If the transaction is a bor transaction, we need to set the hash to the derived bor tx hash. BorTx is always the last index. - if borReceipt != nil && int(index) == len(txs)-1 { + if borReceipt != nil && index == uint64(len(txs)-1) { rpcTx.Hash = borReceipt.TxHash } @@ -2222,6 +2233,21 @@ func (api *PrivateDebugAPI) PurgeCheckpointWhitelist() { api.b.PurgeCheckpointWhitelist() } +// GetTraceStack returns the stack traces of all running goroutines +func (api *PrivateDebugAPI) GetTraceStack() string { + buf := make([]byte, 1024) + + for { + n := runtime.Stack(buf, true) + + if n < len(buf) { + return string(buf) + } + + buf = make([]byte, 2*len(buf)) + } +} + // PublicNetAPI offers network related RPC methods type PublicNetAPI struct { net *p2p.Server diff --git a/internal/testlog/testlog.go b/internal/testlog/testlog.go index a5836b8446..93d6f27086 100644 --- a/internal/testlog/testlog.go +++ b/internal/testlog/testlog.go @@ -148,3 +148,35 @@ func (l *logger) flush() { } l.h.buf = nil } + +func (l *logger) OnTrace(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlTrace { + fn(l.Trace) + } +} + +func (l *logger) OnDebug(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlDebug { + fn(l.Debug) + } +} +func (l *logger) OnInfo(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlInfo { + fn(l.Info) + } +} +func (l *logger) OnWarn(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlWarn { + fn(l.Warn) + } +} +func (l *logger) OnError(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlError { + fn(l.Error) + } +} +func (l *logger) OnCrit(fn func(l log.Logging)) { + if l.GetHandler().Level() >= log.LvlCrit { + fn(l.Crit) + } +} diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index c823f096d6..38ce69e33e 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -512,6 +512,11 @@ web3._extend({ call: 'debug_purgeCheckpointWhitelist', params: 0, }), + new web3._extend.Method({ + name: 'getTraceStack', + call: 'debug_getTraceStack', + params: 0, + }), ], properties: [] }); diff --git a/les/handler_test.go b/les/handler_test.go index 3ceabdf8ec..af3324b042 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -617,7 +617,7 @@ func testTransactionStatus(t *testing.T, protocol int) { sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()}) } if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { - t.Errorf("transaction status mismatch") + t.Error("transaction status mismatch", err) } } signer := types.HomesteadSigner{} diff --git a/les/server_requests.go b/les/server_requests.go index 3595a6ab38..b31c11c9d0 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -507,25 +507,39 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) { if err := msg.Decode(&r); err !=
nil { return nil, 0, 0, err } + amount := uint64(len(r.Txs)) + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { stats := make([]light.TxStatus, len(r.Txs)) + + var ( + err error + addFn func(transaction *types.Transaction) error + ) + for i, tx := range r.Txs { if i != 0 && !waitOrStop() { return nil } + hash := tx.Hash() stats[i] = txStatus(backend, hash) + if stats[i].Status == core.TxStatusUnknown { - addFn := backend.TxPool().AddRemotes + addFn = backend.TxPool().AddRemote + // Add txs synchronously for testing purpose if backend.AddTxsSync() { - addFn = backend.TxPool().AddRemotesSync + addFn = backend.TxPool().AddRemoteSync } - if errs := addFn([]*types.Transaction{tx}); errs[0] != nil { - stats[i].Error = errs[0].Error() + + if err = addFn(tx); err != nil { + stats[i].Error = err.Error() + continue } + stats[i] = txStatus(backend, hash) } } diff --git a/log/logger.go b/log/logger.go index 2b96681a82..c2678259bf 100644 --- a/log/logger.go +++ b/log/logger.go @@ -106,6 +106,8 @@ type RecordKeyNames struct { Ctx string } +type Logging func(msg string, ctx ...interface{}) + // A Logger writes key/value pairs to a Handler type Logger interface { // New returns a new Logger that has this logger's context plus the given context @@ -124,6 +126,13 @@ type Logger interface { Warn(msg string, ctx ...interface{}) Error(msg string, ctx ...interface{}) Crit(msg string, ctx ...interface{}) + + OnTrace(func(l Logging)) + OnDebug(func(l Logging)) + OnInfo(func(l Logging)) + OnWarn(func(l Logging)) + OnError(func(l Logging)) + OnCrit(func(l Logging)) } type logger struct { @@ -198,6 +207,38 @@ func (l *logger) SetHandler(h Handler) { l.h.Swap(h) } +func (l *logger) OnTrace(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlTrace { + fn(l.Trace) + } +} + +func (l *logger) OnDebug(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlDebug { + fn(l.Debug) + } +} +func (l *logger) OnInfo(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlInfo { + fn(l.Info) + } +} +func (l *logger) OnWarn(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlWarn { + fn(l.Warn) + } +} +func (l *logger) OnError(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlError { + fn(l.Error) + } +} +func (l *logger) OnCrit(fn func(l Logging)) { + if l.GetHandler().Level() >= LvlCrit { + fn(l.Crit) + } +} + func normalize(ctx []interface{}) []interface{} { // if the caller passed a Ctx object, then expand it if len(ctx) == 1 { diff --git a/log/root.go b/log/root.go index 9fb4c5ae0b..04b80f4a02 100644 --- a/log/root.go +++ b/log/root.go @@ -60,6 +60,38 @@ func Crit(msg string, ctx ...interface{}) { os.Exit(1) } +func OnTrace(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlTrace { + fn(root.Trace) + } +} + +func OnDebug(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlDebug { + fn(root.Debug) + } +} +func OnInfo(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlInfo { + fn(root.Info) + } +} +func OnWarn(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlWarn { + fn(root.Warn) + } +} +func OnError(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlError { + fn(root.Error) + } +} +func OnCrit(fn func(l Logging)) { + if root.GetHandler().Level() >= LvlCrit { + fn(root.Crit) + } +} + // Output is a convenient alias for write, allowing for the modification of // the calldepth (number of stack frames to skip). // calldepth influences the reported line number of the log message. 
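The On* guards added to log/logger.go and log/root.go above exist so call sites can defer building expensive log arguments until the level is known to be enabled; miner/worker.go below uses exactly this for its per-transaction debug logging. A usage sketch against the patched log package (expensiveHexDump is a stand-in for costly argument construction, not a real function):

package main

import "github.com/ethereum/go-ethereum/log"

// expensiveHexDump stands in for argument construction that is only worth
// paying for when debug logging is actually enabled.
func expensiveHexDump() string {
	return "0xdeadbeef"
}

func main() {
	// The closure runs only if the root handler's level is >= LvlDebug,
	// so expensiveHexDump is never called on quieter levels.
	log.OnDebug(func(lg log.Logging) {
		lg("state dump", "root", expensiveHexDump())
	})
}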
diff --git a/miner/fake_miner.go b/miner/fake_miner.go index 3ca2f5be77..39cc999a0a 100644 --- a/miner/fake_miner.go +++ b/miner/fake_miner.go @@ -47,7 +47,7 @@ func NewBorDefaultMiner(t *testing.T) *DefaultBorMiner { ethAPI.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: common.Address{0x1}, diff --git a/miner/worker.go b/miner/worker.go index 30809cd558..9d04838ccb 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -17,10 +17,15 @@ package miner import ( + "bytes" "context" "errors" "fmt" "math/big" + "os" + "runtime" + "runtime/pprof" + ptrace "runtime/trace" "sync" "sync/atomic" "time" @@ -31,14 +36,17 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/tracing" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/bor" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -83,6 +91,12 @@ const ( staleThreshold = 7 ) +// metrics counters to track total and empty blocks sealed by a miner +var ( + sealedBlocksCounter = metrics.NewRegisteredCounter("worker/sealedBlocks", nil) + sealedEmptyBlocksCounter = metrics.NewRegisteredCounter("worker/sealedEmptyBlocks", nil) +) + // environment is the worker's current environment and holds all // information of the sealing block generation. type environment struct { @@ -257,6 +271,8 @@ type worker struct { skipSealHook func(*task) bool // Method to decide whether skipping the sealing. fullTaskHook func() // Method to call before pushing the full sealing task. resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
+
+	profileCount *int32 // Global count for profiling
 }
 
 //nolint:staticcheck
@@ -285,6 +301,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
 		resubmitIntervalCh: make(chan time.Duration),
 		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
 	}
+	worker.profileCount = new(int32)
 	// Subscribe NewTxsEvent for tx pool
 	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
 	// Subscribe events for blockchain
@@ -560,9 +577,11 @@ func (w *worker) mainLoop(ctx context.Context) {
 	for {
 		select {
 		case req := <-w.newWorkCh:
+			//nolint:contextcheck
 			w.commitWork(req.ctx, req.interrupt, req.noempty, req.timestamp)
 
 		case req := <-w.getWorkCh:
+			//nolint:contextcheck
 			block, err := w.generateWork(req.ctx, req.params)
 			if err != nil {
 				req.err = err
@@ -622,13 +641,17 @@ func (w *worker) mainLoop(ctx context.Context) {
 				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
 					continue
 				}
+
 				txs := make(map[common.Address]types.Transactions)
+
 				for _, tx := range ev.Txs {
 					acc, _ := types.Sender(w.current.signer, tx)
 					txs[acc] = append(txs[acc], tx)
 				}
-				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
+
+				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, cmath.FromBig(w.current.header.BaseFee))
 				tcount := w.current.tcount
+
 				w.commitTransactions(w.current, txset, nil)
 
 				// Only update the snapshot if any new transactions were added
@@ -758,7 +781,7 @@ func (w *worker) resultLoop() {
 				err     error
 			)
 
-			tracing.Exec(task.ctx, "resultLoop", func(ctx context.Context, span trace.Span) {
+			tracing.Exec(task.ctx, "", "resultLoop", func(ctx context.Context, span trace.Span) {
 				for i, taskReceipt := range task.receipts {
 					receipt := new(types.Receipt)
 					receipts[i] = receipt
@@ -782,8 +805,8 @@ func (w *worker) resultLoop() {
 			}
 
 			// Commit block and state to database.
-			tracing.ElapsedTime(ctx, span, "WriteBlockAndSetHead time taken", func(_ context.Context, _ trace.Span) {
-				_, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
+			tracing.Exec(ctx, "", "resultLoop.WriteBlockAndSetHead", func(ctx context.Context, span trace.Span) {
+				_, err = w.chain.WriteBlockAndSetHead(ctx, block, receipts, logs, task.state, true)
 			})
 
 			tracing.SetAttributes(
@@ -808,6 +831,12 @@ func (w *worker) resultLoop() {
 			// Broadcast the block and announce chain insertion event
 			w.mux.Post(core.NewMinedBlockEvent{Block: block})
 
+			sealedBlocksCounter.Inc(1)
+
+			if block.Transactions().Len() == 0 {
+				sealedEmptyBlocksCounter.Inc(1)
+			}
+
 			// Insert the block into the set of pending ones to resultLoop for confirmations
 			w.unconfirmed.Insert(block.NumberU64(), block.Hash())
 		case <-w.exitCh:
@@ -918,6 +947,22 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
 	}
 
 	var coalescedLogs []*types.Log
+	initialGasLimit := env.gasPool.Gas()
+	initialTxs := txs.GetTxs()
+
+	var breakCause string
+
+	defer func() {
+		log.OnDebug(func(lg log.Logging) {
+			lg("commitTransactions-stats",
+				"initialTxsCount", initialTxs,
+				"initialGasLimit", initialGasLimit,
+				"resultTxsCount", txs.GetTxs(),
+				"resultGasPool", env.gasPool.Gas(),
+				"exitCause", breakCause)
+		})
+	}()
+
 	for {
 		// In the following three cases, we will interrupt the execution of the transaction.
 		// (1) new head block event arrival, the interrupt signal is 1
@@ -965,7 +1010,14 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
 		// Start executing the transaction
 		env.state.Prepare(tx.Hash(), env.tcount)
 
+		var start time.Time
+
+		log.OnDebug(func(log.Logging) {
+			start = time.Now()
+		})
+
 		logs, err := w.commitTransaction(env, tx)
+
 		switch {
 		case errors.Is(err, core.ErrGasLimitReached):
 			// Pop the current out-of-gas transaction without shifting in the next from the account
@@ -988,6 +1040,10 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
 			env.tcount++
 			txs.Shift()
 
+			log.OnDebug(func(lg log.Logging) {
+				lg("Committed new tx", "tx hash", tx.Hash(), "from", from, "to", tx.To(), "nonce", tx.Nonce(), "gas", tx.Gas(), "gasPrice", tx.GasPrice(), "value", tx.Value(), "time spent", time.Since(start))
+			})
+
 		case errors.Is(err, core.ErrTxTypeNotSupported):
 			// Pop the unsupported transaction without shifting in the next from the account
 			log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
@@ -1077,7 +1133,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	}
 	// Set baseFee and GasLimit if we are on an EIP-1559 chain
 	if w.chainConfig.IsLondon(header.Number) {
-		header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
+		header.BaseFee = misc.CalcBaseFeeUint(w.chainConfig, parent.Header()).ToBig()
 		if !w.chainConfig.IsLondon(parent.Number()) {
 			parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
 			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
@@ -1085,7 +1141,12 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	}
 	// Run the consensus preparation with the default or customized consensus engine.
 	if err := w.engine.Prepare(w.chain, header); err != nil {
-		log.Error("Failed to prepare header for sealing", "err", err)
+		switch err.(type) {
+		case *bor.UnauthorizedSignerError:
+			log.Debug("Failed to prepare header for sealing", "err", err)
+		default:
+			log.Error("Failed to prepare header for sealing", "err", err)
+		}
 		return nil, err
 	}
 	// Could potentially happen if starting to mine in an odd state.
@@ -1117,9 +1178,75 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	return env, nil
 }
 
+func startProfiler(profile string, filepath string, number uint64) (func() error, error) {
+	var (
+		buf bytes.Buffer
+		err error
+	)
+
+	closeFn := func() {}
+
+	switch profile {
+	case "cpu":
+		err = pprof.StartCPUProfile(&buf)
+
+		if err == nil {
+			closeFn = func() {
+				pprof.StopCPUProfile()
+			}
+		}
+	case "trace":
+		err = ptrace.Start(&buf)
+
+		if err == nil {
+			closeFn = func() {
+				ptrace.Stop()
+			}
+		}
+	case "heap":
+		runtime.GC()
+
+		err = pprof.WriteHeapProfile(&buf)
+	default:
+		log.Info("Incorrect profile name")
+	}
+
+	if err != nil {
+		return func() error {
+			closeFn()
+			return nil
+		}, err
+	}
+
+	closeFnNew := func() error {
+		var err error
+
+		closeFn()
+
+		if buf.Len() == 0 {
+			return nil
+		}
+
+		f, err := os.Create(filepath + "/" + profile + "-" + fmt.Sprint(number) + ".prof")
+		if err != nil {
+			return err
+		}
+
+		defer f.Close()
+
+		_, err = f.Write(buf.Bytes())
+
+		return err
+	}
+
+	return closeFnNew, nil
+}
+
 // fillTransactions retrieves the pending transactions from the txpool and fills them
 // into the given sealing block. The transaction selection and ordering strategy can
 // be customized with the plugin in the future.
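A minimal sketch (not part of the patch) of how `startProfiler` is driven, mirroring the loop in `fillTransactions` below. It is a code fragment assumed to live in the miner package; the temp directory and block number are placeholder values:

```go
func profileOnce(number uint64) {
	// Start a CPU profile buffered in memory; the returned function
	// flushes the buffer to <dir>/cpu-<number>.prof when invoked.
	closeFn, err := startProfiler("cpu", os.TempDir(), number)
	if err != nil {
		log.Error("could not start profiler", "err", err)
		return
	}

	// ... run the work being profiled here ...

	if err := closeFn(); err != nil {
		log.Error("could not write profile", "err", err)
	}
}
```

Note the error path: `startProfiler` still returns a usable (no-op wrapping) close function on failure, so callers can defer the close unconditionally.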
+// +//nolint:gocognit func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *environment) { ctx, span := tracing.StartSpan(ctx, "fillTransactions") defer tracing.EndSpan(span) @@ -1134,10 +1261,76 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en remoteTxs map[common.Address]types.Transactions ) - tracing.Exec(ctx, "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) { - pending := w.eth.TxPool().Pending(true) + // TODO: move to config or RPC + const profiling = false + + if profiling { + doneCh := make(chan struct{}) + + defer func() { + close(doneCh) + }() + + go func(number uint64) { + closeFn := func() error { + return nil + } + + for { + select { + case <-time.After(150 * time.Millisecond): + // Check if we've not crossed limit + if attempt := atomic.AddInt32(w.profileCount, 1); attempt >= 10 { + log.Info("Completed profiling", "attempt", attempt) + + return + } + + log.Info("Starting profiling in fill transactions", "number", number) + + dir, err := os.MkdirTemp("", fmt.Sprintf("bor-traces-%s-", time.Now().UTC().Format("2006-01-02-150405Z"))) + if err != nil { + log.Error("Error in profiling", "path", dir, "number", number, "err", err) + return + } + + // grab the cpu profile + closeFnInternal, err := startProfiler("cpu", dir, number) + if err != nil { + log.Error("Error in profiling", "path", dir, "number", number, "err", err) + return + } + + closeFn = func() error { + err := closeFnInternal() + + log.Info("Completed profiling", "path", dir, "number", number, "error", err) + + return nil + } + + case <-doneCh: + err := closeFn() + + if err != nil { + log.Info("closing fillTransactions", "number", number, "error", err) + } + + return + } + } + }(env.header.Number.Uint64()) + } + + tracing.Exec(ctx, "", "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) { + + prePendingTime := time.Now() + + pending := w.eth.TxPool().Pending(ctx, true) remoteTxs = pending + postPendingTime := time.Now() + for _, account := range w.eth.TxPool().Locals() { if txs := remoteTxs[account]; len(txs) > 0 { delete(remoteTxs, account) @@ -1145,6 +1338,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en } } + postLocalsTime := time.Now() + localTxsCount = len(localTxs) remoteTxsCount = len(remoteTxs) @@ -1152,6 +1347,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en span, attribute.Int("len of local txs", localTxsCount), attribute.Int("len of remote txs", remoteTxsCount), + attribute.String("time taken by Pending()", fmt.Sprintf("%v", postPendingTime.Sub(prePendingTime))), + attribute.String("time taken by Locals()", fmt.Sprintf("%v", postLocalsTime.Sub(postPendingTime))), ) }) @@ -1164,8 +1361,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en if localTxsCount > 0 { var txs *types.TransactionsByPriceAndNonce - tracing.Exec(ctx, "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) { - txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) + tracing.Exec(ctx, "", "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) { + txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, cmath.FromBig(env.header.BaseFee)) tracing.SetAttributes( span, @@ -1173,7 +1370,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en ) }) - tracing.Exec(ctx, "worker.LocalCommitTransactions", func(ctx 
context.Context, span trace.Span) { + tracing.Exec(ctx, "", "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) { committed = w.commitTransactions(env, txs, interrupt) }) @@ -1187,8 +1384,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en if remoteTxsCount > 0 { var txs *types.TransactionsByPriceAndNonce - tracing.Exec(ctx, "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) { - txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee) + tracing.Exec(ctx, "", "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) { + txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, cmath.FromBig(env.header.BaseFee)) tracing.SetAttributes( span, @@ -1196,7 +1393,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en ) }) - tracing.Exec(ctx, "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) { + tracing.Exec(ctx, "", "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) { committed = w.commitTransactions(env, txs, interrupt) }) @@ -1237,7 +1434,7 @@ func (w *worker) commitWork(ctx context.Context, interrupt *int32, noempty bool, err error ) - tracing.Exec(ctx, "worker.prepareWork", func(ctx context.Context, span trace.Span) { + tracing.Exec(ctx, "", "worker.prepareWork", func(ctx context.Context, span trace.Span) { // Set the coinbase if the worker is running or it's required var coinbase common.Address if w.isRunning() { diff --git a/miner/worker_test.go b/miner/worker_test.go index 011895c854..ffd44bebfe 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -75,7 +75,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool, isBor bool) { ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: TestBankAddress, @@ -622,7 +622,7 @@ func BenchmarkBorMining(b *testing.B) { ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: TestBankAddress, diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go index 9320dd667a..99c3aac0ea 100644 --- a/p2p/dnsdisc/client_test.go +++ b/p2p/dnsdisc/client_test.go @@ -57,15 +57,19 @@ func TestClientSyncTree(t *testing.T) { c := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)}) stree, err := c.SyncTree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n") + if err != nil { t.Fatal("sync error:", err) } + if !reflect.DeepEqual(sortByID(stree.Nodes()), sortByID(wantNodes)) { t.Errorf("wrong nodes in synced tree:\nhave %v\nwant %v", spew.Sdump(stree.Nodes()), spew.Sdump(wantNodes)) } + if !reflect.DeepEqual(stree.Links(), wantLinks) { t.Errorf("wrong links in synced tree: %v", stree.Links()) } + if stree.Seq() != wantSeq { t.Errorf("synced tree has wrong seq: %d", stree.Seq()) } @@ -295,7 +299,7 @@ func TestIteratorEmptyTree(t *testing.T) { // 
updateSomeNodes applies ENR updates to some of the given nodes. func updateSomeNodes(keySeed int64, nodes []*enode.Node) { - keys := testKeys(nodesSeed1, len(nodes)) + keys := testKeys(keySeed, len(nodes)) for i, n := range nodes[:len(nodes)/2] { r := n.Record() r.Set(enr.IP{127, 0, 0, 1}) @@ -384,10 +388,12 @@ func makeTestTree(domain string, nodes []*enode.Node, links []string) (*Tree, st if err != nil { panic(err) } + url, err := tree.Sign(testKey(signingKeySeed), domain) if err != nil { panic(err) } + return tree, url } diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 5491c784ef..355d07051d 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -86,7 +86,7 @@ gcmode = "archive" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 90df84dc07..0299f59bdc 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -86,7 +86,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index 9e2d80fd2a..36b14cd263 100644 --- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -88,7 +88,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 1e5fd67762..cfba7fb181 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -88,7 +88,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index df0427b322..130226241b 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index bcc8041a77..e8073532af 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 507d4328b2..a5b46bff79 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard 
Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 011dfa8b63..b0d94da338 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index 94ee786237..887713056a 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 96049a56d6..f9fa7635a9 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.4-beta3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index fb9ffd0a17..71d360faf6 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -86,7 +86,7 @@ gcmode = "archive" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 9884c0eccc..124b34f09c 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -86,7 +86,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index 49c47fedd4..bfebe422ca 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -88,7 +88,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index 2fb83a6ae2..2f7710d0d8 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -88,7 +88,7 @@ syncmode = "full" # vhosts = ["*"] # corsdomain = ["*"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/params/version.go b/params/version.go index 46fcbb6e1e..affdc6b5eb 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 4 // Patch version component of the current release - VersionMeta = 
"beta3" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 8 // Patch version component of the current release + VersionMeta = "beta" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/rpc/http.go b/rpc/http.go index 18404c060a..09594d0280 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -104,7 +104,7 @@ type HTTPTimeouts struct { // DefaultHTTPTimeouts represents the default timeout values used if further // configuration is not provided. var DefaultHTTPTimeouts = HTTPTimeouts{ - ReadTimeout: 30 * time.Second, + ReadTimeout: 10 * time.Second, WriteTimeout: 30 * time.Second, IdleTimeout: 120 * time.Second, } diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 2dc20a915e..e6e8188ce0 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -392,12 +392,18 @@ func TestInsertingSpanSizeBlocks(t *testing.T) { currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)} + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + // Insert sprintSize # of blocks so that span is fetched at the start of a new sprint for i := uint64(1); i <= spanSize; i++ { block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators) insertNewBlock(t, chain, block) } + spanner = getMockedSpanner(t, currentSpan.ValidatorSet.Validators) + _bor.SetSpanner(spanner) + validators, err := _bor.GetCurrentValidators(context.Background(), block.Hash(), spanSize) // check validator set at the first block of new span if err != nil { t.Fatalf("%s", err) @@ -427,6 +433,9 @@ func TestFetchStateSyncEvents(t *testing.T) { currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)} + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + // Insert sprintSize # of blocks so that span is fetched at the start of a new sprint for i := uint64(1); i < sprintSize; i++ { if IsSpanEnd(i) { @@ -528,6 +537,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) { currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)} } + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators) insertNewBlock(t, chain, block) } @@ -554,6 +566,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) { currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)} } + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) insertNewBlock(t, chain, block) } @@ -580,6 +595,8 @@ func TestOutOfTurnSigning(t *testing.T) { h.EXPECT().Close().AnyTimes() + spanner := getMockedSpanner(t, heimdallSpan.ValidatorSet.Validators) + _bor.SetSpanner(spanner) _bor.SetHeimdallClient(h) db := init.ethereum.ChainDb() @@ -1082,6 +1099,9 @@ func TestJaipurFork(t *testing.T) { res, _ := loadSpanFromFile(t) + spanner := getMockedSpanner(t, res.Result.ValidatorSet.Validators) + _bor.SetSpanner(spanner) + for i := uint64(1); i < sprintSize; i++ { block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) insertNewBlock(t, chain, block) diff --git a/tests/bor/helper.go b/tests/bor/helper.go index e28076a3b1..c4b45f970d 100644 
--- a/tests/bor/helper.go +++ b/tests/bor/helper.go @@ -352,6 +352,17 @@ func getMockedHeimdallClient(t *testing.T, heimdallSpan *span.HeimdallSpan) (*mo return h, ctrl } +func getMockedSpanner(t *testing.T, validators []*valset.Validator) *bor.MockSpanner { + t.Helper() + + spanner := bor.NewMockSpanner(gomock.NewController(t)) + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes() + spanner.EXPECT().GetCurrentValidatorsByBlockNrOrHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes() + spanner.EXPECT().GetCurrentSpan(gomock.Any(), gomock.Any()).Return(&span.Span{0, 0, 0}, nil).AnyTimes() + spanner.EXPECT().CommitSpan(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + return spanner +} + func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) []*clerk.EventRecordWithTime { events := make([]*clerk.EventRecordWithTime, count) event := *sample diff --git a/tests/init_test.go b/tests/init_test.go index 1c6841e030..5e32f20abf 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -141,9 +141,6 @@ func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) { isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows" for _, re := range tm.slowpat { if re.MatchString(name) { - if testing.Short() { - return "skipped in -short mode", false - } if isWin32 { return "skipped on 32bit windows", false } From 977fe0a185b3b5923186d1dfb91fbbd50a5892ca Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Thu, 6 Apr 2023 18:30:09 +0530 Subject: [PATCH 55/56] core: remove duplicate tests --- core/tx_pool_test.go | 252 ------------------------------------------- 1 file changed, 252 deletions(-) diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 319848028a..38d19f4caf 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -3257,258 +3257,6 @@ type testTx struct { const localIdx = 0 -func BenchmarkPoolAccountMultiBatchInsertNoLockRace(b *testing.B) { - // Generate a batch of transactions to enqueue into the pool - pendingAddedCh := make(chan struct{}, 1024) - - pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) - defer pool.Stop() - - _ = localKey - - batches := make(types.Transactions, b.N) - - for i := 0; i < b.N; i++ { - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - tx := transaction(uint64(0), 100000, key) - - pool.currentState.AddBalance(account, big.NewInt(1000000)) - - batches[i] = tx - } - - done := make(chan struct{}) - - go func() { - t := time.NewTicker(time.Microsecond) - defer t.Stop() - - var pending map[common.Address]types.Transactions - - for range t.C { - pending = pool.Pending(context.Background(), true) - - if len(pending) >= b.N/2 { - close(done) - - return - } - } - }() - - b.ReportAllocs() - b.ResetTimer() - - for _, tx := range batches { - pool.AddRemotes([]*types.Transaction{tx}) - } - - <-done -} - -func BenchmarkPoolAccountsBatchInsert(b *testing.B) { - // Generate a batch of transactions to enqueue into the pool - pool, _ := setupTxPool() - defer pool.Stop() - - batches := make(types.Transactions, b.N) - - for i := 0; i < b.N; i++ { - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - - pool.currentState.AddBalance(account, big.NewInt(1000000)) - - tx := transaction(uint64(0), 100000, key) - - batches[i] = tx - } - - // Benchmark 
importing the transactions into the queue - b.ReportAllocs() - b.ResetTimer() - - for _, tx := range batches { - _ = pool.AddRemoteSync(tx) - } -} - -func BenchmarkPoolAccountsBatchInsertRace(b *testing.B) { - // Generate a batch of transactions to enqueue into the pool - pool, _ := setupTxPool() - defer pool.Stop() - - batches := make(types.Transactions, b.N) - - for i := 0; i < b.N; i++ { - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - tx := transaction(uint64(0), 100000, key) - - pool.currentState.AddBalance(account, big.NewInt(1000000)) - - batches[i] = tx - } - - done := make(chan struct{}) - - go func() { - t := time.NewTicker(time.Microsecond) - defer t.Stop() - - var pending map[common.Address]types.Transactions - - loop: - for { - select { - case <-t.C: - pending = pool.Pending(context.Background(), true) - case <-done: - break loop - } - } - - fmt.Fprint(io.Discard, pending) - }() - - b.ReportAllocs() - b.ResetTimer() - - for _, tx := range batches { - _ = pool.AddRemoteSync(tx) - } - - close(done) -} - -func BenchmarkPoolAccountsBatchInsertNoLockRace(b *testing.B) { - // Generate a batch of transactions to enqueue into the pool - pendingAddedCh := make(chan struct{}, 1024) - - pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) - defer pool.Stop() - - _ = localKey - - batches := make(types.Transactions, b.N) - - for i := 0; i < b.N; i++ { - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - tx := transaction(uint64(0), 100000, key) - - pool.currentState.AddBalance(account, big.NewInt(1000000)) - - batches[i] = tx - } - - done := make(chan struct{}) - - go func() { - t := time.NewTicker(time.Microsecond) - defer t.Stop() - - var pending map[common.Address]types.Transactions - - for range t.C { - pending = pool.Pending(context.Background(), true) - - if len(pending) >= b.N/2 { - close(done) - - return - } - } - }() - - b.ReportAllocs() - b.ResetTimer() - - for _, tx := range batches { - _ = pool.AddRemote(tx) - } - - <-done -} - -func TestPoolMultiAccountBatchInsertRace(t *testing.T) { - t.Parallel() - - // Generate a batch of transactions to enqueue into the pool - pool, _ := setupTxPool() - defer pool.Stop() - - const n = 5000 - - batches := make(types.Transactions, n) - batchesSecond := make(types.Transactions, n) - - for i := 0; i < n; i++ { - batches[i] = newTxs(pool) - batchesSecond[i] = newTxs(pool) - } - - done := make(chan struct{}) - - go func() { - t := time.NewTicker(time.Microsecond) - defer t.Stop() - - var ( - pending map[common.Address]types.Transactions - total int - ) - - for range t.C { - pending = pool.Pending(context.Background(), true) - total = len(pending) - - _ = pool.Locals() - - if total >= n { - close(done) - - return - } - } - }() - - for _, tx := range batches { - pool.AddRemotesSync([]*types.Transaction{tx}) - } - - for _, tx := range batchesSecond { - pool.AddRemotes([]*types.Transaction{tx}) - } - - <-done -} - -func newTxs(pool *TxPool) *types.Transaction { - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - tx := transaction(uint64(0), 100000, key) - - pool.currentState.AddBalance(account, big.NewInt(1_000_000_000)) - - return tx -} - -type acc struct { - nonce uint64 - key *ecdsa.PrivateKey - account common.Address -} - -type testTx struct { - tx *types.Transaction - idx int - isLocal bool -} - -const localIdx = 0 - func getTransactionGen(t *rapid.T, keys 
[]*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx { idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int) From 303eda5fdee66e67b5f8ad8a7eb9f89f2a60c6ed Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Thu, 6 Apr 2023 18:30:28 +0530 Subject: [PATCH 56/56] miner: use get validators by hash in tests --- miner/worker_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/worker_test.go b/miner/worker_test.go index 5ff3830773..24577a4fb2 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -163,7 +163,7 @@ func getFakeBorFromConfig(t *testing.T, chainConfig *params.ChainConfig) (consen ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: TestBankAddress,
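For reference, a minimal sketch (not part of the patch series) of how the renamed expectation is wired up with gomock in these tests; the single validator literal below is a placeholder value:

```go
ctrl := gomock.NewController(t)

spanner := bor.NewMockSpanner(ctrl)
// Tests now stub GetCurrentValidatorsByHash, matching the renamed
// Spanner method, instead of the old GetCurrentValidators.
spanner.EXPECT().
	GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).
	Return([]*valset.Validator{
		{
			ID:          0,
			Address:     common.Address{0x1},
			VotingPower: 100,
		},
	}, nil).
	AnyTimes()
```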