diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index bd3e83c3a3..0000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,729 +0,0 @@ -# Disclaimer: -# * Unless otherwise specified, assume `resource_class` and `parallelism` values are cherry-picked values that provided a reasonable enough build-duration-to-cost tradeoff at the time of choosing. -# * There's too many variables (architecture types, CircleCI concurrency limitations, parallel pipeline runs, source code changes) to feel confident we've found a best-fit configuration. - -version: 2.1 - -orbs: - win: circleci/windows@5.0.0 - go: circleci/go@1.7.3 - slack: circleci/slack@4.12.5 - -parameters: - ubuntu_image: - type: string - default: "ubuntu-2004:2023.04.2" - build_dir: - type: string - default: "/opt/cibuild" - result_path: - type: string - default: "/tmp/build_test_results_<< pipeline.id >>" - valid_nightly_branch: - type: string - default: /hotfix\/.*/ - # The following is intentional - hardcoding a token for public repos - # is recommended here to allow fork access - codecov: - type: string - default: "8b4a1f91-f154-4c26-b84c-c9aaa90159c6" - -executors: - amd64_medium: - machine: - image: << pipeline.parameters.ubuntu_image >> - resource_class: medium - amd64_large: - machine: - image: << pipeline.parameters.ubuntu_image >> - resource_class: large - arm64_medium: - machine: - image: << pipeline.parameters.ubuntu_image >> - resource_class: arm.medium - arm64_large: - machine: - image: << pipeline.parameters.ubuntu_image >> - resource_class: arm.large - mac_arm64_medium: - macos: - xcode: 14.3.1 - resource_class: macos.m1.medium.gen1 - mac_arm64_large: - macos: - xcode: 14.3.1 - resource_class: macos.m1.large.gen1 - -slack-fail-stop-step: &slack-fail-post-step - post-steps: - - slack/notify: - branch_pattern: "master,rel/beta,rel/nightly,rel/stable" - event: fail - template: basic_fail_1 - -# ===== Workflow Definitions ===== -workflows: - version: 2 - nightly_build_and_test: - jobs: - - build_nightly: - name: << matrix.platform >>_build_nightly - matrix: &matrix-nightly - parameters: - platform: ["amd64", "arm64", "mac_arm64"] - filters: &filters-nightly - branches: - only: - - /rel\/.*/ - - << pipeline.parameters.valid_nightly_branch >> - context: slack-secrets - <<: *slack-fail-post-step - - - test_nightly: - name: << matrix.platform >>_test_nightly - matrix: - <<: *matrix-nightly - requires: - - << matrix.platform >>_build_nightly - context: slack-secrets - <<: *slack-fail-post-step - - - integration_nightly: - name: << matrix.platform >>_integration_nightly - matrix: - <<: *matrix-nightly - requires: - - << matrix.platform >>_build_nightly - context: slack-secrets - <<: *slack-fail-post-step - - - e2e_expect_nightly: - name: << matrix.platform >>_e2e_expect_nightly - matrix: - <<: *matrix-nightly - requires: - - << matrix.platform >>_build_nightly - context: slack-secrets - <<: *slack-fail-post-step - - - e2e_subs_nightly: - name: << matrix.platform >>_e2e_subs_nightly - matrix: - <<: *matrix-nightly - requires: - - << matrix.platform >>_build_nightly - context: - - slack-secrets - - aws-secrets - <<: *slack-fail-post-step - - - tests_verification_job_nightly: - name: << matrix.platform >>_<< matrix.job_type >>_verification - matrix: - parameters: - platform: ["amd64", "arm64", "mac_arm64"] - job_type: ["test_nightly", "integration_nightly", "e2e_expect_nightly"] - requires: - - << matrix.platform >>_<< matrix.job_type >> - context: slack-secrets - <<: *slack-fail-post-step - - - 
upload_binaries: - name: << matrix.platform >>_upload_binaries - matrix: - <<: *matrix-nightly - requires: - - << matrix.platform >>_test_nightly_verification - - << matrix.platform >>_integration_nightly_verification - - << matrix.platform >>_e2e_expect_nightly_verification - - << matrix.platform >>_e2e_subs_nightly - context: - - slack-secrets - - aws-secrets - <<: *slack-fail-post-step - - "circleci_build_and_test": - jobs: - - test: - name: << matrix.platform >>_test - matrix: &matrix-default - parameters: - platform: ["amd64"] - filters: &filters-default - branches: - ignore: - - /rel\/.*/ - - << pipeline.parameters.valid_nightly_branch >> - context: slack-secrets - <<: *slack-fail-post-step - - - integration: - name: << matrix.platform >>_integration - matrix: - <<: *matrix-default - filters: - <<: *filters-default - context: slack-secrets - <<: *slack-fail-post-step - - - e2e_expect: - name: << matrix.platform >>_e2e_expect - matrix: - <<: *matrix-default - filters: - <<: *filters-default - context: slack-secrets - <<: *slack-fail-post-step - - - e2e_subs: - name: << matrix.platform >>_e2e_subs - matrix: - <<: *matrix-default - filters: - <<: *filters-default - context: slack-secrets - <<: *slack-fail-post-step - - - tests_verification_job: - name: << matrix.platform >>_<< matrix.job_type >>_verification - matrix: - parameters: - platform: ["amd64"] - job_type: ["test", "integration", "e2e_expect"] - requires: - - << matrix.platform >>_<< matrix.job_type >> - context: slack-secrets - <<: *slack-fail-post-step - -# ===== Job Definitions ===== -jobs: - build_nightly: - description: "Persists build artifacts to workspace in order to support `upload_binaries`." - parameters: - platform: - type: string - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - executor: << parameters.platform >>_medium - working_directory: << pipeline.parameters.build_dir >>/project - steps: - - generic_build - - persist_to_workspace: - root: << parameters.build_dir >> - paths: - - project - - go - - gimme - - .gimme - - test: - parameters: - platform: - type: string - executor: << parameters.platform >>_medium - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 32 - environment: - CODECOV_TOKEN: << pipeline.parameters.codecov >> - steps: - - generic_build - - generic_test: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_test - short_test_flag: "-short" - - upload_coverage - - test_nightly: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 4 - environment: - CODECOV_TOKEN: << pipeline.parameters.codecov >> - steps: - - generic_build - - generic_test: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_test_nightly - no_output_timeout: 45m - - upload_coverage - - integration: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 16 - environment: - E2E_TEST_FILTER: "GO" - steps: - - generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_integration - short_test_flag: "-short" - - integration_nightly: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 4 - environment: - E2E_TEST_FILTER: "GO" - steps: - 
- generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_integration_nightly - no_output_timeout: 45m - - e2e_expect: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 10 - environment: - E2E_TEST_FILTER: "EXPECT" - steps: - - generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_e2e_expect - short_test_flag: "-short" - - e2e_expect_nightly: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - parallelism: 2 - environment: - E2E_TEST_FILTER: "EXPECT" - steps: - - generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform>>_e2e_expect_nightly - no_output_timeout: 45m - - e2e_subs: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - environment: - E2E_TEST_FILTER: "SCRIPTS" - steps: - - generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_e2e_subs - short_test_flag: "-short" - - e2e_subs_nightly: - parameters: - platform: - type: string - executor: << parameters.platform >>_large - working_directory: << pipeline.parameters.build_dir >>/project - environment: - E2E_TEST_FILTER: "SCRIPTS" - CI_PLATFORM: << parameters.platform >> - # This platform is arbitrary, basically we just want to keep temps for - # one of the platforms in the matrix. - CI_KEEP_TEMP_PLATFORM: "amd64" - steps: - - generic_build - - generic_integration: - platform: << parameters.platform >> - result_subdir: << parameters.platform >>_e2e_subs_nightly - no_output_timeout: 45m - - windows_x64_build: - executor: - name: win/default - size: large - steps: - - checkout - - prepare_windows - - run: - no_output_timeout: 45m - command: | - # export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g') - export GOPATH="/home/circleci/go" - export ALGORAND_DEADLOCK=enable - export SKIP_GO_INSTALLATION=True - export PATH=/mingw64/bin:/C/tools/msys64/mingw64/bin:/usr/bin:$PATH - export MAKE=mingw32-make - $msys2 scripts/travis/build_test.sh - shell: bash.exe - - tests_verification_job: - docker: - - image: python:3.9.6-alpine - resource_class: small - working_directory: << pipeline.parameters.build_dir >>/project - parameters: - platform: - type: string - job_type: - type: string - steps: - - checkout - - tests_verification_command: - result_subdir: << parameters.platform >>_<< parameters.job_type >> - - tests_verification_job_nightly: - docker: - - image: python:3.9.6-alpine - resource_class: small - working_directory: << pipeline.parameters.build_dir >>/project - parameters: - platform: - type: string - job_type: - type: string - steps: - - checkout - - tests_verification_command: - result_subdir: << parameters.platform >>_<< parameters.job_type >> - - upload_binaries: - working_directory: << pipeline.parameters.build_dir >>/project - parameters: - platform: - type: string - executor: << parameters.platform >>_medium - steps: - - prepare_build_dir - - prepare_go - - upload_binaries_command: - platform: << parameters.platform >> - -# ===== Command Definitions ===== -commands: - prepare_go: - description: Clean out existing 
Go so we can use our preferred version - steps: - - run: | - sudo rm -rf ${HOME}/.go_workspace /usr/local/go - - prepare_build_dir: - description: Set up build directory - parameters: - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - steps: - - run: - working_directory: /tmp - command: | - sudo rm -rf ${HOME}/node_pkg/* - sudo rm -rf << parameters.build_dir >> - sudo mkdir -p << parameters.build_dir >> - sudo chown -R $USER:$GROUP << parameters.build_dir >> - - prepare_windows: - description: Prepare windows image - steps: - - run: - name: install deps - shell: bash.exe - command: | - choco install -y msys2 pacman make wget --force - choco install -y golang --version=$(./scripts/get_golang_version.sh) --force - choco install -y python3 --version=3.7.3 --force - export msys2='cmd //C RefreshEnv.cmd ' - export msys2+='& set MSYS=winsymlinks:nativestrict ' - export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start' - export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --" - export msys2+=" -msys2 -c "\"\$@"\" --" - $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain mingw-w64-x86_64-libtool unzip autoconf automake - - generic_build: - description: > - Run basic build. - - If command execution time increases _appreciably_, revisit CI topology: - * Historically, the command executes _quickly_ (< 3m with resource class >= medium). - * Consequently, it's faster to embed the command in a combined build + test workflow rather than independent build and test workflows. - parameters: - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - steps: - - prepare_build_dir - - checkout - - prepare_go - - restore_libsodium - - restore_go_caches - - run: - name: scripts/travis/build.sh --make_debug - command: | - export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g') - export GOPATH="<< parameters.build_dir >>/go" - export ALGORAND_DEADLOCK=enable - export GIMME_INSTALL_DIR=<< parameters.build_dir >> - export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs - export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions - scripts/travis/build.sh --make_debug - - cache_libsodium - - save_go_caches - - save_go_caches: - description: Cache Go source and build caches - parameters: - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - steps: - - save_cache: - name: Saving Go mod source cache - key: go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }} - paths: - - << parameters.build_dir >>/go/pkg/mod - - save_cache: - name: Saving Go build cache - key: go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }} - paths: - - tmp/go-cache - - restore_go_caches: - description: Restore Go source and build caches - steps: - - restore_cache: - name: Restoring Go mod source cache - keys: - - go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }} - - go-mod-v5-{{ .Branch }}- - - go-mod-v5-master- - - restore_cache: - name: Restoring Go build cache - keys: - - go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }} - - go-cache-v5-{{ arch }}-{{ .Branch }}- - - go-cache-v5-{{ arch }}-master- - - cache_libsodium: - description: Cache libsodium for build - steps: - - run: - name: Get libsodium md5 - command: | - mkdir -p tmp - find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5 - - save_cache: - name: Save cached libsodium build - key: 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}' - paths: - - crypto/libs - 
- restore_libsodium: - description: Restore libsodium for build - steps: - - run: - name: Get libsodium md5 - command: | - mkdir -p tmp - find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5 - - restore_cache: - name: Restore cached libsodium build - keys: - - 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}' - - generic_test: - description: Run build tests from build workspace, for re-use by diferent architectures - parameters: - platform: - type: string - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - result_subdir: - type: string - no_output_timeout: - type: string - default: 30m - short_test_flag: - type: string - default: "" - result_path: - type: string - default: << pipeline.parameters.result_path >> - steps: - - run: | - mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX} - touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml - touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json - - run: - name: Run build tests - no_output_timeout: << parameters.no_output_timeout >> - command: | - set -e - set -x - export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g') - export KMD_NOUSB=True - export GOPATH="<< parameters.build_dir >>/go" - export PATH="${PATH}:${GOPATH}/bin" - export ALGORAND_DEADLOCK=enable - export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs - export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions - GOLANG_VERSION=$(./scripts/get_golang_version.sh) - eval "$(<< parameters.build_dir >>/gimme "${GOLANG_VERSION}")" - scripts/configure_dev.sh - scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" - PACKAGES="$(go list ./... 
| grep -v /go-algorand/test/)" - export PACKAGE_NAMES=$(echo $PACKAGES | tr -d '\n') - export PARTITION_TOTAL=${CIRCLE_NODE_TOTAL} - export PARTITION_ID=${CIRCLE_NODE_INDEX} - gotestsum --format standard-verbose --junitfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml --jsonfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json -- --tags "sqlite_unlock_notify sqlite_omit_load_extension" << parameters.short_test_flag >> -race -timeout 1h -coverprofile=coverage.txt -covermode=atomic -p 1 $PACKAGE_NAMES - - store_artifacts: - path: << parameters.result_path >> - destination: test-results - - store_test_results: - path: << parameters.result_path >> - - persist_to_workspace: - root: << parameters.result_path >> - paths: - - << parameters.result_subdir >> - - upload_coverage: - description: Collect coverage reports and upload them - steps: - - run: - name: Upload Coverage Reports - no_output_timeout: 10m - command: | - scripts/travis/upload_coverage.sh || true - - generic_integration: - description: Run integration tests from build workspace, for re-use by diferent architectures - parameters: - platform: - type: string - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - result_subdir: - type: string - no_output_timeout: - type: string - default: 30m - short_test_flag: - type: string - default: "" - result_path: - type: string - default: << pipeline.parameters.result_path >> - steps: - - run: | - mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX} - touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml - touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json - - run: - name: Run integration tests - no_output_timeout: << parameters.no_output_timeout >> - command: | - set -x - export CI_E2E_FILENAME="${CIRCLE_BRANCH/\//-}" - export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g') - export KMD_NOUSB=True - export GOPATH="<< parameters.build_dir >>/go" - export PATH="${PATH}:${GOPATH}/bin" - export ALGORAND_DEADLOCK=enable - export BUILD_TYPE=integration - export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs - export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions - GOLANG_VERSION=$(./scripts/get_golang_version.sh) - eval "$(<< parameters.build_dir >>/gimme "${GOLANG_VERSION}")" - scripts/configure_dev.sh - scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" - export ALGOTEST=1 - export SHORTTEST=<< parameters.short_test_flag >> - export TEST_RESULTS=<< parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX} - export PARTITION_TOTAL=${CIRCLE_NODE_TOTAL} - export PARTITION_ID=${CIRCLE_NODE_INDEX} - export PARALLEL_FLAG="-p 1" - test/scripts/run_integration_tests.sh - - - store_artifacts: - path: << parameters.result_path >> - destination: test-results - - store_test_results: - path: << parameters.result_path >> - - persist_to_workspace: - root: << parameters.result_path >> - paths: - - << parameters.result_subdir >> - - tests_verification_command: - description: Check if all tests were run at least once and only once across all parallel runs - parameters: - result_path: - type: string - default: << pipeline.parameters.result_path >> - result_subdir: - type: string - steps: - - attach_workspace: - at: << parameters.result_path >> - - run: - 
name: Check if all tests were run - # Add to --ignored-tests when a test should _not_ be considered. - # * For example, E2E expect test runners (e.g. `TestAlgodWithExpect`) - # produce partitioned subtests. - # * The parent tests are deliberately _not_ partitioned. By ignoring - # these tests, `check_tests.py` won't provide conflicting advice to - # partition the parent tests. - command: | - cat << parameters.result_path >>/<< parameters.result_subdir >>/**/testresults.json > << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json - python3 scripts/buildtools/check_tests.py \ - --tests-results-filepath << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json \ - --ignored-tests \ - TestAlgodWithExpect \ - TestAlgohWithExpect \ - TestGoalWithExpect \ - TestTealdbgWithExpect - - upload_binaries_command: - description: save build artifacts for potential deployments - parameters: - platform: - type: string - build_dir: - type: string - default: << pipeline.parameters.build_dir >> - steps: - - attach_workspace: - at: << parameters.build_dir >> - - run: - name: Upload Binaries << parameters.platform >> - command: | - if [ "${CIRCLE_BRANCH}" = "rel/nightly" ] - then - export NIGHTLY_BUILD="true" - fi - export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g') - export GOPATH="<< parameters.build_dir >>/go" - export TRAVIS_BRANCH=${CIRCLE_BRANCH} - scripts/travis/deploy_packages.sh - no_output_timeout: 20m - - when: - condition: - equal: [ "amd64", << parameters.platform >> ] - steps: - - run: - name: test_release.sh - command: | - export TRAVIS_BRANCH=${CIRCLE_BRANCH} - scripts/travis/test_release.sh diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml new file mode 100644 index 0000000000..299c0b29ee --- /dev/null +++ b/.github/workflows/ci-nightly.yml @@ -0,0 +1,494 @@ +name: Nightly Tests +on: + push: + branches: + - master + - 'rel/**' + workflow_dispatch: + inputs: + branch: + description: 'Branch to run tests on' + required: true + default: 'master' + type: string + +env: + CODECOV_TOKEN: "8b4a1f91-f154-4c26-b84c-c9aaa90159c6" # Same public token from CircleCI config + ALGORAND_DEADLOCK: enable + KMD_NOUSB: True + BUILD_TYPE: integration + ALGOTEST: 1 + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + +concurrency: + group: nightly-${{ github.ref }} + cancel-in-progress: true + +permissions: + id-token: write + contents: read + pull-requests: read + +jobs: + build: + strategy: + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + runs-on: ${{ matrix.platform }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.branch || github.ref }} + fetch-depth: 0 + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Cache libsodium + uses: actions/cache@v4 + with: + path: crypto/libs + key: libsodium-${{ matrix.platform }}-${{ hashFiles('crypto/libsodium-fork/**') }} + - name: Build + run: | + scripts/travis/build.sh --make_debug + - name: Create workspace archive + run: | + tar -czf /tmp/workspace-${{ matrix.platform }}.tar.gz . 
+ shell: bash + - name: Upload workspace archive + uses: actions/upload-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/workspace-${{ matrix.platform }}.tar.gz + retention-days: 1 + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Build Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Build Failure in Nightly Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + + test_nightly: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + partition_id: [0, 1] # set PARTITION_TOTAL below to match + runs-on: ${{ matrix.platform }} + env: + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 2 + CIRCLECI: true + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run tests + run: | + ./scripts/configure_dev.sh + ./scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + PACKAGES="$(go list ./... 
| grep -v /go-algorand/test/)" + export PACKAGE_NAMES=$(echo $PACKAGES | tr -d '\n') + mkdir -p ~/test_results/${{ matrix.platform }}_test_nightly/${PARTITION_ID} + gotestsum --format standard-verbose \ + --junitfile ~/test_results/${{ matrix.platform }}_test_nightly/${PARTITION_ID}/results.xml \ + --jsonfile ~/test_results/${{ matrix.platform }}_test_nightly/${PARTITION_ID}/testresults.json \ + -- --tags "sqlite_unlock_notify sqlite_omit_load_extension" \ + -race -timeout 1h -coverprofile=coverage.txt -covermode=atomic -p 1 \ + $PACKAGE_NAMES + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Test Failure in Nightly Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: test-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload coverage + # Only upload coverage from ubuntu-24.04 platform + if: ${{ matrix.platform == 'ubuntu-24.04' && !cancelled() }} + uses: codecov/codecov-action@v4 + env: + GITHUB_ACTIONS: True + CIRCLECI: "" + with: + token: ${{ env.CODECOV_TOKEN }} + file: ./coverage.txt + fail_ci_if_error: false + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_test_nightly/${{ matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} + fail_ci_if_error: false + + integration_nightly: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + partition_id: [0, 1] # set PARTITION_TOTAL below to match + runs-on: ${{ matrix.platform }} + env: + CIRCLECI: true + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 2 + E2E_TEST_FILTER: GO + PARALLEL_FLAG: "-p 4" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run integration tests + run: | + ./scripts/configure_dev.sh + ./scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_integration_nightly/${PARTITION_ID} + TEST_RESULTS=~/test_results/${{ matrix.platform }}_integration_nightly/${PARTITION_ID} \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} 
webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Integration Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Integration Test Failure in Nightly Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_integration_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: integration-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_integration_nightly/${{ matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} + fail_ci_if_error: false + + e2e_expect_nightly: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + partition_id: [0, 1] + runs-on: ${{ matrix.platform }} + env: + CIRCLECI: true + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 2 + E2E_TEST_FILTER: EXPECT + PARALLEL_FLAG: "-p 4" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run E2E expect tests + run: | + scripts/configure_dev.sh + scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_e2e_expect_nightly/${PARTITION_ID} + TEST_RESULTS=~/test_results/${{ matrix.platform }}_e2e_expect_nightly/${PARTITION_ID} \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Expect Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Expect Test Failure in Nightly Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_e2e_expect_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: e2e_expect-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_e2e_expect_nightly/${{ 
matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} + fail_ci_if_error: false + + e2e_subs_nightly: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + runs-on: ${{ matrix.platform }} + env: + E2E_TEST_FILTER: SCRIPTS + CI_PLATFORM: ${{ matrix.platform }} + CI_KEEP_TEMP_PLATFORM: "ubuntu-24.04" + S3_TESTDATA: ${{ secrets.S3_TESTDATA }} + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4.2.1 + with: + role-to-assume: ${{ secrets.AWS_ROLE }} + role-session-name: github-actions + aws-region: ${{ secrets.AWS_REGION }} + - name: Run E2E subs tests + run: | + scripts/configure_dev.sh + scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_e2e_subs_nightly + TEST_RESULTS=~/test_results/${{ matrix.platform }}_e2e_subs_nightly \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Subs Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Subs Test Failure in Nightly Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Failed Step: `${{ steps.run_e2e_expect_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: e2e_subs-results-${{ matrix.platform }}-${{ github.run_id }} + path: ~/test_results + retention-days: 7 + + verify_nightly: + needs: [test_nightly, integration_nightly, e2e_expect_nightly] + strategy: + fail-fast: false + matrix: + test_type: ["test", "integration", "e2e_expect"] + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + runs-on: ${{ matrix.platform }} + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + pattern: ${{ matrix.test_type }}-results-${{ matrix.platform }}-${{ github.run_id }}-* + path: ~/test_results + merge-multiple: true + - name: Check test execution + run: | + cat ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}_nightly/**/testresults.json > ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}_nightly/combined_testresults.json + python3 scripts/buildtools/check_tests.py \ + --tests-results-filepath ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}_nightly/combined_testresults.json \ + --ignored-tests \ + TestAlgodWithExpect \ + TestAlgohWithExpect \ + TestGoalWithExpect \ + TestTealdbgWithExpect + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | 
+ { + "text": "🚨 Verify Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Verify Failure in PR Build*\n\n• Job: `upload`\n• Branch: `${{ github.ref_name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + + upload: + needs: [verify_nightly, e2e_subs_nightly] + if: github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/rel/') + strategy: + matrix: + platform: ["ubuntu-24.04", "ubuntu-24.04-arm", "macos-14"] + runs-on: ${{ matrix.platform }} + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4.2.1 + with: + role-to-assume: ${{ secrets.AWS_ROLE }} + role-session-name: github-actions + aws-region: ${{ secrets.AWS_REGION }} + - name: Upload Binaries + env: + AWS_REGION: ${{ secrets.AWS_REGION }} + S3_REGION: ${{ secrets.AWS_REGION }} + S3_RELEASE_BUCKET: ${{ secrets.S3_RELEASE_BUCKET }} + timeout-minutes: 20 + run: | + if [[ "${{ github.ref }}" == "refs/heads/rel/nightly" ]]; then + export NIGHTLY_BUILD="true" + fi + export TRAVIS_BRANCH="${{ github.ref_name }}" + scripts/travis/deploy_packages.sh + shell: bash + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Upload Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Upload Failure in Nightly Build*\n\n• Job: `upload`\n• Branch: `${{ github.ref_name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml new file mode 100644 index 0000000000..df6161d5ad --- /dev/null +++ b/.github/workflows/ci-pr.yml @@ -0,0 +1,425 @@ +name: PR Tests +on: + pull_request: + branches: + - master + - 'rel/**' + +env: + CODECOV_TOKEN: "8b4a1f91-f154-4c26-b84c-c9aaa90159c6" # Same public token from CircleCI config + ALGORAND_DEADLOCK: enable + KMD_NOUSB: True + BUILD_TYPE: integration + ALGOTEST: 1 + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + +concurrency: + group: pr-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: + pull-requests: read + contents: read + +jobs: + build: + strategy: + matrix: + platform: ["ubuntu-24.04"] + runs-on: ${{ matrix.platform }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Cache libsodium + uses: actions/cache@v4 + with: + path: crypto/libs + key: libsodium-${{ matrix.platform }}-${{ hashFiles('crypto/libsodium-fork/**') }} + - name: Build + run: | + scripts/travis/build.sh --make_debug + - name: Create workspace archive + run: | + tar -czf /tmp/workspace-${{ matrix.platform }}.tar.gz . 
+ shell: bash + - name: Upload workspace archive + uses: actions/upload-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/workspace-${{ matrix.platform }}.tar.gz + retention-days: 1 + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Build Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Build Failure in PR Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + + test: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04"] + partition_id: [0, 1, 2, 3] # set PARTITION_TOTAL below to match + runs-on: ${{ matrix.platform }} + env: + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 4 + CIRCLECI: true + SHORTTEST: "-short" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run tests + run: | + ./scripts/configure_dev.sh + ./scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + PACKAGES="$(go list ./... 
| grep -v /go-algorand/test/)" + export PACKAGE_NAMES=$(echo $PACKAGES | tr -d '\n') + mkdir -p ~/test_results/${{ matrix.platform }}_test/${PARTITION_ID} + gotestsum --format standard-verbose \ + --junitfile ~/test_results/${{ matrix.platform }}_test/${PARTITION_ID}/results.xml \ + --jsonfile ~/test_results/${{ matrix.platform }}_test/${PARTITION_ID}/testresults.json \ + -- --tags "sqlite_unlock_notify sqlite_omit_load_extension" $SHORTTEST \ + -race -timeout 1h -coverprofile=coverage.txt -covermode=atomic -p 4 \ + $PACKAGE_NAMES + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Test Failure in PR Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: test-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload coverage + # Only upload coverage from ubuntu-24.04 platform + if: ${{ matrix.platform == 'ubuntu-24.04' && !cancelled() }} + uses: codecov/codecov-action@v4 + env: + GITHUB_ACTIONS: True + CIRCLECI: "" + with: + token: ${{ env.CODECOV_TOKEN }} + file: ./coverage.txt + fail_ci_if_error: false + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_test/${{ matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} + fail_ci_if_error: false + + integration: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04"] + partition_id: [0, 1, 2, 3] # set PARTITION_TOTAL below to match + runs-on: ${{ matrix.platform }} + env: + CIRCLECI: true + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 4 + E2E_TEST_FILTER: GO + PARALLEL_FLAG: "-p 4" + SHORTTEST: "-short" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run integration tests + run: | + ./scripts/configure_dev.sh + ./scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_integration/${PARTITION_ID} + TEST_RESULTS=~/test_results/${{ matrix.platform }}_integration/${PARTITION_ID} \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + 
"text": "🚨 Integration Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Integration Test Failure in PR Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_integration_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: integration-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_integration/${{ matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} + fail_ci_if_error: false + + e2e_expect: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04"] + partition_id: [0, 1, 2, 3, 4, 5, 6, 7] # set PARTITION_TOTAL below to match + runs-on: ${{ matrix.platform }} + env: + CIRCLECI: true + PARTITION_ID: ${{ matrix.partition_id }} + PARTITION_TOTAL: 8 + E2E_TEST_FILTER: EXPECT + PARALLEL_FLAG: "-p 4" + SHORTTEST: "-short" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run E2E expect tests + run: | + scripts/configure_dev.sh + scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_e2e_expect/${PARTITION_ID} + TEST_RESULTS=~/test_results/${{ matrix.platform }}_e2e_expect/${PARTITION_ID} \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Expect Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Expect Test Failure in PR Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Partition: `${{ matrix.partition_id }}` of ${{ env.PARTITION_TOTAL }}\n• Failed Step: `${{ steps.run_e2e_expect_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: e2e_expect-results-${{ matrix.platform }}-${{ github.run_id }}-${{ matrix.partition_id }} + path: ~/test_results + retention-days: 7 + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + file: ${{ matrix.platform == 'macos-14' && '/Users/runner' || '/home/runner' }}/test_results/${{ matrix.platform }}_e2e_expect/${{ matrix.partition_id }}/results.xml + token: ${{ env.CODECOV_TOKEN }} 
+ fail_ci_if_error: false + + e2e_subs: + needs: [build] + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-24.04"] + runs-on: ${{ matrix.platform }} + env: + E2E_TEST_FILTER: SCRIPTS + CI_PLATFORM: ${{ matrix.platform }} + CI_KEEP_TEMP_PLATFORM: "" + SHORTTEST: "-short" + steps: + - name: Download workspace archive + uses: actions/download-artifact@v4 + with: + name: workspace-${{ matrix.platform }}-${{ github.run_id }} + path: /tmp/ + - name: Extract workspace archive + run: | + tar -xzf /tmp/workspace-${{ matrix.platform }}.tar.gz + rm -f /tmp/workspace-${{ matrix.platform }}.tar.gz + shell: bash + - name: Get Go version + id: go_version + run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Run E2E subs tests + run: | + scripts/configure_dev.sh + scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum" + mkdir -p ~/test_results/${{ matrix.platform }}_e2e_subs + TEST_RESULTS=~/test_results/${{ matrix.platform }}_e2e_subs \ + test/scripts/run_integration_tests.sh + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Subs Test Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Subs Test Failure in PR Build*\n\n• Job Type: `${{ github.job }}`\n• Platform: `${{ matrix.platform }}`\n• Failed Step: `${{ steps.run_e2e_expect_tests.name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - name: Upload test artifacts to GitHub + uses: actions/upload-artifact@v4 + with: + name: e2e_subs-results-${{ matrix.platform }}-${{ github.run_id }} + path: ~/test_results + retention-days: 7 + + verify: + needs: [test, integration, e2e_expect] + strategy: + fail-fast: false + matrix: + test_type: ["test", "integration", "e2e_expect"] + platform: ["ubuntu-24.04"] + runs-on: ${{ matrix.platform }} + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + pattern: ${{ matrix.test_type }}-results-${{ matrix.platform }}-${{ github.run_id }}-* + path: ~/test_results + merge-multiple: true + - name: Check test execution + run: | + cat ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}/**/testresults.json > ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}/combined_testresults.json + python3 scripts/buildtools/check_tests.py \ + --tests-results-filepath ~/test_results/${{ matrix.platform }}_${{ matrix.test_type }}/combined_testresults.json \ + --ignored-tests \ + TestAlgodWithExpect \ + TestAlgohWithExpect \ + TestGoalWithExpect \ + TestTealdbgWithExpect + - name: Notify Slack on failure + if: failure() && env.SLACK_WEBHOOK != '' + uses: slackapi/slack-github-action@v2.1.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK }} + webhook-type: webhook-trigger + payload: | + { + "text": "🚨 Verify Failure Alert", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Verify Failure in PR Build*\n\n• Job: `upload`\n• Branch: `${{ github.ref_name }}`\n• Run URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } diff --git a/.github/workflows/codegen_verification.yml b/.github/workflows/codegen_verification.yml index 8ac30df114..8dd82c5d31 100644 --- 
a/.github/workflows/codegen_verification.yml +++ b/.github/workflows/codegen_verification.yml @@ -6,7 +6,7 @@ on: pull_request: jobs: codegen_verification: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 services: converter: image: swaggerapi/swagger-converter@sha256:dcfd1c2537f5f271cb4ec942d08aa59ca41b9a24078040061a772afca7e548ae # v1.0.4 diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml index bebc3136a3..74ad7dc5ea 100644 --- a/.github/workflows/container.yml +++ b/.github/workflows/container.yml @@ -10,7 +10,7 @@ on: jobs: build-and-push: name: Build and Push to DockerHub - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout Code uses: actions/checkout@v4 diff --git a/.gitignore b/.gitignore index b0f211512a..5515ceed59 100644 --- a/.gitignore +++ b/.gitignore @@ -78,3 +78,6 @@ tools/x-repo-types/x-repo-types # python virtual environment .venv + +# ignore local claude config changes +CLAUDE.local.md diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000000..83b7eecdd2 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,174 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Common Development Commands + +### Build +```bash +make build # Build all binaries +make install # Build and install binaries to $GOPATH/bin +make buildsrc # Build main source (faster than full build) +``` + +### Testing +```bash +make test # Run unit tests +make fulltest # Run unit tests with race detection +make shorttest # Run short tests with race detection +make integration # Run integration tests +make testall # Run all tests (unit + integration) +``` + +### Code Quality +```bash +make sanity # Run all checks (fmt, lint, fix, tidy) +make fmt # Format code and check licenses +make lint # Run linter (requires deps) +make fix # Run algofix tool +make vet # Run go vet +make tidy # Clean up go.mod files +``` + +### Code Generation + +Some code must be re-generated after changes. Run the following to regenerate auto-generated code if changes are made to relevant files. + +``` +make rebuild_kmd_swagger # Rebuild swagger.json files +make generate # Regenerate for stringer et al. +make expectlint # Run expect linter +touch data/transactions/logic/fields_string.go # Ensure rebuild of teal specs +make -C data/transactions/logic # Update TEAL Specs +touch daemon/algod/api/algod.oas2.json # Ensure rebuild of API spec +make -C daemon/algod/api generate # Regenerate REST server +make msgp # Regenerate msgp files +``` + +To verify that this wasn't missed, we run verification steps, which can be found in `scripts/travis/codegen_verification.sh`. If code is not clean, it will fail CI checks. + +### Development Setup +```bash +./scripts/configure_dev.sh # Initial environment setup +./scripts/buildtools/install_buildtools.sh # Install build tools +make deps # Check/install dependencies +``` + +### Single Test Execution +```bash +go test -v -run TestName ./path/to/package # Run specific test +go test -v ./agreement/... # Run tests in package tree rooted at agreement +go test -v ./agreement/ # Run tests for just the agreement package +``` + +### Running E2E tests +E2E tests run one or more algod processes, each with their own data directory containing logs and configuration (created in a subdirectory of TESTDIR). If an E2E test fails, useful information can often be found in the node.log files produced by algod while running the test. 
For example: +```bash +export NODEBINDIR=~/go/bin # path to algod, goal, etc. Code changes to goal or algod require rebuilding with "make" to place new binaries here before running E2E tests. +export TESTDATADIR=`pwd`/test/testdata # path to go-algorand/test/testdata +export TESTDIR=/tmp +# network and node data will be created in /tmp/TestAssetSend/, logs in /tmp/TestAssetSend/Primary/node.log and /tmp/TestAssetSend/Node/node.log +go test ./test/e2e-go/features/transactions -run TestAssetSend -v -timeout=0 +``` + +## Architecture Overview + +### Main Binaries +- **`algod`**: Core blockchain node daemon (consensus, networking, REST API) +- **`kmd`**: Key Management Daemon (secure wallet operations, isolated process) +- **`goal`**: Primary CLI tool for node interaction and account management +- **`algokey`**: Standalone key generation and management utility + +### Core Components + +#### Node Layer (`node/`) +Central orchestrator that integrates all subsystems. The `AlgorandFullNode` struct manages: +- Ledger state and transaction pool +- Network communication and message routing +- Agreement service for consensus participation +- Catchup service for blockchain synchronization + +#### Agreement Layer (`agreement/`) +Implements Algorand's Byzantine Agreement protocol: +- **Service**: Main consensus coordinator +- **State Machine**: Manages consensus rounds, periods, and steps +- **Vote/Proposal Managers**: Handle consensus message flow +- **CryptoVerifier**: Asynchronous signature verification + +#### Ledger Layer (`ledger/`) +Manages blockchain state using tracker-based architecture: +- **Blockchain Storage**: Sequential block storage with certificates +- **Trackers**: Independent state machines consuming blockchain events + - `accountUpdates`: Account balances and application state + - `acctsOnline`: Online account tracking for consensus + - `catchpointTracker`: Catchpoint generation for fast sync + - `txTail`: Recent transaction tracking +- **Atomic Updates**: Coordinated state transitions across trackers + +#### Network Layer (`network/`) +Supports multiple networking implementations through `GossipNode` interface: +- **WebSocket Network**: Traditional relay-based topology +- **P2P Network**: LibP2P-based peer-to-peer networking +- **Hybrid Network**: Combines both approaches + +#### Data Layer (`data/`) +- **Transaction Pool**: Manages pending transactions +- **Transaction Handler**: Processes incoming network transactions +- **Account Manager**: Handles participation key lifecycle +- **Core Types**: Transactions, blocks, accounts, and protocol structures + +#### Cryptography (`crypto/`) +- Ed25519 signatures, multisig, LogicSig (smart signatures) +- VRF (Verifiable Random Functions) for consensus leader selection +- State proof cryptography for light client verification +- Merkle tree implementations for data integrity + +### Key Architectural Patterns + +#### Interface-Based Design +System boundaries defined by Go interfaces: +- `GossipNode`: Network abstraction +- `BlockValidator`/`BlockFactory`: Consensus integration +- `Ledger`: Storage abstraction +- `KeyManager`: Cryptographic operations + +#### Tracker Pattern +Ledger uses independent state machines that can rebuild from blockchain events, enabling: +- Stateless tracker logic with optional persistent caching +- Atomic coordinated updates across different state types +- Efficient state rebuilding and validation + +#### Concurrent Architecture +- Agreement service separates concurrent I/O from serialized protocol logic +- Crypto 
verification runs in dedicated thread pools +- Network and disk operations use separate goroutines + +#### Security Isolation +- KMD runs as separate process to isolate key material +- Transaction verification separated from consensus participation +- Clear boundaries between trusted and untrusted operations + +## Development Guidelines + +### Testing Strategy +- Unit tests focus on individual component logic +- Integration tests verify cross-component interactions +- Race detection enabled for concurrent code validation +- Benchmark tests for performance-critical paths + +### Code Organization +- Interface-first design for testability and modularity +- Dependency injection for component assembly +- Clear separation between protocol logic and implementation details +- Consistent error handling patterns throughout + +### Performance Considerations +- Tracker pattern enables efficient state caching +- Asynchronous block writing with in-memory queues +- Parallel transaction verification +- Catchpoint mechanism for fast node synchronization + +### Protocol Evolution +- Consensus parameters support versioning for upgrades +- Backward compatibility maintained through careful interface design +- Feature flags and gradual rollout mechanisms diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 0000000000..47dc3e3d86 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 0a7c6fca3d..04750adc5e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ -FROM ubuntu:20.04 as builder +FROM ubuntu:24.04 AS builder -ARG GO_VERSION="1.23.3" +ARG GO_VERSION="1.23.9" ARG CHANNEL ARG URL @@ -41,7 +41,7 @@ RUN /dist/files/build/install.sh \ -b "${BRANCH}" \ -s "${SHA}" -FROM debian:bookworm-20240311-slim as final +FROM debian:bookworm-20250630-slim AS final ENV PATH="/node/bin:${PATH}" ALGOD_PORT="8080" KMD_PORT="7833" ALGORAND_DATA="/algod/data" diff --git a/Makefile b/Makefile index a13327dd77..2b16198b6b 100644 --- a/Makefile +++ b/Makefile @@ -8,13 +8,14 @@ else export GOPATH := $(shell go env GOPATH) GOPATH1 := $(firstword $(subst :, ,$(GOPATH))) endif +GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH1)/bin) SRCPATH := $(shell pwd) ARCH := $(shell ./scripts/archtype.sh) OS_TYPE := $(shell ./scripts/ostype.sh) # overrides for cross-compiling platform-specific binaries ifdef CROSS_COMPILE_ARCH ARCH := $(CROSS_COMPILE_ARCH) - GO_INSTALL := CGO_ENABLED=1 GOOS=$(OS_TYPE) GOARCH=$(ARCH) go build -o $(GOPATH1)/bin-$(OS_TYPE)-$(ARCH) + GO_INSTALL := CGO_ENABLED=1 GOOS=$(OS_TYPE) GOARCH=$(ARCH) go build -o $(GOBIN) else GO_INSTALL := go install endif @@ -105,10 +106,10 @@ fmt: ./scripts/check_license.sh -i fix: build - $(GOPATH1)/bin/algofix */ + $(GOBIN)/algofix */ lint: deps - $(GOPATH1)/bin/golangci-lint run -c .golangci.yml + $(GOBIN)/golangci-lint run -c .golangci.yml expectlint: cd test/e2e-go/cli/goal/expect && python3 expect_linter.py *.exp @@ -139,16 +140,23 @@ prof: cd node && go test $(GOTAGS) -cpuprofile=cpu.out -memprofile=mem.out -mutexprofile=mutex.out generate: deps - PATH=$(GOPATH1)/bin:$$PATH go generate ./... + PATH=$(GOBIN):$$PATH go generate ./... 
msgp: $(patsubst %,%/msgp_gen.go,$(MSGP_GENERATE)) +api: + make -C daemon/algod/api + +logic: + make -C data/transactions/logic + + %/msgp_gen.go: deps ALWAYS @set +e; \ printf "msgp: $(@D)..."; \ - $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand > ./$@.out 2>&1; \ + $(GOBIN)/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand > ./$@.out 2>&1; \ if [ "$$?" != "0" ]; then \ - printf "failed:\n$(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand\n"; \ + printf "failed:\n$(GOBIN)/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand\n"; \ cat ./$@.out; \ rm ./$@.out; \ exit 1; \ @@ -171,29 +179,51 @@ crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a: universal: ifeq ($(OS_TYPE),darwin) # build amd64 Mac binaries - mkdir -p $(GOPATH1)/bin-darwin-amd64 - CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE) + mkdir -p $(GOBIN)-darwin-amd64 + CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOBIN)-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE) # build arm64 Mac binaries - mkdir -p $(GOPATH1)/bin-darwin-arm64 - CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE) + mkdir -p $(GOBIN)-darwin-arm64 + CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOBIN)-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE) # same for buildsrc-special cd tools/block-generator && \ - CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE) - CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE) + CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOBIN)-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=13.0" --host=x86_64-apple-darwin' $(MAKE) + CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOBIN)-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=13.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=13.0" --host=aarch64-apple-darwin' $(MAKE) # lipo together - mkdir -p $(GOPATH1)/bin - for binary in $$(ls $(GOPATH1)/bin-darwin-arm64); do \ - if [ -f $(GOPATH1)/bin-darwin-amd64/$$binary ]; then \ - lipo -create -output $(GOPATH1)/bin/$$binary \ - $(GOPATH1)/bin-darwin-arm64/$$binary \ - $(GOPATH1)/bin-darwin-amd64/$$binary; \ + mkdir -p $(GOBIN) + for binary in $$(ls $(GOBIN)-darwin-arm64); do \ + skip=false; \ + for nongo_file in $(NONGO_BIN_FILES) $(GOBIN)/node_exporter; do \ + if [ "$(GOBIN)/$$binary" = "$$nongo_file" ]; then \ + echo "Skipping non-binary file: $$binary"; \ + skip=true; \ + break; \ + fi; \ + done; \ + if [ "$$skip" = "true" ]; then \ + continue; \ + fi; \ + if [ -f $(GOBIN)-darwin-amd64/$$binary ]; then \ + lipo -create -output $(GOBIN)/$$binary \ + $(GOBIN)-darwin-arm64/$$binary \ + $(GOBIN)-darwin-amd64/$$binary; \ else \ echo "Warning: Binary $$binary exists in arm64 but not in amd64"; \ - fi \ + fi; \ + done + # copy NONGO_BIN files + for nongo_file in 
$(NONGO_BIN_FILES); do \ + short_name=$$(basename $$nongo_file); \ + if [ -f $(GOBIN)-darwin-arm64/$$short_name ]; then \ + cp -f $(GOBIN)-darwin-arm64/$$short_name $$nongo_file; \ + else \ + echo "Warning: NONGO_BIN file $$short_name does not exist in arm64"; \ + fi; \ done + # for node_exporter cross-compilation is using universal binary already + cp -f $(GOBIN)-darwin-arm64/node_exporter $(GOBIN)/node_exporter else echo "OS_TYPE must be darwin for universal builds, skipping" endif @@ -213,7 +243,7 @@ $(KMD_API_SWAGGER_SPEC): $(KMD_API_FILES) crypto/libs/$(OS_TYPE)/$(ARCH)/lib/lib cd daemon/kmd/lib/kmdapi && \ python3 genSwaggerWrappers.py $(KMD_API_SWAGGER_WRAPPER) cd daemon/kmd && \ - PATH=$(GOPATH1)/bin:$$PATH \ + PATH=$(GOBIN):$$PATH \ go generate ./... rm daemon/kmd/lib/kmdapi/$(KMD_API_SWAGGER_WRAPPER) @@ -267,23 +297,23 @@ check-go-version: ## We overwrite bin-race/kmd with a non -race version due to ## the incredible performance impact of -race on Scrypt. build-race: build - @mkdir -p $(GOPATH1)/bin-race - GOBIN=$(GOPATH1)/bin-race go install $(GOTRIMPATH) $(GOTAGS) -race -ldflags="$(GOLDFLAGS)" ./... - cp $(GOPATH1)/bin/kmd $(GOPATH1)/bin-race + @mkdir -p $(GOBIN)-race + GOBIN=$(GOBIN)-race go install $(GOTRIMPATH) $(GOTAGS) -race -ldflags="$(GOLDFLAGS)" ./... + cp $(GOBIN)/kmd $(GOBIN)-race -NONGO_BIN_FILES=$(GOPATH1)/bin/find-nodes.sh $(GOPATH1)/bin/update.sh $(GOPATH1)/bin/COPYING $(GOPATH1)/bin/ddconfig.sh +NONGO_BIN_FILES=$(GOBIN)/find-nodes.sh $(GOBIN)/update.sh $(GOBIN)/COPYING $(GOBIN)/ddconfig.sh NONGO_BIN: $(NONGO_BIN_FILES) -$(GOPATH1)/bin/find-nodes.sh: scripts/find-nodes.sh +$(GOBIN)/find-nodes.sh: scripts/find-nodes.sh -$(GOPATH1)/bin/update.sh: cmd/updater/update.sh +$(GOBIN)/update.sh: cmd/updater/update.sh -$(GOPATH1)/bin/COPYING: COPYING +$(GOBIN)/COPYING: COPYING -$(GOPATH1)/bin/ddconfig.sh: scripts/ddconfig.sh +$(GOBIN)/ddconfig.sh: scripts/ddconfig.sh -$(GOPATH1)/bin/%: +$(GOBIN)/%: cp -f $< $@ test: build @@ -308,28 +338,29 @@ testall: fulltest integration clean: go clean -i ./... - rm -f $(GOPATH1)/bin/node_exporter + rm -f $(GOBIN)/node_exporter cd crypto/libsodium-fork && \ test ! -e Makefile || make clean rm -rf crypto/lib rm -rf crypto/libs rm -rf crypto/copies rm -rf ./gen/devnet ./gen/mainnetnet ./gen/testnet + rm -rf $(GOBIN)-darwin-amd64 $(GOBIN)-darwin-arm64 # clean without crypto cleango: go clean -i ./... - rm -f $(GOPATH1)/bin/node_exporter + rm -f $(GOBIN)/node_exporter # assign the phony target node_exporter the dependency of the actual executable. -node_exporter: $(GOPATH1)/bin/node_exporter +node_exporter: $(GOBIN)/node_exporter # The recipe for making the node_exporter is by extracting it from the gzipped&tar file. 
# The file is was taken from the S3 cloud and it traditionally stored at # /travis-build-artifacts-us-ea-1.algorand.network/algorand/node_exporter/latest/node_exporter-stable-linux-x86_64.tar.gz -$(GOPATH1)/bin/node_exporter: - mkdir -p $(GOPATH1)/bin && \ - cd $(GOPATH1)/bin && \ +$(GOBIN)/node_exporter: + mkdir -p $(GOBIN) && \ + cd $(GOBIN) && \ if [ -z "$(CROSS_COMPILE_ARCH)" ]; then \ tar -xzvf $(SRCPATH)/installer/external/node_exporter-stable-$(shell ./scripts/ostype.sh)-$(shell uname -m | tr '[:upper:]' '[:lower:]').tar.gz; \ else \ @@ -351,7 +382,7 @@ gen/%/genesis.dump: gen/%/genesis.json ./scripts/dump_genesis.sh $< > $@ gen/%/genesis.json: gen/%.json gen/generate.go buildsrc - $(GOPATH1)/bin/genesis -q $(SHORT_PART_PERIOD_FLAG) -n $(shell basename $(shell dirname $@)) -c $< -d $(subst .json,,$<) + $(GOBIN)/genesis -q $(SHORT_PART_PERIOD_FLAG) -n $(shell basename $(shell dirname $@)) -c $< -d $(subst .json,,$<) gen: $(addsuffix gen, $(NETWORKS)) mainnetgen @@ -369,15 +400,15 @@ mainnetgen: gen/mainnet/genesis.dump # This target is preserved as part of the history on how mainnet genesis.json was generated from the CSV file. gen/mainnet/genesis.json: gen/pregen/mainnet/genesis.csv buildsrc mkdir -p gen/mainnet - cat gen/pregen/mainnet/genesis.csv | $(GOPATH1)/bin/incorporate -m gen/pregen/mainnet/metadata.json > gen/mainnet/genesis.json + cat gen/pregen/mainnet/genesis.csv | $(GOBIN)/incorporate -m gen/pregen/mainnet/metadata.json > gen/mainnet/genesis.json capabilities: build - sudo setcap cap_ipc_lock+ep $(GOPATH1)/bin/kmd + sudo setcap cap_ipc_lock+ep $(GOBIN)/kmd dump: $(addprefix gen/,$(addsuffix /genesis.dump, $(NETWORKS))) install: build - scripts/dev_install.sh -p $(GOPATH1)/bin + scripts/dev_install.sh -p $(GOBIN) .PHONY: default fmt lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_kmd_swagger universal diff --git a/README.md b/README.md index 9cfac38c50..2b5ad446ea 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ # go-algorand | **Branch** | **Build Status** | | --------------- | ---------------- | -| **rel/stable** | [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fstable.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fstable) | -| **rel/beta** | [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fbeta.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fbeta) | -| **rel/nightly** | [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fnightly.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fnightly) | +| **rel/stable** | [![Nightly Tests](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml/badge.svg?branch=rel%2Fstable)](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml) | +| **rel/beta** | [![Nightly Tests](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml/badge.svg?branch=rel%2Fbeta)](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml) | +| **rel/nightly** | [![Nightly Tests](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml/badge.svg?branch=rel%2Fnightly)](https://github.com/algorand/go-algorand/actions/workflows/ci-nightly.yml) | **Algorand's** official implementation in Go. @@ -21,7 +21,7 @@ Development is done using the Go Programming Language. 
The Go version is specifi ### Linux / OSX -We currently strive to support Debian-based distributions, with Ubuntu 20.04 as our official release target. Building on Arch Linux also works well. Our core engineering team uses Linux and OSX, so both environments are well-supported for development. +We currently strive to support Debian-based distributions, with Ubuntu 24.04 as our official release target. Building on Arch Linux also works well. Our core engineering team uses Linux and OSX, so both environments are well-supported for development. **OSX Only**: [Homebrew (brew)](https://brew.sh) must be installed before continuing. [Here](https://docs.brew.sh/Installation) are the installation requirements. diff --git a/agreement/bundle.go b/agreement/bundle.go index c2374ec968..0494162caf 100644 --- a/agreement/bundle.go +++ b/agreement/bundle.go @@ -36,8 +36,8 @@ type unauthenticatedBundle struct { Step step `codec:"step"` Proposal proposalValue `codec:"prop"` - Votes []voteAuthenticator `codec:"vote,allocbound=config.MaxVoteThreshold"` - EquivocationVotes []equivocationVoteAuthenticator `codec:"eqv,allocbound=config.MaxVoteThreshold"` + Votes []voteAuthenticator `codec:"vote,allocbound=bounds.MaxVoteThreshold"` + EquivocationVotes []equivocationVoteAuthenticator `codec:"eqv,allocbound=bounds.MaxVoteThreshold"` } // bundle is a set of votes, all from the same round, period, and step, and from distinct senders, that reaches quorum. @@ -48,8 +48,8 @@ type bundle struct { U unauthenticatedBundle `codec:"u"` - Votes []vote `codec:"vote,allocbound=config.MaxVoteThreshold"` - EquivocationVotes []equivocationVote `codec:"eqv,allocbound=config.MaxVoteThreshold"` + Votes []vote `codec:"vote,allocbound=bounds.MaxVoteThreshold"` + EquivocationVotes []equivocationVote `codec:"eqv,allocbound=bounds.MaxVoteThreshold"` } // voteAuthenticators omit the Round, Period, Step, and Proposal for compression diff --git a/agreement/cadaver.go b/agreement/cadaver.go index 4f4edf3d59..6dea25fdff 100644 --- a/agreement/cadaver.go +++ b/agreement/cadaver.go @@ -64,7 +64,7 @@ func (c *cadaver) filename() string { baseDir := c.baseDirectory if baseDir == "" { // Put cadaver files in our data directory - baseDir = config.GetCurrentVersion().DataDirectory + baseDir = config.DataDirectory } fmtstr := "%s.cdv" diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go index 1c482ab71b..48ea73afa2 100644 --- a/agreement/cryptoVerifier_test.go +++ b/agreement/cryptoVerifier_test.go @@ -388,9 +388,9 @@ func BenchmarkCryptoVerifierBundleVertification(b *testing.B) { } } -// TestCryptoVerifierVerificationFailures tests to see that the cryptoVerifier.VerifyVote returns an error in the vote response +// TestCryptoVerifierVerificationErrs tests to see that the cryptoVerifier.VerifyVote returns an error in the vote response // when being unable to enqueue a vote. 
-func TestCryptoVerifierVerificationFailures(t *testing.T) { +func TestCryptoVerifierVerificationErrs(t *testing.T) { partitiontest.PartitionTest(t) mainPool := execpool.MakePool(t) diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go index b012f66da2..16d464d39a 100644 --- a/agreement/msgp_gen.go +++ b/agreement/msgp_gen.go @@ -7,7 +7,7 @@ import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -680,8 +680,8 @@ func (z *Certificate) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "Votes") return } - if zb0007 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxVoteThreshold)) + if zb0007 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", "Votes") return } @@ -709,8 +709,8 @@ func (z *Certificate) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } - if zb0009 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxVoteThreshold)) + if zb0009 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } @@ -792,8 +792,8 @@ func (z *Certificate) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Votes") return } - if zb0013 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxVoteThreshold)) + if zb0013 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0013), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "Votes") return } @@ -819,8 +819,8 @@ func (z *Certificate) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "EquivocationVotes") return } - if zb0015 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxVoteThreshold)) + if zb0015 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "EquivocationVotes") return } @@ -881,10 +881,10 @@ func (z *Certificate) MsgIsZero() bool { func CertificateMaxSize() (s int) { s = 1 + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + ProposalValueMaxSize() + 5 // Calculating size of slice: z.Votes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteAuthenticatorMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (VoteAuthenticatorMaxSize())) s += 4 // Calculating size of slice: z.EquivocationVotes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize())) return } @@ -1682,8 +1682,8 @@ func (z *bundle) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "struct-from-array", "Votes") return } - if zb0005 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxVoteThreshold)) + if zb0005 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", 
"Votes") return } @@ -1711,8 +1711,8 @@ func (z *bundle) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } - if zb0007 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxVoteThreshold)) + if zb0007 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } @@ -1768,8 +1768,8 @@ func (z *bundle) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "Votes") return } - if zb0009 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxVoteThreshold)) + if zb0009 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "Votes") return } @@ -1795,8 +1795,8 @@ func (z *bundle) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "EquivocationVotes") return } - if zb0011 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxVoteThreshold)) + if zb0011 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0011), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "EquivocationVotes") return } @@ -1857,10 +1857,10 @@ func (z *bundle) MsgIsZero() bool { func BundleMaxSize() (s int) { s = 1 + 2 + UnauthenticatedBundleMaxSize() + 5 // Calculating size of slice: z.Votes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (VoteMaxSize())) s += 4 // Calculating size of slice: z.EquivocationVotes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (EquivocationVoteMaxSize())) return } @@ -4493,8 +4493,8 @@ func PlayerMaxSize() (s int) { func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0005Len := uint32(34) - var zb0005Mask uint64 /* 43 bits */ + zb0005Len := uint32(36) + var zb0005Mask uint64 /* 45 bits */ if (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x40 @@ -4535,7 +4535,7 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x8000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 { + if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x10000 } @@ -4563,73 +4563,81 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x400000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x800000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero() { + if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x1000000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 { + if (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x2000000 } + if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 { + zb0005Len-- + zb0005Mask |= 0x4000000 + } if 
(*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x8000000 + zb0005Mask |= 0x10000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x10000000 + zb0005Mask |= 0x20000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x20000000 + zb0005Mask |= 0x40000000 } if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x40000000 + zb0005Mask |= 0x80000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x80000000 + zb0005Mask |= 0x100000000 } if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 { zb0005Len-- - zb0005Mask |= 0x100000000 + zb0005Mask |= 0x200000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 { zb0005Len-- - zb0005Mask |= 0x200000000 + zb0005Mask |= 0x400000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 { zb0005Len-- - zb0005Mask |= 0x400000000 + zb0005Mask |= 0x800000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x800000000 + zb0005Mask |= 0x1000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x1000000000 + zb0005Mask |= 0x2000000000 + } + if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x4000000000 } if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x2000000000 + zb0005Mask |= 0x8000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x4000000000 + zb0005Mask |= 0x10000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x8000000000 + zb0005Mask |= 0x20000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false { zb0005Len-- - zb0005Mask |= 0x10000000000 + zb0005Mask |= 0x40000000000 } // variable map header, size zb0005Len o = msgp.AppendMapHeader(o, zb0005Len) @@ -4687,7 +4695,7 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { if (zb0005Mask & 0x10000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) - o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals) + o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MarshalMsg(o) } if (zb0005Mask & 0x20000) == 0 { // if not empty // string "oper" @@ -4734,46 +4742,51 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o) } if (zb0005Mask & 0x800000) == 0 { // if not empty + // string "prev512" + o = append(o, 0xa7, 0x70, 0x72, 0x65, 0x76, 0x35, 0x31, 0x32) + o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MarshalMsg(o) + } + if (zb0005Mask & 0x1000000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o) } - if (zb0005Mask & 0x1000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000) == 0 { // if not empty // string "prp" o = append(o, 0xa3, 
0x70, 0x72, 0x70) o = (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MarshalMsg(o) } - if (zb0005Mask & 0x2000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate) } - if (zb0005Mask & 0x8000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o) } - if (zb0005Mask & 0x10000000) == 0 { // if not empty + if (zb0005Mask & 0x20000000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0005Mask & 0x20000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0005Mask & 0x40000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000) == 0 { // if not empty // string "sdpf" o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66) o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o) } - if (zb0005Mask & 0x80000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o) } - if (zb0005Mask & 0x100000000) == 0 { // if not empty + if (zb0005Mask & 0x200000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil { @@ -4793,42 +4806,47 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0005Mask & 0x200000000) == 0 { // if not empty + if (zb0005Mask & 0x400000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter) } - if (zb0005Mask & 0x400000000) == 0 { // if not empty + if (zb0005Mask & 0x800000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp) } - if (zb0005Mask & 0x800000000) == 0 { // if not empty + if (zb0005Mask & 0x1000000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x1000000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x2000000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000000) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MarshalMsg(o) + } + if (zb0005Mask & 0x8000000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o) } - if (zb0005Mask & 0x4000000000) == 0 { // if not empty + if (zb0005Mask & 
0x10000000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0005Mask & 0x8000000000) == 0 { // if not empty + if (zb0005Mask & 0x20000000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0005Mask & 0x10000000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove) @@ -4876,6 +4894,14 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Branch512") + return + } + } if zb0005 > 0 { zb0005-- bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) @@ -4900,6 +4926,14 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0005 > 0 { zb0005-- (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) @@ -4916,8 +4950,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -5032,7 +5066,7 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o } if zb0005 > 0 { zb0005-- - (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals") return @@ -5131,8 +5165,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } - if zb0010 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0010 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -5160,8 +5194,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") 
return } - if zb0012 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxMarkAbsent)) + if zb0012 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -5251,6 +5285,12 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "Branch") return } + case "prev512": + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Branch512") + return + } case "seed": bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) if err != nil { @@ -5269,6 +5309,12 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } case "ts": (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { @@ -5282,8 +5328,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "GenesisID") return } - if zb0015 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxGenesisIDLen)) + if zb0015 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.MaxGenesisIDLen)) return } (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -5370,7 +5416,7 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o return } case "nextyes": - (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "NextProtocolApprovals") return @@ -5453,8 +5499,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0018 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0018 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0018), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -5480,8 +5526,8 @@ func (z *proposal) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0020 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxMarkAbsent)) + if zb0020 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0020), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -5550,7 +5596,7 @@ func (_ *proposal) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *proposal) Msgsize() (s int) { - s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + 
(*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize + s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 8 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + 
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking != nil { for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking { _ = zb0001 @@ -5572,12 +5618,12 @@ func (z *proposal) Msgsize() (s int) { // MsgIsZero returns whether this is a zero value func (z *proposal) MsgIsZero() bool { - return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) + return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && 
((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func ProposalMaxSize() (s int) { - s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 
+ protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 8 + crypto.Sha512DigestMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize()) @@ -5585,13 +5631,13 @@ func ProposalMaxSize() (s int) { s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize()) s += 11 // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) s += 5 // Using maxtotalbytes for: z.unauthenticatedProposal.Block.Payset - s += config.MaxTxnBytesPerBlock + s += bounds.MaxTxnBytesPerBlock s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize() return } @@ -9027,8 +9073,8 @@ func ThresholdEventMaxSize() (s int) { func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0005Len := uint32(35) - var zb0005Mask uint64 /* 43 bits */ + zb0005Len := uint32(37) + var zb0005Mask uint64 /* 45 bits */ if (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x80 @@ -9069,7 +9115,7 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x10000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 { + if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x20000 } @@ -9097,77 +9143,85 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x800000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x1000000 } - if 
(*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero() { + if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x2000000 } - if (*z).PriorVote.MsgIsZero() { + if (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x4000000 } - if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 { + if (*z).PriorVote.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x8000000 } + if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 { + zb0005Len-- + zb0005Mask |= 0x10000000 + } if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x20000000 + zb0005Mask |= 0x40000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x40000000 + zb0005Mask |= 0x80000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x80000000 + zb0005Mask |= 0x100000000 } if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x100000000 + zb0005Mask |= 0x200000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x200000000 + zb0005Mask |= 0x400000000 } if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 { zb0005Len-- - zb0005Mask |= 0x400000000 + zb0005Mask |= 0x800000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 { zb0005Len-- - zb0005Mask |= 0x800000000 + zb0005Mask |= 0x1000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 { zb0005Len-- - zb0005Mask |= 0x1000000000 + zb0005Mask |= 0x2000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x2000000000 + zb0005Mask |= 0x4000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x4000000000 + zb0005Mask |= 0x8000000000 + } + if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x10000000000 } if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x8000000000 + zb0005Mask |= 0x20000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x10000000000 + zb0005Mask |= 0x40000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x20000000000 + zb0005Mask |= 0x80000000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false { zb0005Len-- - zb0005Mask |= 0x40000000000 + zb0005Mask |= 0x100000000000 } // variable map header, size zb0005Len o = msgp.AppendMapHeader(o, zb0005Len) @@ -9225,7 +9279,7 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) { if (zb0005Mask & 0x20000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) - o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals) + o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MarshalMsg(o) } if (zb0005Mask & 0x40000) == 0 { // if not empty // string "oper" @@ -9272,51 +9326,56 @@ func (z *transmittedPayload) MarshalMsg(b 
[]byte) (o []byte) { o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o) } if (zb0005Mask & 0x1000000) == 0 { // if not empty + // string "prev512" + o = append(o, 0xa7, 0x70, 0x72, 0x65, 0x76, 0x35, 0x31, 0x32) + o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MarshalMsg(o) + } + if (zb0005Mask & 0x2000000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o) } - if (zb0005Mask & 0x2000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000) == 0 { // if not empty // string "prp" o = append(o, 0xa3, 0x70, 0x72, 0x70) o = (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MarshalMsg(o) } - if (zb0005Mask & 0x4000000) == 0 { // if not empty + if (zb0005Mask & 0x8000000) == 0 { // if not empty // string "pv" o = append(o, 0xa2, 0x70, 0x76) o = (*z).PriorVote.MarshalMsg(o) } - if (zb0005Mask & 0x8000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate) } - if (zb0005Mask & 0x20000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o) } - if (zb0005Mask & 0x40000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0005Mask & 0x80000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0005Mask & 0x100000000) == 0 { // if not empty + if (zb0005Mask & 0x200000000) == 0 { // if not empty // string "sdpf" o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66) o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o) } - if (zb0005Mask & 0x200000000) == 0 { // if not empty + if (zb0005Mask & 0x400000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o) } - if (zb0005Mask & 0x400000000) == 0 { // if not empty + if (zb0005Mask & 0x800000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil { @@ -9336,42 +9395,47 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0005Mask & 0x800000000) == 0 { // if not empty + if (zb0005Mask & 0x1000000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter) } - if (zb0005Mask & 0x1000000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp) } - if (zb0005Mask & 0x2000000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = 
(*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x4000000000) == 0 { // if not empty + if (zb0005Mask & 0x8000000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x8000000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000000) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MarshalMsg(o) + } + if (zb0005Mask & 0x20000000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o) } - if (zb0005Mask & 0x10000000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0005Mask & 0x20000000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0005Mask & 0x40000000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove) @@ -9419,6 +9483,14 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Branch512") + return + } + } if zb0005 > 0 { zb0005-- bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) @@ -9443,6 +9515,14 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0005 > 0 { zb0005-- (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) @@ -9459,8 +9539,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -9575,7 +9655,7 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal } if zb0005 > 0 { zb0005-- - (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = 
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals") return @@ -9674,8 +9754,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } - if zb0010 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0010 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -9703,8 +9783,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } - if zb0012 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxMarkAbsent)) + if zb0012 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -9802,6 +9882,12 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "Branch") return } + case "prev512": + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Branch512") + return + } case "seed": bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) if err != nil { @@ -9820,6 +9906,12 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } case "ts": (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { @@ -9833,8 +9925,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "GenesisID") return } - if zb0015 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxGenesisIDLen)) + if zb0015 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.MaxGenesisIDLen)) return } (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -9921,7 +10013,7 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal return } case "nextyes": - (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "NextProtocolApprovals") return @@ -10004,8 +10096,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0018 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0018 > bounds.MaxProposedExpiredOnlineAccounts { + err = 
msgp.ErrOverflow(uint64(zb0018), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -10031,8 +10123,8 @@ func (z *transmittedPayload) UnmarshalMsgWithState(bts []byte, st msgp.Unmarshal err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0020 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxMarkAbsent)) + if zb0020 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0020), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -10107,7 +10199,7 @@ func (_ *transmittedPayload) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *transmittedPayload) Msgsize() (s int) { - s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize + s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 8 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch512.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 4 + 
(*z).unauthenticatedProposal.Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking != nil { for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking { _ = zb0001 @@ -10129,12 +10221,12 @@ func (z *transmittedPayload) Msgsize() (s int) { // MsgIsZero returns whether this is a zero value func (z *transmittedPayload) MsgIsZero() bool { - return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && 
((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero()) + return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch512.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && 
(len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func TransmittedPayloadMaxSize() (s int) { - s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 8 + crypto.Sha512DigestMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize()) @@ -10142,13 +10234,13 @@ func TransmittedPayloadMaxSize() (s int) { s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize()) s += 11 // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) s += 5 // Using maxtotalbytes for: z.unauthenticatedProposal.Block.Payset - s += config.MaxTxnBytesPerBlock + s += bounds.MaxTxnBytesPerBlock s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize() + 3 + 
UnauthenticatedVoteMaxSize() return } @@ -10306,8 +10398,8 @@ func (z *unauthenticatedBundle) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "struct-from-array", "Votes") return } - if zb0007 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxVoteThreshold)) + if zb0007 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", "Votes") return } @@ -10335,8 +10427,8 @@ func (z *unauthenticatedBundle) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } - if zb0009 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxVoteThreshold)) + if zb0009 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "struct-from-array", "EquivocationVotes") return } @@ -10418,8 +10510,8 @@ func (z *unauthenticatedBundle) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "Votes") return } - if zb0013 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxVoteThreshold)) + if zb0013 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0013), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "Votes") return } @@ -10445,8 +10537,8 @@ func (z *unauthenticatedBundle) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "EquivocationVotes") return } - if zb0015 > config.MaxVoteThreshold { - err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxVoteThreshold)) + if zb0015 > bounds.MaxVoteThreshold { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.MaxVoteThreshold)) err = msgp.WrapError(err, "EquivocationVotes") return } @@ -10507,10 +10599,10 @@ func (z *unauthenticatedBundle) MsgIsZero() bool { func UnauthenticatedBundleMaxSize() (s int) { s = 1 + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + ProposalValueMaxSize() + 5 // Calculating size of slice: z.Votes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteAuthenticatorMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (VoteAuthenticatorMaxSize())) s += 4 // Calculating size of slice: z.EquivocationVotes - s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize())) return } @@ -10858,8 +10950,8 @@ func UnauthenticatedEquivocationVoteMaxSize() (s int) { func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0005Len := uint32(34) - var zb0005Mask uint64 /* 41 bits */ + zb0005Len := uint32(36) + var zb0005Mask uint64 /* 43 bits */ if (*z).Block.BlockHeader.Bonus.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x40 @@ -10900,7 +10992,7 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x8000 } - if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 { + if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x10000 } @@ -10928,73 +11020,81 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x400000 } - if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { + if 
(*z).Block.BlockHeader.Branch512.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x800000 } - if (*z).Block.BlockHeader.Proposer.MsgIsZero() { + if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x1000000 } - if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 { + if (*z).Block.BlockHeader.Proposer.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x2000000 } + if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 { + zb0005Len-- + zb0005Mask |= 0x4000000 + } if (*z).Block.BlockHeader.Round.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x8000000 + zb0005Mask |= 0x10000000 } if (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x10000000 + zb0005Mask |= 0x20000000 } if (*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x20000000 + zb0005Mask |= 0x40000000 } if (*z).SeedProof.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x40000000 + zb0005Mask |= 0x80000000 } if (*z).Block.BlockHeader.Seed.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x80000000 + zb0005Mask |= 0x100000000 } if len((*z).Block.BlockHeader.StateProofTracking) == 0 { zb0005Len-- - zb0005Mask |= 0x100000000 + zb0005Mask |= 0x200000000 } if (*z).Block.BlockHeader.TxnCounter == 0 { zb0005Len-- - zb0005Mask |= 0x200000000 + zb0005Mask |= 0x400000000 } if (*z).Block.BlockHeader.TimeStamp == 0 { zb0005Len-- - zb0005Mask |= 0x400000000 + zb0005Mask |= 0x800000000 } if (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x800000000 + zb0005Mask |= 0x1000000000 } if (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x1000000000 + zb0005Mask |= 0x2000000000 + } + if (*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x4000000000 } if (*z).Block.Payset.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x2000000000 + zb0005Mask |= 0x8000000000 } if (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x4000000000 + zb0005Mask |= 0x10000000000 } if (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { zb0005Len-- - zb0005Mask |= 0x8000000000 + zb0005Mask |= 0x20000000000 } if (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false { zb0005Len-- - zb0005Mask |= 0x10000000000 + zb0005Mask |= 0x40000000000 } // variable map header, size zb0005Len o = msgp.AppendMapHeader(o, zb0005Len) @@ -11052,7 +11152,7 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { if (zb0005Mask & 0x10000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) - o = msgp.AppendUint64(o, (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals) + o = (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.MarshalMsg(o) } if (zb0005Mask & 0x20000) == 0 { // if not empty // string "oper" @@ -11099,46 +11199,51 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { o = (*z).Block.BlockHeader.Branch.MarshalMsg(o) } if (zb0005Mask & 0x800000) == 0 { // if not empty + // string "prev512" + o = append(o, 0xa7, 0x70, 0x72, 0x65, 0x76, 0x35, 0x31, 0x32) + o = (*z).Block.BlockHeader.Branch512.MarshalMsg(o) + } + if (zb0005Mask & 0x1000000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o) } - if (zb0005Mask & 0x1000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000) == 0 { // 
if not empty // string "prp" o = append(o, 0xa3, 0x70, 0x72, 0x70) o = (*z).Block.BlockHeader.Proposer.MarshalMsg(o) } - if (zb0005Mask & 0x2000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsRate) } - if (zb0005Mask & 0x8000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).Block.BlockHeader.Round.MarshalMsg(o) } - if (zb0005Mask & 0x10000000) == 0 { // if not empty + if (zb0005Mask & 0x20000000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0005Mask & 0x20000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0005Mask & 0x40000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000) == 0 { // if not empty // string "sdpf" o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66) o = (*z).SeedProof.MarshalMsg(o) } - if (zb0005Mask & 0x80000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).Block.BlockHeader.Seed.MarshalMsg(o) } - if (zb0005Mask & 0x100000000) == 0 { // if not empty + if (zb0005Mask & 0x200000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).Block.BlockHeader.StateProofTracking == nil { @@ -11158,42 +11263,47 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0005Mask & 0x200000000) == 0 { // if not empty + if (zb0005Mask & 0x400000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).Block.BlockHeader.TxnCounter) } - if (zb0005Mask & 0x400000000) == 0 { // if not empty + if (zb0005Mask & 0x800000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).Block.BlockHeader.TimeStamp) } - if (zb0005Mask & 0x800000000) == 0 { // if not empty + if (zb0005Mask & 0x1000000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x1000000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x2000000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000000) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.MarshalMsg(o) + } + if (zb0005Mask & 0x8000000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o = (*z).Block.Payset.MarshalMsg(o) } - if (zb0005Mask & 0x4000000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0005Mask & 0x8000000000) == 0 { // if not 
empty + if (zb0005Mask & 0x20000000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0005Mask & 0x10000000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove) @@ -11241,6 +11351,14 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Branch512") + return + } + } if zb0005 > 0 { zb0005-- bts, err = (*z).Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) @@ -11265,6 +11383,14 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0005 > 0 { zb0005-- (*z).Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) @@ -11281,8 +11407,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -11397,7 +11523,7 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma } if zb0005 > 0 { zb0005-- - (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals") return @@ -11496,8 +11622,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } - if zb0010 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0010 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -11525,8 +11651,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } - if zb0012 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxMarkAbsent)) + if zb0012 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -11616,6 +11742,12 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "Branch") return } + case "prev512": + bts, err = 
(*z).Block.BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Branch512") + return + } case "seed": bts, err = (*z).Block.BlockHeader.Seed.UnmarshalMsgWithState(bts, st) if err != nil { @@ -11634,6 +11766,12 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } case "ts": (*z).Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { @@ -11647,8 +11785,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "GenesisID") return } - if zb0015 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxGenesisIDLen)) + if zb0015 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.MaxGenesisIDLen)) return } (*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -11735,7 +11873,7 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma return } case "nextyes": - (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "NextProtocolApprovals") return @@ -11818,8 +11956,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0018 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0018 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0018), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -11845,8 +11983,8 @@ func (z *unauthenticatedProposal) UnmarshalMsgWithState(bts []byte, st msgp.Unma err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0020 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxMarkAbsent)) + if zb0020 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0020), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -11915,7 +12053,7 @@ func (_ *unauthenticatedProposal) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *unauthenticatedProposal) Msgsize() (s int) { - s = 3 + 4 + (*z).Block.BlockHeader.Round.Msgsize() + 5 + (*z).Block.BlockHeader.Branch.Msgsize() + 5 + (*z).Block.BlockHeader.Seed.Msgsize() + 4 + (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).Block.BlockHeader.GenesisID) + 3 + (*z).Block.BlockHeader.GenesisHash.Msgsize() + 4 + (*z).Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + 
msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize + s = 3 + 4 + (*z).Block.BlockHeader.Round.Msgsize() + 5 + (*z).Block.BlockHeader.Branch.Msgsize() + 8 + (*z).Block.BlockHeader.Branch512.Msgsize() + 5 + (*z).Block.BlockHeader.Seed.Msgsize() + 4 + (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 7 + (*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).Block.BlockHeader.GenesisID) + 3 + (*z).Block.BlockHeader.GenesisHash.Msgsize() + 4 + (*z).Block.BlockHeader.Proposer.Msgsize() + 3 + (*z).Block.BlockHeader.FeesCollected.Msgsize() + 3 + (*z).Block.BlockHeader.Bonus.Msgsize() + 3 + (*z).Block.BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.Msgsize() + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize if (*z).Block.BlockHeader.StateProofTracking != nil { for zb0001, zb0002 := range (*z).Block.BlockHeader.StateProofTracking { _ = zb0001 @@ -11937,12 +12075,12 @@ func (z *unauthenticatedProposal) Msgsize() (s int) { // MsgIsZero returns whether this is a zero value func (z *unauthenticatedProposal) MsgIsZero() bool { - return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && 
((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.StateProofTracking) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero()) + return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Branch512.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.Proposer.MsgIsZero()) && ((*z).Block.BlockHeader.FeesCollected.MsgIsZero()) && ((*z).Block.BlockHeader.Bonus.MsgIsZero()) && ((*z).Block.BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.StateProofTracking) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func UnauthenticatedProposalMaxSize() (s int) { - s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + 
basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 8 + crypto.Sha512DigestMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.Block.BlockHeader.StateProofTracking s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize()) @@ -11950,13 +12088,13 @@ func UnauthenticatedProposalMaxSize() (s int) { s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize()) s += 11 // Calculating size of slice: z.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.Block.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) s += 5 // Using maxtotalbytes for: z.Block.Payset - s += config.MaxTxnBytesPerBlock + s += bounds.MaxTxnBytesPerBlock s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize() return } diff --git a/agreement/persistence.go b/agreement/persistence.go index ba2b05a006..4bcda1c5bd 100644 --- a/agreement/persistence.go +++ b/agreement/persistence.go @@ -107,8 +107,8 @@ func persist(log serviceLogger, crash db.Accessor, Round basics.Round, Period pe }() err = crash.Atomic(func(ctx context.Context, tx *sql.Tx) error { - _, err := tx.Exec("insert or replace into Service (rowid, data) values (1, ?)", raw) - return err + _, err1 := tx.Exec("insert or replace into Service (rowid, data) values (1, ?)", raw) + return err1 }) if err == nil { return diff --git a/agreement/persistence_test.go b/agreement/persistence_test.go index 59f8ed5c1f..1a7381aa6a 100644 --- a/agreement/persistence_test.go +++ b/agreement/persistence_test.go @@ -278,7 +278,7 @@ func TestEmptyMapDeserialization(t *testing.T) { require.NotNil(t, v1.Equivocators) } -func TestDecodeFailures(t *testing.T) { +func TestDecodeErrs(t *testing.T) { partitiontest.PartitionTest(t) clock := 
timers.MakeMonotonicClock[TimeoutType](time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC)) ce := clock.Encode() diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go index 9b25e739e2..fb94ac62fb 100644 --- a/agreement/pseudonode_test.go +++ b/agreement/pseudonode_test.go @@ -458,12 +458,12 @@ func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) { type substrServiceLogger struct { logging.Logger - looupStrings []string + lookupStrings []string instancesFound []int } func (ssl *substrServiceLogger) Infof(s string, args ...interface{}) { - for i, str := range ssl.looupStrings { + for i, str := range ssl.lookupStrings { if strings.Contains(s, str) { ssl.instancesFound[i]++ return @@ -471,9 +471,9 @@ func (ssl *substrServiceLogger) Infof(s string, args ...interface{}) { } } -// TestPseudonodeFailedEnqueuedTasks test to see that in the case where we cannot enqueue the verification task to the backlog, we won't be waiting forever - instead, +// TestPseudonodeNonEnqueuedTasks test to see that in the case where we cannot enqueue the verification task to the backlog, we won't be waiting forever - instead, // we would generate a warning message and keep going. -func TestPseudonodeFailedEnqueuedTasks(t *testing.T) { +func TestPseudonodeNonEnqueuedTasks(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -485,7 +485,7 @@ func TestPseudonodeFailedEnqueuedTasks(t *testing.T) { subStrLogger := &substrServiceLogger{ Logger: logging.TestingLog(t), - looupStrings: []string{"pseudonode.makeVotes: failed to enqueue vote verification for", "pseudonode.makeProposals: failed to enqueue vote verification"}, + lookupStrings: []string{"pseudonode.makeVotes: failed to enqueue vote verification for", "pseudonode.makeProposals: failed to enqueue vote verification"}, instancesFound: []int{0, 0}, } sLogger := serviceLogger{ diff --git a/agreement/service.go b/agreement/service.go index 5247ce34cc..a3339bbc91 100644 --- a/agreement/service.go +++ b/agreement/service.go @@ -20,6 +20,7 @@ package agreement import ( "context" "io" + "sync" "time" "github.com/algorand/go-algorand/config" @@ -40,7 +41,7 @@ type Service struct { // for exiting quit chan struct{} - done chan struct{} + wg sync.WaitGroup quitFn context.CancelFunc // TODO instead of storing this, pass a context into Start() // external events @@ -139,7 +140,6 @@ func (s *Service) Start() { s.quitFn = quitFn s.quit = make(chan struct{}) - s.done = make(chan struct{}) s.voteVerifier = MakeAsyncVoteVerifier(s.BacklogPool) s.demux = makeDemux(demuxParams{ @@ -165,6 +165,7 @@ func (s *Service) Start() { input := make(chan externalEvent) output := make(chan []action) ready := make(chan externalDemuxSignals) + s.wg.Add(2) go s.demuxLoop(ctx, input, output, ready) go s.mainLoop(input, output, ready) } @@ -178,7 +179,7 @@ func (s *Service) Shutdown() { close(s.quit) s.quitFn() - <-s.done + s.wg.Wait() s.persistenceLoop.Quit() } @@ -189,6 +190,7 @@ func (s *Service) DumpDemuxQueues(w io.Writer) { // demuxLoop repeatedly executes pending actions and then requests the next event from the Service.demux. func (s *Service) demuxLoop(ctx context.Context, input chan<- externalEvent, output <-chan []action, ready <-chan externalDemuxSignals) { + defer s.wg.Done() for a := range output { s.do(ctx, a) extSignals := <-ready @@ -202,7 +204,6 @@ func (s *Service) demuxLoop(ctx context.Context, input chan<- externalEvent, out s.demux.quit() s.loopback.Quit() s.voteVerifier.Quit() - close(s.done) } // mainLoop drives the state machine. 
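Note on the agreement/service.go hunks above: the single done channel is replaced with a sync.WaitGroup so that Shutdown waits for both the demux loop and the main loop to exit. The following is a minimal, self-contained sketch of that pattern only; the toyService type, loop bodies, and names are illustrative stand-ins, not the actual agreement code.

    package main

    import (
        "fmt"
        "sync"
    )

    // toyService mirrors the shape of the change: Start adds to the WaitGroup
    // before launching each goroutine, every loop calls Done on exit, and
    // shutdown waits for all of them instead of receiving from a done channel.
    type toyService struct {
        quit chan struct{}
        wg   sync.WaitGroup
    }

    func (s *toyService) start() {
        s.quit = make(chan struct{})
        s.wg.Add(2) // one per goroutine started below
        go s.loop("demux")
        go s.loop("main")
    }

    func (s *toyService) loop(name string) {
        defer s.wg.Done() // guarantees shutdown unblocks even on early return
        <-s.quit
        fmt.Println(name, "loop exiting")
    }

    func (s *toyService) shutdown() {
        close(s.quit)
        s.wg.Wait() // waits for every loop, not just the one that closed done
    }

    func main() {
        var s toyService
        s.start()
        s.shutdown()
    }

With a WaitGroup, each goroutine registers itself before it starts and signals completion independently, so shutdown cannot return while either loop is still draining work; a test can also call wg.Add(1) before driving a single loop directly, as the service_test.go hunk below does.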
@@ -213,7 +214,8 @@ func (s *Service) demuxLoop(ctx context.Context, input chan<- externalEvent, out // 3. Drive the state machine with this input to obtain a slice of pending actions. // 4. If necessary, persist state to disk. func (s *Service) mainLoop(input <-chan externalEvent, output chan<- []action, ready chan<- externalDemuxSignals) { - // setup + defer s.wg.Done() + var clock timers.Clock[TimeoutType] var router rootRouter var status player diff --git a/agreement/service_test.go b/agreement/service_test.go index 3b174c266e..b46aefb659 100644 --- a/agreement/service_test.go +++ b/agreement/service_test.go @@ -2543,6 +2543,7 @@ func TestAgreementServiceStartDeadline(t *testing.T) { close(inputCh) output := make(chan []action, 10) ready := make(chan externalDemuxSignals, 1) + s.wg.Add(1) s.mainLoop(inputCh, output, ready) // check the ready channel: diff --git a/agreement/state_machine_test.go b/agreement/state_machine_test.go index 73bd279672..189ae432c7 100644 --- a/agreement/state_machine_test.go +++ b/agreement/state_machine_test.go @@ -231,7 +231,7 @@ func (w ioPropWrapper) containsTrace(trace ioTrace) (contains bool, info string, for _, e := range trace.events { valid := checker.addEvent(e) if valid != nil { - return false, valid.Error(), nil + return false, valid.Error(), nil //nolint:nilerr // intentional } } return true, "", nil diff --git a/agreement/vote.go b/agreement/vote.go index 599023ada0..5727f5f91b 100644 --- a/agreement/vote.go +++ b/agreement/vote.go @@ -132,7 +132,7 @@ func (uv unauthenticatedVote) verify(l LedgerReader) (vote, error) { return vote{}, fmt.Errorf("unauthenticatedVote.verify: vote by %v in round %d after VoteLastValid %d: %+v", rv.Sender, rv.Round, m.Record.VoteLastValid, uv) } - ephID := basics.OneTimeIDForRound(rv.Round, m.Record.KeyDilution(proto)) + ephID := basics.OneTimeIDForRound(rv.Round, proto.EffectiveKeyDilution(m.Record.OnlineAccountData.VoteKeyDilution)) voteID := m.Record.VoteID if !voteID.Verify(ephID, rv, uv.Sig) { return vote{}, fmt.Errorf("unauthenticatedVote.verify: could not verify FS signature on vote by %v given %v: %+v", rv.Sender, voteID, uv) diff --git a/buildnumber.dat b/buildnumber.dat index 0cfbf08886..d00491fd7e 100644 --- a/buildnumber.dat +++ b/buildnumber.dat @@ -1 +1 @@ -2 +1 diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index 422c30b28d..b1a3def2d0 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -552,7 +552,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) { var blk *bookkeeping.Block var cert *agreement.Certificate for retryCount := uint64(1); blocksFetched <= lookback; { - if err := cs.ctx.Err(); err != nil { + if err1 := cs.ctx.Err(); err1 != nil { return cs.stopOrAbort() } diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go index 7e070c5cb7..045223f44d 100644 --- a/catchup/catchpointService_test.go +++ b/catchup/catchpointService_test.go @@ -144,6 +144,7 @@ func TestProcessStageBlocksDownloadNilCert(t *testing.T) { blk2 := blk1 blk2.BlockHeader.Round = 2 blk2.BlockHeader.Branch = blk1.Hash() + blk2.BlockHeader.Branch512 = blk1.Hash512() blk2.TxnCommitments, err = blk2.PaysetCommit() require.NoError(t, err) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 8c4fd2bb4a..75dd33838d 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -109,15 +109,15 @@ func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger, return } -func addBlocks(t 
*testing.T, ledger *data.Ledger, blk bookkeeping.Block, numBlocks int) { - var err error - for i := 0; i < numBlocks; i++ { +func addBlocks(t *testing.T, ledger *data.Ledger, blk bookkeeping.Block, numBlocks basics.Round) { + for range numBlocks { + var err error blk.BlockHeader.Round++ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) blk.TxnCommitments, err = blk.PaysetCommit() require.NoError(t, err) - err := ledger.AddBlock(blk, agreement.Certificate{Round: blk.BlockHeader.Round}) + err = ledger.AddBlock(blk, agreement.Certificate{Round: blk.BlockHeader.Round}) require.NoError(t, err) hdr, err := ledger.BlockHdr(blk.BlockHeader.Round) diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go index fffcfc3960..e2a6418843 100644 --- a/catchup/peerSelector_test.go +++ b/catchup/peerSelector_test.go @@ -325,7 +325,7 @@ func peerSelectorTestRandVal(t *testing.T, seed int) float64 { randVal = randVal + 1 return randVal } -func TestPeerSelector_PeersDownloadFailed(t *testing.T) { +func TestPeerSelector_PeersDownloadError(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/catchup/service.go b/catchup/service.go index 60b2c5ffb2..0575fda879 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -192,11 +192,11 @@ func (s *Service) triggerSync() { // SetDisableSyncRound attempts to set the first round we _do_not_ want to fetch from the network // Blocks from disableSyncRound or any round after disableSyncRound will not be fetched while this is set -func (s *Service) SetDisableSyncRound(rnd uint64) error { - if basics.Round(rnd) < s.ledger.LastRound() { +func (s *Service) SetDisableSyncRound(rnd basics.Round) error { + if rnd < s.ledger.LastRound() { return ErrSyncRoundInvalid } - s.disableSyncRound.Store(rnd) + s.disableSyncRound.Store(uint64(rnd)) s.triggerSync() return nil } @@ -208,8 +208,8 @@ func (s *Service) UnsetDisableSyncRound() { } // GetDisableSyncRound returns the disabled sync round -func (s *Service) GetDisableSyncRound() uint64 { - return s.disableSyncRound.Load() +func (s *Service) GetDisableSyncRound() basics.Round { + return basics.Round(s.disableSyncRound.Load()) } // SynchronizingTime returns the time we've been performing a catchup operation (0 if not currently catching up) diff --git a/catchup/service_test.go b/catchup/service_test.go index de0bfaf8cc..e375f354f8 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -261,7 +261,7 @@ func TestSyncRound(t *testing.T) { } // Assert that the last block is the one we expect--i.e. 
disableSyncRound - 1 rnd := s.GetDisableSyncRound() - rr, lr := basics.Round(rnd-1), local.LastRound() + rr, lr := rnd-1, local.LastRound() require.Equal(t, rr, lr) for r := basics.Round(1); r < rr; r++ { @@ -361,7 +361,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { partitiontest.PartitionTest(t) // Make Ledger - numBlocks := 10 + const numBlocks = 10 local := new(mockedLedger) local.blocks = append(local.blocks, bookkeeping.Block{}) lastRoundAtStart := local.LastRound() @@ -397,7 +397,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { s.sync() // Asserts that the last block is the one we expect - require.Equal(t, lastRoundAtStart+basics.Round(numBlocks), local.LastRound()) + require.Equal(t, lastRoundAtStart+numBlocks, local.LastRound()) // Get the same block we wrote block, _, _, err := makeUniversalBlockFetcher(logging.Base(), @@ -418,7 +418,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { func TestAbruptWrites(t *testing.T) { partitiontest.PartitionTest(t) - numberOfBlocks := 100 + numberOfBlocks := basics.Round(100) if testing.Short() { numberOfBlocks = 10 @@ -456,7 +456,7 @@ func TestAbruptWrites(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - for i := basics.Round(lastRound + 1); i <= basics.Round(numberOfBlocks); i++ { + for i := lastRound + 1; i <= numberOfBlocks; i++ { time.Sleep(time.Duration(rand.Uint32()%5) * time.Millisecond) blk, err := remote.Block(i) require.NoError(t, err) @@ -493,7 +493,7 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { t.Fatal(err) return } - addBlocks(t, remote, blk, int(numberOfBlocks)-1) + addBlocks(t, remote, blk, numberOfBlocks-1) // Create a network and block service blockServiceConfig := config.GetDefaultLocal() @@ -536,7 +536,7 @@ func TestServiceFetchBlocksMalformed(t *testing.T) { partitiontest.PartitionTest(t) // Make Ledger - numBlocks := 10 + const numBlocks = 10 local := new(mockedLedger) local.blocks = append(local.blocks, bookkeeping.Block{}) @@ -913,7 +913,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { partitiontest.PartitionTest(t) // Make Ledger - numBlocks := 10 + const numBlocks = 10 local := new(mockedLedger) local.blocks = append(local.blocks, bookkeeping.Block{}) lastRoundAtStart := local.LastRound() @@ -1055,7 +1055,7 @@ func TestServiceLedgerUnavailable(t *testing.T) { t.Fatal(err) return } - numBlocks := 10 + const numBlocks = 10 addBlocks(t, remote, blk, numBlocks) // Create a network and block service @@ -1101,7 +1101,7 @@ func TestServiceNoBlockForRound(t *testing.T) { t.Fatal(err) return } - numBlocks := 10 + const numBlocks = 10 addBlocks(t, remote, blk, numBlocks) // Create a network and block service diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 4d03df6e5a..d7089a4256 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -253,14 +253,14 @@ func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data } return nil, noBlockErr default: - bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) + bodyBytes, err1 := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. 
Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes)) - if err == nil { - err = makeErrHTTPResponse(response.StatusCode, blockURL, fmt.Sprintf("Response body '%s'", string(bodyBytes))) + if err1 == nil { + err1 = makeErrHTTPResponse(response.StatusCode, blockURL, fmt.Sprintf("Response body '%s'", string(bodyBytes))) } else { - err = makeErrHTTPResponse(response.StatusCode, blockURL, err.Error()) + err1 = makeErrHTTPResponse(response.StatusCode, blockURL, err1.Error()) } - return nil, err + return nil, err1 } // at this point, we've already receieved the response headers. ensure that the diff --git a/cmd/algod/main.go b/cmd/algod/main.go index f50dc00f77..f9f60a4c09 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -74,7 +74,7 @@ func main() { func run() int { dataDir := resolveDataDir() absolutePath, absPathErr := filepath.Abs(dataDir) - config.UpdateVersionDataDir(absolutePath) + config.DataDirectory = absolutePath if *seed != "" { seedVal, err := strconv.ParseInt(*seed, 10, 64) @@ -137,7 +137,7 @@ func run() int { } // If data directory doesn't exist, we can't run. Don't bother trying. - if _, err := os.Stat(absolutePath); err != nil { + if _, err1 := os.Stat(absolutePath); err1 != nil { fmt.Fprintf(os.Stderr, "Data directory %s does not appear to be valid\n", dataDir) return 1 } @@ -175,7 +175,7 @@ func run() int { checkAndDeleteIndexerFile("indexer.sqlite-shm") checkAndDeleteIndexerFile("indexer.sqlite-wal") - cfg, err := config.LoadConfigFromDisk(absolutePath) + cfg, migrationResults, err := config.LoadConfigFromDiskWithMigrations(absolutePath) if err != nil && !os.IsNotExist(err) { // log is not setup yet, this will log to stderr log.Fatalf("Cannot load config: %v", err) @@ -215,12 +215,19 @@ func run() int { isTest := os.Getenv("ALGOTEST") != "" remoteTelemetryEnabled := false if !isTest { - telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDir, genesis.ID()) - if err != nil { - fmt.Fprintln(os.Stdout, "error loading telemetry config", err) + root, err1 := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err1 == nil { + cfgDir = &root } - if os.IsPermission(err) { - fmt.Fprintf(os.Stderr, "Permission error on accessing telemetry config: %v", err) + telemetryConfig, err1 := logging.EnsureTelemetryConfig(&dataDir, cfgDir) + config.AnnotateTelemetry(&telemetryConfig, genesis.ID()) + if err1 != nil { + if os.IsPermission(err1) { + fmt.Fprintf(os.Stderr, "permission error on accessing telemetry config: %v", err1) + } else { + fmt.Fprintf(os.Stderr, "error loading telemetry config: %v", err1) + } return 1 } fmt.Fprintf(os.Stdout, "Telemetry configured from '%s'\n", telemetryConfig.FilePath) @@ -240,23 +247,23 @@ func run() int { } // Try to enable remote telemetry now when URI is defined. Skip for DNS based telemetry. ctx, telemetryCancelFn := context.WithTimeout(context.Background(), defaultStaticTelemetryStartupTimeout) - err = log.EnableTelemetryContext(ctx, telemetryConfig) + err1 = log.EnableTelemetryContext(ctx, telemetryConfig) telemetryCancelFn() - if err != nil { - fmt.Fprintln(os.Stdout, "error creating telemetry hook", err) + if err1 != nil { + fmt.Fprintln(os.Stdout, "error creating telemetry hook", err1) // Remote telemetry init loop go func() { for { time.Sleep(defaultStaticTelemetryBGDialRetry) // Try to enable remote telemetry now when URI is defined. Skip for DNS based telemetry. 
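// A minimal, self-contained sketch, not part of this patch, of the Go error-shadowing
// pitfall that the widespread err -> err1 renames in these hunks appear to guard
// against: an inner `:=` declares a new err, so the outer err can be left untouched
// and a failure silently dropped. All names below are illustrative.
package main

import (
	"errors"
	"fmt"
)

func fetch() ([]byte, error) { return nil, errors.New("fetch failed") }

func shadowed() (err error) {
	if true {
		body, err := fetch() // `:=` creates a new err scoped to this block
		_ = body
		_ = err // the outer (named) err is still nil
	}
	return // returns nil, hiding the failure
}

func renamed() (err error) {
	if true {
		body, err1 := fetch() // distinct name: no accidental shadowing
		_ = body
		if err1 != nil {
			err = err1
		}
	}
	return // returns "fetch failed"
}

func main() {
	fmt.Println(shadowed(), renamed()) // <nil> fetch failed
}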
- err := log.EnableTelemetryContext(context.Background(), telemetryConfig) + err1 := log.EnableTelemetryContext(context.Background(), telemetryConfig) // Error occurs only if URI is defined and we need to retry later - if err == nil { + if err1 == nil { // Remote telemetry enabled or empty static URI, stop retrying return } - fmt.Fprintln(os.Stdout, "error creating telemetry hook", err) + fmt.Fprintln(os.Stdout, "error creating telemetry hook", err1) // Try to reenable every minute } }() @@ -343,18 +350,18 @@ func run() int { if peerOverrideArray != nil { phonebookAddresses = peerOverrideArray } else { - ex, err := os.Executable() - if err != nil { - log.Errorf("cannot locate node executable: %s", err) + ex, err1 := os.Executable() + if err1 != nil { + log.Errorf("cannot locate node executable: %s", err1) } else { phonebookDirs := []string{filepath.Dir(ex), dataDir} for _, phonebookDir := range phonebookDirs { - phonebookAddresses, err = config.LoadPhonebook(phonebookDir) - if err == nil { + phonebookAddresses, err1 = config.LoadPhonebook(phonebookDir) + if err1 == nil { log.Debugf("Static phonebook loaded from %s", phonebookDir) break } else { - log.Debugf("Cannot load static phonebook from %s dir: %v", phonebookDir, err) + log.Debugf("Cannot load static phonebook from %s dir: %v", phonebookDir, err1) } } } @@ -364,7 +371,7 @@ func run() int { cfg.LogSizeLimit = 0 } - err = s.Initialize(cfg, phonebookAddresses, string(genesisText)) + err = s.Initialize(cfg, phonebookAddresses, string(genesisText), migrationResults) if err != nil { fmt.Fprintln(os.Stderr, err) log.Error(err) diff --git a/cmd/algofix/main.go b/cmd/algofix/main.go index 09df524928..d3bb997c3d 100644 --- a/cmd/algofix/main.go +++ b/cmd/algofix/main.go @@ -162,18 +162,18 @@ func processFile(filename string, useStdin bool) error { // AST changed. // Print and parse, to update any missing scoping // or position information for subsequent fixers. - newSrc, err := gofmtFile(newFile) - if err != nil { - return err + newSrc, err1 := gofmtFile(newFile) + if err1 != nil { + return err1 } - newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode) - if err != nil { + newFile, err1 = parser.ParseFile(fset, filename, newSrc, parserMode) + if err1 != nil { if debug { fmt.Printf("%s", newSrc) - report(err) + report(err1) os.Exit(exitCode) } - return err + return err1 } } } diff --git a/cmd/algofix/typecheck.go b/cmd/algofix/typecheck.go index 2b55355a26..e17cbf5963 100644 --- a/cmd/algofix/typecheck.go +++ b/cmd/algofix/typecheck.go @@ -9,6 +9,7 @@ import ( "go/ast" "go/parser" "go/token" + "maps" "os" "os/exec" "path/filepath" @@ -258,10 +259,7 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass if !copied { copied = true // Copy map lazily: it's time. 
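// A minimal sketch, not part of this patch, of the maps.Clone replacement shown in
// the cmd/algofix/typecheck.go hunk above: maps.Clone (standard library, Go 1.21+)
// produces the same shallow copy as the removed hand-written loop.
package main

import (
	"fmt"
	"maps"
)

func main() {
	original := map[string]int{"a": 1, "b": 2}

	// Before: manual shallow copy.
	manual := make(map[string]int, len(original))
	for k, v := range original {
		manual[k] = v
	}

	// After: the same shallow copy in one call.
	cloned := maps.Clone(original)

	cloned["a"] = 99 // does not affect the original map
	fmt.Println(original["a"], manual["a"], cloned["a"]) // 1 1 99
}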
- cfg1.Type = make(map[string]*Type) - for k, v := range cfg.Type { - cfg1.Type[k] = v - } + cfg1.Type = maps.Clone(cfg.Type) } t := &Type{Field: map[string]string{}} cfg1.Type[s.Name.Name] = t diff --git a/cmd/algoh/blockWatcher.go b/cmd/algoh/blockWatcher.go index 13b1716d4d..94fc9b088d 100644 --- a/cmd/algoh/blockWatcher.go +++ b/cmd/algoh/blockWatcher.go @@ -20,6 +20,7 @@ import ( "sync" "time" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/rpcs" @@ -28,7 +29,7 @@ import ( var log = logging.Base() type blockListener interface { - init(uint64) + init(basics.Round) onBlock(rpcs.EncodedBlockCert) } @@ -71,7 +72,7 @@ func runBlockWatcher(watchers []blockListener, client Client, abort <-chan struc } } -func (bw *blockWatcher) run(watchers []blockListener, stallDetect time.Duration, curBlock uint64) bool { +func (bw *blockWatcher) run(watchers []blockListener, stallDetect time.Duration, curBlock basics.Round) bool { lastBlock := time.Now() for { // Inner loop needed during catchup. @@ -112,7 +113,7 @@ func (bw *blockWatcher) run(watchers []blockListener, stallDetect time.Duration, } // This keeps retrying forever, or until an abort signal is received. -func (bw *blockWatcher) getLastRound() (uint64, bool) { +func (bw *blockWatcher) getLastRound() (basics.Round, bool) { for { status, err := bw.client.Status() if err != nil { @@ -125,7 +126,7 @@ func (bw *blockWatcher) getLastRound() (uint64, bool) { } } -func (bw *blockWatcher) blockUntilReady() (curBlock uint64, ok bool) { +func (bw *blockWatcher) blockUntilReady() (curBlock basics.Round, ok bool) { curBlock, ok = bw.blockIfStalled() if !ok { return @@ -135,7 +136,7 @@ func (bw *blockWatcher) blockUntilReady() (curBlock uint64, ok bool) { } // blockIfStalled keeps checking status until the LastRound updates. -func (bw *blockWatcher) blockIfStalled() (uint64, bool) { +func (bw *blockWatcher) blockIfStalled() (basics.Round, bool) { curBlock, ok := bw.getLastRound() if !ok { return 0, false @@ -160,7 +161,7 @@ func (bw *blockWatcher) blockIfStalled() (uint64, bool) { } // blockIfCatchup blocks until the lastBlock stops quickly changing. 
An initial block is passed -func (bw *blockWatcher) blockIfCatchup(start uint64) (uint64, bool) { +func (bw *blockWatcher) blockIfCatchup(start basics.Round) (basics.Round, bool) { last := start for { diff --git a/cmd/algoh/blockWatcher_test.go b/cmd/algoh/blockWatcher_test.go index bf2193035c..33e0235ddd 100644 --- a/cmd/algoh/blockWatcher_test.go +++ b/cmd/algoh/blockWatcher_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" @@ -112,7 +113,7 @@ type testlistener struct { blockCount uint32 } -func (l *testlistener) init(block uint64) { +func (l *testlistener) init(block basics.Round) { atomic.AddUint32(&(l.initCount), 1) } diff --git a/cmd/algoh/blockstats.go b/cmd/algoh/blockstats.go index 2bc50d0620..fecb5295c7 100644 --- a/cmd/algoh/blockstats.go +++ b/cmd/algoh/blockstats.go @@ -19,6 +19,7 @@ package main import ( "time" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/rpcs" ) @@ -31,7 +32,7 @@ type blockstats struct { lastBlockTime time.Time } -func (stats *blockstats) init(block uint64) { +func (stats *blockstats) init(block basics.Round) { } func (stats *blockstats) onBlock(block rpcs.EncodedBlockCert) { diff --git a/cmd/algoh/client.go b/cmd/algoh/client.go index 463445daf8..2f610d529f 100644 --- a/cmd/algoh/client.go +++ b/cmd/algoh/client.go @@ -20,12 +20,13 @@ import ( "context" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/go-algorand/data/basics" ) // Client is a minimal interface for the RestClient type Client interface { Status() (model.NodeStatusResponse, error) - RawBlock(round uint64) ([]byte, error) + RawBlock(round basics.Round) ([]byte, error) GetGoRoutines(ctx context.Context) (string, error) HealthCheck() error } diff --git a/cmd/algoh/deadman.go b/cmd/algoh/deadman.go index 4ba1769663..dc1952205a 100644 --- a/cmd/algoh/deadman.go +++ b/cmd/algoh/deadman.go @@ -23,13 +23,14 @@ import ( "time" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/rpcs" ) type deadManWatcher struct { timeout time.Duration - newBlockChan chan uint64 + newBlockChan chan basics.Round uploadOnError bool client Client done <-chan struct{} @@ -47,7 +48,7 @@ func makeDeadManWatcher(timeout int64, client Client, uploadOnError bool, done < return deadManWatcher{ timeout: deadManTime, - newBlockChan: make(chan uint64), + newBlockChan: make(chan basics.Round), client: client, uploadOnError: uploadOnError, done: done, @@ -56,11 +57,11 @@ func makeDeadManWatcher(timeout int64, client Client, uploadOnError bool, done < } } -func (w deadManWatcher) init(initBlock uint64) { +func (w deadManWatcher) init(initBlock basics.Round) { go w.run(initBlock) } -func (w deadManWatcher) run(initBlock uint64) { +func (w deadManWatcher) run(initBlock basics.Round) { defer w.wg.Done() latestBlock := initBlock @@ -89,10 +90,10 @@ func (w deadManWatcher) run(initBlock uint64) { } func (w deadManWatcher) onBlock(block rpcs.EncodedBlockCert) { - w.newBlockChan <- uint64(block.Block.BlockHeader.Round) + w.newBlockChan <- block.Block.BlockHeader.Round } -func (w deadManWatcher) reportDeadManTimeout(curBlock uint64) (err error) { +func (w deadManWatcher) reportDeadManTimeout(curBlock 
basics.Round) (err error) { var details telemetryspec.DeadManTriggeredEventDetails if w.algodConfig.EnableProfiler { goRoutines, err := getGoRoutines(w.client) @@ -101,7 +102,7 @@ func (w deadManWatcher) reportDeadManTimeout(curBlock uint64) (err error) { } details = telemetryspec.DeadManTriggeredEventDetails{ Timeout: int64(w.timeout.Seconds()), - CurrentBlock: curBlock, + CurrentBlock: uint64(curBlock), GoRoutines: goRoutines, } } else { @@ -111,7 +112,7 @@ func (w deadManWatcher) reportDeadManTimeout(curBlock uint64) (err error) { } details = telemetryspec.DeadManTriggeredEventDetails{ Timeout: int64(w.timeout.Seconds()), - CurrentBlock: curBlock, + CurrentBlock: uint64(curBlock), GoRoutines: healthCheck, } } diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go index 9cab7214b3..84570ab11f 100644 --- a/cmd/algoh/main.go +++ b/cmd/algoh/main.go @@ -88,7 +88,7 @@ func main() { dataDir := ensureDataDir() absolutePath, absPathErr := filepath.Abs(dataDir) - config.UpdateVersionDataDir(absolutePath) + config.DataDirectory = absolutePath if *versionCheck { fmt.Println(config.FormatVersionAndLicense()) @@ -116,7 +116,7 @@ func main() { log.Fatalf("Error validating DNSBootstrap input: %v", err) } - if _, err := os.Stat(absolutePath); err != nil { + if _, err1 := os.Stat(absolutePath); err1 != nil { reportErrorf("Data directory %s does not appear to be valid\n", dataDir) } @@ -334,7 +334,13 @@ func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirector // If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests isTest := os.Getenv("ALGOTEST") != "" if !isTest { - telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDirectory, genesis.ID()) + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDirectory, cfgDir) + config.AnnotateTelemetry(&telemetryConfig, genesis.ID()) if err != nil { fmt.Fprintln(os.Stdout, "error loading telemetry config", err) return diff --git a/cmd/algoh/mockClient.go b/cmd/algoh/mockClient.go index a7f8327d3a..a23481bb94 100644 --- a/cmd/algoh/mockClient.go +++ b/cmd/algoh/mockClient.go @@ -31,7 +31,7 @@ import ( // Helpers to initialize mockClient // ////////////////////////////////////// -func makeNodeStatuses(blocks ...uint64) (ret []model.NodeStatusResponse) { +func makeNodeStatuses(blocks ...basics.Round) (ret []model.NodeStatusResponse) { ret = make([]model.NodeStatusResponse, 0, len(blocks)) for _, block := range blocks { ret = append(ret, model.NodeStatusResponse{LastRound: block}) @@ -39,8 +39,8 @@ func makeNodeStatuses(blocks ...uint64) (ret []model.NodeStatusResponse) { return ret } -func makeBlocks(blocks ...uint64) (ret map[uint64]rpcs.EncodedBlockCert) { - ret = map[uint64]rpcs.EncodedBlockCert{} +func makeBlocks(blocks ...basics.Round) (ret map[basics.Round]rpcs.EncodedBlockCert) { + ret = map[basics.Round]rpcs.EncodedBlockCert{} for _, block := range blocks { ret[block] = rpcs.EncodedBlockCert{Block: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: basics.Round(block)}}} } @@ -51,18 +51,18 @@ func makeBlocks(blocks ...uint64) (ret map[uint64]rpcs.EncodedBlockCert) { type mockClient struct { StatusCalls int - BlockCalls map[uint64]int + BlockCalls map[basics.Round]int GetGoRoutinesCalls int HealthCheckCalls int error []error status []model.NodeStatusResponse routine []string - block map[uint64]rpcs.EncodedBlockCert + block map[basics.Round]rpcs.EncodedBlockCert } -func 
makeMockClient(error []error, status []model.NodeStatusResponse, block map[uint64]rpcs.EncodedBlockCert, routine []string) mockClient { +func makeMockClient(error []error, status []model.NodeStatusResponse, block map[basics.Round]rpcs.EncodedBlockCert, routine []string) mockClient { return mockClient{ - BlockCalls: make(map[uint64]int), + BlockCalls: make(map[basics.Round]int), error: error, status: status, block: block, @@ -93,7 +93,7 @@ func (c *mockClient) Status() (s model.NodeStatusResponse, e error) { return } -func (c *mockClient) RawBlock(block uint64) (b []byte, e error) { +func (c *mockClient) RawBlock(block basics.Round) (b []byte, e error) { c.BlockCalls[block]++ e = c.nextError() bl, ok := c.block[block] diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go index df1e23524b..e7604518ce 100644 --- a/cmd/algokey/keyreg.go +++ b/cmd/algokey/keyreg.go @@ -40,8 +40,8 @@ var keyregCmd *cobra.Command type keyregCmdParams struct { fee uint64 - firstValid uint64 - lastValid uint64 + firstValid basics.Round + lastValid basics.Round network string offline bool txFile string @@ -51,8 +51,8 @@ type keyregCmdParams struct { // There is no node to query, so we do our best here. const ( - txnLife uint64 = 1000 - minFee uint64 = 1000 + txnLife = 1000 + minFee = 1000 ) var validNetworks map[string]crypto.Digest @@ -75,11 +75,11 @@ func init() { } keyregCmd.Flags().Uint64Var(¶ms.fee, "fee", minFee, "transaction fee") - keyregCmd.Flags().Uint64Var(¶ms.firstValid, "firstvalid", 0, "first round where the transaction may be committed to the ledger") + keyregCmd.Flags().Uint64Var((*uint64)(¶ms.firstValid), "firstvalid", 0, "first round where the transaction may be committed to the ledger") if err := keyregCmd.MarkFlagRequired("firstvalid"); err != nil { panic(err) } - keyregCmd.Flags().Uint64Var(¶ms.lastValid, "lastvalid", 0, fmt.Sprintf("last round where the generated transaction may be committed to the ledger, defaults to firstvalid + %d", txnLife)) + keyregCmd.Flags().Uint64Var((*uint64)(¶ms.lastValid), "lastvalid", 0, fmt.Sprintf("last round where the generated transaction may be committed to the ledger, defaults to firstvalid + %d", txnLife)) keyregCmd.Flags().StringVar(¶ms.network, "network", "mainnet", "the network where the provided keys will be registered, one of mainnet/testnet/betanet") if err := keyregCmd.MarkFlagRequired("network"); err != nil { panic(err) @@ -194,7 +194,7 @@ func run(params keyregCmdParams) error { part = &partkey.Participation - if params.firstValid < uint64(part.FirstValid) { + if params.firstValid < part.FirstValid { return fmt.Errorf("the transaction's firstvalid round (%d) field should be set greater than or equal to the participation key's first valid round (%d). 
The network will reject key registration transactions that are set to take effect before the participation key's first valid round", params.firstValid, part.FirstValid) } } @@ -209,8 +209,7 @@ func run(params keyregCmdParams) error { // Generate go-online transaction txn = part.GenerateRegistrationTransaction( basics.MicroAlgos{Raw: params.fee}, - basics.Round(params.firstValid), - basics.Round(params.lastValid), + params.firstValid, params.lastValid, [32]byte{}, part.StateProofSecrets != nil) } else { @@ -220,8 +219,8 @@ func run(params keyregCmdParams) error { Header: transactions.Header{ Sender: accountAddress, Fee: basics.MicroAlgos{Raw: params.fee}, - FirstValid: basics.Round(params.firstValid), - LastValid: basics.Round(params.lastValid), + FirstValid: params.firstValid, + LastValid: params.lastValid, }, } } diff --git a/cmd/algokey/multisig.go b/cmd/algokey/multisig.go index bd615857a8..66322f4ed9 100644 --- a/cmd/algokey/multisig.go +++ b/cmd/algokey/multisig.go @@ -85,15 +85,15 @@ var multisigCmd = &cobra.Command{ } ver, thresh, pks := stxn.Msig.Preimage() - addr, err := crypto.MultisigAddrGen(ver, thresh, pks) - if err != nil { - fmt.Fprintf(os.Stderr, "Cannot generate multisig addr: %v\n", err) + addr, err1 := crypto.MultisigAddrGen(ver, thresh, pks) + if err1 != nil { + fmt.Fprintf(os.Stderr, "Cannot generate multisig addr: %v\n", err1) os.Exit(1) } - stxn.Msig, err = crypto.MultisigSign(stxn.Txn, addr, ver, thresh, pks, *key) - if err != nil { - fmt.Fprintf(os.Stderr, "Cannot add multisig signature: %v\n", err) + stxn.Msig, err1 = crypto.MultisigSign(stxn.Txn, addr, ver, thresh, pks, *key) + if err1 != nil { + fmt.Fprintf(os.Stderr, "Cannot add multisig signature: %v\n", err1) os.Exit(1) } @@ -147,9 +147,9 @@ var appendAuthAddrCmd = &cobra.Command{ // Convert the addresses into public keys pks := make([]crypto.PublicKey, len(params[1:])) for i, addrStr := range params[1:] { - addr, err := basics.UnmarshalChecksumAddress(addrStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Cannot decode address: %v\n", err) + addr, err1 := basics.UnmarshalChecksumAddress(addrStr) + if err1 != nil { + fmt.Fprintf(os.Stderr, "Cannot decode address: %v\n", err1) os.Exit(1) } pks[i] = crypto.PublicKey(addr) diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go index 6f017e20ff..0eb0816a71 100644 --- a/cmd/algokey/part.go +++ b/cmd/algokey/part.go @@ -31,8 +31,8 @@ import ( ) var partKeyfile string -var partFirstRound uint64 -var partLastRound uint64 +var partFirstRound basics.Round +var partLastRound basics.Round var partKeyDilution uint64 var partParent string @@ -57,7 +57,7 @@ var partGenerateCmd = &cobra.Command{ } if partKeyDilution == 0 { - partKeyDilution = account.DefaultKeyDilution(basics.Round(partFirstRound), basics.Round(partLastRound)) + partKeyDilution = account.DefaultKeyDilution(partFirstRound, partLastRound) } var err error @@ -81,7 +81,7 @@ var partGenerateCmd = &cobra.Command{ var partkey account.PersistedParticipation participationGen := func() { - partkey, err = account.FillDBWithParticipationKeys(partdb, parent, basics.Round(partFirstRound), basics.Round(partLastRound), partKeyDilution) + partkey, err = account.FillDBWithParticipationKeys(partdb, parent, partFirstRound, partLastRound, partKeyDilution) } util.RunFuncWithSpinningCursor(participationGen) @@ -185,8 +185,8 @@ func init() { partCmd.AddCommand(keyregCmd) partGenerateCmd.Flags().StringVar(&partKeyfile, "keyfile", "", "Participation key filename") - partGenerateCmd.Flags().Uint64Var(&partFirstRound, "first", 0, "First 
round for participation key") - partGenerateCmd.Flags().Uint64Var(&partLastRound, "last", 0, "Last round for participation key") + partGenerateCmd.Flags().Uint64Var((*uint64)(&partFirstRound), "first", 0, "First round for participation key") + partGenerateCmd.Flags().Uint64Var((*uint64)(&partLastRound), "last", 0, "Last round for participation key") partGenerateCmd.Flags().Uint64Var(&partKeyDilution, "dilution", 0, "Key dilution for two-level participation keys (defaults to sqrt of validity window)") partGenerateCmd.Flags().StringVar(&partParent, "parent", "", "Address of parent account") partGenerateCmd.MarkFlagRequired("first") diff --git a/cmd/algons/dnsCmd.go b/cmd/algons/dnsCmd.go index 9f432e76b1..971578e2d3 100644 --- a/cmd/algons/dnsCmd.go +++ b/cmd/algons/dnsCmd.go @@ -348,13 +348,13 @@ func doDeleteDNS(network string, noPrompt bool, excludePattern string, includePa name = service + "._tcp." + network + ".algodev.network" } - records, err := cloudflareDNS.ListDNSRecord(context.Background(), "SRV", name, "", "", "", "") + records, err1 := cloudflareDNS.ListDNSRecord(context.Background(), "SRV", name, "", "", "", "") - if err != nil { + if err1 != nil { if name != "" { - fmt.Fprintf(os.Stderr, "Error listing SRV '%s' entries: %v\n", service, err) + fmt.Fprintf(os.Stderr, "Error listing SRV '%s' entries: %v\n", service, err1) } else { - fmt.Fprintf(os.Stderr, "Error listing SRV entries: %v\n", err) + fmt.Fprintf(os.Stderr, "Error listing SRV entries: %v\n", err1) } os.Exit(1) } @@ -381,9 +381,9 @@ func doDeleteDNS(network string, noPrompt bool, excludePattern string, includePa } for _, recordType := range []string{"A", "CNAME", "TXT"} { - records, err := cloudflareDNS.ListDNSRecord(context.Background(), recordType, "", "", "", "", "") - if err != nil { - fmt.Fprintf(os.Stderr, "Error listing DNS '%s' entries: %v\n", recordType, err) + records, err1 := cloudflareDNS.ListDNSRecord(context.Background(), recordType, "", "", "", "", "") + if err1 != nil { + fmt.Fprintf(os.Stderr, "Error listing DNS '%s' entries: %v\n", recordType, err1) os.Exit(1) } for _, r := range records { diff --git a/cmd/buildtools/genesis.go b/cmd/buildtools/genesis.go index 631bb6230d..794d22d1b0 100644 --- a/cmd/buildtools/genesis.go +++ b/cmd/buildtools/genesis.go @@ -179,15 +179,15 @@ var ensureCmd = &cobra.Command{ if createRelease { // In case we're creating first one for network, ensure output directory exists - err := os.MkdirAll(releaseNetworkDir, os.ModeDir|os.FileMode(0777)) - if err != nil { - reportErrorf("Error creating release genesis output directory '%s': %v\n", releaseNetworkDir, err) + err1 := os.MkdirAll(releaseNetworkDir, os.ModeDir|os.FileMode(0777)) + if err1 != nil { + reportErrorf("Error creating release genesis output directory '%s': %v\n", releaseNetworkDir, err1) } // Make sure release genesis file exists and if it does, the hash matches its computed hash - err = ensureReleaseGenesis(sourceGenesis, releaseFile) - if err != nil { - reportErrorf("Error ensuring release genesis file '%s': %v\n", releaseFile, err) + err1 = ensureReleaseGenesis(sourceGenesis, releaseFile) + if err1 != nil { + reportErrorf("Error ensuring release genesis file '%s': %v\n", releaseFile, err1) } } else { // If the target network is custom (not well-known), don't bother with release genesis file diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index 31e27c1662..e79a86bce1 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ -424,17 +424,17 @@ func 
loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc readComplete := int64(0) for readComplete < header.Size { - bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:]) + bytesRead, err1 := tarReader.Read(balancesBlockBytes[readComplete:]) readComplete += int64(bytesRead) progress += uint64(bytesRead) - if err != nil { - if err == io.EOF { + if err1 != nil { + if err1 == io.EOF { if readComplete == header.Size { break } - err = fmt.Errorf("getPeerLedger received io.EOF while reading from tar file stream prior of reaching chunk size %d / %d", readComplete, header.Size) + err1 = fmt.Errorf("getPeerLedger received io.EOF while reading from tar file stream prior of reaching chunk size %d / %d", readComplete, header.Size) } - return fileHeader, err + return fileHeader, err1 } } err = catchupAccessor.ProcessStagingBalances(ctx, header.Name, balancesBlockBytes, &downloadProgress) diff --git a/cmd/diagcfg/telemetry.go b/cmd/diagcfg/telemetry.go index 900f4227d5..c04481319c 100644 --- a/cmd/diagcfg/telemetry.go +++ b/cmd/diagcfg/telemetry.go @@ -55,7 +55,12 @@ func maybeUpdateDataDirFromEnv() { func readTelemetryConfigOrExit() logging.TelemetryConfig { maybeUpdateDataDirFromEnv() - cfg, err := logging.ReadTelemetryConfigOrDefault(dataDir, "") + globalDir, err := config.GetGlobalConfigFileRoot() + if err != nil { + fmt.Fprintf(os.Stderr, telemetryConfigReadError, err) + os.Exit(1) + } + cfg, err := logging.ReadTelemetryConfigOrDefault(dataDir, globalDir) if err != nil { fmt.Fprintf(os.Stderr, telemetryConfigReadError, err) os.Exit(1) @@ -72,21 +77,21 @@ func saveTelemetryConfig(cfg logging.TelemetryConfig) { if dataDir != "" { // Save to dataDir and only update global config {Name,GUID} ddPath := filepath.Join(dataDir, logging.TelemetryConfigFilename) - err := cfg.Save(ddPath) - if err != nil { - fmt.Fprintf(os.Stderr, pathErrFormat, ddPath, err) + err1 := cfg.Save(ddPath) + if err1 != nil { + fmt.Fprintf(os.Stderr, pathErrFormat, ddPath, err1) os.Exit(1) } - gcfg, err := logging.LoadTelemetryConfig(globalPath) - if err != nil && !os.IsNotExist(err) { - fmt.Fprintf(os.Stderr, pathErrFormat, globalPath, err) + gcfg, err1 := logging.LoadTelemetryConfig(globalPath) + if err1 != nil && !os.IsNotExist(err1) { + fmt.Fprintf(os.Stderr, pathErrFormat, globalPath, err1) os.Exit(1) } gcfg.Name = cfg.Name gcfg.GUID = cfg.GUID - err = gcfg.Save(globalPath) - if err != nil { - fmt.Fprintf(os.Stderr, pathErrFormat, globalPath, err) + err1 = gcfg.Save(globalPath) + if err1 != nil { + fmt.Fprintf(os.Stderr, pathErrFormat, globalPath, err1) os.Exit(1) } } else { @@ -112,7 +117,12 @@ var telemetryStatusCmd = &cobra.Command{ Long: `Print the node's telemetry status`, Run: func(cmd *cobra.Command, args []string) { maybeUpdateDataDirFromEnv() - cfg, err := logging.ReadTelemetryConfigOrDefault(dataDir, "") + globalDir, err := config.GetGlobalConfigFileRoot() + if err != nil { + fmt.Fprintf(os.Stderr, telemetryConfigReadError, err) + os.Exit(1) + } + cfg, err := logging.ReadTelemetryConfigOrDefault(dataDir, globalDir) // If error loading config, can't disable / no need to disable if err != nil { diff --git a/cmd/goal/account.go b/cmd/goal/account.go index 453e1e9b35..ca8da712a7 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -26,6 +26,7 @@ import ( "path/filepath" "slices" "sort" + "strconv" "strings" "time" @@ -58,8 +59,8 @@ var ( transactionFee uint64 statusChangeLease string statusChangeTxFile string - roundFirstValid uint64 - roundLastValid uint64 + roundFirstValid 
basics.Round + roundLastValid basics.Round keyDilution uint64 threshold uint8 partKeyOutDir string @@ -160,11 +161,11 @@ func init() { changeOnlineCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different due to rekeying") changeOnlineCmd.Flags().BoolVarP(&online, "online", "o", true, "Set this account to online or offline") changeOnlineCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)") - changeOnlineCmd.Flags().Uint64VarP(&firstValid, "firstRound", "", 0, "") - changeOnlineCmd.Flags().Uint64VarP(&firstValid, "firstvalid", "", 0, "FirstValid for the status change transaction (0 for current)") - changeOnlineCmd.Flags().Uint64VarP(&numValidRounds, "validRounds", "", 0, "") - changeOnlineCmd.Flags().Uint64VarP(&numValidRounds, "validrounds", "v", 0, "The validity period for the status change transaction") - changeOnlineCmd.Flags().Uint64Var(&lastValid, "lastvalid", 0, "The last round where the transaction may be committed to the ledger") + changeOnlineCmd.Flags().Uint64VarP((*uint64)(&firstValid), "firstRound", "", 0, "") + changeOnlineCmd.Flags().Uint64VarP((*uint64)(&firstValid), "firstvalid", "", 0, "FirstValid for the status change transaction (0 for current)") + changeOnlineCmd.Flags().Uint64VarP((*uint64)(&numValidRounds), "validRounds", "", 0, "") + changeOnlineCmd.Flags().Uint64VarP((*uint64)(&numValidRounds), "validrounds", "v", 0, "The validity period for the status change transaction") + changeOnlineCmd.Flags().Uint64Var((*uint64)(&lastValid), "lastvalid", 0, "The last round where the transaction may be committed to the ledger") changeOnlineCmd.Flags().StringVarP(&statusChangeLease, "lease", "x", "", "Lease value (base64, optional): no transaction may also acquire this lease until lastvalid") changeOnlineCmd.Flags().StringVarP(&statusChangeTxFile, "txfile", "t", "", "Write status change transaction to this file") changeOnlineCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transaction to commit") @@ -174,9 +175,9 @@ func init() { // addParticipationKey flags addParticipationKeyCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account to associate with the generated partkey") addParticipationKeyCmd.MarkFlagRequired("address") - addParticipationKeyCmd.Flags().Uint64VarP(&roundFirstValid, "roundFirstValid", "", 0, "The first round for which the generated partkey will be valid") + addParticipationKeyCmd.Flags().Uint64VarP((*uint64)(&roundFirstValid), "roundFirstValid", "", 0, "The first round for which the generated partkey will be valid") addParticipationKeyCmd.MarkFlagRequired("roundFirstValid") - addParticipationKeyCmd.Flags().Uint64VarP(&roundLastValid, "roundLastValid", "", 0, "The last round for which the generated partkey will be valid") + addParticipationKeyCmd.Flags().Uint64VarP((*uint64)(&roundLastValid), "roundLastValid", "", 0, "The last round for which the generated partkey will be valid") addParticipationKeyCmd.MarkFlagRequired("roundLastValid") addParticipationKeyCmd.Flags().StringVarP(&partKeyOutDir, "outdir", "o", "", "Save participation key file to specified output directory to (for offline creation)") addParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys (defaults to sqrt of validity window)") @@ -199,14 +200,14 @@ func init() { renewParticipationKeyCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to update 
(required)") renewParticipationKeyCmd.MarkFlagRequired("address") renewParticipationKeyCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)") - renewParticipationKeyCmd.Flags().Uint64VarP(&roundLastValid, "roundLastValid", "", 0, "The last round for which the generated partkey will be valid") + renewParticipationKeyCmd.Flags().Uint64VarP((*uint64)(&roundLastValid), "roundLastValid", "", 0, "The last round for which the generated partkey will be valid") renewParticipationKeyCmd.MarkFlagRequired("roundLastValid") renewParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys") renewParticipationKeyCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transaction to commit") // renewAllParticipationKeyCmd renewAllParticipationKeyCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transactions (defaults to suggested fee)") - renewAllParticipationKeyCmd.Flags().Uint64VarP(&roundLastValid, "roundLastValid", "", 0, "The last round for which the generated partkeys will be valid") + renewAllParticipationKeyCmd.Flags().Uint64VarP((*uint64)(&roundLastValid), "roundLastValid", "", 0, "The last round for which the generated partkeys will be valid") renewAllParticipationKeyCmd.MarkFlagRequired("roundLastValid") renewAllParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys") renewAllParticipationKeyCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transaction to commit") @@ -216,11 +217,11 @@ func init() { markNonparticipatingCmd.MarkFlagRequired("address") markNonparticipatingCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from address due to rekeying") markNonparticipatingCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)") - markNonparticipatingCmd.Flags().Uint64VarP(&firstValid, "firstRound", "", 0, "") - markNonparticipatingCmd.Flags().Uint64VarP(&firstValid, "firstvalid", "", 0, "FirstValid for the status change transaction (0 for current)") - markNonparticipatingCmd.Flags().Uint64VarP(&numValidRounds, "validRounds", "", 0, "") - markNonparticipatingCmd.Flags().Uint64VarP(&numValidRounds, "validrounds", "v", 0, "The validity period for the status change transaction") - markNonparticipatingCmd.Flags().Uint64Var(&lastValid, "lastvalid", 0, "The last round where the transaction may be committed to the ledger") + markNonparticipatingCmd.Flags().Uint64VarP((*uint64)(&firstValid), "firstRound", "", 0, "") + markNonparticipatingCmd.Flags().Uint64VarP((*uint64)(&firstValid), "firstvalid", "", 0, "FirstValid for the status change transaction (0 for current)") + markNonparticipatingCmd.Flags().Uint64VarP((*uint64)(&numValidRounds), "validRounds", "", 0, "") + markNonparticipatingCmd.Flags().Uint64VarP((*uint64)(&numValidRounds), "validrounds", "v", 0, "The validity period for the status change transaction") + markNonparticipatingCmd.Flags().Uint64Var((*uint64)(&lastValid), "lastvalid", 0, "The last round where the transaction may be committed to the ledger") markNonparticipatingCmd.Flags().StringVarP(&statusChangeTxFile, "txfile", "t", "", "Write status change transaction to this file, rather than posting to network") markNonparticipatingCmd.Flags().BoolVarP(&noWaitAfterSend, 
"no-wait", "N", false, "Don't wait for transaction to commit") markNonparticipatingCmd.Flags().MarkDeprecated("firstRound", "use --firstvalid instead") @@ -922,21 +923,21 @@ var changeOnlineCmd = &cobra.Command{ firstTxRound, lastTxRound, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } err = changeAccountOnlineStatus( accountAddress, online, statusChangeTxFile, walletName, firstTxRound, lastTxRound, transactionFee, scLeaseBytes(cmd), dataDir, client, ) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } }, } func changeAccountOnlineStatus( acct string, goOnline bool, txFile string, wallet string, - firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte, + firstTxRound, lastTxRound basics.Round, fee uint64, leaseBytes [32]byte, dataDir string, client libgoal.Client, ) error { // Generate an unsigned online/offline tx @@ -1078,10 +1079,10 @@ var renewParticipationKeyCmd = &cobra.Command{ } proto := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)] - if roundLastValid <= (currentRound + proto.MaxTxnLife) { + if roundLastValid <= (currentRound + basics.Round(proto.MaxTxnLife)) { reportErrorf(errLastRoundInvalid, currentRound) } - txRoundLastValid := currentRound + proto.MaxTxnLife + txRoundLastValid := currentRound + basics.Round(proto.MaxTxnLife) // Make sure we don't already have a partkey valid for (or after) specified roundLastValid parts, err := client.ListParticipationKeys() @@ -1098,7 +1099,7 @@ var renewParticipationKeyCmd = &cobra.Command{ err = generateAndRegisterPartKey(accountAddress, currentRound, roundLastValid, txRoundLastValid, transactionFee, scLeaseBytes(cmd), keyDilution, walletName, dataDir, client) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } version := config.GetCurrentVersion() @@ -1106,7 +1107,7 @@ var renewParticipationKeyCmd = &cobra.Command{ }, } -func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound, txLastValidRound uint64, fee uint64, leaseBytes [32]byte, dilution uint64, wallet string, dataDir string, client libgoal.Client) error { +func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound, txLastValidRound basics.Round, fee uint64, leaseBytes [32]byte, dilution uint64, wallet string, dataDir string, client libgoal.Client) error { // Generate a participation keys database and install it var part algodAcct.Participation var keyPath string @@ -1152,7 +1153,7 @@ var renewAllParticipationKeyCmd = &cobra.Command{ }, } -func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, leaseBytes [32]byte, dilution uint64, wallet string) error { +func renewPartKeysInDir(dataDir string, lastValidRound basics.Round, fee uint64, leaseBytes [32]byte, dilution uint64, wallet string) error { client := ensureAlgodClient(dataDir) // Build list of accounts to renew from all accounts with part keys present @@ -1182,10 +1183,10 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease } proto := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)] - if lastValidRound <= (currentRound + proto.MaxTxnLife) { + if lastValidRound <= (currentRound + basics.Round(proto.MaxTxnLife)) { return fmt.Errorf(errLastRoundInvalid, currentRound) } - txLastValidRound := currentRound + proto.MaxTxnLife + txLastValidRound := currentRound + basics.Round(proto.MaxTxnLife) var anyErrors bool @@ -1217,17 +1218,13 @@ func 
renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease return nil } -func maxRound(current uint64, next *uint64) uint64 { +func maxRound(current basics.Round, next *basics.Round) basics.Round { if next != nil && *next > current { return *next } return current } -func uintToStr(number uint64) string { - return fmt.Sprintf("%d", number) -} - var listParticipationKeysCmd = &cobra.Command{ Use: "listpartkeys", Short: "List participation keys summary", @@ -1243,8 +1240,9 @@ var listParticipationKeysCmd = &cobra.Command{ } // Squeezed this into 77 characters. - rowFormat := "%-10s %-11s %-15s %10s %11s %10s\n" - fmt.Printf(rowFormat, "Registered", "Account", "ParticipationID", "Last Used", "First round", "Last round") + hdrFormat := "%-10s %-11s %-15s %10s %11s %10s\n" + rowFormat := "%-10s %-11s %-15s %10s %11d %10d\n" + fmt.Printf(hdrFormat, "Registered", "Account", "ParticipationID", "Last Used", "First round", "Last round") for _, part := range parts { onlineAccountInfo, err := client.AccountInformation(part.Address, false) if err == nil { @@ -1275,17 +1273,14 @@ var listParticipationKeysCmd = &cobra.Command{ lastUsed := maxRound(0, part.LastVote) lastUsed = maxRound(lastUsed, part.LastBlockProposal) lastUsed = maxRound(lastUsed, part.LastStateProof) - lastUsedString := "N/A" - if lastUsed != 0 { - lastUsedString = uintToStr(lastUsed) - } + lastUsedString := roundOrNA(&lastUsed) fmt.Printf(rowFormat, onlineInfoStr, fmt.Sprintf("%s...%s", part.Address[:4], part.Address[len(part.Address)-4:]), fmt.Sprintf("%s...", part.Id[:8]), lastUsedString, - uintToStr(part.Key.VoteFirstValid), - uintToStr(part.Key.VoteLastValid)) + part.Key.VoteFirstValid, + part.Key.VoteLastValid) } } }, @@ -1467,11 +1462,11 @@ var importRootKeysCmd = &cobra.Command{ }, } -func strOrNA(value *uint64) string { - if value == nil { +func roundOrNA(value *basics.Round) string { + if value == nil || *value == 0 { return "N/A" } - return uintToStr(*value) + return strconv.FormatUint(uint64(*value), 10) } var partkeyInfoCmd = &cobra.Command{ @@ -1494,12 +1489,12 @@ var partkeyInfoCmd = &cobra.Command{ fmt.Println() fmt.Printf("Participation ID: %s\n", part.Id) fmt.Printf("Parent address: %s\n", part.Address) - fmt.Printf("Last vote round: %s\n", strOrNA(part.LastVote)) - fmt.Printf("Last block proposal round: %s\n", strOrNA(part.LastBlockProposal)) + fmt.Printf("Last vote round: %s\n", roundOrNA(part.LastVote)) + fmt.Printf("Last block proposal round: %s\n", roundOrNA(part.LastBlockProposal)) // PKI TODO: enable with state proof support. 
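// A minimal sketch, not part of this patch, of the idea behind the uint64 ->
// basics.Round migration running through these hunks: a defined integer type
// documents intent and lets the compiler reject mixed-up arguments, while explicit
// Round(n) / uint64(r) conversions remain available at the boundaries. The type
// Round and the helpers below are illustrative stand-ins, not the real basics package.
package main

import (
	"fmt"
	"strconv"
)

type Round uint64

// roundOrNA mirrors the printing pattern above: nil or zero means "never used".
func roundOrNA(r *Round) string {
	if r == nil || *r == 0 {
		return "N/A"
	}
	return strconv.FormatUint(uint64(*r), 10)
}

// lastValid only accepts Round, so a caller cannot silently pass an unrelated uint64 counter.
func lastValid(current Round, life uint64) Round {
	return current + Round(life) // mixing Round and uint64 needs an explicit conversion
}

func main() {
	current := Round(1000)
	fmt.Println(lastValid(current, 1000)) // 2000
	fmt.Println(current + 10)             // untyped constants still combine freely: 1010

	var never *Round
	used := Round(42)
	fmt.Println(roundOrNA(never), roundOrNA(&used)) // N/A 42
}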
//fmt.Printf("Last state proof round: %s\n", strOrNA(part.LastStateProof)) - fmt.Printf("Effective first round: %s\n", strOrNA(part.EffectiveFirstValid)) - fmt.Printf("Effective last round: %s\n", strOrNA(part.EffectiveLastValid)) + fmt.Printf("Effective first round: %s\n", roundOrNA(part.EffectiveFirstValid)) + fmt.Printf("Effective last round: %s\n", roundOrNA(part.EffectiveLastValid)) fmt.Printf("First round: %d\n", part.Key.VoteFirstValid) fmt.Printf("Last round: %d\n", part.Key.VoteLastValid) fmt.Printf("Key dilution: %d\n", part.Key.VoteKeyDilution) diff --git a/cmd/goal/accountsList.go b/cmd/goal/accountsList.go index ab93bb5ce8..4276da0248 100644 --- a/cmd/goal/accountsList.go +++ b/cmd/goal/accountsList.go @@ -20,10 +20,10 @@ import ( "encoding/json" "fmt" "os" - "os/user" "path/filepath" "strings" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/libgoal" @@ -63,11 +63,11 @@ func (accountList *AccountsList) accountListFileName() string { if libgoal.AlgorandDataIsPrivate(dataDir) { return filepath.Join(dataDir, gid, "accountList.json") } - cu, err := user.Current() + cfgRoot, err := config.GetGlobalConfigFileRoot() if err != nil { - reportErrorln("could not get current user info") + reportErrorf("unable to find config root: %v", err) } - return filepath.Join(cu.HomeDir, ".algorand", gid, "accountList.json") + return filepath.Join(cfgRoot, gid, "accountList.json") } // isDefault returns true, if the account is marked is default, false otherwise. If account doesn't exist isDefault diff --git a/cmd/goal/application.go b/cmd/goal/application.go index 0735043c8f..13ddd32d6b 100644 --- a/cmd/goal/application.go +++ b/cmd/goal/application.go @@ -43,7 +43,7 @@ import ( ) var ( - appIdx uint64 + appIdx basics.AppIndex appCreator string approvalProgFile string @@ -141,15 +141,15 @@ func init() { // Can't use PersistentFlags on the root because for some reason marking // a root command as required with MarkPersistentFlagRequired isn't // working - callAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - optInAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - closeOutAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - clearAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - deleteAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - readStateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - updateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - infoAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") - methodAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") + callAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + optInAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + closeOutAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + clearAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + deleteAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + readStateAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + updateAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + infoAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") + methodAppCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") // Add common 
transaction flags to all txn-generating app commands addTxnFlags(createAppCmd) @@ -286,7 +286,7 @@ func translateBoxRefs(input []boxRef, foreignApps []uint64) []transactions.BoxRe // put the appIdx in foreignApps, and then used the appIdx here // (rather than 0), then maybe they really want to use it in the // transaction as the full number. Though it's hard to see why. - if !found && tbr.appID == appIdx { + if !found && tbr.appID == uint64(appIdx) { index = 0 found = true } @@ -495,9 +495,9 @@ var createAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - txn, err := waitForCommit(client, txid, lv) - if err != nil { - reportErrorf(err.Error()) + txn, err1 := waitForCommit(client, txid, lv) + if err1 != nil { + reportErrorln(err1.Error()) } if txn.ApplicationIndex != nil && *txn.ApplicationIndex != 0 { reportInfof("Created app with app index %d", *txn.ApplicationIndex) @@ -1033,7 +1033,7 @@ var infoAppCmd = &cobra.Command{ params := meta.Params fmt.Printf("Application ID: %d\n", appIdx) - fmt.Printf("Application account: %v\n", basics.AppIndex(appIdx).Address()) + fmt.Printf("Application account: %v\n", appIdx.Address()) fmt.Printf("Creator: %v\n", params.Creator) fmt.Printf("Approval hash: %v\n", basics.Address(logic.HashProgram(params.ApprovalProgram))) fmt.Printf("Clear hash: %v\n", basics.Address(logic.HashProgram(params.ClearStateProgram))) @@ -1103,7 +1103,7 @@ func populateMethodCallTxnArgs(types []string, values []string) ([]transactions. // into the appropriate foreign array. Their placement will be as compact as possible, which means // values will be deduplicated and any value that is the sender or the current app will not be added // to the foreign array. 
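// A minimal sketch, not part of this patch, of the flag-binding pattern used in the
// hunks above: a flag variable declared with a defined type (basics.AppIndex,
// basics.Round, ...) is still registered through a plain *uint64 by converting the
// pointer, which is legal because the defined type has uint64 as its underlying type.
// The standard library flag package stands in here for cobra/pflag, whose Uint64Var
// has the same shape; the type AppIndex below is an illustrative stand-in.
package main

import (
	"flag"
	"fmt"
)

type AppIndex uint64

var appIdx AppIndex

func main() {
	// (*uint64)(&appIdx) converts the pointer, it does not copy the value: the flag
	// package writes straight into appIdx, and the rest of the program keeps working
	// with the stronger AppIndex type.
	flag.Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID")
	flag.Parse()
	fmt.Printf("app-id = %d (%T)\n", appIdx, appIdx)
}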
-func populateMethodCallReferenceArgs(sender string, currentApp uint64, types []string, values []string, accounts *[]string, apps *[]uint64, assets *[]uint64) ([]int, error) { +func populateMethodCallReferenceArgs(sender string, currentApp basics.AppIndex, types []string, values []string, accounts *[]string, apps *[]uint64, assets *[]uint64) ([]int, error) { resolvedIndexes := make([]int, len(types)) for i, value := range values { @@ -1132,7 +1132,7 @@ func populateMethodCallReferenceArgs(sender string, currentApp uint64, types []s if err != nil { return nil, fmt.Errorf("Unable to parse application ID '%s': %s", value, err) } - if appID == currentApp { + if appID == uint64(currentApp) { resolved = 0 } else { duplicate := false diff --git a/cmd/goal/application_test.go b/cmd/goal/application_test.go index bccc9f352f..6d72195621 100644 --- a/cmd/goal/application_test.go +++ b/cmd/goal/application_test.go @@ -18,6 +18,7 @@ package main import ( "fmt" + "slices" "testing" "github.com/algorand/go-algorand/test/partitiontest" @@ -28,14 +29,6 @@ func TestParseMethodArgJSONtoByteSlice(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - makeRepeatSlice := func(size int, value string) []string { - slice := make([]string, size) - for i := range slice { - slice[i] = value - } - return slice - } - tests := []struct { argTypes []string jsonArgs []string @@ -57,7 +50,7 @@ func TestParseMethodArgJSONtoByteSlice(t *testing.T) { expectedAppArgs: [][]byte{{100}, {255, 255}}, }, { - argTypes: makeRepeatSlice(15, "string"), + argTypes: slices.Repeat([]string{"string"}, 15), jsonArgs: []string{ `"a"`, `"b"`, @@ -94,7 +87,7 @@ func TestParseMethodArgJSONtoByteSlice(t *testing.T) { }, }, { - argTypes: makeRepeatSlice(16, "string"), + argTypes: slices.Repeat([]string{"string"}, 16), jsonArgs: []string{ `"a"`, `"b"`, diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go index 1625b13676..f487d0f028 100644 --- a/cmd/goal/asset.go +++ b/cmd/goal/asset.go @@ -28,7 +28,7 @@ import ( ) var ( - assetID uint64 + assetID basics.AssetIndex assetCreator string assetTotal uint64 assetDecimals uint32 @@ -84,12 +84,12 @@ func init() { destroyAssetCmd.Flags().StringVar(&assetManager, "manager", "", "Manager account to issue the destroy transaction (defaults to creator)") destroyAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Creator account address for asset to destroy") - destroyAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "Asset ID to destroy") + destroyAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "Asset ID to destroy") destroyAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of asset to destroy") configAssetCmd.Flags().StringVar(&assetManager, "manager", "", "Manager account to issue the config transaction") configAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address for asset to configure (defaults to manager)") - configAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "Asset ID to configure") + configAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "Asset ID to configure") configAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of asset to configure") configAssetCmd.Flags().StringVar(&assetNewManager, "new-manager", "", "New manager address") configAssetCmd.Flags().StringVar(&assetNewReserve, "new-reserve", "", "New reserve address") @@ -99,7 +99,7 @@ func init() { sendAssetCmd.Flags().StringVar(&assetClawback, "clawback", "", "Address to issue a clawback transaction from (defaults to no clawback)") 
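// A minimal sketch, not part of this patch, of the slices.Repeat call that replaces
// the hand-rolled makeRepeatSlice helper in the application_test.go hunk above
// (slices.Repeat is available from Go 1.23).
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Before: build a slice of N identical elements with an explicit loop.
	manual := make([]string, 15)
	for i := range manual {
		manual[i] = "string"
	}

	// After: repeat a one-element slice 15 times.
	repeated := slices.Repeat([]string{"string"}, 15)

	fmt.Println(len(repeated), slices.Equal(manual, repeated)) // 15 true
}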
sendAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address for asset creator") - sendAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset being transferred") + sendAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "ID of the asset being transferred") sendAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of the asset being transferred") sendAssetCmd.Flags().StringVarP(&account, "from", "f", "", "Account address to send the money from (if not specified, uses default account)") sendAssetCmd.Flags().StringVarP(&toAddress, "to", "t", "", "Address to send to money to (required)") @@ -110,7 +110,7 @@ func init() { freezeAssetCmd.Flags().StringVar(&assetFreezer, "freezer", "", "Address to issue a freeze transaction from") freezeAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address for asset creator") - freezeAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset being frozen") + freezeAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "ID of the asset being frozen") freezeAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of the asset being frozen") freezeAssetCmd.Flags().StringVar(&account, "account", "", "Account address to freeze/unfreeze") freezeAssetCmd.Flags().BoolVar(&assetFrozen, "freeze", false, "Freeze or unfreeze") @@ -119,7 +119,7 @@ func init() { freezeAssetCmd.MarkFlagRequired("freeze") optinAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of the asset being accepted") - optinAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset being accepted") + optinAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "ID of the asset being accepted") optinAssetCmd.Flags().StringVarP(&account, "account", "a", "", "Account address to opt in to using the asset (if not specified, uses default account)") optinAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address for asset creator") @@ -131,7 +131,7 @@ func init() { addTxnFlags(freezeAssetCmd) addTxnFlags(optinAssetCmd) - infoAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset to look up") + infoAssetCmd.Flags().Uint64Var((*uint64)(&assetID), "assetid", 0, "ID of the asset to look up") infoAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "DEPRECATED! 
Unit name of the asset to look up") infoAssetCmd.Flags().StringVar(&assetUnitName, "unitname", "", "Unit name of the asset to look up") infoAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address of the asset creator") @@ -321,9 +321,9 @@ var createAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - txn, err := waitForCommit(client, txid, lv) - if err != nil { - reportErrorf(err.Error()) + txn, err1 := waitForCommit(client, txid, lv) + if err1 != nil { + reportErrorln(err1.Error()) } if txn.AssetIndex != nil && *txn.AssetIndex != 0 { reportInfof("Created asset with asset index %d", *txn.AssetIndex) @@ -332,7 +332,7 @@ var createAssetCmd = &cobra.Command{ } else { err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, @@ -408,7 +408,7 @@ var destroyAssetCmd = &cobra.Command{ } else { err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, @@ -501,7 +501,7 @@ var configAssetCmd = &cobra.Command{ } else { err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, @@ -588,7 +588,7 @@ var sendAssetCmd = &cobra.Command{ } else { err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, @@ -657,7 +657,7 @@ var freezeAssetCmd = &cobra.Command{ } else { err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, diff --git a/cmd/goal/box.go b/cmd/goal/box.go index bc98b02061..12288d3fff 100644 --- a/cmd/goal/box.go +++ b/cmd/goal/box.go @@ -31,7 +31,7 @@ func init() { appBoxCmd.AddCommand(appBoxInfoCmd) appBoxCmd.AddCommand(appBoxListCmd) - appBoxCmd.PersistentFlags().Uint64Var(&appIdx, "app-id", 0, "Application ID") + appBoxCmd.PersistentFlags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") appBoxCmd.MarkFlagRequired("app-id") appBoxInfoCmd.Flags().StringVarP(&boxName, "name", "n", "", "Application box name. Use the same form as app-arg to name the box.") diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index 165c37ed9c..2112a5f736 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -69,11 +69,11 @@ var ( requestFilename string requestOutFilename string - simulateStartRound uint64 + simulateStartRound basics.Round simulateAllowEmptySignatures bool simulateAllowMoreLogging bool simulateAllowMoreOpcodeBudget bool - simulateExtraOpcodeBudget uint64 + simulateExtraOpcodeBudget int simulateFullTrace bool simulateEnableRequestTrace bool @@ -165,11 +165,11 @@ func init() { simulateCmd.Flags().StringVar(&requestFilename, "request", "", "Simulate request object to run. Mutually exclusive with --txfile") simulateCmd.Flags().StringVar(&requestOutFilename, "request-only-out", "", "Filename for writing simulate request object. If provided, the command will only write the request object and exit. No simulation will happen") simulateCmd.Flags().StringVarP(&outFilename, "result-out", "o", "", "Filename for writing simulation result") - simulateCmd.Flags().Uint64Var(&simulateStartRound, "round", 0, "Specify the round after which the simulation will take place. 
If not specified, the simulation will take place after the latest round.") + simulateCmd.Flags().Uint64Var((*uint64)(&simulateStartRound), "round", 0, "Specify the round after which the simulation will take place. If not specified, the simulation will take place after the latest round.") simulateCmd.Flags().BoolVar(&simulateAllowEmptySignatures, "allow-empty-signatures", false, "Allow transactions without signatures to be simulated as if they had correct signatures") simulateCmd.Flags().BoolVar(&simulateAllowMoreLogging, "allow-more-logging", false, "Lift the limits on log opcode during simulation") simulateCmd.Flags().BoolVar(&simulateAllowMoreOpcodeBudget, "allow-more-opcode-budget", false, "Apply max extra opcode budget for apps per transaction group (default 320000) during simulation") - simulateCmd.Flags().Uint64Var(&simulateExtraOpcodeBudget, "extra-opcode-budget", 0, "Apply extra opcode budget for apps per transaction group during simulation") + simulateCmd.Flags().IntVar(&simulateExtraOpcodeBudget, "extra-opcode-budget", 0, "Apply extra opcode budget for apps per transaction group during simulation") simulateCmd.Flags().BoolVar(&simulateFullTrace, "full-trace", false, "Enable all options for simulation execution trace") simulateCmd.Flags().BoolVar(&simulateEnableRequestTrace, "trace", false, "Enable simulation time execution trace of app calls") @@ -190,7 +190,7 @@ var clerkCmd = &cobra.Command{ }, } -func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) (txn model.PendingTransactionResponse, err error) { +func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound basics.Round) (txn model.PendingTransactionResponse, err error) { // Get current round information stat, err := client.Status() if err != nil { @@ -408,20 +408,20 @@ var sendCmd = &cobra.Command{ // (we don't use accountList.getAddressByName because this address likely doesn't correspond to an account) var rekeyTo basics.Address if rekeyToAddress != "" { - var err error - rekeyTo, err = basics.UnmarshalChecksumAddress(rekeyToAddress) - if err != nil { - reportErrorf(err.Error()) + var err1 error + rekeyTo, err1 = basics.UnmarshalChecksumAddress(rekeyToAddress) + if err1 != nil { + reportErrorln(err1.Error()) } } client := ensureFullClient(dataDir) firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } payment, err := client.ConstructPayment( fromAddressResolved, toAddressResolved, fee, amount, noteBytes, closeToAddressResolved, - leaseBytes, basics.Round(firstValid), basics.Round(lastValid), + leaseBytes, firstValid, lastValid, ) if err != nil { reportErrorf(errorConstructingTX, err) @@ -449,9 +449,9 @@ var sendCmd = &cobra.Command{ var stx transactions.SignedTxn if lsig.Logic != nil { - params, err := client.SuggestedParams() - if err != nil { - reportErrorf(errorNodeStatus, err) + params, err1 := client.SuggestedParams() + if err1 != nil { + reportErrorf(errorNodeStatus, err1) } proto := protocol.ConsensusVersion(params.ConsensusVersion) uncheckedTxn := transactions.SignedTxn{ @@ -463,12 +463,12 @@ var sendCmd = &cobra.Command{ CurrentProtocol: proto, }, } - groupCtx, err := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, &blockHeader, nil, nil) - if err == nil { - err = verify.LogicSigSanityCheck(0, groupCtx) + groupCtx, err1 := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, &blockHeader, nil, nil) + if 
err1 == nil { + err1 = verify.LogicSigSanityCheck(0, groupCtx) } - if err != nil { - reportErrorf("%s: txn error %s", outFilename, err) + if err1 != nil { + reportErrorf("%s: txn error %s", outFilename, err1) } stx = uncheckedTxn } else if program != nil { @@ -501,24 +501,24 @@ var sendCmd = &cobra.Command{ reportErrorf(msigParseError, "Not enough arguments to create the multisig address.\nPlease make sure to specify the threshold and at least 2 addresses\n") } - threshold, err := strconv.ParseUint(params[0], 10, 8) - if err != nil || threshold < 1 || threshold > 255 { + threshold, err1 := strconv.ParseUint(params[0], 10, 8) + if err1 != nil || threshold < 1 || threshold > 255 { reportErrorf(msigParseError, "Failed to parse the threshold. Make sure it's a number between 1 and 255") } // Convert the addresses into public keys pks := make([]crypto.PublicKey, len(params[1:])) for i, addrStr := range params[1:] { - addr, err := basics.UnmarshalChecksumAddress(addrStr) - if err != nil { - reportErrorf(failDecodeAddressError, err) + addr, err2 := basics.UnmarshalChecksumAddress(addrStr) + if err2 != nil { + reportErrorf(failDecodeAddressError, err2) } pks[i] = crypto.PublicKey(addr) } - addr, err := crypto.MultisigAddrGen(1, uint8(threshold), pks) - if err != nil { - reportErrorf(msigParseError, err) + addr, err1 := crypto.MultisigAddrGen(1, uint8(threshold), pks) + if err1 != nil { + reportErrorf(msigParseError, err1) } // Generate the multisig and assign to the txn @@ -533,10 +533,10 @@ var sendCmd = &cobra.Command{ if outFilename == "" { // Broadcast the tx - txid, err := client.BroadcastTransaction(stx) + txid, err1 := client.BroadcastTransaction(stx) - if err != nil { - reportErrorf(errorBroadcastingTX, err) + if err1 != nil { + reportErrorf(errorBroadcastingTX, err1) } // update information from Transaction @@ -546,9 +546,9 @@ var sendCmd = &cobra.Command{ reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee) if !noWaitAfterSend { - _, err = waitForCommit(client, txid, lastValid) - if err != nil { - reportErrorf(err.Error()) + _, err1 = waitForCommit(client, txid, lastValid) + if err1 != nil { + reportErrorln(err1.Error()) } } } else { @@ -609,12 +609,12 @@ var rawsendCmd = &cobra.Command{ pendingTxns := make(map[transactions.Txid]string) for _, txgroup := range txgroups { // Broadcast the transaction - err := client.BroadcastTransactionGroup(txgroup) - if err != nil { + err1 := client.BroadcastTransactionGroup(txgroup) + if err1 != nil { for _, txn := range txgroup { - txnErrors[txn.ID()] = err.Error() + txnErrors[txn.ID()] = err1.Error() } - reportWarnf(errorBroadcastingTX, err) + reportWarnf(errorBroadcastingTX, err1) continue } @@ -1162,7 +1162,7 @@ var dryrunCmd = &cobra.Command{ } data, err := libgoal.MakeDryrunStateBytes(client, nil, stxns, accts, string(proto), dumpForDryrunFormat.String()) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } writeFile(outFilename, data, 0600) return @@ -1317,7 +1317,7 @@ var simulateCmd = &cobra.Command{ Txns: txgroup, }, }, - Round: basics.Round(simulateStartRound), + Round: simulateStartRound, AllowEmptySignatures: simulateAllowEmptySignatures, AllowMoreLogging: simulateAllowMoreLogging, AllowUnnamedResources: simulateAllowUnnamedResources, @@ -1343,7 +1343,7 @@ var simulateCmd = &cobra.Command{ Txns: txgroup, }, }, - Round: basics.Round(simulateStartRound), + Round: simulateStartRound, AllowEmptySignatures: simulateAllowEmptySignatures, AllowMoreLogging: simulateAllowMoreLogging, 
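// Editor's aside (illustrative sketch, not part of the patch; usage text abbreviated): the simulate
// and asset/app flag changes above appear to rely on basics.Round (and the asset/app index types)
// having uint64 as their underlying type, so a typed variable can still be registered with cobra's
// plain Uint64Var by converting its address:
//
//	var simulateStartRound basics.Round
//	simulateCmd.Flags().Uint64Var((*uint64)(&simulateStartRound), "round", 0, "round after which the simulation runs")
//
// Flag parsing is unchanged; only the Go-side type is, which is why the later hunks can drop the
// explicit basics.Round(...) conversions when building the simulate request.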
AllowUnnamedResources: simulateAllowUnnamedResources, diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go index 707d3ccc14..e818721153 100644 --- a/cmd/goal/commands.go +++ b/cmd/goal/commands.go @@ -23,7 +23,6 @@ import ( "io" "os" "os/exec" - "os/user" "path/filepath" "strings" @@ -260,18 +259,15 @@ func resolveKmdDataDir(dataDir string) string { algodKmdPath, _ := filepath.Abs(filepath.Join(dataDir, libgoal.DefaultKMDDataDir)) return algodKmdPath } - cu, err := user.Current() + cfgRoot, err := config.GetGlobalConfigFileRoot() if err != nil { - reportErrorf("could not look up current user while looking for kmd dir: %s", err) - } - if cu.HomeDir == "" { - reportErrorln("user has no home dir while looking for kmd dir") + reportErrorf("unable to find config root: %v", err) } genesis, err := readGenesis(dataDir) if err != nil { reportErrorf("could not read genesis.json: %s", err) } - return filepath.Join(cu.HomeDir, ".algorand", genesis.ID(), libgoal.DefaultKMDDataDir) + return filepath.Join(cfgRoot, genesis.ID(), libgoal.DefaultKMDDataDir) } func ensureCacheDir(dataDir string) string { @@ -285,7 +281,7 @@ func ensureCacheDir(dataDir string) string { return cacheDir } // Put the cache in the user's home directory - algorandDir, err := config.GetDefaultConfigFilePath() + algorandDir, err := config.GetGlobalConfigFileRoot() if err != nil { reportErrorf("config error %s", err) } @@ -361,9 +357,9 @@ func getWalletHandleMaybePassword(dataDir string, walletName string, getPassword if len(walletID) == 0 { // If we still don't have a default, check if there's only one wallet. // If there is, make it the default and continue - wallets, err := kmd.ListWallets() - if err != nil { - return nil, nil, fmt.Errorf(errCouldNotListWallets, err) + wallets, err1 := kmd.ListWallets() + if err1 != nil { + return nil, nil, fmt.Errorf(errCouldNotListWallets, err1) } if len(wallets) == 1 { // Only one wallet, so it's unambigious diff --git a/cmd/goal/common.go b/cmd/goal/common.go index 100e372049..daa7a58301 100644 --- a/cmd/goal/common.go +++ b/cmd/goal/common.go @@ -20,6 +20,7 @@ import ( "github.com/spf13/cobra" cmdutil "github.com/algorand/go-algorand/cmd/util" + "github.com/algorand/go-algorand/data/basics" ) const ( @@ -33,11 +34,11 @@ const ( var validateNoPosArgsFn = cobra.NoArgs // transaction validity period margins -var firstValid uint64 -var lastValid uint64 +var firstValid basics.Round +var lastValid basics.Round // numValidRounds specifies validity period for a transaction and used to calculate last valid round -var numValidRounds uint64 // also used in account and asset +var numValidRounds basics.Round // also used in account and asset var ( fee uint64 @@ -55,9 +56,9 @@ var dumpForDryrunFormat cmdutil.CobraStringValue = *cmdutil.MakeCobraStringValue func addTxnFlags(cmd *cobra.Command) { cmd.Flags().Uint64Var(&fee, "fee", 0, "The transaction fee (automatically determined by default), in microAlgos") - cmd.Flags().Uint64Var(&firstValid, "firstvalid", 0, "The first round where the transaction may be committed to the ledger") - cmd.Flags().Uint64Var(&numValidRounds, "validrounds", 0, "The number of rounds for which the transaction will be valid") - cmd.Flags().Uint64Var(&lastValid, "lastvalid", 0, "The last round where the transaction may be committed to the ledger") + cmd.Flags().Uint64Var((*uint64)(&firstValid), "firstvalid", 0, "The first round where the transaction may be committed to the ledger") + cmd.Flags().Uint64Var((*uint64)(&numValidRounds), "validrounds", 0, "The number of rounds for 
which the transaction will be valid") + cmd.Flags().Uint64Var((*uint64)(&lastValid), "lastvalid", 0, "The last round where the transaction may be committed to the ledger") cmd.Flags().StringVarP(&outFilename, "out", "o", "", "Write transaction to this file") cmd.Flags().BoolVarP(&sign, "sign", "s", false, "Use with -o to indicate that the dumped transaction should be signed") cmd.Flags().StringVar(¬eBase64, "noteb64", "", "Note (URL-base64 encoded)") diff --git a/cmd/goal/formatting.go b/cmd/goal/formatting.go index 69e5bb5e3f..a977d611b7 100644 --- a/cmd/goal/formatting.go +++ b/cmd/goal/formatting.go @@ -207,3 +207,11 @@ func encodeBytesAsAppCallBytes(value []byte) string { return "b64:" + base64.StdEncoding.EncodeToString(value) } + +func nilToZero[T any](valPtr *T) T { + if valPtr == nil { + var defaultV T + return defaultV + } + return *valPtr +} diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go index 32198de3de..68984428f8 100644 --- a/cmd/goal/interact.go +++ b/cmd/goal/interact.go @@ -49,12 +49,12 @@ func init() { appInteractCmd.AddCommand(appQueryCmd) appInteractCmd.PersistentFlags().StringVarP(&appHdr, "header", "", "", "Application header") - appQueryCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID") + appQueryCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID") appQueryCmd.Flags().StringVarP(&account, "from", "f", "", "Account to query state for (if omitted, query from global state)") appQueryCmd.Flags().SetInterspersed(false) appQueryCmd.MarkFlagRequired("app-id") - appExecuteCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID (if omitted, zero, which creates an application)") + appExecuteCmd.Flags().Uint64Var((*uint64)(&appIdx), "app-id", 0, "Application ID (if omitted, zero, which creates an application)") appExecuteCmd.Flags().StringVarP(&account, "from", "f", "", "Account to execute interaction from") appExecuteCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from \"from\" address due to rekeying") appExecuteCmd.Flags().SetInterspersed(false) @@ -630,9 +630,9 @@ var appExecuteCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - txn, err := waitForCommit(client, txid, lv) - if err != nil { - reportErrorf(err.Error()) + txn, err1 := waitForCommit(client, txid, lv) + if err1 != nil { + reportErrorln(err1.Error()) } if txn.ApplicationIndex != nil && *txn.ApplicationIndex != 0 { reportInfof("Created app with app index %d", *txn.ApplicationIndex) @@ -642,7 +642,7 @@ var appExecuteCmd = &cobra.Command{ // Broadcast or write transaction to file err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename) if err != nil { - reportErrorf(err.Error()) + reportErrorln(err.Error()) } } }, diff --git a/cmd/goal/ledger.go b/cmd/goal/ledger.go index 1064bd1eea..cea9ed406e 100644 --- a/cmd/goal/ledger.go +++ b/cmd/goal/ledger.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/algorand/go-algorand/cmd/util/datadir" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol/transcode" ) @@ -83,7 +84,7 @@ var blockCmd = &cobra.Command{ dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) - response, err := client.RawBlock(round) + response, err := client.RawBlock(basics.Round(round)) if err != nil { reportErrorf(errorRequestFail, err) } diff --git a/cmd/goal/logging.go b/cmd/goal/logging.go index 6120f547f4..f32cae2b57 100644 
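// Editor's aside (hedged, not part of the patch; function and variable names below are hypothetical):
// the recurring err -> err1 renames throughout these files read like shadowing hygiene. With :=, an
// inner err is a new variable that hides the enclosing one, so an outer check or a named return can
// silently miss the failure. A minimal, self-contained illustration of the hazard these renames avoid:
//
//	package main
//
//	import (
//		"fmt"
//		"strconv"
//	)
//
//	func parse(run bool) (err error) {
//		if run {
//			n, err := strconv.Atoi("x") // := declares a NEW err that shadows the named return
//			fmt.Println(n, err)         // the failure is visible only through this inner err
//		}
//		return err // the outer err was never assigned, so the caller sees nil
//	}
//
//	func main() {
//		fmt.Println(parse(true)) // prints the Atoi parse error from inside parse, then <nil>
//	}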
--- a/cmd/goal/logging.go +++ b/cmd/goal/logging.go @@ -53,7 +53,12 @@ var loggingCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { reportWarnln("`goal logging` deprecated, use `diagcfg telemetry status`") dataDir := datadir.EnsureSingleDataDir() - cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + cfg, err := logging.EnsureTelemetryConfig(&dataDir, cfgDir) // If error loading config, can't disable / no need to disable if err != nil { @@ -75,7 +80,12 @@ var enableCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { reportWarnln("`goal logging enable` deprecated, use `diagcfg telemetry enable`") dataDir := datadir.EnsureSingleDataDir() - cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + cfg, err := logging.EnsureTelemetryConfig(&dataDir, cfgDir) if err != nil { fmt.Println(err) return @@ -96,7 +106,12 @@ var disableCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { reportWarnf("`goal logging disable` deprecated, use `diagcfg telemetry disable`") dataDir := datadir.EnsureSingleDataDir() - cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + cfg, err := logging.EnsureTelemetryConfig(&dataDir, cfgDir) // If error loading config, can't disable / no need to disable if err != nil { @@ -127,7 +142,12 @@ var loggingSendCmd = &cobra.Command{ errcount := 0 var firsterr error = nil datadir.OnDataDirs(func(dataDir string) { - cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + cfg, err := logging.EnsureTelemetryConfig(&dataDir, cfgDir) if err != nil { fmt.Println(err) return diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go index cacff18fcd..3b85ba6ac2 100644 --- a/cmd/goal/multisig.go +++ b/cmd/goal/multisig.go @@ -109,13 +109,13 @@ var addSigCmd = &cobra.Command{ var msig crypto.MultisigSig if noSig { - multisigInfo, err := client.LookupMultisigAccount(wh, stxn.Txn.Sender.String()) - if err != nil { - reportErrorf(msigLookupError, err) + multisigInfo, err1 := client.LookupMultisigAccount(wh, stxn.Txn.Sender.String()) + if err1 != nil { + reportErrorf(msigLookupError, err1) } - msig, err = msigInfoToMsig(multisigInfo) - if err != nil { - reportErrorf(msigParseError, err) + msig, err1 = msigInfoToMsig(multisigInfo) + if err1 != nil { + reportErrorf(msigParseError, err1) } } else { if stxn.AuthAddr.IsZero() { diff --git a/cmd/goal/node.go b/cmd/goal/node.go index 220837e270..af98f8a9c6 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -482,28 +482,13 @@ func makeStatusString(stat model.NodeStatusResponse) string { statusString = statusString + "\n" + fmt.Sprintf(catchupStoppedOnUnsupported, stat.LastRound) } - upgradeNextProtocolVoteBefore := uint64(0) - if stat.UpgradeNextProtocolVoteBefore != nil { - upgradeNextProtocolVoteBefore = *stat.UpgradeNextProtocolVoteBefore - } + upgradeNextProtocolVoteBefore := nilToZero(stat.UpgradeNextProtocolVoteBefore) if upgradeNextProtocolVoteBefore > stat.LastRound { - upgradeVotesRequired := uint64(0) - upgradeNoVotes := uint64(0) - upgradeYesVotes := uint64(0) - upgradeVoteRounds := uint64(0) - if stat.UpgradeVotesRequired != nil { - upgradeVotesRequired = 
*stat.UpgradeVotesRequired - } - if stat.UpgradeNoVotes != nil { - upgradeNoVotes = *stat.UpgradeNoVotes - } - if stat.UpgradeYesVotes != nil { - upgradeYesVotes = *stat.UpgradeYesVotes - } - if stat.UpgradeVoteRounds != nil { - upgradeVoteRounds = *stat.UpgradeVoteRounds - } + upgradeVotesRequired := nilToZero(stat.UpgradeVotesRequired) + upgradeNoVotes := nilToZero(stat.UpgradeNoVotes) + upgradeYesVotes := nilToZero(stat.UpgradeYesVotes) + upgradeVoteRounds := nilToZero(stat.UpgradeVoteRounds) statusString = statusString + "\n" + fmt.Sprintf( infoNodeStatusConsensusUpgradeVoting, upgradeYesVotes, diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go index 0663771655..bfbad5d526 100644 --- a/cmd/goal/tealsign.go +++ b/cmd/goal/tealsign.go @@ -142,9 +142,9 @@ The base64 encoding of the signature will always be printed to stdout. Optionall progHash = logic.HashProgram(stxn.Lsig.Logic) } else { // Otherwise, the contract address is the logic hash - parsedAddr, err := basics.UnmarshalChecksumAddress(contractAddr) - if err != nil { - reportErrorf(tealsignParseAddr, err) + parsedAddr, err1 := basics.UnmarshalChecksumAddress(contractAddr) + if err1 != nil { + reportErrorf(tealsignParseAddr, err1) } // Copy parsed address as program hash diff --git a/cmd/goal/wallet.go b/cmd/goal/wallet.go index 8bc1b427c8..5dadd597c9 100644 --- a/cmd/goal/wallet.go +++ b/cmd/goal/wallet.go @@ -101,15 +101,15 @@ var newWalletCmd = &cobra.Command{ var mdk crypto.MasterDerivationKey if recoverWallet { fmt.Println(infoRecoveryPrompt) - resp, err := reader.ReadString('\n') + resp, err1 := reader.ReadString('\n') resp = strings.TrimSpace(resp) - if err != nil { - reportErrorf(errorFailedToReadResponse, err) + if err1 != nil { + reportErrorf(errorFailedToReadResponse, err1) } var key []byte - key, err = passphrase.MnemonicToKey(resp) - if err != nil { - reportErrorf(errorBadMnemonic, err) + key, err1 = passphrase.MnemonicToKey(resp) + if err1 != nil { + reportErrorf(errorBadMnemonic, err1) } // Copy the recovered key into the mdk n := copy(mdk[:], key) @@ -148,32 +148,32 @@ var newWalletCmd = &cobra.Command{ if !recoverWallet && !noDisplaySeed { // Offer to print backup seed fmt.Println(infoBackupExplanation) - resp, err := reader.ReadString('\n') + resp, err1 := reader.ReadString('\n') resp = strings.TrimSpace(resp) - if err != nil { - reportErrorf(errorFailedToReadResponse, err) + if err1 != nil { + reportErrorf(errorFailedToReadResponse, err1) } if strings.ToLower(resp) != "n" { // Get a wallet handle token - token, err := client.GetWalletHandleToken(walletID, walletPassword) - if err != nil { - reportErrorf(errorCouldntInitializeWallet, err) + token, err1 := client.GetWalletHandleToken(walletID, walletPassword) + if err1 != nil { + reportErrorf(errorCouldntInitializeWallet, err1) } // Invalidate the handle when we're done with it defer client.ReleaseWalletHandle(token) // Export the master derivation key - mdk, err := client.ExportMasterDerivationKey(token, walletPassword) - if err != nil { - reportErrorf(errorCouldntExportMDK, err) + mdk, err1 := client.ExportMasterDerivationKey(token, walletPassword) + if err1 != nil { + reportErrorf(errorCouldntExportMDK, err1) } // Convert the key to a mnemonic - mnemonic, err := passphrase.KeyToMnemonic(mdk[:]) - if err != nil { - reportErrorf(errorCouldntMakeMnemonic, err) + mnemonic, err1 := passphrase.KeyToMnemonic(mdk[:]) + if err1 != nil { + reportErrorf(errorCouldntMakeMnemonic, err1) } // Display the mnemonic to the user diff --git a/cmd/incorporate/incorporate.go 
b/cmd/incorporate/incorporate.go index 4d3e429467..3205f3ed48 100644 --- a/cmd/incorporate/incorporate.go +++ b/cmd/incorporate/incorporate.go @@ -180,8 +180,8 @@ func parseInput() (genesis bookkeeping.Genesis) { MicroAlgos: basics.MicroAlgos{Raw: record.Algos * 1e6}, VoteID: record.VoteID, SelectionID: record.SelectionID, - VoteFirstValid: basics.Round(record.VoteFirstValid), - VoteLastValid: basics.Round(record.VoteLastValid), + VoteFirstValid: record.VoteFirstValid, + VoteLastValid: record.VoteLastValid, VoteKeyDilution: record.VoteKeyDilution, }, } @@ -259,14 +259,16 @@ func parseRecord(cols []string) (rec record) { } copy(rec.VoteID[:], vote) - rec.VoteFirstValid, err = strconv.ParseUint(cols[6], 10, 64) + fv, err := strconv.ParseUint(cols[6], 10, 64) if cols[6] != "" && err != nil { log.Fatal(err) } - rec.VoteLastValid, err = strconv.ParseUint(cols[7], 10, 64) + rec.VoteFirstValid = basics.Round(fv) + lv, err := strconv.ParseUint(cols[7], 10, 64) if cols[7] != "" && err != nil { log.Fatal(err) } + rec.VoteLastValid = basics.Round(lv) rec.VoteKeyDilution, err = strconv.ParseUint(cols[8], 10, 64) if cols[8] != "" && err != nil { log.Fatal(err) @@ -282,7 +284,7 @@ type record struct { Status basics.Status SelectionID crypto.VRFVerifier VoteID crypto.OneTimeSignatureVerifier - VoteFirstValid uint64 - VoteLastValid uint64 + VoteFirstValid basics.Round + VoteLastValid basics.Round VoteKeyDilution uint64 } diff --git a/cmd/loadgenerator/config.go b/cmd/loadgenerator/config.go index b19b96c824..a696e5c6a4 100644 --- a/cmd/loadgenerator/config.go +++ b/cmd/loadgenerator/config.go @@ -22,6 +22,8 @@ import ( "net/url" "os" "strings" + + "github.com/algorand/go-algorand/data/basics" ) type config struct { @@ -34,9 +36,9 @@ type config struct { // APIToken is the API token used to communicate with the node. APIToken string // RoundModulator is the modulator used to determine of the current round is the round at which transactions need to be sent. - RoundModulator uint64 + RoundModulator basics.Round // RoundOffset is the offset used to determine of the current round is the round at which transactions need to be sent. - RoundOffset uint64 + RoundOffset basics.Round // Fee is the amount of algos that would be specified in the transaction fee field. 
Fee uint64 // TxnsToSend is the number of transactions to send in the round where (((round + RoundOffset) % RoundModulator) == 0) diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go index 9afd126d3f..eec97721c0 100644 --- a/cmd/loadgenerator/main.go +++ b/cmd/loadgenerator/main.go @@ -163,11 +163,11 @@ func main() { os.Exit(0) } -func isSpendRound(cfg config, round uint64) bool { +func isSpendRound(cfg config, round basics.Round) bool { return cfg.RoundModulator == 0 || ((round+cfg.RoundOffset)%cfg.RoundModulator == 0) } -func nextSpendRound(cfg config, round uint64) uint64 { +func nextSpendRound(cfg config, round basics.Round) basics.Round { if cfg.RoundModulator == 0 { return round } @@ -248,8 +248,8 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKeys Header: transactions.Header{ Sender: publicKeys[i%len(publicKeys)], Fee: basics.MicroAlgos{Raw: cfg.Fee}, - FirstValid: basics.Round(nodeStatus.LastRound), - LastValid: basics.Round(nodeStatus.LastRound + 2), + FirstValid: nodeStatus.LastRound, + LastValid: nodeStatus.LastRound + 2, Note: make([]byte, 4), GenesisID: vers.GenesisID, GenesisHash: genesisHash, diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go index e3188f3381..353c2b71bd 100644 --- a/cmd/netgoal/generate.go +++ b/cmd/netgoal/generate.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/gen" "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/netdeploy/remote" @@ -52,7 +53,7 @@ var accountsCount uint64 var assetsCount uint64 var applicationCount uint64 var balRange []string -var lastPartKeyRound uint64 +var lastPartKeyRound basics.Round var deterministicKeys bool func init() { @@ -80,7 +81,7 @@ func init() { generateCmd.Flags().Uint64VarP(&applicationCount, "napps", "", 7, "Application Count") generateCmd.Flags().StringArrayVar(&balRange, "bal", []string{}, "Application Count") generateCmd.Flags().BoolVarP(&deterministicKeys, "deterministic", "", false, "Whether to generate deterministic keys") - generateCmd.Flags().Uint64VarP(&lastPartKeyRound, "last-part-key-round", "", gen.DefaultGenesis.LastPartKeyRound, "LastPartKeyRound in genesis.json") + generateCmd.Flags().Uint64VarP((*uint64)(&lastPartKeyRound), "last-part-key-round", "", uint64(gen.DefaultGenesis.LastPartKeyRound), "LastPartKeyRound in genesis.json") longParts := make([]string, len(generateTemplateLines)+1) longParts[0] = generateCmd.Long @@ -111,38 +112,38 @@ template modes for -t:`, baseRelay := remote.NodeConfig{} baseNonParticipatingNode := remote.NodeConfig{} if nodeTemplatePath != "" { - fin, err := os.Open(nodeTemplatePath) - if err != nil { - reportErrorf("%s: bad node template, %s", nodeTemplatePath, err) + fin, err1 := os.Open(nodeTemplatePath) + if err1 != nil { + reportErrorf("%s: bad node template, %s", nodeTemplatePath, err1) } dec := json.NewDecoder(fin) - err = dec.Decode(&baseNode) - if err != nil { - reportErrorf("%s: bad node template, %s", nodeTemplatePath, err) + err1 = dec.Decode(&baseNode) + if err1 != nil { + reportErrorf("%s: bad node template, %s", nodeTemplatePath, err1) } } if nonParticipatingNodeTemplatePath != "" { - fin, err := os.Open(nonParticipatingNodeTemplatePath) - if err != nil { - reportErrorf("%s: bad npnode template, %s", nonParticipatingNodeTemplatePath, err) + fin, err1 := os.Open(nonParticipatingNodeTemplatePath) + if err1 != nil { + reportErrorf("%s: bad npnode template, %s", nonParticipatingNodeTemplatePath, 
err1) } dec := json.NewDecoder(fin) - err = dec.Decode(&baseNonParticipatingNode) - if err != nil { - reportErrorf("%s: bad node template, %s", nodeTemplatePath, err) + err1 = dec.Decode(&baseNonParticipatingNode) + if err1 != nil { + reportErrorf("%s: bad node template, %s", nodeTemplatePath, err1) } } else { baseNonParticipatingNode = baseNode } if relayTemplatePath != "" { - fin, err := os.Open(relayTemplatePath) - if err != nil { - reportErrorf("%s: bad relay template, %s", relayTemplatePath, err) + fin, err1 := os.Open(relayTemplatePath) + if err1 != nil { + reportErrorf("%s: bad relay template, %s", relayTemplatePath, err1) } dec := json.NewDecoder(fin) - err = dec.Decode(&baseRelay) - if err != nil { - reportErrorf("%s: bad relay template, %s", relayTemplatePath, err) + err1 = dec.Decode(&baseRelay) + if err1 != nil { + reportErrorf("%s: bad relay template, %s", relayTemplatePath, err1) } } else { baseRelay = baseNode diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go index 8638d8151f..487ac4cdcc 100644 --- a/cmd/netgoal/network.go +++ b/cmd/netgoal/network.go @@ -165,9 +165,9 @@ func runBuildNetwork() error { bootstrappedFile = resolveFile(r.BootstrappedFile, templateBaseDir) } if util.FileExists(bootstrappedFile) && bootstrapLoadingFile { - fileTemplate, err := remote.LoadBootstrappedData(bootstrappedFile) - if err != nil { - return fmt.Errorf("error resolving bootstrap file: %v", err) + fileTemplate, err1 := remote.LoadBootstrappedData(bootstrappedFile) + if err1 != nil { + return fmt.Errorf("error resolving bootstrap file: %v", err1) } net.BootstrappedNet = fileTemplate net.SetUseBootstrappedFiles(bootstrapLoadingFile) diff --git a/cmd/nodecfg/get.go b/cmd/nodecfg/get.go index df9b3ef17a..c42554da45 100644 --- a/cmd/nodecfg/get.go +++ b/cmd/nodecfg/get.go @@ -55,8 +55,8 @@ var getCmd = &cobra.Command{ reportErrorf("Target rootdir '%s' already exists", networkRootDir) } - if err := doGet(getChannel, getRootDir); err != nil { - reportErrorf("Error retrieving configuration: %v", err) + if err1 := doGet(getChannel, getRootDir); err1 != nil { + reportErrorf("Error retrieving configuration: %v", err1) } cfg, err := remote.LoadDeployedNetworkConfigFromDir(getRootDir) diff --git a/cmd/partitiontest_linter/go.mod b/cmd/partitiontest_linter/go.mod index bb2ceb5d74..93336aa3e0 100644 --- a/cmd/partitiontest_linter/go.mod +++ b/cmd/partitiontest_linter/go.mod @@ -2,7 +2,7 @@ module github.com/algorand/go-algorand/cmd/partitiontest_linter go 1.23 -toolchain go1.23.3 +toolchain go1.23.9 require ( golang.org/x/mod v0.22.0 // indirect diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go index 6a57cab766..a32c37cc88 100644 --- a/cmd/pingpong/runCmd.go +++ b/cmd/pingpong/runCmd.go @@ -280,16 +280,16 @@ var runCmd = &cobra.Command{ cfg.RandomizeDst = randomDst || cfg.RandomizeDst cfg.Quiet = quietish || cfg.Quiet if runTime != "" { - val, err := strconv.ParseUint(runTime, 10, 32) - if err != nil { - reportErrorf("Invalid value specified for --run: %v\n", err) + val, err1 := strconv.ParseUint(runTime, 10, 32) + if err1 != nil { + reportErrorf("Invalid value specified for --run: %v\n", err1) } cfg.RunTime = time.Duration(uint32(val)) * time.Second } if refreshTime != "" { - val, err := strconv.ParseUint(refreshTime, 10, 32) - if err != nil { - reportErrorf("Invalid value specified for --refresh: %v\n", err) + val, err1 := strconv.ParseUint(refreshTime, 10, 32) + if err1 != nil { + reportErrorf("Invalid value specified for --refresh: %v\n", err1) } cfg.RefreshTime = 
time.Duration(uint32(val)) * time.Second } @@ -311,8 +311,8 @@ var runCmd = &cobra.Command{ programStr = tealLight case "normal": programStr = tealNormal - bytes, err := base64.StdEncoding.DecodeString("iZWMx72KvU6Bw6sPAWQFL96YH+VMrBA0XKWD9XbZOZI=") - if err != nil { + bytes, err1 := base64.StdEncoding.DecodeString("iZWMx72KvU6Bw6sPAWQFL96YH+VMrBA0XKWD9XbZOZI=") + if err1 != nil { reportErrorf("Internal error, cannot decode.") } cfg.LogicArgs = [][]byte{bytes} @@ -321,8 +321,8 @@ var runCmd = &cobra.Command{ default: reportErrorf("Invalid argument for --teal: %v\n", teal) } - ops, err := logic.AssembleString(programStr) - if err != nil { + ops, err1 := logic.AssembleString(programStr) + if err1 != nil { ops.ReportMultipleErrors(teal, os.Stderr) reportErrorf("Internal error, cannot assemble %v \n", programStr) } diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go index 32a070e9c8..b7dc3def64 100644 --- a/cmd/tealdbg/cdtState.go +++ b/cmd/tealdbg/cdtState.go @@ -665,10 +665,10 @@ func encodeAppGlobalAppID(key string) string { return appGlobalObjIDPrefix + key } -func decodeAppGlobalAppID(objID string) (uint64, bool) { +func decodeAppGlobalAppID(objID string) (basics.AppIndex, bool) { if strings.HasPrefix(objID, appGlobalObjIDPrefix) { if val, err := strconv.ParseInt(objID[len(appGlobalObjIDPrefix):], 10, 32); err == nil { - return uint64(val), true + return basics.AppIndex(val), true } } return 0, false @@ -689,12 +689,12 @@ func encodeAppLocalsAppID(addr string, appID string) string { return fmt.Sprintf("%s%s_%s", appLocalAppIDPrefix, addr, appID) } -func decodeAppLocalsAppID(objID string) (string, uint64, bool) { +func decodeAppLocalsAppID(objID string) (string, basics.AppIndex, bool) { if strings.HasPrefix(objID, appLocalAppIDPrefix) { encoded := objID[len(appLocalAppIDPrefix):] parts := strings.Split(encoded, "_") if val, err := strconv.ParseInt(parts[1], 10, 32); err == nil { - return parts[0], uint64(val), true + return parts[0], basics.AppIndex(val), true } } return "", 0, false @@ -967,14 +967,14 @@ func makeAppLocalState(s *cdtState, addr string) (desc []cdt.RuntimePropertyDesc return } -func makeAppGlobalKV(s *cdtState, appID uint64) (desc []cdt.RuntimePropertyDescriptor) { - if tkv, ok := s.AppState.global[basics.AppIndex(appID)]; ok { +func makeAppGlobalKV(s *cdtState, appID basics.AppIndex) (desc []cdt.RuntimePropertyDescriptor) { + if tkv, ok := s.AppState.global[appID]; ok { return tkvToRpd(tkv) } return } -func makeAppLocalsKV(s *cdtState, addr string, appID uint64) (desc []cdt.RuntimePropertyDescriptor) { +func makeAppLocalsKV(s *cdtState, addr string, appID basics.AppIndex) (desc []cdt.RuntimePropertyDescriptor) { a, err := basics.UnmarshalChecksumAddress(addr) if err != nil { return @@ -985,7 +985,7 @@ func makeAppLocalsKV(s *cdtState, addr string, appID uint64) (desc []cdt.Runtime return } - if tkv, ok := state[basics.AppIndex(appID)]; ok { + if tkv, ok := state[appID]; ok { return tkvToRpd(tkv) } return diff --git a/cmd/tealdbg/dryrunRequest.go b/cmd/tealdbg/dryrunRequest.go index ddd8531196..79dcfa1e8d 100644 --- a/cmd/tealdbg/dryrunRequest.go +++ b/cmd/tealdbg/dryrunRequest.go @@ -74,18 +74,17 @@ func balanceRecordsFromDdr(ddr *v2.DryrunRequest) (records []basics.BalanceRecor if err != nil { return } - appIdx := basics.AppIndex(a.Id) ad := accounts[addr] if ad.AppParams == nil { ad.AppParams = make(map[basics.AppIndex]basics.AppParams, 1) - ad.AppParams[appIdx] = params + ad.AppParams[a.Id] = params } else { - ap, ok := ad.AppParams[appIdx] + ap, ok := 
ad.AppParams[a.Id] if ok { v2.MergeAppParams(&ap, &params) - ad.AppParams[appIdx] = ap + ad.AppParams[a.Id] = ap } else { - ad.AppParams[appIdx] = params + ad.AppParams[a.Id] = params } } accounts[addr] = ad diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go index 0f31d795a8..4756e9b731 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -379,19 +379,19 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) { r.runs[i].program = data if IsTextFile(data) { source := string(data) - ops, err := logic.AssembleString(source) + ops, err1 := logic.AssembleString(source) if ops.Version > r.proto.LogicSigVersion { return fmt.Errorf("program version (%d) is beyond the maximum supported protocol version (%d)", ops.Version, r.proto.LogicSigVersion) } - if err != nil { + if err1 != nil { errorLines := "" for _, lineError := range ops.Errors { errorLines = fmt.Sprintf("%s\n%s", errorLines, lineError.Error()) } if errorLines != "" { - return fmt.Errorf("%w:%s", err, errorLines) + return fmt.Errorf("%w:%s", err1, errorLines) } - return err + return err1 } r.runs[i].program = ops.Program if !dp.DisableSourceMap { @@ -415,7 +415,7 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) { txn := r.txnGroup[dp.GroupIndex] appIdx := txn.Txn.ApplicationID if appIdx == 0 { - appIdx = basics.AppIndex(dp.AppID) + appIdx = dp.AppID } b, states, err = makeBalancesAdapter( @@ -452,7 +452,7 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) { appIdx := stxn.Txn.ApplicationID if appIdx == 0 { // app create, use ApprovalProgram from the transaction if len(stxn.Txn.ApprovalProgram) > 0 { - appIdx = basics.AppIndex(dp.AppID) + appIdx = dp.AppID b, states, err = makeBalancesAdapter( balances, r.txnGroup, gi, r.protoName, dp.Round, dp.LatestTimestamp, diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go index 33b80b6fb9..d420c2edc6 100644 --- a/cmd/tealdbg/localLedger.go +++ b/cmd/tealdbg/localLedger.go @@ -62,13 +62,13 @@ type localLedger struct { balances map[basics.Address]basics.AccountData txnGroup []transactions.SignedTxn groupIndex int - round uint64 + round basics.Round aidx basics.AppIndex } func makeBalancesAdapter( balances map[basics.Address]basics.AccountData, txnGroup []transactions.SignedTxn, - groupIndex int, proto string, round uint64, latestTimestamp int64, + groupIndex int, proto string, round basics.Round, latestTimestamp int64, appIdx basics.AppIndex, painless bool, indexerURL string, indexerToken string, ) (apply.Balances, AppState, error) { @@ -214,7 +214,7 @@ func getAppCreatorFromIndexer(indexerURL string, indexerToken string, app basics return creator, nil } -func getBalanceFromIndexer(indexerURL string, indexerToken string, account basics.Address, round uint64) (basics.AccountData, error) { +func getBalanceFromIndexer(indexerURL string, indexerToken string, account basics.Address, round basics.Round) (basics.AccountData, error) { queryString := fmt.Sprintf("%s/v2/accounts/%s?round=%d", indexerURL, account, round) client := &http.Client{} request, err := http.NewRequest("GET", queryString, nil) @@ -298,10 +298,10 @@ func (l *localLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx ba return ledgercore.AssetResource{}, nil } var result ledgercore.AssetResource - if p, ok := ad.AssetParams[basics.AssetIndex(aidx)]; ok { + if p, ok := ad.AssetParams[aidx]; ok { result.AssetParams = &p } - if p, ok := ad.Assets[basics.AssetIndex(aidx)]; ok { + if p, ok := ad.Assets[aidx]; ok { result.AssetHolding = &p } @@ -314,10 +314,10 @@ func (l 
*localLedger) LookupApplication(rnd basics.Round, addr basics.Address, a return ledgercore.AppResource{}, nil } var result ledgercore.AppResource - if p, ok := ad.AppParams[basics.AppIndex(aidx)]; ok { + if p, ok := ad.AppParams[aidx]; ok { result.AppParams = &p } - if s, ok := ad.AppLocalStates[basics.AppIndex(aidx)]; ok { + if s, ok := ad.AppLocalStates[aidx]; ok { result.AppLocalState = &s } diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go index 99fe763a0a..9e4ecb25ef 100644 --- a/cmd/tealdbg/local_test.go +++ b/cmd/tealdbg/local_test.go @@ -708,7 +708,7 @@ func TestRunMode(t *testing.T) { a.NotNil(l.runs[0].eval) a.Nil(l.runs[0].ba) a.Equal(modeLogicsig, l.runs[0].mode) - a.Equal(basics.AppIndex(0), l.runs[0].aidx) + a.Zero(l.runs[0].aidx) // check run mode application dp = DebugParams{ @@ -745,7 +745,7 @@ func TestRunMode(t *testing.T) { a.NotNil(l.runs[0].eval) a.Nil(l.runs[0].ba) a.Equal(modeLogicsig, l.runs[0].mode) - a.Equal(basics.AppIndex(0), l.runs[0].aidx) + a.Zero(l.runs[0].aidx) } func TestDebugFromTxn(t *testing.T) { @@ -810,7 +810,7 @@ func TestDebugFromTxn(t *testing.T) { a.Equal([]byte{3}, l.runs[0].program) a.Nil(l.runs[0].ba) a.Equal(modeLogicsig, l.runs[0].mode) - a.Equal(basics.AppIndex(0), l.runs[0].aidx) + a.Zero(l.runs[0].aidx) // ensure clear approval program is supposed to be debugged brs = makeSampleBalanceRecord(sender, 0, appIdx) @@ -1293,7 +1293,7 @@ int 1` LatestTimestamp: 333, GroupIndex: 0, RunMode: "application", - AppID: uint64(appIdx), + AppID: appIdx, } local := MakeLocalRunner(nil) @@ -1425,7 +1425,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr LatestTimestamp: 333, GroupIndex: 0, RunMode: "application", - AppID: uint64(appIdx), + AppID: appIdx, } local := MakeLocalRunner(nil) diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go index f2c2f8d055..f081bde83f 100644 --- a/cmd/tealdbg/main.go +++ b/cmd/tealdbg/main.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/cobra/doc" cmdutil "github.com/algorand/go-algorand/cmd/util" + "github.com/algorand/go-algorand/data/basics" ) func main() { @@ -119,7 +120,7 @@ var noBrowserCheck bool var noSourceMap bool var verbose bool var painless bool -var appID uint64 +var appID basics.AppIndex var listenForDrReq bool func init() { @@ -138,7 +139,7 @@ func init() { debugCmd.Flags().IntVarP(&groupIndex, "group-index", "g", 0, "Transaction index in a txn group") debugCmd.Flags().StringVarP(&balanceFile, "balance", "b", "", "Balance records to evaluate stateful TEAL on in form of json or msgpack file") debugCmd.Flags().StringVarP(&ddrFile, "dryrun-req", "d", "", "Program(s) and state(s) in dryrun REST request format") - debugCmd.Flags().Uint64VarP(&appID, "app-id", "a", 1380011588, "Application ID for stateful TEAL if not set in transaction(s)") + debugCmd.Flags().Uint64VarP((*uint64)(&appID), "app-id", "a", 1380011588, "Application ID for stateful TEAL if not set in transaction(s)") debugCmd.Flags().Uint64VarP(&roundNumber, "round", "r", 0, "Ledger round number to evaluate stateful TEAL on") debugCmd.Flags().Int64VarP(×tamp, "latest-timestamp", "l", 0, "Latest confirmed timestamp to evaluate stateful TEAL on") debugCmd.Flags().VarP(&runMode, "mode", "m", "TEAL evaluation mode: "+runMode.AllowedString()) @@ -243,7 +244,7 @@ func debugLocal(args []string) { DdrBlob: ddrBlob, IndexerURL: indexerURL, IndexerToken: indexerToken, - Round: uint64(roundNumber), + Round: basics.Round(roundNumber), LatestTimestamp: timestamp, RunMode: runMode.String(), DisableSourceMap: noSourceMap, diff --git 
a/cmd/tealdbg/server.go b/cmd/tealdbg/server.go index 5bce050a03..2fffb2cb2d 100644 --- a/cmd/tealdbg/server.go +++ b/cmd/tealdbg/server.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/websocket" "github.com/gorilla/mux" ) @@ -89,11 +90,11 @@ type DebugParams struct { DdrBlob []byte IndexerURL string IndexerToken string - Round uint64 + Round basics.Round LatestTimestamp int64 RunMode string DisableSourceMap bool - AppID uint64 + AppID basics.AppIndex Painless bool ListenForDrReq bool } @@ -156,9 +157,9 @@ func (ds *DebugServer) startDebug() (err error) { } go func() { - err := ds.server.ListenAndServe() - if err != nil && err != http.ErrServerClosed { - log.Panicf("failed to listen: %v", err) + err1 := ds.server.ListenAndServe() + if err1 != nil && err1 != http.ErrServerClosed { + log.Panicf("failed to listen: %v", err1) } }() defer ds.server.Shutdown(context.Background()) diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh index c7f049eec9..f0544204f3 100755 --- a/cmd/updater/update.sh +++ b/cmd/updater/update.sh @@ -495,6 +495,8 @@ function backup_binaries() { BACKUPFILES="algod kmd carpenter doberman goal update.sh updater diagcfg" # add node_exporter to the files list we're going to backup, but only we if had it previously deployed. [ -f "${BINDIR}/node_exporter" ] && BACKUPFILES="${BACKUPFILES} node_exporter" + # If we have algotmpl, we should back it up too + [ -f "${BINDIR}/algotmpl" ] && BACKUPFILES="${BACKUPFILES} algotmpl" tar -zcf "${BINDIR}/backup/bin-v${CURRENTVER}.tar.gz" -C "${BINDIR}" ${BACKUPFILES} >/dev/null 2>&1 } diff --git a/cmd/util/cmd.go b/cmd/util/cmd.go index 16cc22f1a9..ec72cfceac 100644 --- a/cmd/util/cmd.go +++ b/cmd/util/cmd.go @@ -18,6 +18,7 @@ package cmdutil import ( "fmt" + "slices" "strings" ) @@ -49,12 +50,10 @@ func (c *CobraStringValue) IsSet() bool { return c.isSet } // Set sets a value and fails if it is not allowed func (c *CobraStringValue) Set(other string) error { - for _, s := range c.allowed { - if other == s { - c.value = other - c.isSet = true - return nil - } + if slices.Contains(c.allowed, other) { + c.value = other + c.isSet = true + return nil } return fmt.Errorf("value %s not allowed", other) } diff --git a/config/bounds/bounds.go b/config/bounds/bounds.go new file mode 100644 index 0000000000..0613087df0 --- /dev/null +++ b/config/bounds/bounds.go @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package bounds + +/* The bounds package is intended to hold conservative bounds on the sizes of + various messages. Many cannot be static, because they depend on consensus + parameters. They are set at runtime iterating over every consensus version + and selecting the largest bound. This allows msgpack parsing to safely + reject anything that NO consensus version would allow. 
+*/ + +// MaxVoteThreshold is the largest threshold for a bundle over all supported +// consensus protocols, used for decoding purposes. +var MaxVoteThreshold int + +// MaxEvalDeltaAccounts is the largest number of accounts that may appear in an +// eval delta, used for decoding purposes. +var MaxEvalDeltaAccounts int + +// MaxStateDeltaKeys is the largest number of key/value pairs that may appear in +// a StateDelta, used for decoding purposes. +var MaxStateDeltaKeys int + +// MaxLogCalls is the highest allowable log messages that may appear in any +// version, used only for decoding purposes. Never decrease this value. +var MaxLogCalls int + +// MaxInnerTransactionsPerDelta is the maximum number of inner transactions in +// one EvalDelta +var MaxInnerTransactionsPerDelta int + +// MaxLogicSigMaxSize is the largest logical signature appear in any of the +// supported protocols, used for decoding purposes. +var MaxLogicSigMaxSize int + +// MaxTxnNoteBytes is the largest supported nodes field array size supported by +// any of the consensus protocols. used for decoding purposes. +var MaxTxnNoteBytes int + +// MaxTxGroupSize is the largest supported number of transactions per +// transaction group supported by any of the consensus protocols. used for +// decoding purposes. +var MaxTxGroupSize int + +// MaxAppProgramLen is the largest supported app program size supported by any +// of the consensus protocols. used for decoding purposes. +var MaxAppProgramLen int + +// MaxBytesKeyValueLen is a maximum length of key or value across all protocols. +// used for decoding purposes. +var MaxBytesKeyValueLen int + +// MaxExtraAppProgramLen is the maximum extra app program length supported by +// any of the consensus protocols. used for decoding purposes. +var MaxExtraAppProgramLen int + +// MaxAvailableAppProgramLen is the largest supported app program size including +// the extra pages supported by any of the consensus protocols. used for +// decoding purposes. +var MaxAvailableAppProgramLen int + +// MaxProposedExpiredOnlineAccounts is the maximum number of online accounts +// that a proposer can take offline for having expired voting keys. +var MaxProposedExpiredOnlineAccounts int + +// MaxMarkAbsent is the maximum number of online accounts that a proposer can +// suspend for not proposing "lately" +var MaxMarkAbsent int + +// MaxAppTotalArgLen is the maximum number of bytes across all arguments of an +// application max sum([len(arg) for arg in txn.ApplicationArgs]) +var MaxAppTotalArgLen int + +// MaxAssetNameBytes is the maximum asset name length in bytes +var MaxAssetNameBytes int + +// MaxAssetUnitNameBytes is the maximum asset unit name length in bytes +var MaxAssetUnitNameBytes int + +// MaxAssetURLBytes is the maximum asset URL length in bytes +var MaxAssetURLBytes int + +// MaxAppBytesValueLen is the maximum length of a bytes value used in an +// application's global or local key/value store +var MaxAppBytesValueLen int + +// MaxAppBytesKeyLen is the maximum length of a key used in an application's +// global or local key/value store +var MaxAppBytesKeyLen int + +// StateProofTopVoters is a bound on how many online accounts get to participate +// in forming the state proof, by including the top StateProofTopVoters accounts +// (by normalized balance) into the vector commitment. +var StateProofTopVoters int + +// MaxTxnBytesPerBlock determines the maximum number of bytes that transactions +// can take up in a block. 
Specifically, the sum of the lengths of encodings of +// each transaction in a block must not exceed MaxTxnBytesPerBlock. +var MaxTxnBytesPerBlock int + +// MaxAppTxnForeignApps is the max number of foreign apps per txn across all consensus versions +var MaxAppTxnForeignApps int + +// MaxEvalDeltaTotalLogSize is the maximum size of the sum of all log sizes in a single eval delta. +const MaxEvalDeltaTotalLogSize = 1024 + +// MaxGenesisIDLen is the maximum length of the genesis ID set for purpose of +// setting allocbounds on structs containing GenesisID and for purposes of +// calculating MaxSize functions on those types. Current value is larger than +// the existing network IDs and the ones used in testing +const MaxGenesisIDLen = 128 + +// EncodedMaxAssetsPerAccount is the decoder limit of number of assets stored +// per account. it's being verified by the unit test +// TestEncodedAccountAllocationBounds to align with +// config.Consensus[protocol.ConsensusCurrentVersion].MaxAssetsPerAccount; note +// that the decoded parameter is used only for protecting the decoder against +// malicious encoded account data stream. protocol-specific contents would be +// tested once the decoding is complete. +const EncodedMaxAssetsPerAccount = 1024 + +// EncodedMaxAppLocalStates is the decoder limit for number of opted-in apps in a single account. +// It is verified in TestEncodedAccountAllocationBounds to align with +// config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn +const EncodedMaxAppLocalStates = 64 + +// EncodedMaxAppParams is the decoder limit for number of created apps in a single account. +// It is verified in TestEncodedAccountAllocationBounds to align with +// config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsCreated +const EncodedMaxAppParams = 64 + +// EncodedMaxKeyValueEntries is the decoder limit for the length of a key/value store. +// It is verified in TestEncodedAccountAllocationBounds to align with +// config.Consensus[protocol.ConsensusCurrentVersion].MaxLocalSchemaEntries and +// config.Consensus[protocol.ConsensusCurrentVersion].MaxGlobalSchemaEntries +const EncodedMaxKeyValueEntries = 1024 + +// MaxConsensusVersionLen must be larger than any URL length of any consensus +// version (which is currently URL+hash=89) +const MaxConsensusVersionLen = 128 diff --git a/config/config.go b/config/config.go index 495eba5890..96bf001a5b 100644 --- a/config/config.go +++ b/config/config.go @@ -19,12 +19,14 @@ package config import ( "encoding/json" "errors" + "fmt" "io" "os" "os/user" "path/filepath" "strings" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/codecs" ) @@ -82,14 +84,6 @@ const ConfigurableConsensusProtocolsFilename = "consensus.json" // do not expose in normal config so it is not in code generated local_defaults.go const defaultRelayGossipFanout = 8 -// MaxGenesisIDLen is the maximum length of the genesis ID set for purpose of setting -// allocbounds on structs containing GenesisID and for purposes of calculating MaxSize functions -// on those types. Current value is larger than the existing network IDs and the ones used in testing -const MaxGenesisIDLen = 128 - -// MaxEvalDeltaTotalLogSize is the maximum size of the sum of all log sizes in a single eval delta. 
-const MaxEvalDeltaTotalLogSize = 1024 - // CatchpointTrackingModeUntracked defines the CatchpointTracking mode that does _not_ track catchpoints const CatchpointTrackingModeUntracked = -1 @@ -113,10 +107,16 @@ const PlaceholderPublicAddress = "PLEASE_SET_ME" // cannot be loaded, the default config is returned (with the error from loading the // custom file). func LoadConfigFromDisk(custom string) (c Local, err error) { + c, _, err = loadConfigFromFile(filepath.Join(custom, ConfigFilename)) + return +} + +// LoadConfigFromDiskWithMigrations is like LoadConfigFromDisk but also returns migration results +func LoadConfigFromDiskWithMigrations(custom string) (c Local, migrations []MigrationResult, err error) { return loadConfigFromFile(filepath.Join(custom, ConfigFilename)) } -func loadConfigFromFile(configFile string) (c Local, err error) { +func loadConfigFromFile(configFile string) (c Local, migrations []MigrationResult, err error) { c = defaultLocal c.Version = 0 // Reset to 0 so we get the version from the loaded file. c, err = mergeConfigFromFile(configFile, c) @@ -127,7 +127,7 @@ func loadConfigFromFile(configFile string) (c Local, err error) { // Migrate in case defaults were changed // If a config file does not have version, it is assumed to be zero. // All fields listed in migrate() might be changed if an actual value matches to default value from a previous version. - c, err = migrate(c) + c, migrations, err = migrate(c) return } @@ -174,11 +174,6 @@ func enrichNetworkingConfig(source Local) (Local, error) { source.GossipFanout = defaultRelayGossipFanout } } - // In hybrid mode we want to prevent connections from the same node over both P2P and WS. - // The only way it is supported at the moment is to use net identity challenge that is based on PublicAddress. - if (source.NetAddress != "" || source.P2PHybridNetAddress != "") && source.EnableP2PHybridMode && source.PublicAddress == "" { - return source, errors.New("PublicAddress must be specified when EnableP2PHybridMode is set") - } source.PublicAddress = strings.ToLower(source.PublicAddress) return source, nil } @@ -237,7 +232,8 @@ func savePhonebook(entries []string, w io.Writer) error { return enc.Encode(pb) } -var globalConfigFileRoot string +// DataDirectory for the current instance +var DataDirectory string // GetConfigFilePath retrieves the full path to a configuration file // These are global configurations - not specific to data-directory / network. @@ -249,12 +245,13 @@ func GetConfigFilePath(file string) (string, error) { return filepath.Join(rootPath, file), nil } -// GetGlobalConfigFileRoot returns the current root folder for global configuration files. -// This will likely only change for tests. +var globalConfigFileRoot string + +// GetGlobalConfigFileRoot returns the root directory for global configuration files. func GetGlobalConfigFileRoot() (string, error) { var err error if globalConfigFileRoot == "" { - globalConfigFileRoot, err = GetDefaultConfigFilePath() + globalConfigFileRoot, err = deriveConfigFilePath() if err == nil { dirErr := os.Mkdir(globalConfigFileRoot, os.ModePerm) if !os.IsExist(dirErr) { @@ -265,29 +262,33 @@ func GetGlobalConfigFileRoot() (string, error) { return globalConfigFileRoot, err } -// SetGlobalConfigFileRoot allows overriding the root folder for global configuration files. -// It returns the current one so it can be restored, if desired. -// This will likely only change for tests. 
-func SetGlobalConfigFileRoot(rootPath string) string { - currentRoot := globalConfigFileRoot - globalConfigFileRoot = rootPath - return currentRoot -} - -// GetDefaultConfigFilePath retrieves the default directory for global (not per-instance) config files -// By default we store in ~/.algorand/. -// This will likely only change for tests. -func GetDefaultConfigFilePath() (string, error) { +// deriveConfigFilePath retrieves the directory (~/.algorand) for global (not +// per-instance) config files. +func deriveConfigFilePath() (string, error) { currentUser, err := user.Current() if err != nil { return "", err } if currentUser.HomeDir == "" { - return "", errors.New("GetDefaultConfigFilePath fail - current user has no home directory") + return "", fmt.Errorf("current user %s has no home directory", currentUser.Username) } return filepath.Join(currentUser.HomeDir, ".algorand"), nil } +// AnnotateTelemetry adds some extra information to the TelemetryConfig that +// isn't actually in the config file, but is reported by telemetry. +func AnnotateTelemetry(cfg *logging.TelemetryConfig, genesisID string) { + ver := GetCurrentVersion() + ch := ver.Channel + // Should not happen, but default to "dev" if channel is unspecified. + if ch == "" { + ch = "dev" + } + cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) + cfg.Version = ver.String() + cfg.DataDirectory = DataDirectory +} + const ( dnssecSRV = 1 << iota dnssecRelayAddr diff --git a/config/config_test.go b/config/config_test.go index 5661138183..68928d5908 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -30,6 +30,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" "github.com/algorand/go-algorand/util/codecs" @@ -49,7 +50,7 @@ func TestLocal_SaveThenLoad(t *testing.T) { c1, err := loadWithoutDefaults(defaultConfig) require.NoError(t, err) - c1, err = migrate(c1) + c1, _, err = migrate(c1) require.NoError(t, err) var b1 bytes.Buffer ser1 := json.NewEncoder(&b1) @@ -145,20 +146,6 @@ func TestLocal_EnrichNetworkingConfig(t *testing.T) { c2, err = enrichNetworkingConfig(c1) require.NoError(t, err) - c1 = Local{ - NetAddress: "test1", - EnableP2PHybridMode: true, - } - c2, err = enrichNetworkingConfig(c1) - require.ErrorContains(t, err, "PublicAddress must be specified when EnableP2PHybridMode is set") - - c1 = Local{ - P2PHybridNetAddress: "test1", - EnableP2PHybridMode: true, - } - c2, err = enrichNetworkingConfig(c1) - require.ErrorContains(t, err, "PublicAddress must be specified when EnableP2PHybridMode is set") - c1 = Local{ EnableP2PHybridMode: true, PublicAddress: "test2", @@ -261,7 +248,7 @@ func loadWithoutDefaults(cfg Local) (Local, error) { if err != nil { return Local{}, err } - cfg, err = loadConfigFromFile(name) + cfg, _, err = loadConfigFromFile(name) return cfg, err } @@ -272,24 +259,46 @@ func TestLocal_ConfigMigrate(t *testing.T) { c0, err := loadWithoutDefaults(GetVersionedDefaultLocalConfig(0)) a.NoError(err) - c0, err = migrate(c0) + c0, migrations0, err := migrate(c0) a.NoError(err) - cLatest, err := migrate(defaultLocal) + a.Empty(migrations0) + cLatest, migrationsLatest, err := migrate(defaultLocal) a.NoError(err) + a.Empty(migrationsLatest) a.Equal(defaultLocal, c0) a.Equal(defaultLocal, cLatest) cLatest.Version = getLatestConfigVersion() + 1 - _, err = migrate(cLatest) + _, _, err = migrate(cLatest) a.Error(err) // Ensure we 
don't migrate values that aren't the default old version c0Modified := GetVersionedDefaultLocalConfig(0) c0Modified.BaseLoggerDebugLevel = GetVersionedDefaultLocalConfig(0).BaseLoggerDebugLevel + 1 - c0Modified, err = migrate(c0Modified) + c0Modified, migrationsModified, err := migrate(c0Modified) a.NoError(err) a.NotEqual(defaultLocal, c0Modified) + + // Assert specific important migrations covering different data types + a.NotEmpty(migrationsModified) + migrationMap := make(map[string]MigrationResult) + for _, m := range migrationsModified { + a.NotEqual("Version", m.FieldName) + a.NotContains(migrationMap, m.FieldName) + migrationMap[m.FieldName] = m + } + for _, expected := range []MigrationResult{ + {FieldName: "IncomingConnectionsLimit", OldValue: int64(-1), NewValue: int64(2400), OldVersion: 0, NewVersion: 27}, + {FieldName: "TxPoolSize", OldValue: int64(50000), NewValue: int64(75000), OldVersion: 0, NewVersion: 23}, + {FieldName: "ProposalAssemblyTime", OldValue: int64(0), NewValue: int64(500000000), OldVersion: 0, NewVersion: 23}, + {FieldName: "AgreementIncomingVotesQueueLength", OldValue: uint64(0), NewValue: uint64(20000), OldVersion: 0, NewVersion: 27}, + {FieldName: "EnableTxBacklogRateLimiting", OldValue: false, NewValue: true, OldVersion: 0, NewVersion: 30}, + {FieldName: "DNSBootstrapID", OldValue: ".algorand.network", NewValue: ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", OldVersion: 0, NewVersion: 28}, + } { + a.Contains(migrationMap, expected.FieldName) + a.Equal(expected, migrationMap[expected.FieldName]) + } } func TestLocal_ConfigMigrateFromDisk(t *testing.T) { @@ -302,15 +311,50 @@ func TestLocal_ConfigMigrateFromDisk(t *testing.T) { configsPath := filepath.Join(ourPath, "../test/testdata/configs") for configVersion := uint32(0); configVersion <= getLatestConfigVersion(); configVersion++ { - c, err := loadConfigFromFile(filepath.Join(configsPath, fmt.Sprintf("config-v%d.json", configVersion))) + c, migrations, err := loadConfigFromFile(filepath.Join(configsPath, fmt.Sprintf("config-v%d.json", configVersion))) a.NoError(err) - modified, err := migrate(c) + modified, _, err := migrate(c) a.NoError(err) a.Equal(defaultLocal, modified, "config-v%d.json", configVersion) + + if len(migrations) > 0 { + t.Logf("Migration results for config-v%d.json:", configVersion) + for _, m := range migrations { + t.Logf(" Automatically upgraded default value for %s from %v (version %d) to %v (version %d)", + m.FieldName, m.OldValue, m.OldVersion, m.NewValue, m.NewVersion) + } + } + + // Spot-check specific migrations + expectedMigrations := map[uint32]MigrationResult{ + 1: {FieldName: "TxPoolSize", OldValue: 50000, NewValue: 75000, OldVersion: 1, NewVersion: 23}, + 3: {FieldName: "MaxConnectionsPerIP", OldValue: 30, NewValue: 8, OldVersion: 3, NewVersion: 35}, + 5: {FieldName: "TxPoolSize", OldValue: 15000, NewValue: 75000, OldVersion: 5, NewVersion: 23}, + 17: {FieldName: "IncomingConnectionsLimit", OldValue: 800, NewValue: 2400, OldVersion: 17, NewVersion: 27}, + 19: {FieldName: "ProposalAssemblyTime", OldValue: 250000000, NewValue: 500000000, OldVersion: 19, NewVersion: 23}, + 21: {FieldName: "AgreementIncomingVotesQueueLength", OldValue: 10000, NewValue: 20000, OldVersion: 21, NewVersion: 27}, + 23: {FieldName: "CadaverSizeTarget", OldValue: 1073741824, NewValue: 0, OldVersion: 23, NewVersion: 24}, + 27: {FieldName: "EnableTxBacklogRateLimiting", OldValue: false, NewValue: true, OldVersion: 27, NewVersion: 30}, + 30: {FieldName: "DNSSecurityFlags", 
OldValue: 1, NewValue: 9, OldVersion: 30, NewVersion: 34}, + } + if expected, ok := expectedMigrations[configVersion]; ok { + found := false + for _, m := range migrations { + if m.FieldName == expected.FieldName { + found = true + a.EqualValues(expected.OldValue, m.OldValue) + a.EqualValues(expected.NewValue, m.NewValue) + a.EqualValues(expected.OldVersion, m.OldVersion) + a.EqualValues(expected.NewVersion, m.NewVersion) + break + } + } + a.True(found, "v%d should have %s migration", configVersion, expected.FieldName) + } } cNext := Local{Version: getLatestConfigVersion() + 1} - _, err = migrate(cNext) + _, _, err = migrate(cNext) a.Error(err) } @@ -758,16 +802,20 @@ func TestLocal_ValidateP2PHybridConfig(t *testing.T) { enableP2PHybridMode bool p2pHybridNetAddress string netAddress string + publicAddress string err bool }{ - {false, "", "", false}, - {false, ":0", "", false}, - {false, "", ":0", false}, - {false, ":0", ":0", false}, - {true, "", "", false}, - {true, ":0", "", true}, - {true, "", ":0", true}, - {true, ":0", ":0", false}, + {false, "", "", "", false}, + {false, ":0", "", "", false}, + {false, "", ":0", "", false}, + {false, ":0", ":0", "", false}, + {true, "", "", "", false}, + {true, ":0", "", "", true}, + {true, ":0", "", "pub", true}, + {true, "", ":0", "", true}, + {true, "", ":0", "pub", true}, + {true, ":0", ":0", "", true}, + {true, ":0", ":0", "pub", false}, } for i, test := range tests { @@ -779,9 +827,10 @@ func TestLocal_ValidateP2PHybridConfig(t *testing.T) { EnableP2PHybridMode: test.enableP2PHybridMode, P2PHybridNetAddress: test.p2pHybridNetAddress, NetAddress: test.netAddress, + PublicAddress: test.publicAddress, } err := c.ValidateP2PHybridConfig() - require.Equal(t, test.err, err != nil, name) + require.Equal(t, test.err, err != nil, "%s: %v => %v", name, test, err) }) } } @@ -923,7 +972,7 @@ func TestEnsureAndResolveGenesisDirs_migrate(t *testing.T) { require.FileExists(t, filepath.Join(hotDir, "stateproof.sqlite-wal")) } -func TestEnsureAndResolveGenesisDirs_migrateCrashFail(t *testing.T) { +func TestEnsureAndResolveGenesisDirs_migrateCrashErr(t *testing.T) { partitiontest.PartitionTest(t) cfg := GetDefaultLocal() @@ -955,7 +1004,7 @@ func TestEnsureAndResolveGenesisDirs_migrateCrashFail(t *testing.T) { require.NoFileExists(t, filepath.Join(hotDir, "crash.sqlite-shm")) } -func TestEnsureAndResolveGenesisDirs_migrateSPFail(t *testing.T) { +func TestEnsureAndResolveGenesisDirs_migrateSPErr(t *testing.T) { partitiontest.PartitionTest(t) cfg := GetDefaultLocal() @@ -1170,3 +1219,28 @@ func TestTracksCatchpointsWithoutStoring(t *testing.T) { require.Equal(t, true, cfg.TracksCatchpoints()) require.Equal(t, false, cfg.StoresCatchpoints()) } + +func TestEncodedAccountAllocationBounds(t *testing.T) { + partitiontest.PartitionTest(t) + + // ensure that all the supported protocols have value limits less or + // equal to their corresponding codec allocbounds + for protoVer, proto := range Consensus { + if proto.MaxAssetsPerAccount > 0 && proto.MaxAssetsPerAccount > bounds.EncodedMaxAssetsPerAccount { + require.Failf(t, "proto.MaxAssetsPerAccount > EncodedMaxAssetsPerAccount", "protocol version = %s", protoVer) + } + if proto.MaxAppsCreated > 0 && proto.MaxAppsCreated > bounds.EncodedMaxAppParams { + require.Failf(t, "proto.MaxAppsCreated > EncodedMaxAppParams", "protocol version = %s", protoVer) + } + if proto.MaxAppsOptedIn > 0 && proto.MaxAppsOptedIn > bounds.EncodedMaxAppLocalStates { + require.Failf(t, "proto.MaxAppsOptedIn > EncodedMaxAppLocalStates", "protocol 
version = %s", protoVer) + } + if proto.MaxLocalSchemaEntries > bounds.EncodedMaxKeyValueEntries { + require.Failf(t, "proto.MaxLocalSchemaEntries > EncodedMaxKeyValueEntries", "protocol version = %s", protoVer) + } + if proto.MaxGlobalSchemaEntries > bounds.EncodedMaxKeyValueEntries { + require.Failf(t, "proto.MaxGlobalSchemaEntries > EncodedMaxKeyValueEntries", "protocol version = %s", protoVer) + } + // There is no protocol limit to the number of Boxes per account, so that allocbound is not checked. + } +} diff --git a/config/consensus.go b/config/consensus.go index 61f71e8de9..2f081fc936 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -19,6 +19,8 @@ package config import ( "time" + "github.com/algorand/go-algorand/config/bounds" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" ) @@ -553,6 +555,9 @@ type ConsensusParams struct { // Heartbeat support Heartbeat bool + + // EnableSha512BlockHash adds an additional SHA-512 hash to the block header. + EnableSha512BlockHash bool } // ProposerPayoutRules puts several related consensus parameters in one place. The same @@ -639,6 +644,29 @@ type BonusPlan struct { DecayInterval uint64 } +// EffectiveKeyDilution returns the key dilution for this account, +// returning the default key dilution if not explicitly specified. +func (proto ConsensusParams) EffectiveKeyDilution(kd uint64) uint64 { + if kd != 0 { + return kd + } + return proto.DefaultKeyDilution +} + +// BalanceRequirements returns all the consensus values that determine min balance. +func (proto ConsensusParams) BalanceRequirements() basics.BalanceRequirements { + return basics.BalanceRequirements{ + MinBalance: proto.MinBalance, + AppFlatParamsMinBalance: proto.AppFlatParamsMinBalance, + AppFlatOptInMinBalance: proto.AppFlatOptInMinBalance, + BoxFlatMinBalance: proto.BoxFlatMinBalance, + BoxByteMinBalance: proto.BoxByteMinBalance, + SchemaMinBalancePerEntry: proto.SchemaMinBalancePerEntry, + SchemaUintMinBalance: proto.SchemaUintMinBalance, + SchemaBytesMinBalance: proto.SchemaBytesMinBalance, + } +} + // PaysetCommitType enumerates possible ways for the block header to commit to // the set of transactions in the block. type PaysetCommitType int @@ -664,97 +692,6 @@ type ConsensusProtocols map[protocol.ConsensusVersion]ConsensusParams // consensus protocol. var Consensus ConsensusProtocols -// MaxVoteThreshold is the largest threshold for a bundle over all supported -// consensus protocols, used for decoding purposes. -var MaxVoteThreshold int - -// MaxEvalDeltaAccounts is the largest number of accounts that may appear in -// an eval delta, used for decoding purposes. -var MaxEvalDeltaAccounts int - -// MaxStateDeltaKeys is the largest number of key/value pairs that may appear -// in a StateDelta, used for decoding purposes. -var MaxStateDeltaKeys int - -// MaxLogCalls is the highest allowable log messages that may appear in -// any version, used only for decoding purposes. Never decrease this value. -var MaxLogCalls int - -// MaxInnerTransactionsPerDelta is the maximum number of inner transactions in one EvalDelta -var MaxInnerTransactionsPerDelta int - -// MaxLogicSigMaxSize is the largest logical signature appear in any of the supported -// protocols, used for decoding purposes. -var MaxLogicSigMaxSize int - -// MaxTxnNoteBytes is the largest supported nodes field array size supported by any -// of the consensus protocols. used for decoding purposes. 
-var MaxTxnNoteBytes int - -// MaxTxGroupSize is the largest supported number of transactions per transaction group supported by any -// of the consensus protocols. used for decoding purposes. -var MaxTxGroupSize int - -// MaxAppProgramLen is the largest supported app program size supported by any -// of the consensus protocols. used for decoding purposes. -var MaxAppProgramLen int - -// MaxBytesKeyValueLen is a maximum length of key or value across all protocols. -// used for decoding purposes. -var MaxBytesKeyValueLen int - -// MaxExtraAppProgramLen is the maximum extra app program length supported by any -// of the consensus protocols. used for decoding purposes. -var MaxExtraAppProgramLen int - -// MaxAvailableAppProgramLen is the largest supported app program size including the extra -// pages supported by any of the consensus protocols. used for decoding purposes. -var MaxAvailableAppProgramLen int - -// MaxProposedExpiredOnlineAccounts is the maximum number of online accounts -// that a proposer can take offline for having expired voting keys. -var MaxProposedExpiredOnlineAccounts int - -// MaxMarkAbsent is the maximum number of online accounts that a proposer can -// suspend for not proposing "lately" -var MaxMarkAbsent int - -// MaxAppTotalArgLen is the maximum number of bytes across all arguments of an application -// max sum([len(arg) for arg in txn.ApplicationArgs]) -var MaxAppTotalArgLen int - -// MaxAssetNameBytes is the maximum asset name length in bytes -var MaxAssetNameBytes int - -// MaxAssetUnitNameBytes is the maximum asset unit name length in bytes -var MaxAssetUnitNameBytes int - -// MaxAssetURLBytes is the maximum asset URL length in bytes -var MaxAssetURLBytes int - -// MaxAppBytesValueLen is the maximum length of a bytes value used in an application's global or -// local key/value store -var MaxAppBytesValueLen int - -// MaxAppBytesKeyLen is the maximum length of a key used in an application's global or local -// key/value store -var MaxAppBytesKeyLen int - -// StateProofTopVoters is a bound on how many online accounts get to -// participate in forming the state proof, by including the -// top StateProofTopVoters accounts (by normalized balance) into the -// vector commitment. -var StateProofTopVoters int - -// MaxTxnBytesPerBlock determines the maximum number of bytes -// that transactions can take up in a block. Specifically, -// the sum of the lengths of encodings of each transaction -// in a block must not exceed MaxTxnBytesPerBlock. -var MaxTxnBytesPerBlock int - -// MaxAppTxnForeignApps is the max number of foreign apps per txn across all consensus versions -var MaxAppTxnForeignApps int - func checkSetMax(value int, curMax *int) { if value > *curMax { *curMax = value @@ -765,47 +702,47 @@ func checkSetMax(value int, curMax *int) { // to enforce memory allocation limits. 
The values should be generous to // prevent correctness bugs, but not so large that DoS attacks are trivial func checkSetAllocBounds(p ConsensusParams) { - checkSetMax(int(p.SoftCommitteeThreshold), &MaxVoteThreshold) - checkSetMax(int(p.CertCommitteeThreshold), &MaxVoteThreshold) - checkSetMax(int(p.NextCommitteeThreshold), &MaxVoteThreshold) - checkSetMax(int(p.LateCommitteeThreshold), &MaxVoteThreshold) - checkSetMax(int(p.RedoCommitteeThreshold), &MaxVoteThreshold) - checkSetMax(int(p.DownCommitteeThreshold), &MaxVoteThreshold) + checkSetMax(int(p.SoftCommitteeThreshold), &bounds.MaxVoteThreshold) + checkSetMax(int(p.CertCommitteeThreshold), &bounds.MaxVoteThreshold) + checkSetMax(int(p.NextCommitteeThreshold), &bounds.MaxVoteThreshold) + checkSetMax(int(p.LateCommitteeThreshold), &bounds.MaxVoteThreshold) + checkSetMax(int(p.RedoCommitteeThreshold), &bounds.MaxVoteThreshold) + checkSetMax(int(p.DownCommitteeThreshold), &bounds.MaxVoteThreshold) // These bounds could be tighter, but since these values are just to // prevent DoS, setting them to be the maximum number of allowed // executed TEAL instructions should be fine (order of ~1000) - checkSetMax(p.MaxAppProgramLen, &MaxStateDeltaKeys) - checkSetMax(p.MaxAppProgramLen, &MaxEvalDeltaAccounts) - checkSetMax(p.MaxAppProgramLen, &MaxAppProgramLen) - checkSetMax((int(p.LogicSigMaxSize) * p.MaxTxGroupSize), &MaxLogicSigMaxSize) - checkSetMax(p.MaxTxnNoteBytes, &MaxTxnNoteBytes) - checkSetMax(p.MaxTxGroupSize, &MaxTxGroupSize) + checkSetMax(p.MaxAppProgramLen, &bounds.MaxStateDeltaKeys) + checkSetMax(p.MaxAppProgramLen, &bounds.MaxEvalDeltaAccounts) + checkSetMax(p.MaxAppProgramLen, &bounds.MaxAppProgramLen) + checkSetMax((int(p.LogicSigMaxSize) * p.MaxTxGroupSize), &bounds.MaxLogicSigMaxSize) + checkSetMax(p.MaxTxnNoteBytes, &bounds.MaxTxnNoteBytes) + checkSetMax(p.MaxTxGroupSize, &bounds.MaxTxGroupSize) // MaxBytesKeyValueLen is max of MaxAppKeyLen and MaxAppBytesValueLen - checkSetMax(p.MaxAppKeyLen, &MaxBytesKeyValueLen) - checkSetMax(p.MaxAppBytesValueLen, &MaxBytesKeyValueLen) - checkSetMax(p.MaxExtraAppProgramPages, &MaxExtraAppProgramLen) + checkSetMax(p.MaxAppKeyLen, &bounds.MaxBytesKeyValueLen) + checkSetMax(p.MaxAppBytesValueLen, &bounds.MaxBytesKeyValueLen) + checkSetMax(p.MaxExtraAppProgramPages, &bounds.MaxExtraAppProgramLen) // MaxAvailableAppProgramLen is the max of supported app program size - MaxAvailableAppProgramLen = MaxAppProgramLen * (1 + MaxExtraAppProgramLen) + bounds.MaxAvailableAppProgramLen = bounds.MaxAppProgramLen * (1 + bounds.MaxExtraAppProgramLen) // There is no consensus parameter for MaxLogCalls and MaxAppProgramLen as an approximation // Its value is much larger than any possible reasonable MaxLogCalls value in future - checkSetMax(p.MaxAppProgramLen, &MaxLogCalls) - checkSetMax(p.MaxInnerTransactions*p.MaxTxGroupSize, &MaxInnerTransactionsPerDelta) - checkSetMax(p.MaxProposedExpiredOnlineAccounts, &MaxProposedExpiredOnlineAccounts) - checkSetMax(p.Payouts.MaxMarkAbsent, &MaxMarkAbsent) + checkSetMax(p.MaxAppProgramLen, &bounds.MaxLogCalls) + checkSetMax(p.MaxInnerTransactions*p.MaxTxGroupSize, &bounds.MaxInnerTransactionsPerDelta) + checkSetMax(p.MaxProposedExpiredOnlineAccounts, &bounds.MaxProposedExpiredOnlineAccounts) + checkSetMax(p.Payouts.MaxMarkAbsent, &bounds.MaxMarkAbsent) // These bounds are exported to make them available to the msgp generator for calculating // maximum valid message size for each message going across the wire. 
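// Reviewer note (illustrative sketch, not introduced by this patch): checkSetAllocBounds
// is invoked for each supported consensus version, so every bounds.* variable ends up
// holding the largest value of that limit across all protocols, roughly:
//
//     for _, p := range config.Consensus {
//         if p.MaxTxGroupSize > bounds.MaxTxGroupSize { // same folding checkSetMax performs
//             bounds.MaxTxGroupSize = p.MaxTxGroupSize
//         }
//     }
//
// which is what lets the msgp-generated decoders size their allocation limits without
// importing the config package itself.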
- checkSetMax(p.MaxAppTotalArgLen, &MaxAppTotalArgLen) - checkSetMax(p.MaxAssetNameBytes, &MaxAssetNameBytes) - checkSetMax(p.MaxAssetUnitNameBytes, &MaxAssetUnitNameBytes) - checkSetMax(p.MaxAssetURLBytes, &MaxAssetURLBytes) - checkSetMax(p.MaxAppBytesValueLen, &MaxAppBytesValueLen) - checkSetMax(p.MaxAppKeyLen, &MaxAppBytesKeyLen) - checkSetMax(int(p.StateProofTopVoters), &StateProofTopVoters) - checkSetMax(p.MaxTxnBytesPerBlock, &MaxTxnBytesPerBlock) - - checkSetMax(p.MaxAppTxnForeignApps, &MaxAppTxnForeignApps) + checkSetMax(p.MaxAppTotalArgLen, &bounds.MaxAppTotalArgLen) + checkSetMax(p.MaxAssetNameBytes, &bounds.MaxAssetNameBytes) + checkSetMax(p.MaxAssetUnitNameBytes, &bounds.MaxAssetUnitNameBytes) + checkSetMax(p.MaxAssetURLBytes, &bounds.MaxAssetURLBytes) + checkSetMax(p.MaxAppBytesValueLen, &bounds.MaxAppBytesValueLen) + checkSetMax(p.MaxAppKeyLen, &bounds.MaxAppBytesKeyLen) + checkSetMax(int(p.StateProofTopVoters), &bounds.StateProofTopVoters) + checkSetMax(p.MaxTxnBytesPerBlock, &bounds.MaxTxnBytesPerBlock) + + checkSetMax(p.MaxAppTxnForeignApps, &bounds.MaxAppTxnForeignApps) } // DeepCopy creates a deep copy of a consensus protocols map. @@ -1486,6 +1423,7 @@ func initConsensusProtocols() { vFuture.LogicSigVersion = 12 // When moving this to a release, put a new higher LogicSigVersion here vFuture.EnableAppVersioning = true // if not promoted when v12 goes into effect, update logic/field.go + vFuture.EnableSha512BlockHash = true Consensus[protocol.ConsensusFuture] = vFuture diff --git a/config/localTemplate.go b/config/localTemplate.go index db32ce742c..8525031f59 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -17,7 +17,6 @@ package config import ( - "errors" "fmt" "os" "path/filepath" @@ -780,12 +779,28 @@ func (cfg Local) IsHybridServer() bool { func (cfg Local) ValidateP2PHybridConfig() error { if cfg.EnableP2PHybridMode { if cfg.NetAddress == "" && cfg.P2PHybridNetAddress != "" || cfg.NetAddress != "" && cfg.P2PHybridNetAddress == "" { - return errors.New("both NetAddress and P2PHybridNetAddress must be set or unset") + return P2PHybridConfigError{ + msg: "P2PHybridMode requires both NetAddress and P2PHybridNetAddress to be set or unset", + } + } + // In hybrid mode we want to prevent connections from the same node over both P2P and WS. + // The only way it is supported at the moment is to use net identity challenge that is based on PublicAddress. 
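// Reviewer note (summary, not part of this patch): together with the paired-address check
// above, hybrid mode now accepts exactly two shapes of configuration: no listening
// addresses at all (a non-relay hybrid node), or both NetAddress and P2PHybridNetAddress
// set alongside a PublicAddress. Any listening address without a PublicAddress, or only
// one of the two addresses, is rejected, which is what the expanded table in
// TestLocal_ValidateP2PHybridConfig exercises.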
+ if (cfg.NetAddress != "" || cfg.P2PHybridNetAddress != "") && cfg.PublicAddress == "" { + return P2PHybridConfigError{msg: "PublicAddress must be specified when EnableP2PHybridMode is set"} } } return nil } +// P2PHybridConfigError is an error type for P2PHybrid configuration issues +type P2PHybridConfigError struct { + msg string +} + +func (e P2PHybridConfigError) Error() string { + return e.msg +} + // ensureAbsGenesisDir will convert a path to absolute, and will attempt to make a genesis directory there func ensureAbsGenesisDir(path string, genesisID string) (string, error) { pathAbs, err := filepath.Abs(path) diff --git a/config/migrate.go b/config/migrate.go index 0670ab626e..5522454094 100644 --- a/config/migrate.go +++ b/config/migrate.go @@ -29,8 +29,16 @@ import ( // it's implemented in ./config/defaults_gen.go, and should be the only "consumer" of this exported variable var AutogenLocal = GetVersionedDefaultLocalConfig(getLatestConfigVersion()) -func migrate(cfg Local) (newCfg Local, err error) { +// MigrationResult represents a single field migration from one version to another +type MigrationResult struct { + FieldName string + OldVersion, NewVersion uint32 + OldValue, NewValue any +} + +func migrate(cfg Local) (newCfg Local, migrations []MigrationResult, err error) { newCfg = cfg + originalVersion := cfg.Version latestConfigVersion := getLatestConfigVersion() if cfg.Version > latestConfigVersion { @@ -38,6 +46,9 @@ func migrate(cfg Local) (newCfg Local, err error) { return } + // Track which fields were migrated during this entire process + migrationResults := make(map[string]MigrationResult) + for { if newCfg.Version == latestConfigVersion { break @@ -77,6 +88,14 @@ func migrate(cfg Local) (newCfg Local, err error) { // we're skipping the error checking here since we already tested that in the unit test. boolVal, _ := strconv.ParseBool(nextVersionDefaultValue) reflect.ValueOf(&newCfg).Elem().FieldByName(field.Name).SetBool(boolVal) + if m, exists := migrationResults[field.Name]; exists { + m.NewValue = boolVal + m.NewVersion = nextVersion + migrationResults[field.Name] = m + } else { + oldValue := reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).Bool() + migrationResults[field.Name] = MigrationResult{FieldName: field.Name, OldVersion: originalVersion, NewVersion: nextVersion, OldValue: oldValue, NewValue: boolVal} + } } case reflect.Int32: fallthrough @@ -87,6 +106,14 @@ func migrate(cfg Local) (newCfg Local, err error) { // we're skipping the error checking here since we already tested that in the unit test. intVal, _ := strconv.ParseInt(nextVersionDefaultValue, 10, 64) reflect.ValueOf(&newCfg).Elem().FieldByName(field.Name).SetInt(intVal) + if m, exists := migrationResults[field.Name]; exists { + m.NewValue = intVal + m.NewVersion = nextVersion + migrationResults[field.Name] = m + } else { + oldValue := reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).Int() + migrationResults[field.Name] = MigrationResult{FieldName: field.Name, OldVersion: originalVersion, NewVersion: nextVersion, OldValue: oldValue, NewValue: intVal} + } } case reflect.Uint32: fallthrough @@ -97,17 +124,41 @@ func migrate(cfg Local) (newCfg Local, err error) { // we're skipping the error checking here since we already tested that in the unit test. 
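// Reviewer note (not part of this patch): the bookkeeping is the same in each of the
// Bool/Int/Uint/String branches: the first time a field is migrated its
// OldValue/OldVersion are captured, and later version steps only advance
// NewValue/NewVersion, so a field migrated across several config versions still yields a
// single MigrationResult. A caller could surface the results roughly like:
//
//     cfg, migrations, err := loadConfigFromFile(path) // hypothetical call site
//     if err != nil { /* handle error */ }
//     for _, m := range migrations {
//         log.Infof("%s upgraded from %v (v%d) to %v (v%d)",
//             m.FieldName, m.OldValue, m.OldVersion, m.NewValue, m.NewVersion)
//     }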
uintVal, _ := strconv.ParseUint(nextVersionDefaultValue, 10, 64) reflect.ValueOf(&newCfg).Elem().FieldByName(field.Name).SetUint(uintVal) + if m, exists := migrationResults[field.Name]; exists { + m.NewValue = uintVal + m.NewVersion = nextVersion + migrationResults[field.Name] = m + } else { + oldValue := reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).Uint() + migrationResults[field.Name] = MigrationResult{FieldName: field.Name, OldVersion: originalVersion, NewVersion: nextVersion, OldValue: oldValue, NewValue: uintVal} + } } case reflect.String: if reflect.ValueOf(&newCfg).Elem().FieldByName(field.Name).String() == reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).String() { // we're skipping the error checking here since we already tested that in the unit test. reflect.ValueOf(&newCfg).Elem().FieldByName(field.Name).SetString(nextVersionDefaultValue) + if m, exists := migrationResults[field.Name]; exists { + m.NewValue = nextVersionDefaultValue + m.NewVersion = nextVersion + migrationResults[field.Name] = m + } else { + oldValue := reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).String() + migrationResults[field.Name] = MigrationResult{FieldName: field.Name, OldVersion: originalVersion, NewVersion: nextVersion, OldValue: oldValue, NewValue: nextVersionDefaultValue} + } } default: panic(fmt.Sprintf("unsupported data type (%s) encountered when reflecting on config.Local datatype %s", reflect.ValueOf(&defaultCurrentConfig).Elem().FieldByName(field.Name).Kind(), field.Name)) } } } + + // Only return migrations where the value actually changed + for _, m := range migrationResults { + if m.FieldName != "Version" && m.OldValue != m.NewValue { + migrations = append(migrations, m) + } + } + return } diff --git a/config/version.go b/config/version.go index 7494c6bd05..67ee33ceb3 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 4 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 1 +const VersionMinor = 2 // Version is the type holding our full version information. type Version struct { @@ -58,9 +58,6 @@ type Version struct { // Branch-derived release channel the build is based on Channel string - - // DataDirectory for the current instance - DataDirectory string } func (v Version) String() string { @@ -95,14 +92,13 @@ func convertToInt(val string) int { } var currentVersion = Version{ - Major: VersionMajor, - Minor: VersionMinor, - BuildNumber: convertToInt(BuildNumber), // set using -ldflags - Suffix: "", - CommitHash: CommitHash, - Branch: Branch, - Channel: Channel, - DataDirectory: "", + Major: VersionMajor, + Minor: VersionMinor, + BuildNumber: convertToInt(BuildNumber), // set using -ldflags + Suffix: "", + CommitHash: CommitHash, + Branch: Branch, + Channel: Channel, } // GetCurrentVersion retrieves a copy of the current global Version structure (for the application) @@ -122,14 +118,6 @@ func SetCurrentVersion(version Version) { currentVersion = version } -// UpdateVersionDataDir is a convenience method for setting the data dir on the global Version struct -// Used by algod and algoh to set built-time ephemeral version component e.g. 
data directory -func UpdateVersionDataDir(dataDir string) { - v := GetCurrentVersion() - v.DataDirectory = dataDir - SetCurrentVersion(v) -} - // GetAlgorandVersion retrieves the current version formatted as a simple version string (Major.Minor.BuildNumber) func GetAlgorandVersion() string { return currentVersion.String() diff --git a/crypto/hashes.go b/crypto/hashes.go index b4392300bf..557ff23d2c 100644 --- a/crypto/hashes.go +++ b/crypto/hashes.go @@ -43,6 +43,7 @@ const ( Sha512_256 HashType = iota Sumhash Sha256 + Sha512 MaxHashType ) @@ -55,8 +56,12 @@ const ( Sha512_256Size = sha512.Size256 SumhashDigestSize = sumhash.Sumhash512DigestSize Sha256Size = sha256.Size + Sha512Size = sha512.Size ) +// Sha512Digest is a 64-byte digest produced by the SHA-512 hash function. +type Sha512Digest [Sha512Size]byte + // HashFactory is responsible for generating new hashes accordingly to the type it stores. // //msgp:postunmarshalcheck HashFactory Validate @@ -76,6 +81,8 @@ func (h HashType) String() string { return "sumhash" case Sha256: return "sha256" + case Sha512: + return "sha512" default: return "" } @@ -90,6 +97,8 @@ func UnmarshalHashType(s string) (HashType, error) { return Sumhash, nil case "sha256": return Sha256, nil + case "sha512": + return Sha512, nil default: return 0, fmt.Errorf("HashType not supported: %s", s) } @@ -105,6 +114,8 @@ func (z HashFactory) NewHash() hash.Hash { return sumhash.New512(nil) case Sha256: return sha256.New() + case Sha512: + return sha512.New() // This shouldn't be reached, when creating a new hash, one would know the type of hash they wanted, // in addition to that, unmarshalling of the hashFactory verifies the HashType of the factory. default: diff --git a/crypto/merklesignature/kats_test.go b/crypto/merklesignature/kats_test.go index 53aff9c2f0..c4c1962fae 100644 --- a/crypto/merklesignature/kats_test.go +++ b/crypto/merklesignature/kats_test.go @@ -24,8 +24,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -60,8 +58,8 @@ func generateMssKat(startRound, atRound, numOfKeys uint64, messageToSign []byte) return mssKat{}, fmt.Errorf("error: Signature round cann't be smaller then start round") } - interval := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval - stateProofSecrets, err := New(startRound, startRound+(interval*numOfKeys)-1, interval) + const spInterval = 256 + stateProofSecrets, err := New(startRound, startRound+(spInterval*numOfKeys)-1, spInterval) if err != nil { return mssKat{}, fmt.Errorf("error: %w", err) } diff --git a/crypto/merklesignature/merkleSignatureScheme.go b/crypto/merklesignature/merkleSignatureScheme.go index a17a59c84b..51b0a9e2c7 100644 --- a/crypto/merklesignature/merkleSignatureScheme.go +++ b/crypto/merklesignature/merkleSignatureScheme.go @@ -253,7 +253,7 @@ func (s *Signature) ValidateSaltVersion(version byte) error { return nil } -// FirstRoundInKeyLifetime calculates the round of the valid key for a given round by lowering to the closest KeyLiftime divisor. +// FirstRoundInKeyLifetime calculates the round of the valid key for a given round by lowering to the closest KeyLifetime divisor. 
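// (Worked example, added for clarity rather than by this patch: with the default
// KeyLifetime of 256, round 1000 lowers to 768, i.e. round - round%KeyLifetime, and
// rounds 768 through 1023 all resolve to the same key valid at 768.)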
func (v *Verifier) FirstRoundInKeyLifetime(round uint64) (uint64, error) { if v.KeyLifetime == 0 { return 0, ErrKeyLifetimeIsZero diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go index fc279029a0..ca58b84370 100644 --- a/crypto/msgp_gen.go +++ b/crypto/msgp_gen.go @@ -241,6 +241,16 @@ import ( // |-----> (*) MsgIsZero // |-----> SeedMaxSize() // +// Sha512Digest +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> Sha512DigestMaxSize() +// // Signature // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -3242,6 +3252,60 @@ func SeedMaxSize() (s int) { return } +// MarshalMsg implements msgp.Marshaler +func (z *Sha512Digest) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendBytes(o, (*z)[:]) + return +} + +func (_ *Sha512Digest) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*Sha512Digest) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Sha512Digest) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + bts, err = msgp.ReadExactBytes(bts, (*z)[:]) + if err != nil { + err = msgp.WrapError(err) + return + } + o = bts + return +} + +func (z *Sha512Digest) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *Sha512Digest) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*Sha512Digest) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Sha512Digest) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + (Sha512Size * (msgp.ByteSize)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *Sha512Digest) MsgIsZero() bool { + return (*z) == (Sha512Digest{}) +} + +// MaxSize returns a maximum valid message size for this message type +func Sha512DigestMaxSize() (s int) { + // Calculating size of array: z + s = msgp.ArrayHeaderSize + ((Sha512Size) * (msgp.ByteSize)) + return +} + // MarshalMsg implements msgp.Marshaler func (z *Signature) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) diff --git a/crypto/msgp_gen_test.go b/crypto/msgp_gen_test.go index 0105a58f1d..f864b58c44 100644 --- a/crypto/msgp_gen_test.go +++ b/crypto/msgp_gen_test.go @@ -1214,6 +1214,66 @@ func BenchmarkUnmarshalSeed(b *testing.B) { } } +func TestMarshalUnmarshalSha512Digest(t *testing.T) { + partitiontest.PartitionTest(t) + v := Sha512Digest{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingSha512Digest(t *testing.T) { + protocol.RunEncodingTest(t, &Sha512Digest{}) +} + +func BenchmarkMarshalMsgSha512Digest(b *testing.B) { + v := Sha512Digest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgSha512Digest(b *testing.B) { + v := Sha512Digest{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalSha512Digest(b *testing.B) { + v := Sha512Digest{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalSignature(t *testing.T) { partitiontest.PartitionTest(t) v := Signature{} diff --git a/crypto/stateproof/structs.go b/crypto/stateproof/structs.go index ecb8782da8..418e2bbed3 100644 --- a/crypto/stateproof/structs.go +++ b/crypto/stateproof/structs.go @@ -83,7 +83,7 @@ type StateProof struct { } // SigPartProofMaxSize is the maximum valid size of SigProofs and PartProofs elements of the Stateproof struct in bytes. -// It is equal to merklearray.ProofMaxSizeByElements(config.StateProofTopVoters/2) +// It is equal to merklearray.ProofMaxSizeByElements(bounds.StateProofTopVoters/2) // See merklearray.Proof comment for explanation on the bound calculation const SigPartProofMaxSize = 35353 diff --git a/crypto/stateproof/verifier.go b/crypto/stateproof/verifier.go index 1afbf0ab45..823f2a0c72 100644 --- a/crypto/stateproof/verifier.go +++ b/crypto/stateproof/verifier.go @@ -22,6 +22,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklearray" + "github.com/algorand/go-algorand/data/basics" ) // Errors for the StateProof verifier @@ -65,7 +66,7 @@ func MkVerifierWithLnProvenWeight(partcom crypto.GenericDigest, lnProvenWt uint6 // Verify checks if s is a valid state proof for the data on a round. // it uses the trusted data from the Verifier struct -func (v *Verifier) Verify(round uint64, data MessageHash, s *StateProof) error { +func (v *Verifier) Verify(round basics.Round, data MessageHash, s *StateProof) error { if err := verifyStateProofTreesDepth(s); err != nil { return err } @@ -96,7 +97,7 @@ func (v *Verifier) Verify(round uint64, data MessageHash, s *StateProof) error { // verify that the msg and the signature is valid under the given participant's Pk err = r.Part.PK.VerifyBytes( - round, + uint64(round), data[:], &r.SigSlot.Sig, ) diff --git a/crypto/stateproof/weights_test.go b/crypto/stateproof/weights_test.go index 19a93f3622..667257a259 100644 --- a/crypto/stateproof/weights_test.go +++ b/crypto/stateproof/weights_test.go @@ -21,7 +21,7 @@ import ( "math" "testing" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto/merklearray" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" @@ -192,7 +192,7 @@ func TestSigPartProofMaxSize(t *testing.T) { // Ensures that the SigPartProofMaxSize constant used for maxtotalbytes for StateProof.(Sig|Part)Proof(s) is // correct. It should be logically bound by the maximum number of StateProofTopVoters. It is scaled by 1/2 // see merkelarray.Proof comment for explanation of the size calculation. 
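// Reviewer note (not part of this patch): SigPartProofMaxSize is the precomputed constant
// 35353 from crypto/stateproof/structs.go, so pinning it against
// merklearray.ProofMaxSizeByElements(bounds.StateProofTopVoters/2) means this test will
// fail, and the constant must be recomputed, if a future consensus version ever raises
// StateProofTopVoters.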
- require.Equal(t, SigPartProofMaxSize, merklearray.ProofMaxSizeByElements(config.StateProofTopVoters/2)) + require.Equal(t, SigPartProofMaxSize, merklearray.ProofMaxSizeByElements(bounds.StateProofTopVoters/2)) } func BenchmarkVerifyWeights(b *testing.B) { diff --git a/crypto/statetrie/nibbles/nibbles.go b/crypto/statetrie/nibbles/nibbles.go index ade9117674..30518148b3 100644 --- a/crypto/statetrie/nibbles/nibbles.go +++ b/crypto/statetrie/nibbles/nibbles.go @@ -83,10 +83,7 @@ func ShiftLeft(nyb1 Nibbles, numNibbles int) Nibbles { // SharedPrefix returns a slice from nyb1 that contains the shared prefix // between nyb1 and nyb2 func SharedPrefix(nyb1 Nibbles, nyb2 Nibbles) Nibbles { - minLength := len(nyb1) - if len(nyb2) < minLength { - minLength = len(nyb2) - } + minLength := min(len(nyb2), len(nyb1)) for i := 0; i < minLength; i++ { if nyb1[i] != nyb2[i] { return nyb1[:i] diff --git a/daemon/algod/api/Makefile b/daemon/algod/api/Makefile index 5dd3055944..ce7ec7b8f4 100644 --- a/daemon/algod/api/Makefile +++ b/daemon/algod/api/Makefile @@ -1,48 +1,41 @@ -GOPATH := $(shell go env GOPATH) -GOPATH1 := $(firstword $(subst :, ,$(GOPATH))) - # Allow overriding swagger-converter API, e.g. for use with local container SWAGGER_CONVERTER_API ?= https://converter.swagger.io -# `make all` or just `make` should be appropriate for dev work -all: server/v2/generated/model/types.go server/v2/generated/nonparticipating/public/routes.go server/v2/generated/nonparticipating/private/routes.go server/v2/generated/participating/public/routes.go server/v2/generated/participating/private/routes.go server/v2/generated/data/routes.go server/v2/generated/experimental/routes.go - -# `make generate` should be able to replace old `generate.sh` script and be appropriate for build system use -generate: oapi-codegen all - -server/v2/generated/nonparticipating/public/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/nonparticipating/public/public_routes.yml algod.oas3.yml - -server/v2/generated/nonparticipating/private/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/nonparticipating/private/private_routes.yml algod.oas3.yml - -server/v2/generated/participating/public/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/participating/public/public_routes.yml algod.oas3.yml - -server/v2/generated/participating/private/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/participating/private/private_routes.yml algod.oas3.yml - - -server/v2/generated/data/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/data/data_routes.yml algod.oas3.yml - -server/v2/generated/experimental/routes.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/experimental/experimental_routes.yml algod.oas3.yml - -server/v2/generated/model/types.go: algod.oas3.yml - $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/model/model_types.yml algod.oas3.yml - -algod.oas3.yml: algod.oas2.json - jq < algod.oas2.json > /dev/null # fail with a nice explantion if json is malformed - ! grep '"type": "number"' $< # Don't use the number type. 
Use integer (and format uint64 usually) - curl -s -X POST "$(SWAGGER_CONVERTER_API)/api/convert" -H "accept: application/json" -H "Content-Type: application/json" -d @./algod.oas2.json -o .3tmp.json - python3 jsoncanon.py < .3tmp.json > algod.oas3.yml +GEN := \ + server/v2/generated/nonparticipating/public/routes.go \ + server/v2/generated/nonparticipating/private/routes.go \ + server/v2/generated/participating/public/routes.go \ + server/v2/generated/participating/private/routes.go \ + server/v2/generated/data/routes.go \ + server/v2/generated/experimental/routes.go \ + server/v2/generated/model/types.go + +all: $(GEN) + +# `make generate` exists because the old Makefile was written in a way +# that presumed oapi-codegen was already installed. CI used `make +# generate` to install it first. Now `go run` makes everything work +# no matter what. We could move to `go tool` later, if we want the +# versioning to be controlled in go.mod instead of here. +generate: all + +OAPI := go run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.4.1 + +# Pattern rule for all routes.go generation +%/routes.go: %/routes.yml algod.oas3.yml + $(OAPI) -config $^ + +# Only one types.go, but it's still more succinct to use a pattern +%/types.go: %/types.yml algod.oas3.yml + $(OAPI) -config $^ + +# We use Makefile as a dependency here so everything gets rebuilt when you mess with how things are built. +algod.oas3.yml: algod.oas2.json Makefile + jq < $< > /dev/null # fail with a nice explantion if json is malformed + ! grep '"type": "number"' $< # Don't use the number type. Use integer (and format uint64 usually) + curl -s -X POST "$(SWAGGER_CONVERTER_API)/api/convert" -H "accept: application/json" -H "Content-Type: application/json" -d @./$< -o .3tmp.json + python3 jsoncanon.py < .3tmp.json > $@ rm -f .3tmp.json -oapi-codegen: .PHONY - ../../../scripts/buildtools/install_buildtools.sh -o github.com/algorand/oapi-codegen -c github.com/algorand/oapi-codegen/cmd/oapi-codegen - clean: - rm -rf server/v2/generated/model/types.go server/v2/generated/nonparticipating/public/routes.go server/v2/generated/nonparticipating/private/routes.go server/v2/generated/participating/public/routes.go server/v2/generated/participating/private/routes.go server/v2/generated/data/routes.go algod.oas3.yml - -.PHONY: + rm -rf $(GEN) algod.oas3.yml diff --git a/daemon/algod/api/README.md b/daemon/algod/api/README.md index f8b9cc48c4..3c82566d7f 100644 --- a/daemon/algod/api/README.md +++ b/daemon/algod/api/README.md @@ -43,9 +43,11 @@ containing per-round ledger differences that get compacted when actually written ## What codegen tool is used? -We found that [oapi-codegen](https://github.com/deepmap/oapi-codegen) produced the cleanest code, and had an easy to work with codebase. There is an algorand fork of this project which contains a couple modifications that were needed to properly support our needs. - -Specifically, `uint64` types aren't strictly supported by OpenAPI. So we added a type-mapping feature to oapi-codegen. +We found that [oapi-codegen](https://github.com/deepmap/oapi-codegen) +produced the cleanest code, and had an easy to work with codebase. We +initially forked it in `algorand/oapi-codegen` but found that features +we added are now available in the upstream repo, so have migrated +back. ## Why do we have algod.oas2.json and algod.oas3.yml? 
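Reviewer note on the Makefile rewrite above (illustrative, not part of this patch): because `$^` expands to both prerequisites, a target such as server/v2/generated/data/routes.go is now generated by roughly

    go run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.4.1 -config server/v2/generated/data/routes.yml algod.oas3.yml

This assumes each generated package keeps its oapi-codegen config in a file named routes.yml (types.yml for the model package) so the pattern rules can match it, and it means the generator version is pinned only in this Makefile until it is moved into go.mod via `go tool`, as the comment suggests.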
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 3884ef64f2..813fcd0063 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -1,14 +1,7 @@ { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http", - "https" - ], + "consumes": ["application/json"], + "produces": ["application/json"], + "schemes": ["http", "https"], "swagger": "2.0", "info": { "description": "API endpoint for algod operations.", @@ -25,16 +18,9 @@ "paths": { "/health": { "get": { - "tags": [ - "public", - "common" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Returns OK if healthy.", "operationId": "HealthCheck", "responses": { @@ -49,16 +35,9 @@ }, "/ready": { "get": { - "tags": [ - "public", - "common" - ], - "produces": [ - "application/json" - ], - "scheme": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["application/json"], + "scheme": ["http"], "summary": "Returns OK if healthy and fully caught up.", "operationId": "GetReady", "responses": { @@ -79,16 +58,9 @@ }, "/metrics": { "get": { - "tags": [ - "public", - "common" - ], - "produces": [ - "text/plain" - ], - "schemes": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["text/plain"], + "schemes": ["http"], "summary": "Return metrics about algod functioning.", "operationId": "Metrics", "responses": { @@ -104,16 +76,9 @@ "/genesis": { "get": { "description": "Returns the entire genesis file in json.", - "tags": [ - "public", - "common" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets the genesis information.", "operationId": "GetGenesis", "responses": { @@ -132,16 +97,9 @@ "/swagger.json": { "get": { "description": "Returns the entire swagger spec in json.", - "tags": [ - "public", - "common" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets the current swagger spec.", "operationId": "SwaggerJSON", "responses": { @@ -160,16 +118,9 @@ "/versions": { "get": { "description": "Retrieves the supported API versions, binary build versions, and genesis information.", - "tags": [ - "public", - "common" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "common"], + "produces": ["application/json"], + "schemes": ["http"], "operationId": "GetVersion", "responses": { "200": { @@ -181,15 +132,9 @@ "/debug/settings/pprof": { "get": { "description": "Retrieves the current settings for blocking and mutex profiles", - "tags": [ - "private" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["private"], + "produces": ["application/json"], + "schemes": ["http"], "operationId": "GetDebugSettingsProf", "responses": { "200": { @@ -199,15 +144,9 @@ }, "put": { "description": "Enables blocking and mutex profiles, and returns the old settings", - "tags": [ - "private" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["private"], + "produces": ["application/json"], + "schemes": ["http"], "operationId": "PutDebugSettingsProf", "responses": { "200": { @@ -219,15 +158,9 @@ "/debug/settings/config": { "get": { "description": "Returns 
the merged (defaults + overrides) config file in json.", - "tags": [ - "private" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["private"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets the merged config file.", "operationId": "GetConfig", "responses": { @@ -246,27 +179,14 @@ "/v2/accounts/{address}": { "get": { "description": "Given a specific account public key, this call returns the account's status, balance and spendable amounts", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get account information.", "operationId": "AccountInformation", "parameters": [ { - "pattern": "[A-Z0-9]{58}", - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { "name": "exclude", @@ -274,10 +194,7 @@ "in": "query", "required": false, "type": "string", - "enum": [ - "all", - "none" - ] + "enum": ["all", "none"] }, { "$ref": "#/parameters/format" @@ -309,56 +226,22 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "address", - "in": "path", - "required": true - }, - { - "enum": [ - "json", - "msgpack" - ], - "type": "string", - "name": "format", - "in": "query" - } - ] + } }, "/v2/accounts/{address}/assets/{asset-id}": { "get": { "description": "Given a specific account public key and asset ID, this call returns the account's asset holding and asset parameters (if either exist). Asset parameters will only be returned if the provided address is the asset's creator.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get account information about a given asset.", "operationId": "AccountAssetInformation", "parameters": [ { - "pattern": "[A-Z0-9]{58}", - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { - "type": "integer", - "description": "An asset identifier", - "name": "asset-id", - "in": "path", - "required": true + "$ref": "#/parameters/asset-id" }, { "$ref": "#/parameters/format" @@ -390,48 +273,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "address", - "in": "path", - "required": true - }, - { - "enum": [ - "json", - "msgpack" - ], - "type": "string", - "name": "format", - "in": "query" - } - ] + } }, "/v2/accounts/{address}/assets": { "get": { "description": "Lookup an account's asset holdings.", - "tags": [ - "public", - "experimental" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "experimental"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get a list of assets held by an account, inclusive of asset params.", "operationId": "AccountAssetsInformation", "parameters": [ { - "pattern": "[A-Z0-9]{58}", - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { "$ref": "#/parameters/limit" @@ -466,47 +320,22 @@ 
"description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "address", - "in": "path", - "required": true - } - ] + } }, "/v2/accounts/{address}/applications/{application-id}": { "get": { "description": "Given a specific account public key and application ID, this call returns the account's application local state and global state (AppLocalState and AppParams, if either exists). Global state will only be returned if the provided address is the application's creator.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get account information about a given app.", "operationId": "AccountApplicationInformation", "parameters": [ { - "pattern": "[A-Z0-9]{58}", - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { - "type": "integer", - "description": "An application identifier", - "name": "application-id", - "in": "path", - "required": true + "$ref": "#/parameters/application-id" }, { "$ref": "#/parameters/format" @@ -538,49 +367,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "address", - "in": "path", - "required": true - }, - { - "enum": [ - "json", - "msgpack" - ], - "type": "string", - "name": "format", - "in": "query" - } - ] + } }, "/v2/accounts/{address}/transactions/pending": { "get": { "description": "Get the list of pending transactions by address, sorted by priority, in decreasing order, truncated at the end at MAX. If MAX = 0, returns all pending transactions.\n", - "tags": [ - "public", - "participating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "participating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get a list of unconfirmed transactions currently in the transaction pool by address.", "operationId": "GetPendingTransactionsByAddress", "parameters": [ { - "pattern": "[A-Z0-9]{58}", - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { "$ref": "#/parameters/max" @@ -621,39 +420,18 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "address", - "in": "path", - "required": true - } - ] + } }, "/v2/blocks/{round}": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get the block for the given round.", "operationId": "GetBlock", "parameters": [ { - "minimum": 0, - "type": "integer", - "description": "The round from which to fetch block information.", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" }, { "type": "boolean", @@ -697,47 +475,18 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "round", - "in": "path", - "required": true - }, - { - "enum": [ - "json", - "msgpack" - ], - "type": "string", - "name": "format", - "in": "query" - } - ] + } }, "/v2/blocks/{round}/txids": { "get": 
{ - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get the top level transaction IDs for the block on the given round.", "operationId": "GetBlockTxids", "parameters": [ { - "minimum": 0, - "type": "integer", - "description": "The round from which to fetch block transaction IDs.", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" } ], "responses": { @@ -776,26 +525,14 @@ }, "/v2/blocks/{round}/hash": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get the block hash for the block on the given round.", "operationId": "GetBlockHash", "parameters": [ { - "minimum": 0, - "type": "integer", - "description": "The round from which to fetch block hash information.", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" } ], "responses": { @@ -834,25 +571,14 @@ }, "/v2/blocks/{round}/transactions/{txid}/proof": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get a proof for a transaction in a block.", "operationId": "GetTransactionProof", "parameters": [ { - "type": "integer", - "description": "The round in which the transaction appears.", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" }, { "type": "string", @@ -864,10 +590,7 @@ }, { "type": "string", - "enum": [ - "sha512_256", - "sha256" - ], + "enum": ["sha512_256", "sha256"], "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256", "name": "hashtype", "in": "query", @@ -909,45 +632,19 @@ "description": "Unknown error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "round", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "txid", - "in": "path", - "required": true - } - ] + } }, "/v2/blocks/{round}/logs": { "get": { - "tags": [ - "public", - "nonparticipating" - ], + "tags": ["public", "nonparticipating"], "description": "Get all of the logs from outer and inner app calls in the given round", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get all of the logs from outer and inner app calls in the given round", "operationId": "GetBlockLogs", "parameters": [ { - "minimum": 0, - "type": "integer", - "description": "The round from which to fetch block log information.", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" } ], "responses": { @@ -983,16 +680,9 @@ }, "/v2/ledger/supply": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get the current supply reported by the ledger.", "operationId": "GetSupply", "responses": { @@ -1013,17 +703,10 @@ }, "/v2/participation": { "get": { - "tags": [ - "private", - "participating" - ], + "tags": ["private", "participating"], 
"description": "Return a list of participation keys", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Return a list of participation keys", "operationId": "GetParticipationKeys", "responses": { @@ -1061,19 +744,10 @@ } }, "post": { - "tags": [ - "private", - "participating" - ], - "consumes": [ - "application/msgpack" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["private", "participating"], + "consumes": ["application/msgpack"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Add a participation key to the node", "operationId": "AddParticipationKey", "parameters": [ @@ -1130,34 +804,26 @@ }, "/v2/participation/generate/{address}": { "post": { - "tags": [ - "private", - "participating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["private", "participating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Generate and install participation keys to the node.", "operationId": "GenerateParticipationKeys", "parameters": [ { - "type": "string", - "description": "An account public key", - "name": "address", - "in": "path", - "required": true + "$ref": "#/parameters/address" }, { "type": "integer", + "format": "uint64", "description": "Key dilution for two-level participation keys (defaults to sqrt of validity window).", "name": "dilution", "in": "query" }, { "type": "integer", + "format": "uint64", + "x-go-type": "basics.Round", "description": "First round for participation key.", "name": "first", "in": "query", @@ -1165,6 +831,8 @@ }, { "type": "integer", + "format": "uint64", + "x-go-type": "basics.Round", "description": "Last round for participation key.", "name": "last", "in": "query", @@ -1210,17 +878,10 @@ }, "/v2/participation/{participation-id}": { "delete": { - "tags": [ - "private", - "participating" - ], + "tags": ["private", "participating"], "description": "Delete a given participation key by ID", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Delete a given participation key by ID", "operationId": "DeleteParticipationKeyByID", "responses": { @@ -1257,17 +918,10 @@ } }, "get": { - "tags": [ - "private", - "participating" - ], + "tags": ["private", "participating"], "description": "Given a participation ID, return information about that participation key", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get participation key info given a participation ID", "operationId": "GetParticipationKeyByID", "responses": { @@ -1305,17 +959,10 @@ } }, "post": { - "tags": [ - "private", - "participating" - ], + "tags": ["private", "participating"], "description": "Given a participation ID, append state proof keys to a particular set of participation keys", - "consumes": [ - "application/msgpack" - ], - "produces": [ - "application/json" - ], + "consumes": ["application/msgpack"], + "produces": ["application/json"], "parameters": [ { "description": "The state proof keys to add to an existing participation ID", @@ -1328,9 +975,7 @@ } } ], - "schemes": [ - "http" - ], + "schemes": ["http"], "summary": "Append state proof keys to a participation key", "operationId": "AppendKeys", "responses": { @@ -1379,10 +1024,7 @@ "/v2/shutdown": { "post": { "description": "Special management endpoint to 
shutdown the node. Optionally provide a timeout parameter to indicate that the node should begin shutting down after a number of seconds.", - "tags": [ - "private", - "nonparticipating" - ], + "tags": ["private", "nonparticipating"], "operationId": "ShutdownNode", "parameters": [ { @@ -1403,16 +1045,9 @@ }, "/v2/status": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets the current node status.", "operationId": "GetStatus", "responses": { @@ -1439,27 +1074,15 @@ }, "/v2/status/wait-for-block-after/{round}": { "get": { - "tags": [ - "public", - "nonparticipating" - ], + "tags": ["public", "nonparticipating"], "description": "Waits for a block to appear after round {round} and returns the node's status at the time. There is a 1 minute timeout, when reached the current status is returned regardless of whether or not it is the round after the given round.", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets the node status after waiting for a round after the given round.", "operationId": "WaitForBlock", "parameters": [ { - "minimum": 0, - "type": "integer", - "description": "The round to wait until returning status", - "name": "round", - "in": "path", - "required": true + "$ref": "#/parameters/round" } ], "responses": { @@ -1495,31 +1118,14 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "round", - "in": "path", - "required": true - } - ] + } }, "/v2/transactions": { "post": { - "tags": [ - "public", - "participating" - ], - "consumes": [ - "application/x-binary" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "participating"], + "consumes": ["application/x-binary"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Broadcasts a raw transaction or transaction group to the network.", "operationId": "RawTransaction", "parameters": [ @@ -1570,16 +1176,9 @@ }, "/v2/transactions/async": { "post": { - "tags": [ - "public", - "experimental" - ], - "consumes": [ - "application/x-binary" - ], - "schemes": [ - "http" - ], + "tags": ["public", "experimental"], + "consumes": ["application/x-binary"], + "schemes": ["http"], "summary": "Fast track for broadcasting a raw transaction or transaction group to the network through the tx handler without performing most of the checks and reporting detailed errors. Should be only used for development and performance testing.", "operationId": "RawTransactionAsync", "parameters": [ @@ -1633,21 +1232,10 @@ }, "/v2/transactions/simulate": { "post": { - "tags": [ - "public", - "nonparticipating" - ], - "consumes": [ - "application/json", - "application/msgpack" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "consumes": ["application/json", "application/msgpack"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Simulates a raw transaction or transaction group as it would be evaluated on the network. 
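`RawTransaction` above keeps its `application/x-binary` request body; only the tag/produce arrays are compacted. For reference, a sketch of posting an already-signed, msgpack-encoded transaction blob. Producing `signedTxnBytes` (signing and encoding) is out of scope here and simply assumed, as are the node address and token header.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// broadcast posts raw signed transaction bytes to POST /v2/transactions.
// The response carries the required "txId" field (see PostTransactionsResponse
// later in this diff).
func broadcast(node, token string, signedTxnBytes []byte) (string, error) {
	req, err := http.NewRequest("POST", node+"/v2/transactions", bytes.NewReader(signedTxnBytes))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/x-binary")
	req.Header.Set("X-Algo-API-Token", token) // assumed token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var out struct {
		TxID string `json:"txId"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.TxID, nil
}

func main() {
	// signedTxnBytes would normally come from an SDK signer; nil is a placeholder.
	txid, err := broadcast("http://localhost:8080", "your-algod-api-token", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("submitted transaction:", txid)
}
```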
The simulation will use blockchain state from the latest committed round.", "operationId": "SimulateTransaction", "parameters": [ @@ -1700,16 +1288,9 @@ }, "/v2/transactions/params": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get parameters for constructing a new transaction", "operationId": "TransactionParams", "responses": { @@ -1743,17 +1324,9 @@ "/v2/transactions/pending": { "get": { "description": "Get the list of pending transactions, sorted by priority, in decreasing order, truncated at the end at MAX. If MAX = 0, returns all pending transactions.\n", - "tags": [ - "public", - "participating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "participating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get a list of unconfirmed transactions currently in the transaction pool.", "operationId": "GetPendingTransactions", "parameters": [ @@ -1795,17 +1368,9 @@ "/v2/transactions/pending/{txid}": { "get": { "description": "Given a transaction ID of a recently submitted transaction, it returns information about it. There are several cases when this might succeed:\n- transaction committed (committed round \u003e 0)\n- transaction still in the pool (committed round = 0, pool error = \"\")\n- transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n", - "tags": [ - "public", - "participating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "participating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get a specific pending transaction.", "operationId": "PendingTransactionInformation", "parameters": [ @@ -1850,40 +1415,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "txid", - "in": "path", - "required": true - } - ] + } }, "/v2/deltas/{round}": { "get": { "description": "Get ledger deltas for a round.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get a LedgerStateDelta object for a given round", "operationId": "GetLedgerStateDelta", "parameters": [ { - "type": "integer", - "description": "The round for which the deltas are desired.", - "name": "round", - "in": "path", - "required": true, - "minimum": 0 + "$ref": "#/parameters/round" }, { "$ref": "#/parameters/format" @@ -1932,27 +1476,14 @@ "/v2/deltas/{round}/txn/group": { "get": { "description": "Get ledger deltas for transaction groups in a given round.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get LedgerStateDelta objects for all transaction groups in a given round", "operationId": 
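The `TransactionParams` hunk above only compacts the tag/produce arrays; the endpoint itself is unchanged. A small sketch of fetching suggested parameters before building a transaction; since the response schema is not reproduced in this hunk, the body is decoded into a generic map rather than a typed struct, and the node address and token are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	const node = "http://localhost:8080" // placeholder algod address

	// GET /v2/transactions/params returns parameters for constructing a new transaction.
	req, err := http.NewRequest("GET", node+"/v2/transactions/params", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Algo-API-Token", "your-algod-api-token") // assumed token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode generically; consult the TransactionParams response schema in the
	// full spec for the exact field names.
	var params map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&params); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("suggested params: %v\n", params)
}
```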
"GetTransactionGroupLedgerStateDeltasForRound", "parameters": [ { - "type": "integer", - "description": "The round for which the deltas are desired.", - "name": "round", - "in": "path", - "required": true, - "minimum": 0 + "$ref": "#/parameters/round" }, { "$ref": "#/parameters/format" @@ -2001,17 +1532,9 @@ "/v2/deltas/txn/group/{id}": { "get": { "description": "Get a ledger delta for a given transaction group.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json", - "application/msgpack" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json", "application/msgpack"], + "schemes": ["http"], "summary": "Get a LedgerStateDelta object for a given transaction group", "operationId": "GetLedgerStateDeltaForTransactionGroup", "parameters": [ @@ -2069,26 +1592,14 @@ }, "/v2/stateproofs/{round}": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get a state proof that covers a given round", "operationId": "GetStateProof", "parameters": [ { - "type": "integer", - "description": "The round for which a state proof is desired.", - "name": "round", - "in": "path", - "required": true, - "minimum": 0 + "$ref": "#/parameters/round" } ], "responses": { @@ -2129,38 +1640,18 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "round", - "in": "path", - "required": true - } - ] + } }, "/v2/blocks/{round}/lightheader/proof": { "get": { - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Gets a proof for a given light block header inside a state proof commitment", "operationId": "GetLightBlockHeaderProof", "parameters": [ { - "type": "integer", - "description": "The round to which the light block header belongs.", - "name": "round", - "in": "path", - "required": true, - "minimum": 0 + "$ref": "#/parameters/round" } ], "responses": { @@ -2201,38 +1692,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "round", - "in": "path", - "required": true - } - ] + } }, "/v2/applications/{application-id}": { "get": { "description": "Given a application ID, it returns application information including creator, approval and clear programs, global and local schemas, and global state.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get application information.", "operationId": "GetApplicationByID", "parameters": [ { - "type": "integer", - "description": "An application identifier", - "name": "application-id", - "in": "path", - "required": true + "$ref": "#/parameters/application-id" } ], "responses": { @@ -2268,41 +1740,23 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "application-id", - "in": "path", - "required": true - } - ] + } }, "/v2/applications/{application-id}/boxes": { "get": { "description": "Given an application ID, return all Box names. No particular ordering is guaranteed. 
Request fails when client or server-side configured limits prevent returning all Box names.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get all box names for a given application.", "operationId": "GetApplicationBoxes", "parameters": [ { - "type": "integer", - "description": "An application identifier", - "name": "application-id", - "in": "path", - "required": true + "$ref": "#/parameters/application-id" }, { "type": "integer", + "x-go-type": "uint64", "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.", "name": "max", "in": "query" @@ -2335,38 +1789,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "application-id", - "in": "path", - "required": true - } - ] + } }, "/v2/applications/{application-id}/box": { "get": { "description": "Given an application ID and box name, it returns the round, box name, and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get box information for a given application.", "operationId": "GetApplicationBoxByName", "parameters": [ { - "type": "integer", - "description": "An application identifier", - "name": "application-id", - "in": "path", - "required": true + "$ref": "#/parameters/application-id" }, { "type": "string", @@ -2409,44 +1844,19 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "application-id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "name", - "in": "query", - "required": true - } - ] + } }, "/v2/assets/{asset-id}": { "get": { "description": "Given a asset ID, it returns asset information including creator, name, total supply and special addresses.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Get asset information.", "operationId": "GetAssetByID", "parameters": [ { - "type": "integer", - "description": "An asset identifier", - "name": "asset-id", - "in": "path", - "required": true + "$ref": "#/parameters/asset-id" } ], "responses": { @@ -2482,26 +1892,13 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "integer", - "name": "asset-id", - "in": "path", - "required": true - } - ] + } }, "/v2/ledger/sync": { "delete": { "description": "Unset the ledger sync round.", - "tags": [ - "public", - "data" - ], - "schemes": [ - "http" - ], + "tags": ["public", "data"], + "schemes": ["http"], "summary": "Removes minimum sync round restriction from the ledger.", "operationId": "UnsetSyncRound", "responses": { @@ -2539,13 +1936,8 @@ }, "get": { "description": "Gets the minimum sync round for the ledger.", - "tags": [ - "public", - "data" - ], - "schemes": [ - "http" - ], + "tags": ["public", "data"], + "schemes": ["http"], "summary": 
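The `GetApplicationBoxByName` description above spells out the goal-style name encodings (`int:`, `b64:`, `str:`, `addr:`). A sketch of a box lookup using the `str:` form; the application ID, box name, node address, and token are placeholders, and the `name` query value is URL-encoded because it can contain `:` and base64 characters.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	const node = "http://localhost:8080" // placeholder algod address
	const appID = 123                    // placeholder application identifier

	// GET /v2/applications/{application-id}/box?name=str:hello
	q := url.Values{}
	q.Set("name", "str:hello") // goal app call arg encoding form, per the endpoint description

	u := fmt.Sprintf("%s/v2/applications/%d/box?%s", node, appID, q.Encode())
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Algo-API-Token", "your-algod-api-token") // assumed token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The Box definition requires round, name, and value; name and value are base64 encoded.
	var box struct {
		Round uint64 `json:"round"`
		Name  string `json:"name"`
		Value string `json:"value"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&box); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round %d: name=%s value=%s\n", box.Round, box.Name, box.Value)
}
```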
"Returns the minimum sync round the ledger is keeping in cache.", "operationId": "GetSyncRound", "responses": { @@ -2585,23 +1977,13 @@ "/v2/ledger/sync/{round}": { "post": { "description": "Sets the minimum sync round on the ledger.", - "tags": [ - "public", - "data" - ], - "schemes": [ - "http" - ], + "tags": ["public", "data"], + "schemes": ["http"], "summary": "Given a round, tells the ledger to keep that round in its cache.", "operationId": "SetSyncRound", "parameters": [ { - "type": "integer", - "description": "The round for which the deltas are desired.", - "name": "round", - "in": "path", - "required": true, - "minimum": 0 + "$ref": "#/parameters/round" } ], "responses": { @@ -2641,19 +2023,10 @@ "/v2/teal/compile": { "post": { "description": "Given TEAL source code in plain text, return base64 encoded program bytes and base32 SHA512_256 hash of program bytes (Address style). This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.", - "tags": [ - "public", - "nonparticipating" - ], - "consumes": [ - "text/plain" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "consumes": ["text/plain"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Compile TEAL source code to binary, produce its hash", "operationId": "TealCompile", "parameters": [ @@ -2709,19 +2082,10 @@ "/v2/teal/disassemble": { "post": { "description": "Given the program bytes, return the TEAL source code in plain text. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.", - "tags": [ - "public", - "nonparticipating" - ], - "consumes": [ - "application/x-binary" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "consumes": ["application/x-binary"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Disassemble program bytes into the TEAL source code.", "operationId": "TealDisassemble", "parameters": [ @@ -2770,17 +2134,10 @@ }, "/v2/catchup/{catchpoint}": { "post": { - "tags": [ - "private", - "nonparticipating" - ], + "tags": ["private", "nonparticipating"], "description": "Given a catchpoint, it starts catching up to this catchpoint", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Starts a catchpoint catchup.", "operationId": "StartCatchup", "parameters": [ @@ -2791,7 +2148,8 @@ "name": "min", "description": "Specify the minimum number of blocks which the ledger must be advanced by in order to start the catchup. 
This is useful for simplifying tools which support fast catchup, they can run the catchup unconditionally and the node will skip the catchup if it is not needed.", "in": "query", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } ], "responses": { @@ -2833,17 +2191,10 @@ } }, "delete": { - "tags": [ - "private", - "nonparticipating" - ], + "tags": ["private", "nonparticipating"], "description": "Given a catchpoint, it aborts catching up to this catchpoint", - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Aborts a catchpoint catchup.", "operationId": "AbortCatchup", "parameters": [ @@ -2877,33 +2228,15 @@ "description": "Unknown Error" } } - }, - "parameters": [ - { - "type": "string", - "name": "catchpoint", - "in": "path", - "required": true - } - ] + } }, "/v2/teal/dryrun": { "post": { "description": "Executes TEAL program(s) in context and returns debugging information about the execution. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.", - "tags": [ - "public", - "nonparticipating" - ], - "consumes": [ - "application/json", - "application/msgpack" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "consumes": ["application/json", "application/msgpack"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Provide debugging information for a transaction (or group).", "operationId": "TealDryrun", "parameters": [ @@ -2950,16 +2283,9 @@ }, "/v2/experimental": { "get": { - "tags": [ - "public", - "experimental" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "experimental"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Returns OK if experimental API is enabled.", "operationId": "ExperimentalCheck", "responses": { @@ -2978,16 +2304,9 @@ "/v2/devmode/blocks/offset": { "get": { "description": "Gets the current timestamp offset.", - "tags": [ - "public", - "nonparticipating" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "produces": ["application/json"], + "schemes": ["http"], "summary": "Returns the timestamp offset. Timestamp offsets can only be set in dev mode.", "operationId": "GetBlockTimeStampOffset", "responses": { @@ -3010,18 +2329,14 @@ "/v2/devmode/blocks/offset/{offset}": { "post": { "description": "Sets the timestamp offset (seconds) for blocks in dev mode. 
Providing an offset of 0 will unset this value and try to use the real clock for the timestamp.", - "tags": [ - "public", - "nonparticipating" - ], - "schemes": [ - "http" - ], + "tags": ["public", "nonparticipating"], + "schemes": ["http"], "summary": "Given a timestamp offset in seconds, adds the offset to every subsequent block header's timestamp.", "operationId": "SetBlockTimeStampOffset", "parameters": [ { "type": "integer", + "x-go-type": "uint64", "description": "The timestamp offset for blocks in dev mode.", "name": "offset", "in": "path", @@ -3060,7 +2375,7 @@ } }, "definitions": { - "GenesisAllocation":{ + "GenesisAllocation": { "title": "Allocations for Genesis File", "type": "object", "properties": { @@ -3075,7 +2390,7 @@ "properties": { "algo": { "type": "integer", - "format" : "uint64" + "format": "uint64" }, "onl": { "type": "integer" @@ -3091,29 +2406,23 @@ }, "voteKD": { "type": "integer", - "format" : "uint64" + "format": "uint64" }, "voteFst": { "type": "integer", - "format" : "uint64" + "format": "uint64" }, "voteLst": { "type": "integer", - "format" : "uint64" + "format": "uint64" } }, - "required": [ - "algo" - ] + "required": ["algo", "onl"] } }, - "required": [ - "addr", - "comment", - "state" - ] + "required": ["addr", "comment", "state"] }, - "Genesis":{ + "Genesis": { "title": "Genesis File in JSON", "type": "object", "properties": { @@ -3146,18 +2455,10 @@ }, "timestamp": { "type": "integer", - "format" : "int64" + "format": "int64" } }, - "required": [ - "alloc", - "fees", - "id", - "network", - "proto", - "rwd", - "timestamp" - ] + "required": ["alloc", "fees", "id", "network", "proto", "rwd", "timestamp"] }, "LedgerStateDelta": { "description": "Ledger StateDelta object", @@ -3167,10 +2468,7 @@ "LedgerStateDeltaForTransactionGroup": { "description": "Contains a ledger delta for a single transaction group", "type": "object", - "required": [ - "Delta", - "Ids" - ], + "required": ["Delta", "Ids"], "properties": { "Delta": { "$ref": "#/definitions/LedgerStateDelta" @@ -3207,15 +2505,18 @@ }, "amount": { "description": "\\[algo\\] total number of MicroAlgos in the account", - "type": "integer" + "type": "integer", + "format": "uint64" }, "min-balance": { "description": "MicroAlgo balance required by the account.\n\nThe requirement grows based on asset and application usage.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "amount-without-pending-rewards": { "description": "specifies the amount of MicroAlgos in the account, without the pending rewards.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "apps-local-state": { "description": "\\[appl\\] applications local data stored in this account.\n\nNote the raw object uses `map[int] -\u003e AppLocalState` for this type.", @@ -3226,7 +2527,8 @@ }, "total-apps-opted-in": { "description": "The count of all applications that have been opted in, equivalent to the count of application local data (AppLocalState objects) stored in this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "apps-total-schema": { "description": "\\[tsch\\] stores the sum of all of the local schemas and global schemas in this account.\n\nNote: the raw account uses `StateSchema` for this type.", @@ -3234,7 +2536,8 @@ }, "apps-total-extra-pages": { "description": "\\[teap\\] the sum of all extra application program pages for this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "assets": { "description": "\\[asset\\] assets held by this account.\n\nNote the raw 
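The dev-mode offset hunks above add `x-go-type: uint64` to the `offset` path parameter without changing behaviour. A sketch of setting and then reading the offset on a dev-mode node; the 25-second value, node address, and token are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

const node = "http://localhost:8080" // placeholder dev-mode algod node
const token = "your-algod-api-token" // assumed token header value

// do issues a bodyless request against the node and returns the response.
func do(method, path string) *http.Response {
	req, err := http.NewRequest(method, node+path, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Algo-API-Token", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	return resp
}

func main() {
	// POST /v2/devmode/blocks/offset/{offset}: subsequent block timestamps get +25s.
	// Posting 0 would unset the offset and fall back to the real clock.
	do("POST", "/v2/devmode/blocks/offset/25").Body.Close()

	// GET /v2/devmode/blocks/offset: the offset response defined later in this
	// spec has a single required "offset" field (seconds).
	resp := do("GET", "/v2/devmode/blocks/offset")
	defer resp.Body.Close()
	var out struct {
		Offset uint64 `json:"offset"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println("current timestamp offset (seconds):", out.Offset)
}
```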
object uses `map[int] -\u003e AssetHolding` for this type.", @@ -3245,7 +2548,8 @@ }, "total-assets-opted-in": { "description": "The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "created-apps": { "description": "\\[appp\\] parameters of applications created by this account including app global data.\n\nNote: the raw account uses `map[int] -\u003e AppParams` for this type.", @@ -3256,7 +2560,8 @@ }, "total-created-apps": { "description": "The count of all apps (AppParams objects) created by this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "created-assets": { "description": "\\[apar\\] parameters of assets created by this account.\n\nNote: the raw account uses `map[int] -\u003e Asset` for this type.", @@ -3267,15 +2572,18 @@ }, "total-created-assets": { "description": "The count of all assets (AssetParams objects) created by this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "total-boxes": { "description": "\\[tbx\\] The number of existing boxes created by this account's app.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "total-box-bytes": { "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "participation": { "$ref": "#/definitions/AccountParticipation" @@ -3286,19 +2594,23 @@ }, "pending-rewards": { "description": "amount of MicroAlgos of pending rewards in this account.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reward-base": { "description": "\\[ebase\\] used as part of the rewards computation. Only applicable to accounts which are participating.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rewards": { "description": "\\[ern\\] total rewards of MicroAlgos the account has received, including pending rewards.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "status": { "description": "\\[onl\\] delegation status of the account's MicroAlgos\n* Offline - indicates that the associated account is delegated.\n* Online - indicates that the associated account used as part of the delegation pool.\n* NotParticipating - indicates that the associated account is neither a delegator nor a delegate.", @@ -3307,11 +2619,7 @@ "sig-type": { "description": "Indicates what type of signature is used by this account, must be one of:\n* sig\n* msig\n* lsig", "type": "string", - "enum": [ - "sig", - "msig", - "lsig" - ] + "enum": ["sig", "msig", "lsig"] }, "auth-addr": { "description": "\\[spend\\] the address against which signing should be checked. If empty, the address of the current account is used. 
This field can be updated in any transaction by setting the RekeyTo field.", @@ -3320,20 +2628,20 @@ }, "last-proposed": { "description": "The round in which this account last proposed the block.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-heartbeat": { "description": "The round in which this account last went online, or explicitly renewed their online status.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } } }, "AccountAssetHolding": { "description": "AccountAssetHolding describes the account's asset holding and asset parameters (if either exist) for a specific asset ID.", "type": "object", - "required": [ - "asset-holding" - ], + "required": ["asset-holding"], "properties": { "asset-holding": { "description": "\\[asset\\] Details about the asset held by this account.\n\nThe raw account uses `AssetHolding` for this type.", @@ -3363,15 +2671,18 @@ }, "vote-first-valid": { "description": "\\[voteFst\\] First round for which this participation is valid.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "vote-key-dilution": { "description": "\\[voteKD\\] Number of subkeys in each batch of participation keys.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "vote-last-valid": { "description": "\\[voteLst\\] Last round for which this participation is valid.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "vote-participation-key": { "description": "\\[vote\\] root participation public key (if any) currently registered for this round.", @@ -3388,14 +2699,12 @@ "Asset": { "description": "Specifies both the unique identifier and the parameters for an asset", "type": "object", - "required": [ - "index", - "params" - ], + "required": ["index", "params"], "properties": { "index": { "description": "unique asset identifier", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AssetIndex" }, "params": { "$ref": "#/definitions/AssetParams" @@ -3405,20 +2714,18 @@ "AssetHolding": { "description": "Describes an asset held by an account.\n\nDefinition:\ndata/basics/userBalance.go : AssetHolding", "type": "object", - "required": [ - "asset-id", - "amount", - "is-frozen" - ], + "required": ["asset-id", "amount", "is-frozen"], "properties": { "amount": { "description": "\\[a\\] number of units held.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" }, "asset-id": { "description": "Asset ID of the holding.", "type": "integer", + "x-go-type": "basics.AssetIndex", "x-go-name": "AssetID" }, "is-frozen": { @@ -3430,11 +2737,7 @@ "AssetParams": { "description": "AssetParams specifies the parameters for an asset.\n\n\\[apar\\] when part of an AssetConfig transaction.\n\nDefinition:\ndata/transactions/asset.go : AssetParams", "type": "object", - "required": [ - "creator", - "total", - "decimals" - ], + "required": ["creator", "total", "decimals"], "properties": { "clawback": { "description": "\\[c\\] Address of account used to clawback holdings of this asset. If empty, clawback is not permitted.", @@ -3447,6 +2750,7 @@ "decimals": { "description": "\\[dc\\] The number of digits to use after the decimal point when displaying this asset. If 0, the asset is not divisible. If 1, the base unit of the asset is in tenths. If 2, the base unit of the asset is in hundredths, and so on. 
This value must be between 0 and 19 (inclusive).", "type": "integer", + "format": "uint64", "maximum": 19, "minimum": 0 }, @@ -3483,6 +2787,7 @@ "total": { "description": "\\[t\\] The total number of units of this asset.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" }, "unit-name": { @@ -3508,10 +2813,7 @@ "AssetHoldingReference": { "description": "References an asset held by an account.", "type": "object", - "required": [ - "account", - "asset" - ], + "required": ["account", "asset"], "properties": { "account": { "description": "Address of the account holding the asset.", @@ -3521,6 +2823,7 @@ "asset": { "description": "Asset ID of the holding.", "type": "integer", + "x-go-type": "basics.AssetIndex", "x-algorand-format": "uint64" } } @@ -3528,10 +2831,7 @@ "ApplicationLocalReference": { "description": "References an account's local state for an application.", "type": "object", - "required": [ - "account", - "app" - ], + "required": ["account", "app"], "properties": { "account": { "description": "Address of the account with the local state.", @@ -3541,6 +2841,7 @@ "app": { "description": "Application ID of the local state application.", "type": "integer", + "x-go-type": "basics.AppIndex", "x-algorand-format": "uint64" } } @@ -3548,32 +2849,29 @@ "ApplicationStateSchema": { "description": "Specifies maximums on the number of each type that may be stored.", "type": "object", - "required": [ - "num-uint", - "num-byte-slice" - ], + "required": ["num-uint", "num-byte-slice"], "properties": { "num-uint": { "description": "\\[nui\\] num of uints.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "num-byte-slice": { "description": "\\[nbs\\] num of byte slices.", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, "ApplicationLocalState": { "description": "Stores local state associated with an application.", "type": "object", - "required": [ - "id", - "schema" - ], + "required": ["id", "schema"], "properties": { "id": { "description": "The application which this local state is for.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "schema": { "description": "\\[hsch\\] schema.", @@ -3588,11 +2886,7 @@ "ParticipationKey": { "description": "Represents a participation key used by the node.", "type": "object", - "required": [ - "id", - "key", - "address" - ], + "required": ["id", "key", "address"], "properties": { "id": { "description": "The key's ParticipationID.", @@ -3606,24 +2900,29 @@ "effective-first-valid": { "description": "When registered, this is the first round it may be used.", "type": "integer", + "x-go-type": "basics.Round", "x-algorand-format": "uint64" }, "effective-last-valid": { "description": "When registered, this is the last round it may be used.", "type": "integer", + "x-go-type": "basics.Round", "x-algorand-format": "uint64" }, "last-vote": { "description": "Round when this key was last used to vote.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-block-proposal": { "description": "Round when this key was last used to propose a block.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-state-proof": { "description": "Round when this key was last used to generate a state proof.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "key": { "description": "Key information stored on the account.", @@ -3641,10 +2940,7 @@ "TealKeyValue": { "description": "Represents a key-value pair in an application store.", 
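Several of the definitions above (`AssetHolding`, `ApplicationStateSchema`, `ApplicationLocalState`) gain explicit `format: uint64` / `x-go-type` annotations while keeping the same JSON wire shape. A sketch of decoding an `AssetHolding` object by hand; the struct below is an illustrative stand-in and the numbers are placeholders, not the generated model or real data.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// assetHolding mirrors the AssetHolding definition: amount, asset-id, and
// is-frozen are all required. Hand-written illustration, not generated code.
type assetHolding struct {
	Amount   uint64 `json:"amount"`    // [a] number of units held
	AssetID  uint64 `json:"asset-id"`  // asset ID of the holding
	IsFrozen bool   `json:"is-frozen"` // whether the holding is frozen
}

func main() {
	// Example payload shaped like the spec; values are placeholders.
	raw := []byte(`{"amount": 5000000, "asset-id": 12345, "is-frozen": false}`)

	var h assetHolding
	if err := json.Unmarshal(raw, &h); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("asset %d: %d base units (frozen=%v)\n", h.AssetID, h.Amount, h.IsFrozen)
}
```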
"type": "object", - "required": [ - "key", - "value" - ], + "required": ["key", "value"], "properties": { "key": { "type": "string" @@ -3657,15 +2953,12 @@ "TealValue": { "description": "Represents a TEAL value.", "type": "object", - "required": [ - "type", - "uint", - "bytes" - ], + "required": ["type", "uint", "bytes"], "properties": { "type": { "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "bytes": { "description": "\\[tb\\] bytes value.", @@ -3674,6 +2967,7 @@ "uint": { "description": "\\[ui\\] uint value.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" } } @@ -3681,13 +2975,12 @@ "AvmValue": { "description": "Represents an AVM value.", "type": "object", - "required": [ - "type" - ], + "required": ["type"], "properties": { "type": { "description": "value type. Value `1` refers to **bytes**, value `2` refers to **uint64**", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "bytes": { "description": "bytes value.", @@ -3697,6 +2990,7 @@ "uint": { "description": "uint value.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" } } @@ -3704,10 +2998,7 @@ "AvmKeyValue": { "description": "Represents an AVM key-value pair in an application store.", "type": "object", - "required": [ - "key", - "value" - ], + "required": ["key", "value"], "properties": { "key": { "type": "string", @@ -3728,10 +3019,7 @@ "AccountStateDelta": { "description": "Application state delta.", "type": "object", - "required": [ - "address", - "delta" - ], + "required": ["address", "delta"], "properties": { "address": { "type": "string" @@ -3744,10 +3032,7 @@ "EvalDeltaKeyValue": { "description": "Key-value pairs for StateDelta.", "type": "object", - "required": [ - "key", - "value" - ], + "required": ["key", "value"], "properties": { "key": { "type": "string" @@ -3760,13 +3045,12 @@ "EvalDelta": { "description": "Represents a TEAL value delta.", "type": "object", - "required": [ - "action" - ], + "required": ["action"], "properties": { "action": { "description": "\\[at\\] delta action.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes": { "description": "\\[bs\\] bytes value.", @@ -3775,6 +3059,7 @@ "uint": { "description": "\\[ui\\] uint value.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" } } @@ -3782,14 +3067,12 @@ "Application": { "description": "Application index and its parameters", "type": "object", - "required": [ - "id", - "params" - ], + "required": ["id", "params"], "properties": { "id": { "description": "\\[appidx\\] application index.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "params": { "description": "\\[appparams\\] application parameters.", @@ -3800,11 +3083,7 @@ "ApplicationParams": { "description": "Stores the global information associated with an application.", "type": "object", - "required": [ - "creator", - "approval-program", - "clear-state-program" - ], + "required": ["creator", "approval-program", "clear-state-program"], "properties": { "creator": { "description": "The address that created this application. 
This is the address where the parameters and global state for this application can be found.", @@ -3825,7 +3104,8 @@ }, "extra-program-pages": { "description": "\\[epp\\] the amount of extra program pages available to this app.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "local-state-schema": { "description": "\\[lsch\\] local schema", @@ -3841,18 +3121,15 @@ }, "version": { "description": "\\[v\\] the number of updates to the application programs", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } } }, "DryrunState": { "description": "Stores the TEAL eval step data", "type": "object", - "required": [ - "line", - "pc", - "stack" - ], + "required": ["line", "pc", "stack"], "properties": { "line": { "description": "Line number", @@ -3883,9 +3160,7 @@ "DryrunTxnResult": { "description": "DryrunTxnResult contains any LogicSig or ApplicationCall program debug information and state updates from a dryrun.", "type": "object", - "required": [ - "disassembly" - ], + "required": ["disassembly"], "properties": { "disassembly": { "description": "Disassembled program line by line.", @@ -3954,9 +3229,7 @@ "ErrorResponse": { "description": "An error response with optional data field.", "type": "object", - "required": [ - "message" - ], + "required": ["message"], "properties": { "data": { "type": "object" @@ -4007,12 +3280,14 @@ "round": { "description": "Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to.", "type": "integer", + "x-go-type": "basics.Round", "x-algorand-format": "uint64" }, "latest-timestamp": { "description": "LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.", "type": "integer", - "format": "int64" + "x-go-type": "int64", + "minimum": 0 }, "sources": { "type": "array", @@ -4025,12 +3300,7 @@ "DryrunSource": { "description": "DryrunSource is TEAL source text that gets uploaded, compiled, and inserted into transactions or application state.", "type": "object", - "required": [ - "field-name", - "source", - "txn-index", - "app-index" - ], + "required": ["field-name", "source", "txn-index", "app-index"], "properties": { "field-name": { "description": "FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex].", @@ -4044,6 +3314,7 @@ }, "app-index": { "type": "integer", + "x-go-type": "basics.AppIndex", "x-algorand-format": "uint64" } } @@ -4051,9 +3322,7 @@ "SimulateRequest": { "description": "Request type for simulation endpoint.", "type": "object", - "required": [ - "txn-groups" - ], + "required": ["txn-groups"], "properties": { "txn-groups": { "description": "The transaction groups to simulate.", @@ -4064,7 +3333,8 @@ }, "round": { "description": "If provided, specifies the round preceding the simulation. State changes through this round will be used to run this simulation. Usually only the 4 most recent rounds will be available (controlled by the node config value MaxAcctLookback). 
If not specified, defaults to the latest available round.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "allow-empty-signatures": { "description": "Allows transactions without signatures to be simulated as if they had correct signatures.", @@ -4094,9 +3364,7 @@ "SimulateRequestTransactionGroup": { "description": "A transaction group to simulate.", "type": "object", - "required": [ - "txns" - ], + "required": ["txns"], "properties": { "txns": { "description": "An atomic transaction group.", @@ -4135,23 +3403,20 @@ "Box": { "description": "Box name and its content.", "type": "object", - "required": [ - "round", - "name", - "value" - ], + "required": ["round","name","value"], "properties": { "round": { "description": "The round for which this information is relevant", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "name": { - "description": "\\[name\\] box name, base64 encoded", + "description": "The box name, base64 encoded", "type": "string", "format": "byte" }, "value": { - "description": "\\[value\\] box value, base64 encoded.", + "description": "The box value, base64 encoded.", "type": "string", "format": "byte" } @@ -4174,14 +3439,12 @@ "BoxReference": { "description": "References a box of an application.", "type": "object", - "required": [ - "app", - "name" - ], + "required": ["app", "name"], "properties": { "app": { "description": "Application ID which this box belongs to", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "name": { "description": "Base64 encoded box name", @@ -4190,32 +3453,11 @@ } } }, - "KvDelta": { - "description": "A single Delta containing the key, the previous value and the current value for a single round.", - "type": "object", - "properties": { - "key": { - "description": "The key, base64 encoded.", - "type": "string", - "format": "byte" - }, - "value": { - "description": "The new value of the KV store entry, base64 encoded.", - "type": "string", - "format": "byte" - } - } - }, "Version": { "description": "algod version information.", "type": "object", "title": "Version contains the current algod version.", - "required": [ - "versions", - "genesis_id", - "genesis_hash_b64", - "build" - ], + "required": ["versions", "genesis_id", "genesis_hash_b64", "build"], "properties": { "build": { "$ref": "#/definitions/BuildVersion" @@ -4239,26 +3481,24 @@ "description": "algod mutex and blocking profiling state.", "type": "object", "title": "algod mutex and blocking profiling state.", - "tags": [ - "private" - ], + "tags": ["private"], "properties": { "block-rate": { "description": "The rate of blocking events. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked. To turn off profiling entirely, pass rate 0.", "example": 1000, - "type": "integer" + "type": "integer", + "format": "uint64" }, "mutex-rate": { "description": "The rate of mutex events. On average 1/rate events are reported. 
To turn off profiling entirely, pass rate 0", "example": 1000, - "type": "integer" + "type": "integer", + "format": "uint64" } } }, "BuildVersion": { - "tags": [ - "common" - ], + "tags": ["common"], "type": "object", "title": "BuildVersion contains the current algod build version information.", "required": [ @@ -4274,8 +3514,7 @@ "type": "string" }, "build_number": { - "type": "integer", - "format": "int64" + "type": "integer" }, "channel": { "type": "string" @@ -4284,30 +3523,27 @@ "type": "string" }, "major": { - "type": "integer", - "format": "int64" + "type": "integer" }, "minor": { - "type": "integer", - "format": "int64" + "type": "integer" } } }, "PendingTransactionResponse": { "description": "Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details.", "type": "object", - "required": [ - "txn", - "pool-error" - ], + "required": ["txn", "pool-error"], "properties": { "asset-index": { "description": "The asset index if the transaction was found and it created an asset.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AssetIndex" }, "application-index": { "description": "The application index if the transaction was found and it created an application.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "close-rewards": { "description": "Rewards in microalgos applied to the close remainder to account.", @@ -4315,15 +3551,18 @@ }, "closing-amount": { "description": "Closing amount for the transaction.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "asset-closing-amount": { "description": "The number of the asset's unit that were transferred to the close-to address.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "confirmed-round": { "description": "The round where this transaction was confirmed, if present.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "pool-error": { "description": "Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). 
An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.\n", @@ -4331,11 +3570,13 @@ }, "receiver-rewards": { "description": "Rewards in microalgos applied to the receiver account.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "sender-rewards": { "description": "Rewards in microalgos applied to the sender account.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "local-state-delta": { "description": "Local state key/value changes for the application being executed by this transaction.", @@ -4373,9 +3614,7 @@ "SimulateTransactionGroupResult": { "description": "Simulation result for an atomic transaction group", "type": "object", - "required": [ - "txn-results" - ], + "required": ["txn-results"], "properties": { "txn-results": { "description": "Simulation result for individual transactions", @@ -4411,9 +3650,7 @@ "SimulateTransactionResult": { "description": "Simulation result for an individual transaction", "type": "object", - "required": [ - "txn-result" - ], + "required": ["txn-result"], "properties": { "txn-result": { "$ref": "#/definitions/PendingTransactionResponse" @@ -4432,7 +3669,7 @@ "unnamed-resources-accessed": { "$ref": "#/definitions/SimulateUnnamedResourcesAccessed" }, - "fixed-signer":{ + "fixed-signer": { "description": "The account that needed to sign this transaction when no signature was provided and the provided signer was incorrect.", "type": "string", "x-algorand-format": "Address" @@ -4442,10 +3679,7 @@ "StateProof": { "description": "Represents a state proof and its corresponding message", "type": "object", - "required": [ - "Message", - "StateProof" - ], + "required": ["Message", "StateProof"], "properties": { "Message": { "$ref": "#/definitions/StateProofMessage" @@ -4460,15 +3694,12 @@ "LightBlockHeaderProof": { "description": "Proof of membership and position of a light block header.", "type": "object", - "required": [ - "index", - "treedepth", - "proof" - ], + "required": ["index", "treedepth", "proof"], "properties": { "index": { "description": "The index of the light block header in the vector commitment tree", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "treedepth": { "description": "Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.", @@ -4505,16 +3736,19 @@ "LnProvenWeight": { "description": "An integer value representing the natural log of the proven weight with 16 bits of precision. 
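`PendingTransactionResponse` above annotates `confirmed-round`, `asset-index`, and `application-index` with Go types while the JSON stays the same. A sketch of polling `GET /v2/transactions/pending/{txid}` (shown earlier in this diff) and distinguishing the three outcomes the endpoint description lists: confirmed, still in the pool, or removed with a pool error. Node address, token, and the transaction ID are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// pendingTxn holds the subset of PendingTransactionResponse fields used below;
// "txn" and "pool-error" are the required members of the definition.
type pendingTxn struct {
	ConfirmedRound uint64 `json:"confirmed-round"` // > 0 once the transaction is committed
	PoolError      string `json:"pool-error"`      // non-empty if kicked out of the pool
	AssetIndex     uint64 `json:"asset-index"`     // set when the transaction created an asset
	AppIndex       uint64 `json:"application-index"`
}

func main() {
	const node = "http://localhost:8080" // placeholder algod address
	const txid = "REPLACE_WITH_TXID"     // placeholder transaction ID

	req, err := http.NewRequest("GET", node+"/v2/transactions/pending/"+txid, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Algo-API-Token", "your-algod-api-token") // assumed token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var p pendingTxn
	if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
		log.Fatal(err)
	}
	switch {
	case p.ConfirmedRound > 0:
		fmt.Println("confirmed in round", p.ConfirmedRound)
	case p.PoolError != "":
		fmt.Println("rejected:", p.PoolError)
	default:
		fmt.Println("still waiting in the transaction pool")
	}
}
```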
This value would be used to verify the next state proof.", "type": "integer", + "format": "uint64", "x-algorand-format": "uint64" }, "FirstAttestedRound": { "description": "The first round the message attests to.", "type": "integer", + "x-go-type": "basics.Round", "x-algorand-format": "uint64" }, "LastAttestedRound": { "description": "The last round the message attests to.", "type": "integer", + "x-go-type": "basics.Round", "x-algorand-format": "uint64" } } @@ -4552,10 +3786,7 @@ "ScratchChange": { "description": "A write operation into a scratch slot.", "type": "object", - "required": [ - "slot", - "new-value" - ], + "required": ["slot", "new-value"], "properties": { "slot": { "description": "The scratch slot written.", @@ -4568,11 +3799,7 @@ }, "ApplicationStateOperation": { "description": "An operation against an application's global/local/box state.", - "required": [ - "operation", - "app-state-type", - "key" - ], + "required": ["operation", "app-state-type", "key"], "properties": { "operation": { "description": "Operation type. Value `w` is **write**, `d` is **delete**.", @@ -4599,9 +3826,7 @@ }, "ApplicationKVStorage": { "description": "An application's global/local/box state.", - "required": [ - "kvs" - ], + "required": ["kvs"], "properties": { "kvs": { "description": "Key-Value pairs representing application states.", @@ -4619,13 +3844,12 @@ }, "ApplicationInitialStates": { "description": "An application's initial global/local/box states that were accessed during simulation.", - "required": [ - "id" - ], + "required": ["id"], "properties": { "id": { "description": "Application index.", "type": "integer", + "x-go-type": "basics.AppIndex", "x-algorand-format": "uint64" }, "app-locals": { @@ -4646,9 +3870,7 @@ "SimulationOpcodeTraceUnit": { "description": "The set of trace information and effect from evaluating a single opcode.", "type": "object", - "required": [ - "pc" - ], + "required": ["pc"], "properties": { "pc": { "description": "The program counter of the current opcode being evaluated.", @@ -4762,6 +3984,7 @@ "type": "array", "items": { "type": "integer", + "x-go-type": "basics.AssetIndex", "x-algorand-format": "uint64" } }, @@ -4770,6 +3993,7 @@ "type": "array", "items": { "type": "integer", + "x-go-type": "basics.AppIndex", "x-algorand-format": "uint64" } }, @@ -4816,11 +4040,7 @@ "AppCallLogs": { "description": "The logged messages from an app call along with the app ID and outer transaction ID. Logs appear in the same order that they were emitted.", "type": "object", - "required": [ - "logs", - "application-index", - "txId" - ], + "required": ["logs", "application-index", "txId"], "properties": { "logs": { "description": "An array of logs", @@ -4832,64 +4052,49 @@ }, "application-index": { "description": "The application from which the logs were generated", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "txId": { "description": "The transaction ID of the outer app call that lead to these logs", "type": "string" } } + }, + "TransactionProof": { + "description": "Proof of transaction in a block.", + "type": "object", + "required": ["proof", "stibhash", "idx", "treedepth", "hashtype"], + "properties": { + "proof": { + "description": "Proof of transaction membership.", + "type": "string", + "format": "byte" + }, + "stibhash": { + "description": "Hash of SignedTxnInBlock for verifying proof.", + "type": "string", + "format": "byte" + }, + "treedepth": { + "description": "Represents the depth of the tree that is being proven, i.e. 
the number of edges from a leaf to the root.", + "type": "integer", + "x-go-type": "uint64" + }, + "idx": { + "description": "Index of the transaction in the block's payset.", + "type": "integer", + "x-go-type": "uint64" + }, + "hashtype": { + "type": "string", + "enum": ["sha512_256", "sha256"], + "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256" + } + } } }, "parameters": { - "account-id": { - "type": "string", - "x-go-name": "AccountID", - "description": "account string", - "name": "account-id", - "in": "path", - "required": true - }, - "address": { - "type": "string", - "description": "Only include transactions with this address in one of the transaction fields.", - "name": "address", - "in": "query" - }, - "address-role": { - "enum": [ - "sender", - "receiver", - "freeze-target" - ], - "type": "string", - "description": "Combine with the address parameter to define what type of address to search for.", - "name": "address-role", - "in": "query" - }, - "after-time": { - "type": "string", - "format": "date-time", - "x-algorand-format": "RFC3339 String", - "description": "Include results after the given time. Must be an RFC 3339 formatted string.", - "name": "after-time", - "in": "query" - }, - "asset-id": { - "type": "integer", - "x-go-name": "AssetID", - "description": "Asset ID", - "name": "asset-id", - "in": "query" - }, - "before-time": { - "type": "string", - "format": "date-time", - "x-algorand-format": "RFC3339 String", - "description": "Include results before the given time. Must be an RFC 3339 formatted string.", - "name": "before-time", - "in": "query" - }, "catchpoint": { "type": "string", "format": "catchpoint", @@ -4900,29 +4105,8 @@ "in": "path", "required": true }, - "currency-greater-than": { - "type": "integer", - "description": "Results should have an amount greater than this value. MicroAlgos are the default currency unless an asset-id is provided, in which case the asset will be used.", - "name": "currency-greater-than", - "in": "query" - }, - "currency-less-than": { - "type": "integer", - "description": "Results should have an amount less than this value. MicroAlgos are the default currency unless an asset-id is provided, in which case the asset will be used.", - "name": "currency-less-than", - "in": "query" - }, - "exclude-close-to": { - "type": "boolean", - "description": "Combine with address and address-role parameters to define what type of address to search for. The close to fields are normally treated as a receiver, if you would like to exclude them set this parameter to true.", - "name": "exclude-close-to", - "in": "query" - }, "format": { - "enum": [ - "json", - "msgpack" - ], + "enum": ["json", "msgpack"], "type": "string", "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "name": "format", @@ -4930,60 +4114,62 @@ }, "limit": { "type": "integer", + "x-go-type": "uint64", "description": "Maximum number of results to return.", "name": "limit", "in": "query" }, "max": { "type": "integer", + "x-go-type": "uint64", "description": "Truncated number of transactions to display. 
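The hunks above hoist the inline proof schema into a named `TransactionProof` definition and remove a number of query-parameter definitions that no algod path references. A sketch of fetching and decoding such a proof with the `hashtype=sha256` option shown earlier in this diff; the round, transaction ID, node address, and token are placeholders.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// txnProof mirrors the new TransactionProof definition: proof, stibhash, idx,
// treedepth, and hashtype are all required; proof and stibhash are base64 strings.
type txnProof struct {
	Proof     string `json:"proof"`
	Stibhash  string `json:"stibhash"`
	Idx       uint64 `json:"idx"`
	Treedepth uint64 `json:"treedepth"`
	Hashtype  string `json:"hashtype"` // "sha512_256" or "sha256"
}

func main() {
	const node = "http://localhost:8080" // placeholder algod address
	const round = 1000                   // placeholder round containing the transaction
	const txid = "REPLACE_WITH_TXID"     // placeholder transaction ID

	// GET /v2/blocks/{round}/transactions/{txid}/proof?hashtype=sha256
	u := fmt.Sprintf("%s/v2/blocks/%d/transactions/%s/proof?hashtype=sha256", node, round, txid)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Algo-API-Token", "your-algod-api-token") // assumed token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var p txnProof
	if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
		log.Fatal(err)
	}
	proofBytes, _ := base64.StdEncoding.DecodeString(p.Proof)
	fmt.Printf("proof for leaf %d at depth %d (%s): %d bytes\n", p.Idx, p.Treedepth, p.Hashtype, len(proofBytes))
}
```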
If max=0, returns all pending txns.", "name": "max", "in": "query" }, - "max-round": { - "type": "integer", - "description": "Include results at or before the specified max-round.", - "name": "max-round", - "in": "query" - }, - "min-round": { - "type": "integer", - "description": "Include results at or after the specified min-round.", - "name": "min-round", - "in": "query" - }, "next": { "type": "string", "description": "The next page of results. Use the next token provided by the previous results.", "name": "next", "in": "query" }, - "note-prefix": { - "type": "string", - "description": "Specifies a prefix which must be contained in the note field.", - "name": "note-prefix", - "in": "query", - "x-algorand-format": "base64" - }, "round": { "type": "integer", - "description": "Include results for the specified round.", + "x-go-type": "basics.Round", + "minimum": 0, + "description": "A round number.", "name": "round", - "in": "query" + "in": "path", + "required": true + }, + "address": { + "type": "string", + "x-go-type": "basics.Address", + "pattern": "[A-Z0-9]{58}", + "description": "An account public key.", + "name": "address", + "in": "path", + "required": true }, - "round-number": { + "asset-id": { "type": "integer", - "description": "Round number", - "name": "round-number", + "x-go-type": "basics.AssetIndex", + "minimum": 0, + "description": "An asset identifier.", + "name": "asset-id", + "in": "path", + "required": true + }, + "application-id": { + "type": "integer", + "x-go-type": "basics.AppIndex", + "minimum": 0, + "description": "An application identifier.", + "name": "application-id", "in": "path", "required": true }, "sig-type": { - "enum": [ - "sig", - "msig", - "lsig" - ], + "enum": ["sig", "msig", "lsig"], "type": "string", "description": "SigType filters just results using the specified type of signature:\n* sig - Standard\n* msig - MultiSig\n* lsig - LogicSig", "name": "sig-type", @@ -4998,15 +4184,7 @@ "in": "query" }, "tx-type": { - "enum": [ - "pay", - "keyreg", - "acfg", - "axfer", - "afrz", - "appl", - "stpf" - ], + "enum": ["pay", "keyreg", "acfg", "axfer", "afrz", "appl", "stpf"], "type": "string", "name": "tx-type", "in": "query" @@ -5017,13 +4195,12 @@ "description": "Response containing the timestamp offset in seconds", "schema": { "type": "object", - "required": [ - "offset" - ], + "required": ["offset"], "properties": { "offset": { "description": "Timestamp offset in seconds.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } } } @@ -5032,13 +4209,12 @@ "description": "Response containing the ledger's minimum sync round", "schema": { "type": "object", - "required": [ - "round" - ], + "required": ["round"], "properties": { "round": { "description": "The minimum sync round for the ledger.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } } } @@ -5053,9 +4229,7 @@ "description": "Response containing all ledger state deltas for transaction groups, with their associated Ids, in a single round.", "schema": { "type": "object", - "required": [ - "Deltas" - ], + "required": ["Deltas"], "properties": { "Deltas": { "type": "array", @@ -5094,13 +4268,12 @@ "description": "AccountAssetResponse describes the account's asset holding and asset parameters (if either exist) for a specific asset ID. 
Asset parameters will only be returned if the provided address is the asset's creator.", "schema": { "type": "object", - "required": [ - "round" - ], + "required": ["round"], "properties": { "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "asset-holding": { "description": "\\[asset\\] Details about the asset held by this account.\n\nThe raw account uses `AssetHolding` for this type.", @@ -5117,13 +4290,12 @@ "description": "AccountAssetsInformationResponse contains a list of assets held by an account.", "schema": { "type": "object", - "required": [ - "round" - ], + "required": ["round"], "properties": { "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "next-token": { "description": "Used for pagination, when making another request provide this token with the next parameter.", @@ -5142,13 +4314,12 @@ "description": "AccountApplicationResponse describes the account's application local state and global state (AppLocalState and AppParams, if either exists) for a specific application ID. Global state will only be returned if the provided address is the application's creator.", "schema": { "type": "object", - "required": [ - "round" - ], + "required": ["round"], "properties": { "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "app-local-state": { "description": "\\[appl\\] the application local data stored in this account.\n\nThe raw account uses `AppLocalState` for this type.", @@ -5165,9 +4336,7 @@ "description": "Encoded block object.", "schema": { "type": "object", - "required": [ - "block" - ], + "required": ["block"], "properties": { "block": { "description": "Block header data.", @@ -5186,9 +4355,7 @@ "description": "Top level transaction IDs in a block.", "schema": { "type": "object", - "required": [ - "blockTxids" - ], + "required": ["blockTxids"], "properties": { "blockTxids": { "description": "Block transaction IDs.", @@ -5204,9 +4371,7 @@ "description": "Hash of a block header.", "schema": { "type": "object", - "required": [ - "blockHash" - ], + "required": ["blockHash"], "properties": { "blockHash": { "description": "Block header hash.", @@ -5218,54 +4383,15 @@ "TransactionProofResponse": { "description": "Proof of transaction in a block.", "schema": { - "type": "object", - "required": [ - "proof", - "stibhash", - "idx", - "treedepth", - "hashtype" - ], - "properties": { - "proof": { - "description": "Proof of transaction membership.", - "type": "string", - "format": "byte" - }, - "stibhash": { - "description": "Hash of SignedTxnInBlock for verifying proof.", - "type": "string", - "format": "byte" - }, - "treedepth": { - "description": "Represents the depth of the tree that is being proven, i.e. 
the number of edges from a leaf to the root.", - "type": "integer" - }, - "idx": { - "description": "Index of the transaction in the block's payset.", - "type": "integer" - }, - "hashtype": { - "type": "string", - "enum": [ - "sha512_256", - "sha256" - ], - "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256" - } - } + "$ref": "#/definitions/TransactionProof" } }, "CatchpointStartResponse": { - "tags": [ - "private" - ], + "tags": ["private"], "schema": { "description": "A catchpoint start response.", "type": "object", - "required": [ - "catchup-message" - ], + "required": ["catchup-message"], "properties": { "catchup-message": { "description": "Catchup start response string", @@ -5275,15 +4401,11 @@ } }, "CatchpointAbortResponse": { - "tags": [ - "private" - ], + "tags": ["private"], "schema": { "description": "A catchpoint abort response.", "type": "object", - "required": [ - "catchup-message" - ], + "required": ["catchup-message"], "properties": { "catchup-message": { "description": "Catchup abort response string", @@ -5309,11 +4431,13 @@ "properties": { "catchup-time": { "description": "CatchupTime in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-version": { "description": "LastVersion indicates the last consensus version supported", @@ -5325,7 +4449,8 @@ }, "next-version-round": { "description": "NextVersionRound is the round at which the next consensus version will apply", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "next-version-supported": { "description": "NextVersionSupported indicates whether the next consensus version is supported by this node", @@ -5337,7 +4462,8 @@ }, "time-since-last-round": { "description": "TimeSinceLastRound in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "last-catchpoint": { "description": "The last catchpoint seen by the node", @@ -5349,39 +4475,48 @@ }, "catchpoint-total-accounts": { "description": "The total number of accounts included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-accounts": { "description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-accounts": { "description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-kvs": { "description": "The total number of key-values (KVs) included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-blocks": { "description": "The total number of blocks that are required to complete the current catchpoint catchup", -
"type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-acquired-blocks": { "description": "The number of blocks that have already been obtained by the node as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "upgrade-delay": { "description": "Upgrade delay", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-node-vote": { "description": "This node's upgrade vote", @@ -5389,27 +4524,33 @@ }, "upgrade-votes-required": { "description": "Yes votes required for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes": { "description": "Total votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-yes-votes": { "description": "Yes votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-no-votes": { "description": "No votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-next-protocol-vote-before": { "description": "Next protocol round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-vote-rounds": { "description": "Total voting rounds for current upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } } } @@ -5419,10 +4560,7 @@ "schema": { "description": "PendingTransactions is an array of signed transactions exactly as they were submitted.", "type": "object", - "required": [ - "top-transactions", - "total-transactions" - ], + "required": ["top-transactions", "total-transactions"], "properties": { "top-transactions": { "description": "An array of signed transaction objects.", @@ -5458,9 +4596,7 @@ "description": "Participation ID of the submission", "schema": { "type": "object", - "required": [ - "partId" - ], + "required": ["partId"], "properties": { "partId": { "description": "encoding of the participation ID.", @@ -5473,9 +4609,7 @@ "description": "Transaction ID of the submission.", "schema": { "type": "object", - "required": [ - "txId" - ], + "required": ["txId"], "properties": { "txId": { "description": "encoding of the transaction hash.", @@ -5488,19 +4622,17 @@ "description": "Result of a transaction group simulation.", "schema": { "type": "object", - "required": [ - "version", - "last-round", - "txn-groups" - ], + "required": ["version", "last-round", "txn-groups"], "properties": { "version": { "description": "The version of this response object.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "last-round": { "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "txn-groups": { "description": "A result object for each transaction group that was simulated.", @@ -5525,9 +4657,7 @@ "description": "All logs emitted in the given round. Each app call, whether top-level or inner, that contains logs results in a separate AppCallLogs object. Therefore there may be multiple AppCallLogs with the same application ID and outer transaction ID in the event of multiple inner app calls to the same app. App calls with no logs are not included in the response. 
AppCallLogs are returned in the same order that their corresponding app call appeared in the block (pre-order traversal of inner app calls)", "schema": { "type": "object", - "required": [ - "logs" - ], + "required": ["logs"], "properties": { "logs": { "type": "array", @@ -5543,23 +4673,22 @@ "schema": { "description": "Supply represents the current supply of MicroAlgos in the system", "type": "object", - "required": [ - "online-money", - "current_round", - "total-money" - ], + "required": ["online-money", "current_round", "total-money"], "properties": { "current_round": { "description": "Round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "online-money": { "description": "OnlineMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "total-money": { "description": "TotalMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } } } @@ -5584,7 +4713,8 @@ }, "fee": { "description": "Fee is the suggested transaction fee\nFee is in units of micro-Algos per byte.\nFee may fall to zero but transactions must still have a fee of\nat least MinTxnFee for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "genesis-hash": { "description": "GenesisHash is the hash of the genesis block.", @@ -5597,11 +4727,13 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "min-fee": { "description": "The minimum transaction fee (not per byte) required for the\ntxn to validate for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } } } @@ -5616,9 +4748,7 @@ "description": "Box names of an application", "schema": { "type": "object", - "required": [ - "boxes" - ], + "required": ["boxes"], "properties": { "boxes": { "type": "array", @@ -5645,10 +4775,7 @@ "description": "Teal compile Result", "schema": { "type": "object", - "required": [ - "hash", - "result" - ], + "required": ["hash", "result"], "properties": { "hash": { "description": "base32 SHA512_256 of program bytes (Address style)", @@ -5669,9 +4796,7 @@ "description": "Teal disassembly Result", "schema": { "type": "object", - "required": [ - "result" - ], + "required": ["result"], "properties": { "result": { "description": "disassembled Teal code", @@ -5684,11 +4809,7 @@ "description": "DryrunResponse contains per-txn debug information from a dryrun.", "schema": { "type": "object", - "required": [ - "txns", - "protocol-version", - "error" - ], + "required": ["txns", "protocol-version", "error"], "properties": { "txns": { "type": "array", diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index c19e14307a..9efe2415bc 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -1,69 +1,41 @@ { "components": { "parameters": { - "account-id": { - "description": "account string", + "address": { + "description": "An account public key.", "in": "path", - "name": "account-id", + "name": "address", "required": true, "schema": { + "pattern": "[A-Z0-9]{58}", "type": "string", - "x-go-name": "AccountID" + "x-go-type": "basics.Address" }, - "x-go-name": "AccountID" - }, - "address": { - "description": "Only include transactions with this address in one of the transaction fields.", - "in": "query", - "name": "address", - "schema": { - "type": "string" - } + "x-go-type": "basics.Address" }, - "address-role": { - "description": "Combine with the address parameter 
to define what type of address to search for.", - "in": "query", - "name": "address-role", - "schema": { - "enum": [ - "sender", - "receiver", - "freeze-target" - ], - "type": "string" - } - }, - "after-time": { - "description": "Include results after the given time. Must be an RFC 3339 formatted string.", - "in": "query", - "name": "after-time", + "application-id": { + "description": "An application identifier.", + "in": "path", + "name": "application-id", + "required": true, "schema": { - "format": "date-time", - "type": "string", - "x-algorand-format": "RFC3339 String" + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AppIndex" }, - "x-algorand-format": "RFC3339 String" + "x-go-type": "basics.AppIndex" }, "asset-id": { - "description": "Asset ID", - "in": "query", + "description": "An asset identifier.", + "in": "path", "name": "asset-id", + "required": true, "schema": { + "minimum": 0, "type": "integer", - "x-go-name": "AssetID" + "x-go-type": "basics.AssetIndex" }, - "x-go-name": "AssetID" - }, - "before-time": { - "description": "Include results before the given time. Must be an RFC 3339 formatted string.", - "in": "query", - "name": "before-time", - "schema": { - "format": "date-time", - "type": "string", - "x-algorand-format": "RFC3339 String" - }, - "x-algorand-format": "RFC3339 String" + "x-go-type": "basics.AssetIndex" }, "catchpoint": { "description": "A catch point", @@ -78,30 +50,6 @@ }, "x-algorand-format": "Catchpoint String" }, - "currency-greater-than": { - "description": "Results should have an amount greater than this value. MicroAlgos are the default currency unless an asset-id is provided, in which case the asset will be used.", - "in": "query", - "name": "currency-greater-than", - "schema": { - "type": "integer" - } - }, - "currency-less-than": { - "description": "Results should have an amount less than this value. MicroAlgos are the default currency unless an asset-id is provided, in which case the asset will be used.", - "in": "query", - "name": "currency-less-than", - "schema": { - "type": "integer" - } - }, - "exclude-close-to": { - "description": "Combine with address and address-role parameters to define what type of address to search for. The close to fields are normally treated as a receiver, if you would like to exclude them set this parameter to true.", - "in": "query", - "name": "exclude-close-to", - "schema": { - "type": "boolean" - } - }, "format": { "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", @@ -119,32 +67,20 @@ "in": "query", "name": "limit", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" }, "max": { "description": "Truncated number of transactions to display. If max=0, returns all pending txns.", "in": "query", "name": "max", "schema": { - "type": "integer" - } - }, - "max-round": { - "description": "Include results at or before the specified max-round.", - "in": "query", - "name": "max-round", - "schema": { - "type": "integer" - } - }, - "min-round": { - "description": "Include results at or after the specified min-round.", - "in": "query", - "name": "min-round", - "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" }, "next": { "description": "The next page of results. 
Use the next token provided by the previous results.", @@ -154,32 +90,17 @@ "type": "string" } }, - "note-prefix": { - "description": "Specifies a prefix which must be contained in the note field.", - "in": "query", - "name": "note-prefix", - "schema": { - "type": "string", - "x-algorand-format": "base64" - }, - "x-algorand-format": "base64" - }, "round": { - "description": "Include results for the specified round.", - "in": "query", - "name": "round", - "schema": { - "type": "integer" - } - }, - "round-number": { - "description": "Round number", + "description": "A round number.", "in": "path", - "name": "round-number", + "name": "round", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, "sig-type": { "description": "SigType filters just results using the specified type of signature:\n* sig - Standard\n* msig - MultiSig\n* lsig - LogicSig", @@ -237,7 +158,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -262,7 +184,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -291,7 +214,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -586,7 +510,8 @@ "properties": { "offset": { "description": "Timestamp offset in seconds.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -605,7 +530,8 @@ "properties": { "round": { "description": "The minimum sync round for the ledger.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -659,39 +585,48 @@ }, "catchpoint-acquired-blocks": { "description": "The number of blocks that have already been obtained by the node as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-accounts": { "description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-accounts": { "description": "The total number of accounts included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-blocks": { "description": "The total number of blocks that are required to complete the current catchpoint catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-kvs": { "description": "The total number of key-values (KVs) included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-accounts": { "description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup", - 
"type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchup-time": { "description": "CatchupTime in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "last-catchpoint": { "description": "The last catchpoint seen by the node", @@ -699,7 +634,8 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-version": { "description": "LastVersion indicates the last consensus version supported", @@ -711,7 +647,8 @@ }, "next-version-round": { "description": "NextVersionRound is the round at which the next consensus version will apply", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "next-version-supported": { "description": "NextVersionSupported indicates whether the next consensus version is supported by this node", @@ -723,19 +660,23 @@ }, "time-since-last-round": { "description": "TimeSinceLastRound in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "upgrade-delay": { "description": "Upgrade delay", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-next-protocol-vote-before": { "description": "Next protocol round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-no-votes": { "description": "No votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-node-vote": { "description": "This node's upgrade vote", @@ -743,19 +684,23 @@ }, "upgrade-vote-rounds": { "description": "Total voting rounds for current upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes": { "description": "Total votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes-required": { "description": "Yes votes required for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-yes-votes": { "description": "Yes votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -880,7 +825,8 @@ }, "last-round": { "description": "The round immediately preceding this simulation. 
State changes through this round were used to run this simulation.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "txn-groups": { "description": "A result object for each transaction group that was simulated.", @@ -891,7 +837,8 @@ }, "version": { "description": "The version of this response object.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -923,15 +870,18 @@ "properties": { "current_round": { "description": "Round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "online-money": { "description": "OnlineMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "total-money": { "description": "TotalMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -978,7 +928,8 @@ }, "fee": { "description": "Fee is the suggested transaction fee\nFee is in units of micro-Algos per byte.\nFee may fall to zero but transactions must still have a fee of\nat least MinTxnFee for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "genesis-hash": { "description": "GenesisHash is the hash of the genesis block.", @@ -992,11 +943,13 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "min-fee": { "description": "The minimum transaction fee (not per byte) required for the\ntxn to validate for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -1017,44 +970,7 @@ "content": { "application/json": { "schema": { - "properties": { - "hashtype": { - "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256", - "enum": [ - "sha512_256", - "sha256" - ], - "type": "string" - }, - "idx": { - "description": "Index of the transaction in the block's payset.", - "type": "integer" - }, - "proof": { - "description": "Proof of transaction membership.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "stibhash": { - "description": "Hash of SignedTxnInBlock for verifying proof.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "treedepth": { - "description": "Represents the depth of the tree that is being proven, i.e. 
the number of edges from a leaf to the root.", - "type": "integer" - } - }, - "required": [ - "hashtype", - "idx", - "proof", - "stibhash", - "treedepth" - ], - "type": "object" + "$ref": "#/components/schemas/TransactionProof" } } }, @@ -1081,10 +997,12 @@ }, "amount": { "description": "\\[algo\\] total number of MicroAlgos in the account", + "format": "uint64", "type": "integer" }, "amount-without-pending-rewards": { "description": "specifies the amount of MicroAlgos in the account, without the pending rewards.", + "format": "uint64", "type": "integer" }, "apps-local-state": { @@ -1096,6 +1014,7 @@ }, "apps-total-extra-pages": { "description": "\\[teap\\] the sum of all extra application program pages for this account.", + "format": "uint64", "type": "integer" }, "apps-total-schema": { @@ -1133,14 +1052,17 @@ }, "last-heartbeat": { "description": "The round in which this account last went online, or explicitly renewed their online status.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-proposed": { "description": "The round in which this account last proposed the block.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "min-balance": { "description": "MicroAlgo balance required by the account.\n\nThe requirement grows based on asset and application usage.", + "format": "uint64", "type": "integer" }, "participation": { @@ -1148,19 +1070,23 @@ }, "pending-rewards": { "description": "amount of MicroAlgos of pending rewards in this account.", + "format": "uint64", "type": "integer" }, "reward-base": { "description": "\\[ebase\\] used as part of the rewards computation. Only applicable to accounts which are participating.", + "format": "uint64", "type": "integer" }, "rewards": { "description": "\\[ern\\] total rewards of MicroAlgos the account has received, including pending rewards.", + "format": "uint64", "type": "integer" }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "sig-type": { "description": "Indicates what type of signature is used by this account, must be one of:\n* sig\n* msig\n* lsig", @@ -1177,26 +1103,32 @@ }, "total-apps-opted-in": { "description": "The count of all applications that have been opted in, equivalent to the count of application local data (AppLocalState objects) stored in this account.", + "format": "uint64", "type": "integer" }, "total-assets-opted-in": { "description": "The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.", + "format": "uint64", "type": "integer" }, "total-box-bytes": { "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.", + "format": "uint64", "type": "integer" }, "total-boxes": { "description": "\\[tbx\\] The number of existing boxes created by this account's app.", + "format": "uint64", "type": "integer" }, "total-created-apps": { "description": "The count of all apps (AppParams objects) created by this account.", + "format": "uint64", "type": "integer" }, "total-created-assets": { "description": "The count of all assets (AssetParams objects) created by this account.", + "format": "uint64", "type": "integer" } }, @@ -1248,15 +1180,18 @@ }, "vote-first-valid": { "description": "\\[voteFst\\] First round for which this participation is valid.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "vote-key-dilution": { "description": 
"\\[voteKD\\] Number of subkeys in each batch of participation keys.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "vote-last-valid": { "description": "\\[voteLst\\] Last round for which this participation is valid.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "vote-participation-key": { "description": "\\[vote\\] root participation public key (if any) currently registered for this round.", @@ -1295,7 +1230,8 @@ "properties": { "application-index": { "description": "The application from which the logs were generated", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "logs": { "description": "An array of logs", @@ -1323,7 +1259,8 @@ "properties": { "id": { "description": "\\[appidx\\] application index.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "params": { "$ref": "#/components/schemas/ApplicationParams" @@ -1354,7 +1291,8 @@ "id": { "description": "Application index.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AppIndex" } }, "required": [ @@ -1394,7 +1332,8 @@ "app": { "description": "Application ID of the local state application.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AppIndex" } }, "required": [ @@ -1408,7 +1347,8 @@ "properties": { "id": { "description": "The application which this local state is for.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "key-value": { "$ref": "#/components/schemas/TealKeyValueStore" @@ -1447,6 +1387,7 @@ }, "extra-program-pages": { "description": "\\[epp\\] the amount of extra program pages available to this app.", + "format": "uint64", "type": "integer" }, "global-state": { @@ -1460,7 +1401,8 @@ }, "version": { "description": "\\[v\\] the number of updates to the application programs", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -1508,10 +1450,12 @@ "properties": { "num-byte-slice": { "description": "\\[nbs\\] num of byte slices.", + "format": "uint64", "type": "integer" }, "num-uint": { "description": "\\[nui\\] num of uints.", + "format": "uint64", "type": "integer" } }, @@ -1526,7 +1470,8 @@ "properties": { "index": { "description": "unique asset identifier", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AssetIndex" }, "params": { "$ref": "#/components/schemas/AssetParams" @@ -1543,13 +1488,15 @@ "properties": { "amount": { "description": "\\[a\\] number of units held.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" }, "asset-id": { "description": "Asset ID of the holding.", "type": "integer", - "x-go-name": "AssetID" + "x-go-name": "AssetID", + "x-go-type": "basics.AssetIndex" }, "is-frozen": { "description": "\\[f\\] whether or not the holding is frozen.", @@ -1574,7 +1521,8 @@ "asset": { "description": "Asset ID of the holding.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AssetIndex" } }, "required": [ @@ -1596,6 +1544,7 @@ }, "decimals": { "description": "\\[dc\\] The number of digits to use after the decimal point when displaying this asset. If 0, the asset is not divisible. If 1, the base unit of the asset is in tenths. If 2, the base unit of the asset is in hundredths, and so on. 
This value must be between 0 and 19 (inclusive).", + "format": "uint64", "maximum": 19, "minimum": 0, "type": "integer" @@ -1634,6 +1583,7 @@ }, "total": { "description": "\\[t\\] The total number of units of this asset.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" }, @@ -1694,10 +1644,12 @@ }, "type": { "description": "value type. Value `1` refers to **bytes**, value `2` refers to **uint64**", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "uint": { "description": "uint value.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" } @@ -1711,17 +1663,18 @@ "description": "Box name and its content.", "properties": { "name": { - "description": "\\[name\\] box name, base64 encoded", + "description": "The box name, base64 encoded", "format": "byte", "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", "type": "string" }, "round": { "description": "The round for which this information is relevant", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "value": { - "description": "\\[value\\] box value, base64 encoded.", + "description": "The box value, base64 encoded.", "format": "byte", "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", "type": "string" @@ -1754,7 +1707,8 @@ "properties": { "app": { "description": "Application ID which this box belongs to", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "name": { "description": "Base64 encoded box name", @@ -1775,7 +1729,6 @@ "type": "string" }, "build_number": { - "format": "int64", "type": "integer" }, "channel": { @@ -1785,11 +1738,9 @@ "type": "string" }, "major": { - "format": "int64", "type": "integer" }, "minor": { - "format": "int64", "type": "integer" } }, @@ -1810,11 +1761,13 @@ "block-rate": { "description": "The rate of blocking events. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked. To turn off profiling entirely, pass rate 0.", "example": 1000, + "format": "uint64", "type": "integer" }, "mutex-rate": { "description": "The rate of mutex events. On average 1/rate events are reported. To turn off profiling entirely, pass rate 0", "example": 1000, + "format": "uint64", "type": "integer" } }, @@ -1838,8 +1791,9 @@ }, "latest-timestamp": { "description": "LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.", - "format": "int64", - "type": "integer" + "minimum": 0, + "type": "integer", + "x-go-type": "int64" }, "protocol-version": { "description": "ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in.", @@ -1848,7 +1802,8 @@ "round": { "description": "Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.Round" }, "sources": { "items": { @@ -1882,7 +1837,8 @@ "properties": { "app-index": { "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AppIndex" }, "field-name": { "description": "FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. 
If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex].", @@ -2031,6 +1987,7 @@ "properties": { "action": { "description": "\\[at\\] delta action.", + "format": "uint64", "type": "integer" }, "bytes": { @@ -2039,6 +1996,7 @@ }, "uint": { "description": "\\[ui\\] uint value.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" } @@ -2150,7 +2108,8 @@ } }, "required": [ - "algo" + "algo", + "onl" ], "type": "object" } @@ -2163,24 +2122,6 @@ "title": "Allocations for Genesis File", "type": "object" }, - "KvDelta": { - "description": "A single Delta containing the key, the previous value and the current value for a single round.", - "properties": { - "key": { - "description": "The key, base64 encoded.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "value": { - "description": "The new value of the KV store entry, base64 encoded.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - } - }, - "type": "object" - }, "LedgerStateDelta": { "description": "Ledger StateDelta object", "type": "object", @@ -2210,7 +2151,8 @@ "properties": { "index": { "description": "The index of the light block header in the vector commitment tree", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "proof": { "description": "The encoded proof.", @@ -2241,12 +2183,14 @@ "effective-first-valid": { "description": "When registered, this is the first round it may be used.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.Round" }, "effective-last-valid": { "description": "When registered, this is the last round it may be used.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.Round" }, "id": { "description": "The key's ParticipationID.", @@ -2257,15 +2201,18 @@ }, "last-block-proposal": { "description": "Round when this key was last used to propose a block.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-state-proof": { "description": "Round when this key was last used to generate a state proof.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-vote": { "description": "Round when this key was last used to vote.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -2280,15 +2227,18 @@ "properties": { "application-index": { "description": "The application index if the transaction was found and it created an application.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AppIndex" }, "asset-closing-amount": { "description": "The number of the asset's unit that were transferred to the close-to address.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "asset-index": { "description": "The asset index if the transaction was found and it created an asset.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.AssetIndex" }, "close-rewards": { "description": "Rewards in microalgos applied to the close remainder to account.", @@ -2296,11 +2246,13 @@ }, "closing-amount": { "description": "Closing amount for the transaction.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "confirmed-round": { "description": "The round where this transaction was confirmed, if present.", - "type": "integer" + "type": "integer", 
+ "x-go-type": "basics.Round" }, "global-state-delta": { "$ref": "#/components/schemas/StateDelta" @@ -2334,11 +2286,13 @@ }, "receiver-rewards": { "description": "Rewards in microalgos applied to the receiver account.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "sender-rewards": { "description": "Rewards in microalgos applied to the sender account.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "txn": { "description": "The raw signed transaction.", @@ -2411,7 +2365,8 @@ }, "round": { "description": "If provided, specifies the round preceding the simulation. State changes through this round will be used to run this simulation. Usually only the 4 most recent rounds will be available (controlled by the node config value MaxAcctLookback). If not specified, defaults to the latest available round.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "txn-groups": { "description": "The transaction groups to simulate.", @@ -2558,7 +2513,8 @@ "description": "The unnamed applications that were referenced. The order of this array is arbitrary.", "items": { "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AppIndex" }, "type": "array" }, @@ -2573,7 +2529,8 @@ "description": "The unnamed assets that were referenced. The order of this array is arbitrary.", "items": { "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.AssetIndex" }, "type": "array" }, @@ -2764,15 +2721,18 @@ "FirstAttestedRound": { "description": "The first round the message attests to.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.Round" }, "LastAttestedRound": { "description": "The last round the message attests to.", "type": "integer", - "x-algorand-format": "uint64" + "x-algorand-format": "uint64", + "x-go-type": "basics.Round" }, "LnProvenWeight": { "description": "An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" }, @@ -2824,10 +2784,12 @@ }, "type": { "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "uint": { "description": "\\[ui\\] uint value.", + "format": "uint64", "type": "integer", "x-algorand-format": "uint64" } @@ -2839,6 +2801,49 @@ ], "type": "object" }, + "TransactionProof": { + "description": "Proof of transaction in a block.", + "properties": { + "hashtype": { + "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256", + "enum": [ + "sha512_256", + "sha256" + ], + "type": "string" + }, + "idx": { + "description": "Index of the transaction in the block's payset.", + "type": "integer", + "x-go-type": "uint64" + }, + "proof": { + "description": "Proof of transaction membership.", + "format": "byte", + "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", + "type": "string" + }, + "stibhash": { + "description": "Hash of SignedTxnInBlock for verifying proof.", + "format": "byte", + "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", + "type": "string" + }, + "treedepth": { + "description": "Represents the depth of the tree that is being proven, i.e. 
the number of edges from a leaf to the root.", + "type": "integer", + "x-go-type": "uint64" + } + }, + "required": [ + "hashtype", + "idx", + "proof", + "stibhash", + "treedepth" + ], + "type": "object" + }, "Version": { "description": "algod version information.", "properties": { @@ -3085,26 +3090,16 @@ "operationId": "AccountInformation", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", - "in": "query", - "name": "format", - "schema": { - "enum": [ - "json", - "msgpack" - ], - "type": "string" - } - }, - { - "description": "An account public key", + "description": "An account public key.", "in": "path", "name": "address", "required": true, "schema": { "pattern": "[A-Z0-9]{58}", - "type": "string" - } + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" }, { "description": "When set to `all` will exclude asset holdings, application local state, created asset parameters, any created application parameters. Defaults to `none`.", @@ -3117,6 +3112,18 @@ ], "type": "string" } + }, + { + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", + "in": "query", + "name": "format", + "schema": { + "enum": [ + "json", + "msgpack" + ], + "type": "string" + } } ], "responses": { @@ -3197,6 +3204,30 @@ "description": "Given a specific account public key and application ID, this call returns the account's application local state and global state (AppLocalState and AppParams, if either exists). Global state will only be returned if the provided address is the application's creator.", "operationId": "AccountApplicationInformation", "parameters": [ + { + "description": "An account public key.", + "in": "path", + "name": "address", + "required": true, + "schema": { + "pattern": "[A-Z0-9]{58}", + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" + }, + { + "description": "An application identifier.", + "in": "path", + "name": "application-id", + "required": true, + "schema": { + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AppIndex" + }, + "x-go-type": "basics.AppIndex" + }, { "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", "in": "query", @@ -3208,25 +3239,6 @@ ], "type": "string" } - }, - { - "description": "An account public key", - "in": "path", - "name": "address", - "required": true, - "schema": { - "pattern": "[A-Z0-9]{58}", - "type": "string" - } - }, - { - "description": "An application identifier", - "in": "path", - "name": "application-id", - "required": true, - "schema": { - "type": "integer" - } } ], "responses": { @@ -3243,7 +3255,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -3263,7 +3276,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -3338,22 +3352,26 @@ "operationId": "AccountAssetsInformation", "parameters": [ { - "description": "An account public key", + "description": "An account public key.", "in": "path", "name": "address", "required": true, "schema": { "pattern": "[A-Z0-9]{58}", - "type": "string" - } + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" }, { "description": "Maximum number of results to return.", "in": "query", "name": "limit", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" }, { "description": "The next page of results. Use the next token provided by the previous results.", @@ -3382,7 +3400,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -3442,34 +3461,39 @@ "operationId": "AccountAssetInformation", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", - "in": "query", - "name": "format", - "schema": { - "enum": [ - "json", - "msgpack" - ], - "type": "string" - } - }, - { - "description": "An account public key", + "description": "An account public key.", "in": "path", "name": "address", "required": true, "schema": { "pattern": "[A-Z0-9]{58}", - "type": "string" - } + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" }, { - "description": "An asset identifier", + "description": "An asset identifier.", "in": "path", "name": "asset-id", "required": true, "schema": { - "type": "integer" + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AssetIndex" + }, + "x-go-type": "basics.AssetIndex" + }, + { + "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", + "in": "query", + "name": "format", + "schema": { + "enum": [ + "json", + "msgpack" + ], + "type": "string" } } ], @@ -3487,7 +3511,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -3507,7 +3532,8 @@ }, "round": { "description": "The round for which this information is relevant.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -3582,22 +3608,26 @@ "operationId": "GetPendingTransactionsByAddress", "parameters": [ { - "description": "An account public key", + "description": "An account public key.", "in": "path", "name": "address", "required": true, "schema": { "pattern": "[A-Z0-9]{58}", - "type": "string" - } + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" }, { "description": "Truncated number of transactions to display. If max=0, returns all pending txns.", "in": "query", "name": "max", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" }, { "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", @@ -3746,13 +3776,16 @@ "operationId": "GetApplicationByID", "parameters": [ { - "description": "An application identifier", + "description": "An application identifier.", "in": "path", "name": "application-id", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AppIndex" + }, + "x-go-type": "basics.AppIndex" } ], "responses": { @@ -3824,13 +3857,16 @@ "operationId": "GetApplicationBoxByName", "parameters": [ { - "description": "An application identifier", + "description": "An application identifier.", "in": "path", "name": "application-id", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AppIndex" + }, + "x-go-type": "basics.AppIndex" }, { "description": "A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.", @@ -3911,21 +3947,26 @@ "operationId": "GetApplicationBoxes", "parameters": [ { - "description": "An application identifier", + "description": "An application identifier.", "in": "path", "name": "application-id", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AppIndex" + }, + "x-go-type": "basics.AppIndex" }, { "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.", "in": "query", "name": "max", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" } ], "responses": { @@ -3998,13 +4039,16 @@ "operationId": "GetAssetByID", "parameters": [ { - "description": "An asset identifier", + "description": "An asset identifier.", "in": "path", "name": "asset-id", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.AssetIndex" + }, + "x-go-type": "basics.AssetIndex" } ], "responses": { @@ -4075,26 +4119,16 @@ "operationId": "GetBlock", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", - "in": "query", - "name": "format", - "schema": { - "enum": [ - "json", - "msgpack" - ], - "type": "string" - } - }, - { - "description": "The round from which to fetch block information.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, { "description": "If true, only the block header (exclusive of payset or certificate) may be included in response.", @@ -4103,6 +4137,18 @@ "schema": { "type": "boolean" } + }, + { + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", + "in": "query", + "name": "format", + "schema": { + "enum": [ + "json", + "msgpack" + ], + "type": "string" + } } ], "responses": { @@ -4232,14 +4278,16 @@ "operationId": "GetBlockHash", "parameters": [ { - "description": "The round from which to fetch block hash information.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -4319,14 +4367,16 @@ "operationId": "GetLightBlockHeaderProof", "parameters": [ { - "description": "The round to which the light block header belongs.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -4408,14 +4458,16 @@ "operationId": "GetBlockLogs", "parameters": [ { - "description": "The round from which to fetch block log information.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -4493,13 +4545,16 @@ "operationId": "GetTransactionProof", "parameters": [ { - "description": "The round in which the transaction appears.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { - "type": "integer" - } + "minimum": 0, + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, { "description": "The transaction ID for which to generate a proof.", @@ -4541,44 +4596,7 @@ "content": { "application/json": { "schema": { - "properties": { - "hashtype": { - "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256", - "enum": [ - "sha512_256", - "sha256" - ], - "type": "string" - }, - "idx": { - "description": "Index of the transaction in the block's payset.", - "type": "integer" - }, - "proof": { - "description": "Proof of transaction membership.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "stibhash": { - "description": "Hash of SignedTxnInBlock for verifying proof.", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "treedepth": { - "description": "Represents the depth of the tree that is being proven, i.e. 
the number of edges from a leaf to the root.", - "type": "integer" - } - }, - "required": [ - "hashtype", - "idx", - "proof", - "stibhash", - "treedepth" - ], - "type": "object" + "$ref": "#/components/schemas/TransactionProof" } } }, @@ -4641,14 +4659,16 @@ "operationId": "GetBlockTxids", "parameters": [ { - "description": "The round from which to fetch block transaction IDs.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -4828,8 +4848,10 @@ "in": "query", "name": "min", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -5060,14 +5082,16 @@ "operationId": "GetLedgerStateDelta", "parameters": [ { - "description": "The round for which the deltas are desired.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, { "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", @@ -5191,14 +5215,16 @@ "operationId": "GetTransactionGroupLedgerStateDeltasForRound", "parameters": [ { - "description": "The round for which the deltas are desired.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, { "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", @@ -5350,7 +5376,8 @@ "properties": { "offset": { "description": "Timestamp offset in seconds.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -5396,8 +5423,10 @@ "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" } ], "responses": { @@ -5483,15 +5512,18 @@ "properties": { "current_round": { "description": "Round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "online-money": { "description": "OnlineMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "total-money": { "description": "TotalMoney", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -5597,7 +5629,8 @@ "properties": { "round": { "description": "The minimum sync round for the ledger.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -5667,14 +5700,16 @@ "operationId": "SetSyncRound", "parameters": [ { - "description": "The round for which the deltas are desired.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -5904,19 +5939,23 @@ "operationId": "GenerateParticipationKeys", "parameters": [ { - "description": "An account public key", + "description": "An account public key.", "in": "path", "name": "address", "required": true, "schema": { - "type": "string" - } + "pattern": "[A-Z0-9]{58}", + "type": "string", + "x-go-type": "basics.Address" + }, + "x-go-type": "basics.Address" }, { "description": "Key dilution for two-level participation keys (defaults to sqrt of validity window).", "in": "query", "name": "dilution", "schema": { + "format": "uint64", "type": "integer" } }, @@ -5926,8 +5965,11 @@ "name": "first", "required": true, "schema": { - "type": "integer" - } + "format": "uint64", + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" }, { "description": "Last round for participation key.", @@ -5935,8 +5977,11 @@ "name": "last", "required": true, "schema": { - "type": "integer" - } + "format": "uint64", + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -6272,14 +6317,16 @@ "operationId": "GetStateProof", "parameters": [ { - "description": "The round for which a state proof is desired.", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -6371,39 +6418,48 @@ }, "catchpoint-acquired-blocks": { "description": "The number of blocks that have already been obtained by the node as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-accounts": { "description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-accounts": { "description": 
"The total number of accounts included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-blocks": { "description": "The total number of blocks that are required to complete the current catchpoint catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-kvs": { "description": "The total number of key-values (KVs) included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-accounts": { "description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchup-time": { "description": "CatchupTime in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "last-catchpoint": { "description": "The last catchpoint seen by the node", @@ -6411,7 +6467,8 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-version": { "description": "LastVersion indicates the last consensus version supported", @@ -6423,7 +6480,8 @@ }, "next-version-round": { "description": "NextVersionRound is the round at which the next consensus version will apply", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "next-version-supported": { "description": "NextVersionSupported indicates whether the next consensus version is supported by this node", @@ -6435,19 +6493,23 @@ }, "time-since-last-round": { "description": "TimeSinceLastRound in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "upgrade-delay": { "description": "Upgrade delay", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-next-protocol-vote-before": { "description": "Next protocol round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-no-votes": { "description": "No votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-node-vote": { "description": "This node's upgrade vote", @@ -6455,19 +6517,23 @@ }, "upgrade-vote-rounds": { "description": "Total voting rounds for current upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes": { "description": "Total votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes-required": { "description": "Yes votes required for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-yes-votes": { "description": "Yes votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -6523,14 +6589,16 @@ "operationId": "WaitForBlock", "parameters": [ { - "description": "The round to wait until returning status", + "description": "A round number.", "in": "path", "name": "round", "required": true, "schema": { "minimum": 0, - "type": "integer" - } + "type": "integer", + "x-go-type": "basics.Round" + }, + "x-go-type": "basics.Round" } ], "responses": { @@ -6546,39 +6614,48 @@ }, 
"catchpoint-acquired-blocks": { "description": "The number of blocks that have already been obtained by the node as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-accounts": { "description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-processed-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-accounts": { "description": "The total number of accounts included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-blocks": { "description": "The total number of blocks that are required to complete the current catchpoint catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-total-kvs": { "description": "The total number of key-values (KVs) included in the current catchpoint", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-accounts": { "description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchpoint-verified-kvs": { "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "catchup-time": { "description": "CatchupTime in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "last-catchpoint": { "description": "The last catchpoint seen by the node", @@ -6586,7 +6663,8 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "last-version": { "description": "LastVersion indicates the last consensus version supported", @@ -6598,7 +6676,8 @@ }, "next-version-round": { "description": "NextVersionRound is the round at which the next consensus version will apply", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "next-version-supported": { "description": "NextVersionSupported indicates whether the next consensus version is supported by this node", @@ -6610,19 +6689,23 @@ }, "time-since-last-round": { "description": "TimeSinceLastRound in nanoseconds", - "type": "integer" + "type": "integer", + "x-go-type": "int64" }, "upgrade-delay": { "description": "Upgrade delay", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-next-protocol-vote-before": { "description": "Next protocol round", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-no-votes": { "description": "No votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-node-vote": { "description": "This node's upgrade vote", @@ -6630,19 +6713,23 @@ }, "upgrade-vote-rounds": { "description": "Total voting rounds for current upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes": { "description": "Total votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-votes-required": { 
"description": "Yes votes required for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "upgrade-yes-votes": { "description": "Yes votes cast for consensus upgrade", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" } }, "required": [ @@ -7176,7 +7263,8 @@ }, "fee": { "description": "Fee is the suggested transaction fee\nFee is in units of micro-Algos per byte.\nFee may fall to zero but transactions must still have a fee of\nat least MinTxnFee for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" }, "genesis-hash": { "description": "GenesisHash is the hash of the genesis block.", @@ -7190,11 +7278,13 @@ }, "last-round": { "description": "LastRound indicates the last round seen", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "min-fee": { "description": "The minimum transaction fee (not per byte) required for the\ntxn to validate for the current network protocol.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -7263,8 +7353,10 @@ "in": "query", "name": "max", "schema": { - "type": "integer" - } + "type": "integer", + "x-go-type": "uint64" + }, + "x-go-type": "uint64" }, { "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", @@ -7543,7 +7635,8 @@ }, "last-round": { "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "txn-groups": { "description": "A result object for each transaction group that was simulated.", @@ -7554,7 +7647,8 @@ }, "version": { "description": "The version of this response object.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ @@ -7579,7 +7673,8 @@ }, "last-round": { "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", - "type": "integer" + "type": "integer", + "x-go-type": "basics.Round" }, "txn-groups": { "description": "A result object for each transaction group that was simulated.", @@ -7590,7 +7685,8 @@ }, "version": { "description": "The version of this response object.", - "type": "integer" + "type": "integer", + "x-go-type": "uint64" } }, "required": [ diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 1749327ee3..484b2ba168 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -297,7 +297,7 @@ func (client RestClient) WaitForBlockAfter(round basics.Round) (response model.N // WaitForRound returns the node status after waiting for the given round. It // waits no more than waitTime in TOTAL, and returns an error if the round has // not been reached. -func (client RestClient) WaitForRound(round uint64, waitTime time.Duration) (status model.NodeStatusResponse, err error) { +func (client RestClient) WaitForRound(round basics.Round, waitTime time.Duration) (status model.NodeStatusResponse, err error) { timeout := time.After(waitTime) for { status, err = client.Status() @@ -321,7 +321,7 @@ const singleRoundMaxTime = globals.MaxTimePerRound * 40 // WaitForRoundWithTimeout waits for a given round to be reached. 
As it // waits, it returns early with an error if the wait time for any round exceeds // singleRoundMaxTime so we can alert when we're getting "hung" waiting. -func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor uint64) error { +func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor basics.Round) error { status, err := client.Status() if err != nil { return err @@ -339,7 +339,7 @@ func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor uint64) error { // WaitForConfirmedTxn waits until either the passed txid is confirmed // or until the passed roundTimeout passes // or until waiting for a round to pass times out -func (client RestClient) WaitForConfirmedTxn(roundTimeout uint64, txid string) (txn v2.PreEncodedTxInfo, err error) { +func (client RestClient) WaitForConfirmedTxn(roundTimeout basics.Round, txid string) (txn v2.PreEncodedTxInfo, err error) { for { // Get current round information curStatus, statusErr := client.Status() @@ -462,14 +462,14 @@ func (client RestClient) RawPendingTransactionsByAddr(addr string, max uint64) ( } // AssetInformation gets the AssetInformationResponse associated with the passed asset index -func (client RestClient) AssetInformation(index uint64) (response model.Asset, err error) { +func (client RestClient) AssetInformation(index basics.AssetIndex) (response model.Asset, err error) { err = client.get(&response, fmt.Sprintf("/v2/assets/%d", index), nil) return } // ApplicationInformation gets the ApplicationInformationResponse associated // with the passed application index -func (client RestClient) ApplicationInformation(index uint64) (response model.Application, err error) { +func (client RestClient) ApplicationInformation(index basics.AppIndex) (response model.Application, err error) { err = client.get(&response, fmt.Sprintf("/v2/applications/%d", index), nil) return } @@ -479,7 +479,7 @@ type applicationBoxesParams struct { } // ApplicationBoxes gets the BoxesResponse associated with the passed application ID -func (client RestClient) ApplicationBoxes(appID uint64, maxBoxNum uint64) (response model.BoxesResponse, err error) { +func (client RestClient) ApplicationBoxes(appID basics.AppIndex, maxBoxNum uint64) (response model.BoxesResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/applications/%d/boxes", appID), applicationBoxesParams{maxBoxNum}) return } @@ -489,7 +489,7 @@ type applicationBoxByNameParams struct { } // GetApplicationBoxByName gets the BoxResponse associated with the passed application ID and box name -func (client RestClient) GetApplicationBoxByName(appID uint64, name string) (response model.BoxResponse, err error) { +func (client RestClient) GetApplicationBoxByName(appID basics.AppIndex, name string) (response model.BoxResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/applications/%d/box", appID), applicationBoxByNameParams{name}) return } @@ -547,13 +547,13 @@ func (client RestClient) RawPendingTransactionInformation(transactionID string) } // AccountApplicationInformation gets account information about a given app. 
-func (client RestClient) AccountApplicationInformation(accountAddress string, applicationID uint64) (response model.AccountApplicationResponse, err error) { +func (client RestClient) AccountApplicationInformation(accountAddress string, applicationID basics.AppIndex) (response model.AccountApplicationResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/applications/%d", accountAddress, applicationID), nil) return } // RawAccountApplicationInformation gets account information about a given app. -func (client RestClient) RawAccountApplicationInformation(accountAddress string, applicationID uint64) (response []byte, err error) { +func (client RestClient) RawAccountApplicationInformation(accountAddress string, applicationID basics.AppIndex) (response []byte, err error) { var blob Blob err = client.getRaw(&blob, fmt.Sprintf("/v2/accounts/%s/applications/%d", accountAddress, applicationID), rawFormat{Format: "msgpack"}) response = blob @@ -561,19 +561,11 @@ func (client RestClient) RawAccountApplicationInformation(accountAddress string, } // AccountAssetInformation gets account information about a given asset. -func (client RestClient) AccountAssetInformation(accountAddress string, assetID uint64) (response model.AccountAssetResponse, err error) { +func (client RestClient) AccountAssetInformation(accountAddress string, assetID basics.AssetIndex) (response model.AccountAssetResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/assets/%d", accountAddress, assetID), nil) return } -// RawAccountAssetInformation gets account information about a given app. -func (client RestClient) RawAccountAssetInformation(accountAddress string, assetID uint64) (response []byte, err error) { - var blob Blob - err = client.getRaw(&blob, fmt.Sprintf("/v2/accounts/%s/assets/%d", accountAddress, assetID), rawFormat{Format: "msgpack"}) - response = blob - return -} - // AccountAssetsInformation gets account information about a particular account's assets, subject to pagination. func (client RestClient) AccountAssetsInformation(accountAddress string, next *string, limit *uint64) (response model.AccountAssetsInformationResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/assets", accountAddress), pageParams{next, limit}) @@ -620,7 +612,7 @@ func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTx } // Block gets the block info for the given round -func (client RestClient) Block(round uint64) (response v2.BlockResponseJSON, err error) { +func (client RestClient) Block(round basics.Round) (response v2.BlockResponseJSON, err error) { // Note: this endpoint gets the Block as JSON, meaning some string fields with non-UTF-8 data will lose // information. Msgpack should be used instead if this becomes a problem.
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d", round), nil) @@ -628,7 +620,7 @@ func (client RestClient) Block(round uint64) (response v2.BlockResponseJSON, err } // RawBlock gets the encoded, raw msgpack block for the given round -func (client RestClient) RawBlock(round uint64) (response []byte, err error) { +func (client RestClient) RawBlock(round basics.Round) (response []byte, err error) { var blob Blob err = client.getRaw(&blob, fmt.Sprintf("/v2/blocks/%d", round), rawFormat{Format: "msgpack"}) response = blob @@ -636,7 +628,7 @@ func (client RestClient) RawBlock(round uint64) (response []byte, err error) { } // EncodedBlockCert takes a round and returns its parsed block and certificate -func (client RestClient) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) { +func (client RestClient) EncodedBlockCert(round basics.Round) (blockCert rpcs.EncodedBlockCert, err error) { resp, err := client.RawBlock(round) if err != nil { return @@ -777,19 +769,19 @@ func (client RestClient) RawSimulateRawTransaction(data []byte) (response []byte } // StateProofs gets a state proof that covers a given round -func (client RestClient) StateProofs(round uint64) (response model.StateProofResponse, err error) { +func (client RestClient) StateProofs(round basics.Round) (response model.StateProofResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/stateproofs/%d", round), nil) return } // LightBlockHeaderProof gets a Merkle proof for the light block header of a given round. -func (client RestClient) LightBlockHeaderProof(round uint64) (response model.LightBlockHeaderProofResponse, err error) { +func (client RestClient) LightBlockHeaderProof(round basics.Round) (response model.LightBlockHeaderProofResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/lightheader/proof", round), nil) return } // TransactionProof gets a Merkle proof for a transaction in a block. -func (client RestClient) TransactionProof(txid string, round uint64, hashType crypto.HashType) (response model.TransactionProofResponse, err error) { +func (client RestClient) TransactionProof(txid string, round basics.Round, hashType crypto.HashType) (response model.TransactionProofResponse, err error) { txid = stripTransaction(txid) err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), proofParams{HashType: hashType.String()}) return @@ -822,7 +814,7 @@ func (client RestClient) RemoveParticipationKeyByID(participationID string) (err /* Endpoint registered for follower nodes */ // SetSyncRound sets the sync round for the catchup service -func (client RestClient) SetSyncRound(round uint64) (err error) { +func (client RestClient) SetSyncRound(round basics.Round) (err error) { err = client.post(nil, fmt.Sprintf("/v2/ledger/sync/%d", round), nil, nil, true) return } @@ -840,7 +832,7 @@ func (client RestClient) GetSyncRound() (response model.GetSyncRoundResponse, er } // GetLedgerStateDelta retrieves the ledger state delta for the round -func (client RestClient) GetLedgerStateDelta(round uint64) (response ledgercore.StateDelta, err error) { +func (client RestClient) GetLedgerStateDelta(round basics.Round) (response ledgercore.StateDelta, err error) { // Note: this endpoint gets the StateDelta as JSON, meaning some string fields with non-UTF-8 data will lose // information. Msgpack should be used instead if this becomes a problem. 
err = client.get(&response, fmt.Sprintf("/v2/deltas/%d", round), nil) @@ -878,7 +870,7 @@ func (client RestClient) GetBlockTimestampOffset() (response model.GetBlockTimeS } // BlockLogs returns all the logs in a block for a given round -func (client RestClient) BlockLogs(round uint64) (response model.BlockLogsResponse, err error) { +func (client RestClient) BlockLogs(round basics.Round) (response model.BlockLogsResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/logs", round), nil) return } diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go index 00d4720420..e94f09767a 100644 --- a/daemon/algod/api/server/v2/account.go +++ b/daemon/algod/api/server/v2/account.go @@ -34,7 +34,7 @@ import ( func AssetHolding(ah basics.AssetHolding, ai basics.AssetIndex) model.AssetHolding { return model.AssetHolding{ Amount: ah.Amount, - AssetID: uint64(ai), + AssetID: ai, IsFrozen: ah.Frozen, } } @@ -72,9 +72,9 @@ func AccountDataToAccount( apiParticipation = &model.AccountParticipation{ VoteParticipationKey: record.VoteID[:], SelectionParticipationKey: record.SelectionID[:], - VoteFirstValid: uint64(record.VoteFirstValid), - VoteLastValid: uint64(record.VoteLastValid), - VoteKeyDilution: uint64(record.VoteKeyDilution), + VoteFirstValid: record.VoteFirstValid, + VoteLastValid: record.VoteLastValid, + VoteKeyDilution: record.VoteKeyDilution, } if !record.StateProofID.IsEmpty() { tmp := record.StateProofID[:] @@ -111,11 +111,11 @@ func AccountDataToAccount( return model.Account{}, errors.New("overflow on pending reward calculation") } - minBalance := record.MinBalance(consensus) + minBalance := record.MinBalance(consensus.BalanceRequirements()) return model.Account{ SigType: nil, - Round: uint64(lastRound), + Round: lastRound, Address: address, Amount: amount.Raw, PendingRewards: pendingRewards.Raw, @@ -139,8 +139,8 @@ func AccountDataToAccount( TotalBoxes: omitEmpty(record.TotalBoxes), TotalBoxBytes: omitEmpty(record.TotalBoxBytes), MinBalance: minBalance.Raw, - LastProposed: omitEmpty(uint64(record.LastProposed)), - LastHeartbeat: omitEmpty(uint64(record.LastHeartbeat)), + LastProposed: omitEmpty(record.LastProposed), + LastHeartbeat: omitEmpty(record.LastHeartbeat), }, nil } @@ -207,8 +207,8 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { if a.Participation != nil { copy(voteID[:], a.Participation.VoteParticipationKey) copy(selID[:], a.Participation.SelectionParticipationKey) - voteFirstValid = basics.Round(a.Participation.VoteFirstValid) - voteLastValid = basics.Round(a.Participation.VoteLastValid) + voteFirstValid = a.Participation.VoteFirstValid + voteLastValid = a.Participation.VoteLastValid voteKeyDilution = a.Participation.VoteKeyDilution if a.Participation.StateProofKey != nil { copy(stateProofID[:], *a.Participation.StateProofKey) @@ -287,7 +287,7 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { if a.Assets != nil && len(*a.Assets) > 0 { assets = make(map[basics.AssetIndex]basics.AssetHolding, len(*a.Assets)) for _, h := range *a.Assets { - assets[basics.AssetIndex(h.AssetID)] = basics.AssetHolding{ + assets[h.AssetID] = basics.AssetHolding{ Amount: h.Amount, Frozen: h.IsFrozen, } @@ -302,7 +302,7 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { if err != nil { return basics.AccountData{}, err } - appLocalStates[basics.AppIndex(ls.Id)] = basics.AppLocalState{ + appLocalStates[ls.Id] = basics.AppLocalState{ Schema: basics.StateSchema{ NumUint: 
ls.Schema.NumUint, NumByteSlice: ls.Schema.NumByteSlice, @@ -320,7 +320,7 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { if err != nil { return basics.AccountData{}, err } - appParams[basics.AppIndex(params.Id)] = ap + appParams[params.Id] = ap } } @@ -338,26 +338,6 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { totalExtraPages = uint32(*a.AppsTotalExtraPages) } - var totalBoxes uint64 - if a.TotalBoxes != nil { - totalBoxes = *a.TotalBoxes - } - - var totalBoxBytes uint64 - if a.TotalBoxBytes != nil { - totalBoxBytes = *a.TotalBoxBytes - } - - var lastProposed uint64 - if a.LastProposed != nil { - lastProposed = *a.LastProposed - } - - var lastHeartbeat uint64 - if a.LastHeartbeat != nil { - lastHeartbeat = *a.LastHeartbeat - } - status, err := basics.UnmarshalStatus(a.Status) if err != nil { return basics.AccountData{}, err @@ -380,10 +360,10 @@ func AccountToAccountData(a *model.Account) (basics.AccountData, error) { AppParams: appParams, TotalAppSchema: totalSchema, TotalExtraAppPages: totalExtraPages, - TotalBoxes: totalBoxes, - TotalBoxBytes: totalBoxBytes, - LastProposed: basics.Round(lastProposed), - LastHeartbeat: basics.Round(lastHeartbeat), + TotalBoxes: nilToZero(a.TotalBoxes), + TotalBoxBytes: nilToZero(a.TotalBoxBytes), + LastProposed: nilToZero(a.LastProposed), + LastHeartbeat: nilToZero(a.LastHeartbeat), } if a.AuthAddr != nil { @@ -449,7 +429,7 @@ func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *b globalState := convertTKVToGenerated(&appParams.GlobalState) extraProgramPages := uint64(appParams.ExtraProgramPages) app := model.Application{ - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: creator, ApprovalProgram: appParams.ApprovalProgram, @@ -474,7 +454,7 @@ func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *b func AppLocalState(state basics.AppLocalState, appIdx basics.AppIndex) model.ApplicationLocalState { localState := convertTKVToGenerated(&state.KeyValue) return model.ApplicationLocalState{ - Id: uint64(appIdx), + Id: appIdx, KeyValue: localState, Schema: model.ApplicationStateSchema{ NumByteSlice: state.Schema.NumByteSlice, @@ -508,7 +488,7 @@ func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.As } return model.Asset{ - Index: uint64(idx), + Index: idx, Params: assetParams, } } diff --git a/daemon/algod/api/server/v2/account_test.go b/daemon/algod/api/server/v2/account_test.go index 5db8e8c3cb..e58990ada0 100644 --- a/daemon/algod/api/server/v2/account_test.go +++ b/daemon/algod/api/server/v2/account_test.go @@ -103,7 +103,7 @@ func TestAccount(t *testing.T) { }, AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx1: assetParams1, assetIdx2: assetParams2}, } - b := a.WithUpdatedRewards(proto, 100) + b := a.WithUpdatedRewards(proto.RewardUnit, 100) addr := basics.Address{}.String() conv, err := AccountDataToAccount(addr, &b, round, &proto, a.MicroAlgos) @@ -118,7 +118,7 @@ func TestAccount(t *testing.T) { require.Equal(t, uint64(totalAppExtraPages), *conv.AppsTotalExtraPages) verifyCreatedApp := func(index int, appIdx basics.AppIndex, params basics.AppParams) { - require.Equal(t, uint64(appIdx), (*conv.CreatedApps)[index].Id) + require.Equal(t, appIdx, (*conv.CreatedApps)[index].Id) require.Equal(t, params.ApprovalProgram, (*conv.CreatedApps)[index].Params.ApprovalProgram) if params.Version != 0 { require.NotNil(t, (*conv.CreatedApps)[index].Params.Version) @@ -164,7 +164,7 @@ func 
TestAccount(t *testing.T) { } verifyAppLocalState := func(index int, appIdx basics.AppIndex, numUints, numByteSlices uint64, keyValues model.TealKeyValueStore) { - require.Equal(t, uint64(appIdx), (*conv.AppsLocalState)[index].Id) + require.Equal(t, appIdx, (*conv.AppsLocalState)[index].Id) require.Equal(t, numUints, (*conv.AppsLocalState)[index].Schema.NumUint) require.Equal(t, numByteSlices, (*conv.AppsLocalState)[index].Schema.NumByteSlice) require.Equal(t, len(keyValues), len(*(*conv.AppsLocalState)[index].KeyValue)) @@ -179,7 +179,7 @@ func TestAccount(t *testing.T) { verifyAppLocalState(1, appIdx2, 10, 0, model.TealKeyValueStore{makeTKV("bytes", "value2"), makeTKV("uint", 2)}) verifyCreatedAsset := func(index int, assetIdx basics.AssetIndex, params basics.AssetParams) { - require.Equal(t, uint64(assetIdx), (*conv.CreatedAssets)[index].Index) + require.Equal(t, assetIdx, (*conv.CreatedAssets)[index].Index) require.Equal(t, params.Total, (*conv.CreatedAssets)[index].Params.Total) require.NotNil(t, (*conv.CreatedAssets)[index].Params.DefaultFrozen) require.Equal(t, params.DefaultFrozen, *(*conv.CreatedAssets)[index].Params.DefaultFrozen) diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go index 32fb6704d5..acec949591 100644 --- a/daemon/algod/api/server/v2/dryrun.go +++ b/daemon/algod/api/server/v2/dryrun.go @@ -51,7 +51,7 @@ type DryrunRequest struct { ProtocolVersion string `codec:"protocol-version"` // Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to. - Round uint64 `codec:"round"` + Round basics.Round `codec:"round"` // LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to. LatestTimestamp int64 `codec:"latest-timestamp"` @@ -158,8 +158,8 @@ func (ddr *dryrunDebugReceiver) updateScratch() { func (ddr *dryrunDebugReceiver) stateToState(state *logic.DebugState) model.DryrunState { st := model.DryrunState{ - Line: uint64(state.Line), - Pc: uint64(state.PC), + Line: state.Line, + Pc: state.PC, } st.Stack = make([]model.TealValue, len(state.Stack)) for i, v := range state.Stack { @@ -281,14 +281,14 @@ func (dl *dryrunLedger) lookup(rnd basics.Round, addr basics.Address) (basics.Ac } if out.AppParams == nil { out.AppParams = make(map[basics.AppIndex]basics.AppParams) - out.AppParams[basics.AppIndex(app.Id)] = params + out.AppParams[app.Id] = params } else { - ap, ok := out.AppParams[basics.AppIndex(app.Id)] + ap, ok := out.AppParams[app.Id] if ok { MergeAppParams(&ap, ¶ms) - out.AppParams[basics.AppIndex(app.Id)] = ap + out.AppParams[app.Id] = ap } else { - out.AppParams[basics.AppIndex(app.Id)] = params + out.AppParams[app.Id] = params } } } @@ -342,10 +342,10 @@ func (dl *dryrunLedger) LookupApplication(rnd basics.Round, addr basics.Address, return ledgercore.AppResource{}, err } var result ledgercore.AppResource - if p, ok := ad.AppParams[basics.AppIndex(aidx)]; ok { + if p, ok := ad.AppParams[aidx]; ok { result.AppParams = &p } - if s, ok := ad.AppLocalStates[basics.AppIndex(aidx)]; ok { + if s, ok := ad.AppLocalStates[aidx]; ok { result.AppLocalState = &s } return result, nil @@ -357,10 +357,10 @@ func (dl *dryrunLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx return ledgercore.AssetResource{}, err } var result ledgercore.AssetResource - if p, ok := ad.AssetParams[basics.AssetIndex(aidx)]; ok { + if p, ok := ad.AssetParams[aidx]; ok { result.AssetParams = &p } - if p, ok := 
ad.Assets[basics.AssetIndex(aidx)]; ok { + if p, ok := ad.Assets[aidx]; ok { result.AssetHolding = &p } return result, nil @@ -378,7 +378,7 @@ func (dl *dryrunLedger) GetCreatorForRound(rnd basics.Round, cidx basics.Creatab continue } for _, asset := range *acct.CreatedAssets { - if asset.Index == uint64(cidx) { + if asset.Index == basics.AssetIndex(cidx) { addr, err := basics.UnmarshalChecksumAddress(acct.Address) return addr, true, err } @@ -387,7 +387,7 @@ func (dl *dryrunLedger) GetCreatorForRound(rnd basics.Round, cidx basics.Creatab return basics.Address{}, false, fmt.Errorf("no asset %d", cidx) case basics.AppCreatable: for _, app := range dl.dr.Apps { - if app.Id == uint64(cidx) { + if app.Id == basics.AppIndex(cidx) { var addr basics.Address if app.Params.Creator != "" { var err error @@ -480,18 +480,18 @@ func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) { creator := stxn.Txn.Sender.String() // check and use the first entry in dr.Apps if len(dr.Apps) > 0 && dr.Apps[0].Params.Creator == creator { - appIdx = basics.AppIndex(dr.Apps[0].Id) + appIdx = dr.Apps[0].Id } } if stxn.Txn.OnCompletion == transactions.OptInOC { if idx, ok := dl.accountsIn[stxn.Txn.Sender]; ok { acct := dl.dr.Accounts[idx] ls := model.ApplicationLocalState{ - Id: uint64(appIdx), + Id: appIdx, KeyValue: new(model.TealKeyValueStore), } for _, app := range dr.Apps { - if basics.AppIndex(app.Id) == appIdx { + if app.Id == appIdx { if app.Params.LocalStateSchema != nil { ls.Schema = *app.Params.LocalStateSchema } @@ -504,7 +504,7 @@ func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) { } else { found := false for _, apls := range *acct.AppsLocalState { - if apls.Id == uint64(appIdx) { + if apls.Id == appIdx { // already opted in found = true } @@ -525,7 +525,7 @@ func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) { var app basics.AppParams ok := false for _, appt := range dr.Apps { - if appt.Id == uint64(appIdx) { + if appt.Id == appIdx { app, err = ApplicationParamsToAppParams(&appt.Params) if err != nil { response.Error = err.Error() @@ -577,8 +577,8 @@ func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) { // This is necessary because the fields can only be represented as unsigned // integers, so a negative cost would underflow. The two fields also provide // more information, which can be useful for testing purposes. 
- budgetAdded := uint64(proto.MaxAppProgramCost * numInnerTxns(delta)) - budgetConsumed := uint64(cost) + budgetAdded + budgetAdded := proto.MaxAppProgramCost * numInnerTxns(delta) + budgetConsumed := cost + budgetAdded result.BudgetAdded = &budgetAdded result.BudgetConsumed = &budgetConsumed maxCurrentBudget = pooledAppBudget diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go index ec8aefad86..e84c02dc89 100644 --- a/daemon/algod/api/server/v2/dryrun_test.go +++ b/daemon/algod/api/server/v2/dryrun_test.go @@ -1155,7 +1155,7 @@ int 1`) }, Apps: []model.Application{ { - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: approval, @@ -1246,7 +1246,7 @@ return }, Apps: []model.Application{ { - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: approval, @@ -1255,7 +1255,7 @@ return }, }, { - Id: uint64(appIdx + 1), + Id: appIdx + 1, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: approv, @@ -1310,7 +1310,7 @@ func TestDryrunCost(t *testing.T) { for _, test := range tests { t.Run(test.msg, func(t *testing.T) { expectedCosts := make([]int64, 3) - expectedBudgetAdded := make([]uint64, 3) + expectedBudgetAdded := make([]int, 3) ops, err := logic.AssembleString("#pragma version 5\nbyte 0x41\n" + strings.Repeat("keccak256\n", test.numHashes) + "pop\nint 1\n") require.NoError(t, err) @@ -1383,7 +1383,7 @@ int 1`) }, Apps: []model.Application{ { - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: app1, @@ -1392,7 +1392,7 @@ int 1`) }, }, { - Id: uint64(appIdx + 1), + Id: appIdx + 1, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: app2, @@ -1401,7 +1401,7 @@ int 1`) }, }, { - Id: uint64(appIdx + 2), + Id: appIdx + 2, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: app3, @@ -1483,7 +1483,7 @@ int 1` ApplicationID: appIdx, }.SignedTxn()}, Apps: []model.Application{{ - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: sender.String(), ApprovalProgram: approval, @@ -1545,7 +1545,7 @@ int 0 }, Apps: []model.Application{ { - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ Creator: creator.String(), ApprovalProgram: approval, @@ -1609,7 +1609,7 @@ int 1 ApplicationID: appIdx, }.SignedTxn()}, Apps: []model.Application{{ - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ ApprovalProgram: paySender.Program, ClearStateProgram: clst, @@ -1685,7 +1685,7 @@ int 1`) Sender: sender, ApplicationID: appIdx}.SignedTxn()) apps = append(apps, model.Application{ - Id: uint64(appIdx), + Id: appIdx, Params: model.ApplicationParams{ ApprovalProgram: approvalOps.Program, ClearStateProgram: clst, diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index ed37b0bb92..9ae261b61e 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -1,6 +1,6 @@ // Package data provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package data import ( @@ -14,9 +14,10 @@ import ( "strings" . 
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" + "github.com/algorand/go-algorand/data/basics" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. @@ -29,7 +30,7 @@ type ServerInterface interface { GetSyncRound(ctx echo.Context) error // Given a round, tells the ledger to keep that round in its cache. // (POST /v2/ledger/sync/{round}) - SetSyncRound(ctx echo.Context, round uint64) error + SetSyncRound(ctx echo.Context, round basics.Round) error } // ServerInterfaceWrapper converts echo contexts to parameters. @@ -41,9 +42,9 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) UnsetSyncRound(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.UnsetSyncRound(ctx) return err } @@ -52,9 +53,9 @@ func (w *ServerInterfaceWrapper) UnsetSyncRound(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetSyncRound(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetSyncRound(ctx) return err } @@ -63,16 +64,16 @@ func (w *ServerInterfaceWrapper) GetSyncRound(ctx echo.Context) error { func (w *ServerInterfaceWrapper) SetSyncRound(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.SetSyncRound(ctx, round) return err } @@ -114,230 +115,225 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuNk7gyTvZ2bV8CkS0JbyiADwBnpPj8", - "3a/QDZAgCUrUzMROrt5f9oj40Wg0Gv0L3e9mudpUSoK0Zvb03azimm/Agsa/eJ6rWtpMFO6vAkyuRWWF", - "krOn4RszVgu5ms1nwv1acbuezWeSb6Bt4/rPZxr+UQsNxeyp1TXMZyZfw4a7ge2ucq2bkbbZSmV+iDMa", - "4vz57P2eD7woNBgzhPIHWe6YkHlZF8Cs5tLw3H0y7FrYNbNrYZjvzIRkSgJTS2bXncZsKaAszElY5D9q", - "0LtolX7y8SW9b0HMtCphCOcztVkICQEqaIBqNoRZxQpYYqM1t8zN4GANDa1iBrjO12yp9AFQCYgYXpD1", - "Zvb09cyALEDjbuUgrvC/Sw3wG2SW6xXY2dt5anFLCzqzYpNY2rnHvgZTl9YwbItrXIkrkMz1OmHf1cay", - "BTAu2Y9fP2OffvrpF24hG24tFJ7IRlfVzh6vibrPns4KbiF8HtIaL1dKc1lkTfsfv36G81/4BU5txY2B", - "9GE5c1/Y+fOxBYSOCRIS0sIK96FD/a5H4lC0Py9gqTRM3BNqfKebEs//UXcl5zZfV0pIm9gXhl8ZfU7y", - "sKj7Ph7WANBpXzlMaTfo64fZF2/fPZo/evj+X16fZf/t//zs0/cTl/+sGfcABpIN81prkPkuW2ngeFrW", - "XA7x8aOnB7NWdVmwNb/CzecbZPW+L3N9iXVe8bJ2dCJyrc7KlTKMezIqYMnr0rIwMatl6diUG81TOxOG", - 
"VVpdiQKKueO+12uRr1nODQ2B7di1KEtHg7WBYozW0qvbc5jexyhxcN0IH7igPy4y2nUdwARskRtkeakM", - "ZFYduJ7CjcNlweILpb2rzHGXFXu1BoaTuw902SLupKPpstwxi/taMG4YZ+FqmjOxZDtVs2vcnFJcYn+/", - "Goe1DXNIw83p3KPu8I6hb4CMBPIWSpXAJSIvnLshyuRSrGoNhl2vwa79nafBVEoaYGrxd8it2/b/uPjh", - "e6Y0+w6M4St4yfNLBjJXBRQn7HzJpLIRaXhaQhy6nmPr8HClLvm/G+VoYmNWFc8v0zd6KTYisarv+FZs", - "6g2T9WYB2m1puEKsYhpsreUYQDTiAVLc8O1w0le6ljnufzttR5Zz1CZMVfIdImzDt399OPfgGMbLklUg", - "CyFXzG7lqBzn5j4MXqZVLYsJYo51expdrKaCXCwFFKwZZQ8kfppD8Ah5HDyt8BWBEwYZBaeZ5QA4ErYJ", - "mnGn231hFV9BRDIn7CfP3PCrVZcgG0Jnix1+qjRcCVWbptMIjDj1fglcKgtZpWEpEjR24dHhGAy18Rx4", - "42WgXEnLhYTCMWcEWlkgZjUKUzThfn1neIsvuIHPn4zd8e3Xibu/VP1d37vjk3YbG2V0JBNXp/vqD2xa", - "sur0n6AfxnMbscro58FGitUrd9ssRYk30d/d/gU01AaZQAcR4W4yYiW5rTU8fSMfuL9Yxi4slwXXhftl", - "Qz99V5dWXIiV+6mkn16olcgvxGoEmQ2sSYULu23oHzdemh3bbVKveKHUZV3FC8o7iutix86fj20yjXks", - "YZ412m6seLzaBmXk2B5222zkCJCjuKu4a3gJOw0OWp4v8Z/tEumJL/Vv7p+qKl1vWy1TqHV07K9kNB94", - "s8JZVZUi5w6JP/rP7qtjAkCKBG9bnOKF+vRdBGKlVQXaChqUV1VWqpyXmbHc4kj/qmE5ezr7l9PW/nJK", - "3c1pNPkL1+sCOzmRlcSgjFfVEWO8dKKP2cMsHIPGT8gmiO2h0CQkbaIjJeFYcAlXXNqTVmXp8IPmAL/2", - "M7X4JmmH8N1TwUYRzqjhAgxJwNTwnmER6hmilSFaUSBdlWrR/PDJWVW1GMTvZ1VF+EDpEQQKZrAVxpr7", - "uHzenqR4nvPnJ+ybeGwUxZUsd+5yIFHD3Q1Lf2v5W6yxLfk1tCPeMwy3U+kTtzUBDU7MvwuKQ7VirUon", - "9RykFdf4b75tTGbu90md/xwkFuN2nLhQ0fKYIx0Hf4mUm096lDMkHG/uOWFn/b43Ixs3yh6CMectFu+a", - "ePAXYWFjDlJCBFFETX57uNZ8N/NCYobC3pBMfjJAFFLxlZAI7dypT5Jt+CXth0K8O0IA0+hFREskQTYm", - "VC9zetSfDOwsfwJqTW1skESdpFoKY1GvxsZsDSUKzlwGgo5J5UaUMWHD9yyigfla84po2X8hsUtI1Oep", - "EcF6y4t34p2YhDli99FGI1Q3ZssHWWcSEuQaPRi+LFV++Tdu1ndwwhdhrCHt4zRsDbwAzdbcrBMHp0fb", - "7WhT6Ns1RJpli2iqk2aJL9TK3MESS3UM66qqZ7ws3dRDltVbLQ486SCXJXONGWwEGsy94kgWdtK/2Fc8", - "XzuxgOW8LOetqUhVWQlXUDqlXUgJes7smtv28OPIQa/Bc2TAMTsLLFqNNzOhiU03tggNbMPxBto4baYq", - "u30aDmr4BnpSEN6IqkYrQqRonD8Pq4MrkMiTmqER/GaNaK2JBz9xc/tPOLNUtDiyANrgvmvw1/CLDtCu", - "dXufynYKpQuyWVv3m9AsV5qGoBveT+7+A1y3nYk6P6k0ZH4Iza9AG1661fUWdb8h37s6nQdOZsEtj06m", - "p8K0AkacA/uheAc6YaX5Af/DS+Y+OynGUVJLPQKFERW5Uwu6mB2qaCbXAO2tim3IlMkqnl8eBeWzdvI0", - "m5l08r4i66nfQr+IZodebUVh7mqbcLCxveqeELJdBXY0kEX2Mp1orikIeKUqRuyjBwJxChyNEKK2d36t", - "fam2KZi+VNvBlaa2cCc74caZzOy/VNvnHjKlD2Mex56CdLdAyTdg8HaTMeN0s7R+ubOF0jeTJnoXjGSt", - "t5FxN2okTM17SMKmdZX5s5nwWFCD3kBtgMd+IaA/fApjHSxcWP47YMG4Ue8CC92B7hoLalOJEu6A9NdJ", - "IW7BDXz6mF387eyzR49/efzZ544kK61Wmm/YYmfBsE+8WY4ZuyvhflI7QukiPfrnT4KPqjtuahyjap3D", - "hlfDocj3RdovNWOu3RBrXTTjqhsAJ3FEcFcboZ2RW9eB9hwW9eoCrHWa7kutlnfODQczpKDDRi8r7QQL", - "0/UTemnptHBNTmFrNT+tsCXIguIM3DqEcTrgZnEnRDW28UU7S8E8Rgs4eCiO3aZ2ml28VXqn67swb4DW", - "Siev4Eorq3JVZk7OEyphoHjpWzDfImxX1f+doGXX3DA3N3ova1mM2CHsVk6/v2joV1vZ4mbvDUbrTazO", - "zztlX7rIb7WQCnRmt5IhdXbMI0utNoyzAjuirPENWJK/xAYuLN9UPyyXd2PtVDhQwo4jNmDcTIxaOOnH", - "QK4kBfMdMNn4Uaegp4+Y4GWy4wB4jFzsZI6usrs4tuPWrI2Q6Lc3O5lHpi0HYwnFqkOWtzdhjaGDprpn", - "EuA4dLzAz2irfw6l5V8r/aoVX7/Rqq7unD3355y6HO4X470BhesbzMBCrspuAOnKwX6SWuNHWdCzxohA", - "a0DokSJfiNXaRvriS61+hzsxOUsKUPxAxqLS9RmajL5XhWMmtjZ3IEq2g7UcztFtzNf4QtWWcSZVAbj5", - "tUkLmSMhhxjrhCFaNpZb0T4hDFuAo66c1261dcUwAGlwX7QdM57TCc0QNWYk/KKJm6FWNB2Fs5UaeLFj", - "CwDJ1MLHOPjoC1wkx+gpG8Q0L+Im+EUHrkqrHIyBIvOm6IOghXZ0ddg9eELAEeBmFmYUW3J9a2Avrw7C", - "eQm7DGP9DPvk25/N/Y8Ar1WWlwcQi21S6O3b04ZQT5t+H8H1J4/Jjix1RLVOvHUMogQLYyg8Ciej+9eH", - "aLCLt0fLFWgMKfldKT5McjsCakD9nen9ttDW1UgEu1fTnYTnNkxyqYJglRqs5MZmh9iya9SxJbgVRJww", - "xYlx4BHB6wU3lsKghCzQpknXCc5DQpibYhzgUTXEjfxz0ECGY+fuHpSmNo06YuqqUtpCkVoDemRH5/oe", - "ts1cahmN3eg8VrHawKGRx7AUje+R5TVg/IPbxv/qPbrDxaFP3d3zuyQqO0C0iNgHyEVoFWE3juIdAUSY", - 
"FtFEOML0KKcJHZ7PjFVV5biFzWrZ9BtD0wW1PrM/tW2HxEVODrq3CwUGHSi+vYf8mjBL8dtrbpiHI7jY", - "0ZxD8VpDmN1hzIyQOWT7KB9VPNcqPgIHD2ldrTQvICug5LtEcAB9ZvR53wC44626qyxkFIib3vSWkkPc", - "456hFY5nUsIjwy8sd0fQqQItgfjeB0YuAMdOMSdPR/eaoXCu5BaF8XDZtNWJEfE2vFLW7binBwTZc/Qp", - "AI/goRn65qjAzlmre/an+C8wfoJGjjh+kh2YsSW04x+1gBFbsH/jFJ2XHnvvceAk2xxlYwf4yNiRHTFM", - "v+TailxUqOt8C7s7V/36EyQd56wAy0UJBYs+kBpYxf0ZhZD2x7yZKjjJ9jYEf2B8SywnhOl0gb+EHerc", - "L+ltQmTquAtdNjGqu5+4ZAhoiHh2InjcBLY8t+XOCWp2DTt2DRqYqRcUwjD0p1hVZfEASf/Mnhm9dzbp", - "G93rLr7AoaLlpWLNSCfYD9+rnmLQQYfXBSqlygkWsgEykhBMih1hlXK7Lvzzp/AAJlBSB0jPtNE131z/", - "90wHzbgC9l+qZjmXqHLVFhqZRmkUFFCAdDM4EayZ0wcnthiCEjZAmiR+efCgv/AHD/yeC8OWcB3eDLqG", - "fXQ8eIB2nJfK2M7hugN7qDtu54nrAx1X7uLzWkifpxyOePIjT9nJl73BG2+XO1PGeMJ1y781A+idzO2U", - "tcc0Mi3aC8ed5MvpxgcN1o37fiE2dcntXXit4IqXmboCrUUBBzm5n1go+dUVL39ouuF7SMgdjeaQ5fiK", - "b+JY8Mr1oYd/bhwhhTvAFPQ/FSA4p14X1OmAitlGqorNBgrBLZQ7VmnIgd67OcnRNEs9YRQJn6+5XKHC", - "oFW98sGtNA4y/NqQaUbXcjBEUqiyW5mhkTt1AfgwtfDk0YlTwJ1K17eQkwJzzZv5/CvXKTdztAd9j0HS", - "STafjWq8DqlXrcZLyOm+25xwGXTkvQg/7cQTXSmIOif7DPEVb4s7TG5zfx+TfTt0CsrhxFHEb/txLOjX", - "qdvl7g6EHhqIaag0GLyiYjOVoa9qGb/RDqGCO2NhM7TkU9dfRo7fj6P6opKlkJBtlIRdMi2JkPAdfkwe", - "J7wmRzqjwDLWt6+DdODvgdWdZwo13ha/uNv9E9r3WJmvlb4rlygNOFm8n+CBPOhu91Pe1E/KyzLhWvQv", - "OPsMwMybYF2hGTdG5QJltvPCzH1UMHkj/XPPLvpfNu9S7uDs9cft+dDi5ABoI4ayYpzlpUALspLG6jq3", - "byRHG1W01EQQV1DGx62Wz0KTtJk0YcX0Q72RHAP4GstVMmBjCQkzzdcAwXhp6tUKjO3pOkuAN9K3EpLV", - "Ulica+OOS0bnpQKNkVQn1HLDd2zpaMIq9htoxRa17Ur/+EDZWFGW3qHnpmFq+UZyy0rgxrLvhHy1xeGC", - "0z8cWQn2WunLBgvp230FEowwWTrY7Bv6inH9fvlrH+OP4e70OQSdthkTZm6ZnSQp/+eTf3/6+iz7b579", - "9jD74n+cvn335P39B4MfH7//61//b/enT9//9f6//2tqpwLsqeezHvLz514zPn+O6k8Uqt+H/YPZ/zdC", - "Zkkii6M5erTFPsFUEZ6A7neNY3YNb6TdSkdIV7wUheMtNyGH/g0zOIt0OnpU09mInjEsrPVIpeIWXIYl", - "mEyPNd5YihrGZ6YfqqNT0r89x/OyrCVtZZC+6R1miC9Ty3mTjIDylD1l+FJ9zUOQp//z8Wefz+btC/Pm", - "+2w+81/fJihZFNtUHoECtildMX4kcc+wiu8M2DT3QNiToXQU2xEPu4HNArRZi+rDcwpjxSLN4cKTJW9z", - "2spzSQH+7vygi3PnPSdq+eHhthqggMquU/mLOoIatmp3E6AXdlJpdQVyzsQJnPRtPoXTF31QXwl8GQJT", - "tVJTtKHmHBChBaqIsB4vZJJhJUU/vecN/vI3d64O+YFTcPXnTEX03vvmq1fs1DNMc49SWtDQURKChCrt", - "H092ApIcN4vflL2Rb+RzWKL1Qcmnb2TBLT9dcCNyc1ob0F/yksscTlaKPQ3vMZ9zy9/IgaQ1mlgxejTN", - "qnpRipxdxgpJS56ULGs4wps3r3m5Um/evB3EZgzVBz9Vkr/QBJkThFVtM5/qJ9NwzXXK92WaVC84MuXy", - "2jcrCdmqJgNpSCXkx0/zPF5Vpp/yYbj8qird8iMyND6hgdsyZqxq3qM5AcU/6XX7+73yF4Pm18GuUhsw", - "7NcNr14Lad+y7E398OGn+LKvzYHwq7/yHU3uKphsXRlNSdE3quDCSa3EWPWs4quUi+3Nm9cWeIW7j/Ly", - "Bm0cZcmwW+fVYXhggEO1C2ieOI9uAMFx9ONgXNwF9QppHdNLwE+4hd0H2Lfar+j9/I2368AbfF7bdebO", - "dnJVxpF42Jkm29vKCVkhGsOIFWqrPjHeAli+hvzSZyyDTWV38073EPDjBc3AOoShXHb0whCzKaGDYgGs", - "rgruRXEud/20NoZeVOCgP8Il7F6pNhnTMXlsumlVzNhBRUqNpEtHrPGx9WP0N99HlYWHpj47CT7eDGTx", - "tKGL0Gf8IJPIeweHOEUUnbQfY4jgOoEIIv4RFNxgoW68W5F+anlC5iCtuIIMSrESi1Qa3v8c+sMCrI4q", - "feZBH4XcDGiYWDKnyi/oYvXqveZyBe56dleqMrykrKrJoA3Uh9bAtV0At3vt/DJOSBGgQ5XyGl9eo4Vv", - "7pYAW7ffwqLFTsK10yrQUERtfPTyyXj8GQEOxQ3hCd1bTeFkVNf1qEtkHAy3coPdRq31oXkxnSFc9H0D", - "mLJUXbt9cVAon22TkrpE90tt+ApGdJfYezcxH0bH44eDHJJIkjKIWvZFjYEkkASZGmduzckzDO6LO8So", - "ZvYCMsNM5CD2PiNMou0RtihRgG0iV2nvue54USkr8BhoadYCWraiYACji5H4OK65CccR86UGLjtJOvsd", - "077sS013HsUSRklRm8Rz4Tbsc9CB3u8T1IWsdCEVXaz0T0gr53QvfL6Q2g4lUTQtoIQVLZwaB0JpEya1", - "G+Tg+GG5RN6SpcISIwN1JAD4OcBpLg8YI98ImzxCiowjsDHwAQdm36v4bMrVMUBKn/CJh7Hxioj+hvTD", - "PgrUd8KoqtzlKkb8jXngAD4VRStZ9CKqcRgm5Jw5NnfFS8fmvC7eDjLIkIYKRS8fmg+9uT+maOxxTdGV", - "f9SaSEi4yWpiaTYAnRa190C8UNuMXigndZHFduHoPfl2Ad9Lpw4m5aK7Z9hCbTGcC68WipU/AMs4HAGM", - 
"yPayFQbpFfuNyVkEzL5p98u5KSo0SDLe0NqQy5igN2XqEdlyjFw+idLL3QiAnhmqrdXgzRIHzQdd8WR4", - "mbe32rxNmxqehaWO/9gRSu7SCP6G9rFuQri/tYn/xpOLhRP1QTLhDS1Lt8lQSJ0ryjp4TILCPjl0gNiD", - "1Zd9OTCJ1m6sVxevEdZSrMQx36FTcog2AyWgEpx1RNPsMhUp4HR5wHv8InSLjHW4e1zu7kcBhBpWwlho", - "nUYhLuhjmOM5pk9Wajm+OlvppVvfj0o1lz+5zbFjZ5kffAUYgb8U2tgMPW7JJbhGXxs0In3tmqYl0G6I", - "IhUbEEWa4+K0l7DLClHWaXr183773E37fXPRmHqBt5iQFKC1wOIYycDlPVNTbPveBb+gBb/gd7beaafB", - "NXUTa0cu3Tn+JOeix8D2sYMEAaaIY7hroyjdwyCjB+dD7hhJo1FMy8k+b8PgMBVh7INRauHZ+9jNTyMl", - "1xKlAUy/EFSrFRQhvVnwh8koiVyp5Cqq4lRV+3LmnTBKXYeZ5/YkrfNh+DAWhB+J+5mQBWzT0MdaAULe", - "vqzDhHs4yQokpStJm4WSqIlD/LFFZKv7wL7Q/gOAZBD0q54zu41Opl1qthM3oAReeJ3EQFjf/mM53BCP", - "uvlY+HQn8+n+I4QDIk0JGxU2GaYhGGHAvKpEse05nmjUUSMYP8q6PCJtIWvxgx3AQDcIOklwnVTaPtTa", - "G9hPUec9dVoZxV77wGJH3zz3D/CLWqMHoxPZPMzb3uhqE9f+7c8XVmm+Au+FygikWw2ByzkGDVFWdMOs", - "oHCSQiyXEHtfzE08Bx3gBjb2YgLpJogs7aKphbSfP0mR0QHqaWE8jLI0xSRoYcwn/2ro5QoyfWRKaq6E", - "aGtu4KpKPtf/FnbZz7ysnZIhtGnDc73bqXv5HrHrV5tvYYcjH4x6dYAd2BW0PP0ISIMpS3/zyUQJrO+Z", - "Top/VC87W3jETp2ld+mOtsYXZRgn/vaW6RQt6C7lNgejDZJwsEzZjYt0bII7PdBFfJ+UD22CKA7LIJG8", - "H08lTChhObyKmlwUh2j3FfAyEC8uZ/Z+PrtdJEDqNvMjHsD1y+YCTeIZI03JM9wJ7DkS5byqtLriZebj", - "JcYuf62u/OWPzUN4xQfWZNKU/eqrsxcvPfjv57O8BK6zxhIwuipsV/1pVkVlHPZfJZTt2xs6yVIUbX6T", - "kTmOsbjGzN49Y9OgKEobPxMdRR9zsUwHvB/kfT7Uh5a4J+QHqibip/V5UsBPN8iHX3FRBmdjgHYkOB0X", - "N62yTpIrxAPcOlgoivm69VijjxvevHl9FfDYugkoYKbJr56IoDITDOR9JpI+hC0RH2B9uKQfMANmWrGR", - "Pj8mcjwfY8TvXEj7WunOHeMfQCZjlH4/6c3J8oTHkZDwUCazL7OdMJLvfl396g79gwfxiX7wYM5+Lf2H", - "CED8feF/RzXmwYOkkzJpLXO8CI1hkm/gfvOYY3QjPqyeL+F6mhxwdrVpBFg1ToYNhVKwUUD3tcfetRYe", - "n4X/pYAS3E8nU2wB8aYTumNgppygi7EHj00s64YqcxqmZD90G9/aOtLCO8VXfiCf7/AIyXqDftLMlCJP", - "R5DIhXHcR1LMpmvMsPGIUdiNWIuREGBZi2gs12xKatYekNEcSWSaZHbYFncL5Y93LcU/amCicMrTUoDG", - "67N3owYdBEcdyL1p85sfmNxh7fC3MbfscWsFk9M+W8teN+HzxnUVFpqqLXRkoHk844Bx7wkS9/QRbjl8", - "NLfuRnpOU5emVGgPjM77BEfmSFZcFyZbavUbpC9sdFMl8m0E/6pAa/JvIFMBgn2W0viu28Lx7eyHtnu6", - "Cj628bdWucOim+JmN7lM06f6uI28iW5t0lmhPZLHdL04kKH7AmGEteDximJusdpKCHLiks4TJZvoPGRL", - "n8r4yegpjd+eSg/z4Jltya8XPFWKxqlcDqZoezvhWFax0DlsgGlSKdDsLAoUb9oKSlhXgW5dHcPktzdU", - "n2jayYpTqychRcUa0pyiIUqjEsPU8ppLKlbu+hG/8r0NkKff9bpWGtNNmnTkWAG52CStvm/evC7yYZRQ", - "IVaC6nDXBqJCz34gRjktkYp8sewmQYhHzfmSPZxH1eb9bhTiShixKAFbPKIWC27wumy87k0XtzyQdm2w", - "+eMJzde1LDQUdm0IsUaxRsVFIa+Jf1yAvQaQ7CG2e/QF+wQjP424gvsOi14Imj199AXG7dAfD1O3rK+j", - "vo9lF8izQ0x4mo4x9JXGcEzSj5oO8l5qgN9g/HbYc5qo65SzhC39hXL4LG245CtIPwPZHICJ+uJuYtRA", - "Dy+SnA5grFY7Jmx6frDc8aeRp+WO/REYLFebjbAbHx9o1MbRU1vFmSYNw2G9s1CWKsAVPmKYbZVQkz+C", - "GsM3I0/DMBj6e3QFx2idM045RkvRBsCHsqDsPKQwxjpdTXkuwo2byy0dZUmMh1+ySgtp0cxS22X2F6cW", - "a5479ncyBm62+PxJot5VtySMPA7wD453DQb0VRr1eoTsg8zi+7JPpJLZxnGU4n6byiE6laPxwOnIz7Hw", - "0/1DT5V83SjZKLnVHXLjEae+FeHJPQPekhSb9RxFj0ev7INTZq3T5MFrt0M//fjCSxkbpVN1Cdrj7iUO", - "DVYLuMKHeelNcmPeci90OWkXbgP9xw2zCiJnJJaFs5xUBCLH6b43+U6K//m7NsE6+m/pwWPPBqh0wtrp", - "7XYfOKjxOKtb301McWn4bQRzk9GGowyxMhLkT1H8TZ+PEZbUB4n2vGNwfPQr004HRzn+wQME+sGDuReD", - "f33c/Uzs/cGDdJ7jpMnN/dpi4TYaMfZN7eGXKmEAC8URm7gln4YhYYAcu6TcB8cEF36oOesWovvwUsTd", - "PCNLB7WmT8GbN6/xS8AD/tFHxEdmlriB7WOI8cPeLcSZJJmi+R6F03P2pdpOJZzeHRSI5w+AohGUTDTP", - "4UoGhUaTUQEHw1IiGnWjLqBUTsmMaw/F9vw/D57d4ud7sF2Lsvi59bL2LhLNZb5OBiMvXMdfSEbvXMHE", - "KpPlTNZcSiiTw5Fu+0vQgRNa+t/V1Hk2Qk5s2y90S8vtLa4FvAtmACpM6NArbOkmiLHazc7VZH8oV6pg", - "OE9bO6NljsOK0alKnYln1DjsprY+PBafnPu8RktRYrRn2m+MLTPN7UieLiyrHsoYuXGwyrkhMwONDppx", - "scGL2fBNVQKezCvQfIVdlYRed8zUhiNHhTGYqdwnbIl5MRSztZZMLZfRMkBaoaHczVnFjaFBHrplwRbn", - 
"nj199PBh0uyF2JmwUsJiWOYP7VIenWIT+uJrOVHFgaOAPQzr+5aijtnYIeH40pX/qMHYFE/FD/RAFr2k", - "7tamspVNidUT9g0mWHJE3Mmoj+bKkKu4m7ezrkrFiznmUH711dkLRrNSH6pUT2UzV2it65J/0r0yPY9p", - "SCA1kqBn+jj7M4a4VRubNVUuUykQXYu2DqfohfagHS/Gzgl7TibUJo6FJmGYiVtvoIiKapISj8Th/mMt", - "z9dom+xIQOO8cnq918DOWs9N9MixKbKEDNvB7Uu+UsXXOVN2DfpaGMCH/3AF3ayLTQpSbxsPWRi7y9O1", - "lEQpJ0cIo01JpWPRHoAjSTYEFSQh6yH+SMsUlX0+tvztBfZKP/no1dLtef1DDr+QyZt9550LOZdKihwr", - "LqQkacwQN81NOaE4Rdq/aGb+hCYOV7KCb/Pk2GNxtKZvYIQecUOXf/TVbSpRB/1pYesru63AGs/ZoJiH", - "gtreISakAV80yxFRzCeVTgQ1Jd9bNAEUR5IRJn8asXB+7b597+3fmHvjUki0dHm0ef2MXFalEeiZlkxY", - "tlJg/Hq6j4bMa9fnBJNBFrB9e/JCrUR+IVY4BoXRuWVTaOpwqLMQqOoDQ13bZ66tT9Hf/NwJB6NJz6rK", - "Tzpebj0pSNqtHEVwKm4pBJJEyG3Gj0fbQ257I8zxPnWEBlcYtQYV3sMDwmhKdndH+crplkRR2ILRw81k", - "nl4hE2C8EDK4UNMXRJ68EnBj8LyO9DO55pZ0h0k87RXwcuSdBT6EJh/8bYfqFyhwKME1hjnGt7GtNj7C", - "OJoGrcTP5Y6FQ+GoOxImnvGyidBO1A5HqcoLURTT2qsmnmIcjnFn4WVmB10HXwk23bHox7E30VgqxEVd", - "rMBmvChSGbS+xK8Mv4a3aLCFvG5qXTWPELup0IfU5ifKlTT1Zs9cocEtp4vK8yeoofkIRbPDmNBnscN/", - "U4WexnfGx2Yf/fg3BGIXx+X/Hz5mTkm9jqYzI1bZdEzgnXJ7dLRT34zQ2/53SunhVfAf4tFvj8vFe5Ti", - "b1+5iyPODzyIT6erpUnfi7HgCr+HvEpN4skuV8KrbFDODKMecPMSW9YDPjRMAn7Fy5EH97GvhO5X8h+M", - "PbvPR7NEcOuzgFnO9rKg0cxKFCvc874MXYhj8cEUHnx3Xgu/1r0IHffdfdvx1FGMWMssRj10N3OitRt8", - "rBfN1zUYmjR5Wap88qn3w5y5TuNZQ9Vm49NpJ2LYrjaqiOk8joYCSDMtCs9NhPyj7pn8hopR8ou+To/W", - "sVkcayolNPolzOklYAAvAENTxxNFJlKPWfa1KLEe0n9c/PD9bHwjox0YbqnP4ps0Ko9tTPNYqk8eK9XB", - "Rz1uO1GyTCkR85kZMXJjrp70afBFb5Mfviaj3RSQKKXNMa1fTB18QAArlUpSP0w5Mms3IqA9ooN2Y4mX", - "xHSRoodvr8ZSrIQ6P/g9rifkw/PmvowEXAlVh7DK8Lgh2HroV5/Cq1M3aISxJZ8MfWx35Kjz9JWvf03L", - "9Ma2b3+m8AoG0urdH8CVOtj0flGqhBpLdue2CWtKp04qpdoRd6fUwEqVW/JKXzCCk8zQoaVB+aoBWT2f", - "IucP8PF+PjsvjpKEUyW7ZjRK6j59IVZrixU//ga8AP3yQEWTtooJHrFKGdFWMC7dYD6F9BqHO5n6isgR", - "sIgrsgzHCtHlV5BbLFvdRs1qgGPqs7jJgjf3n5VNxu+C5rGVL2iyr4rJsFb1AeF9kHgtSh5IdX5Pptfs", - "OGveRtDTzmtu2nRPvZwLk19+L5eQY1b1vYnu/nMNMkqiNg8GV4RlGeW9E80DRawLcLw7oQVoXx66vfBE", - "9bluDc5YHoxL2N0zrEMNycLDzevcmyQeRwyQbzvkoB/zEPlwUGEaykAshFh/n8q9La4zmjM+Stt4w7kC", - "SbqLo03luGfKID/eYC7X9ai0sSjrj+XCG9ZcHzcsPMcS98ZHvvImcXlsfmPnw8Jb1z7xOaYlbJyiIQU6", - "mPBbyEFKs5Ti0tcfQayQC/qa6yK0uJOkcnQ3iTTQy2Zm0b7MGkYvJUq54CPHvFROjMjGXop2H0M1kcT3", - "DIV8twnAEK4laA1F4+sslYHMqvCSax8c+1BBce03QoIZLZ9GwI2mzv+xrQ2AZSQ5psrnPpw9XiDTsOEO", - "Oh1l8B+fcx+yn9H3kMQjlBE8aDpu6PVwPevwJk+YARJjql8yf1seTg5yEyuykBJ0FlzK/XT+spvREfP2", - "FnVOF3R8MBpL++TcW3tYSdIAmw9X2dMRouwXl7A7JSUoFAIPOxgDTZITgR4lLO5t8p3a1U0K7tWdgPdx", - "81BWSpXZiBfzfFiDoE/xlyK/BMwh2rxdcbLfve7ZcJOwT9B51oSpXK93Ied+VYGE4v4JY2eSXguGiJVu", - "edLe5PKe3Tf/FmctaioL4q3lJ29k+tkVFuzQt+RmYZj9PMyAY3W3nIoGOZDhfivHYumusbhHtwrwyVSt", - "fBhD0pNKIqIiKFIyyQW5op/hQU8ZjjC3SZSEByMUOPMubGZKlQrSv0n+FTdUGlPxZAiQBTklDUgDhR88", - "iQAfnncgpaj/HJJmqiXT0EaH3DR7qE/ISazZjGn0/ZmbWbr8bqk0xDNi9CllCm5etGEaXvzPQljN9e4m", - "OT67qEpZT0axfDDOsgmxbBfShlkOcViW6jpDZpU1dXJSqq1rZ7qXcSja2PZzp3oBUcAmN15Q27E1L1iu", - "tIY87pF+yE1QbZSGrFQYv5kKLVlaJ3dv8PWmZKVaMVXlqgCqN5WmoLG5aik5ik0QhcslUUC0g2kAqE9E", - "xxOndHcqOYgzFLUOlmcIm//K9aGUFG1WOFp0RkEKI08RwPgscB5D1HgILxIO5TPq2xLTvHkptkg3oFNH", - "fsmsrmHOfIt+jX1/8LkGthHGECgNLV2LssSMEGIbhVQ0EUlp1I6IvecYL30lMKiumx2EpOHK3XlNypSY", - "B1zE+cyYXWtVr9ZRgvoGzqDy6torxPEoP5ka4x7xaaib4gnbKGO9pkkjtUtuY0k/yZW0WpVl1yhFIvrK", - "W9q/49uzPLcvlLpc8PzyPuq1UtlmpcU8JE7oR/22M+leasLuBZwhDZjDqb6pHcbAeqKdzCB7LG5gFD9k", - "ZY7AfHuYgx62uZ8NF9ZfV5eZptWYM8m4VRuRp8/UnyuMdjT4NcWikskIqTYrpY/BZnjY48uqiZpCFjlE", - "M0ieLC55xjwj8NEjyG7cf1EC74/LluAZzchFOWQuXorK8lFZrwcAQko5DWytqaBrLIk1XEWtKAcKxr70", - 
"AZ14q2CI4e1gcyPcOVAWbgXUIKy5AfATMj7MKWkkhUgv1DZ8v99mlbwR8O/3U3mHeYzFbl60pKUpejNk", - "oBrhCOkU+XsDHV9hPovF1HDHpvj2xBs+AmA8ALIDw6QwyGPBWHJRQpGlareeNzaqeaRp+zeX3dr1eC8T", - "J895HUqnurFrDT4jEon4uuv/qrgjJdU0H1qSZQFboAdbv4FWVBN1HvlfoKSSqT1jgKqyEq6gExfq0zTV", - "KGqKKwh9TdOZFQAVeiP7NrJUwGN8l/cMJ37tWRQyNwW7SUsKIZZ2ih0wkySNOluZ0TExU4+Sg+hKFDXv", - "4M8cK3J0zYDuKCdQNdARsqBHTp3mJxrhxzDAWeifEmUCJt5O40NHs6A06vYxoIMB0LUZO/UyHf8c5yBr", - "HCw4W9E4YonEW75hKn4txw2SQ5Jv1a2J+ySUjBD71RZylGq8vgOF13hGnBQ+nRFSuwQoSCtwXRLW9jVI", - "JlVUovaam0ZVaZOjhh9oYmwkpNemb+BUbsOUb7+zDAdjppclcVSR0A2d3tw8/1FO4t6DODpeikYM+He9", - "e+xfgbq92oENVF0WTLr9dLI/Fnn1t5jn4nO2qMNAZamuqeZsrIc+h+AHJeoLLiAvlovmWg7h2HOft7dv", - "6hDRQ5QN3zGl8R+ndf6j5qVY7pDPEPihGzNr7kjIO14pIsCHd7uJ94tX8wBYsLaoMBWtW0wdMxpu50aJ", - "gHYXeSgOptiGX0K8DRjsQPwzt45xmnqBlgt3Zfe2c4gFv/iQe2nDi1jTxwywuw53CDnBXe//2T5yjacK", - "iRurkuehwrAvcdblM1hFPBCXXcNm/yvoIV8LJNBUJm+JVoe0GcUNTKZHsq7U06Kx8k0dsAcVmweVq261", - "jImW316Nnj3vxyct5a53YWrUzQDouM7rIfDjsrcfBv/J5Mxjy5gC/h8F7yOFrmN4qab1B8ByJ7VOAlay", - "Vi/UNtOwNIcCTMhc7dR53SblCSZWIXMN3FDEzfkPXvFscw8L6RRhigltfJrNKAUshWyZpZBVbRN6DKYg", - "lrsIYbHRH9E64kIbkxKcMHnFyx+uQGtRjG2cOx1UEjYuMRMcHb5vwoTR3KnDAYRpdTh8eN2a0eNm7gKn", - "InYUrmkslwXXRdxcSJaDdvc+u+Y7c3OPUuMcOORT4pE0000HEnmXkLQJkHLnncK39Pc0API7dPxMcNhg", - "XHDCWUOmHatG/DNDGP4UDpsN32alWuHz4JED4ZNOo4ePVEAl0QxO8tm0dYd5jPgN9k+D9TY8I7IKZ50y", - "xf5z/wNuJaqRP0lh9558slH232tT3C0dzIBUuWqD/4lYhucx9cTeZ1WKn9kHYTM8VQm0B9Emwoh/qGsX", - "H9lFDIPw+RliI/j0condSIvUQ36yDGRoMTB7wvvBtKHsPPfhWUNT2sDUQEiZ+zQIR1rayD4f7qUR8NAU", - "YvxZ707bhMy4cY6pMbk/8UFWqSrLp8R8UkmewrsJPKRdGEfoI3ICjKy7CY8xTZGqTkKzTrWqY8tsjlbL", - "OuTtqvJ9Sv+YmWiEo3ddEGqJvAyPMBnH8CVPY0yZ99+Ydc1gDZNgnGnIa41m4mu+O1y2cCQV/MXfzj57", - "9PiXx599zlwDVogVmLacQK/sXxsXKGTf7vNhIwEHy7PpTQhpRQhxwf8YHlU1m+LPGnFb0+YKHhQ9PMa+", - "nLgAUi99h3XgbrRXOE4b2v/H2q7UIu98x1Io+P33TKuyTJdzaeSqhAMltVuRC8VpIBVoI4x1jLDrARW2", - "jYg2azQPYlLvK0oTpWQOwX7sqUDYkZCr1ELGAmqRn2HSBu81YrCtSs+ryNOzb11eTyMLHQqNGBWzAFap", - "yov2YslSEOELIh29rPWGT7SIRzGyDbOlaNkUIfrI8zTpxQX393P7bjFom+b0bhMT4kU4lDcgzTH/xHhC", - "kptwkta0/4fhH4kMK3fGNZrl/h68Iqkf7HlzfDaIe2iyi0wCbZhtI0EeCMDIa9vOO8nooViUYVyTlwD9", - "CcGB3Bc/vmsdywefhSAkocMB8OLns2275iWDB+cjp+r+rkFKtJS3Y5TQWf6hF7mB9TYXSbRF3mhiLRhi", - "S2ooFkbPrc2z5hXziFYyeOyslbLMaaZlmXgkTXYcPFMx4TiVQF/x8sNzja+FNvYM8QHFj+NPo+KXsjGS", - "CZXmZgk4X/BJc0evYu9uavkSH2b/J7g9St5zfijvhB/cZmjc4SWFVy8bbzRIdo1jUpDVo8/ZwlfRqTTk", - "wvSd+9dBOGkehoIWSx/QClt74CXqoXX+rOwtyHgZInHY95F7q/HZewjbI/qRmcrIyU1SeYr6BmSRwF+K", - "R8XFvQ9cF7esuHKzfE5RZsYj8zkNy5ZPXR6lNnGXTm1guM7Jt3UHt4mLul3b1GRkkwu3vHnz2i6m5BBL", - "F1lx3TGJ2Z1UWzmq1srvkL6McOTH8POmKObnsYTWlLR5JOl+bz9qUR4MWOmUUHg/n60ogxEWCfjFF4X6", - "sHdpgGAkjZhf+m3SxRBiEmvtTB5NFWV8mlAXwXdLJLPHV415rYXdYUHwYEATvyTzMX3T5PbwuWEaX5q/", - "+6y6BBniPdpMILUJt+s3ipd4H5GLT7pbSJUn7CtK3e8Pyl/vLf4NPv3Lk+Lhp4/+bfGXh589zOHJZ188", - "fMi/eMIfffHpI3j8l8+ePIRHy8+/WDwuHj95vHjy+Mnnn32Rf/rk0eLJ51/82z3HhxzIBGio2fF09r+z", - "s3KlsrOX59krB2yLE16Jb8HtDerKS8xbhkjN8STChoty9jT89L/CCTvJ1aYdPvw684XXZmtrK/P09PT6", - "+vok7nK6wqf/mVV1vj4N82CKu4688vK8idGnOBzc0dZ6jJvaJP9y33786uIVO3t5ftISzOzp7OHJw5NH", - "vma95JWYPZ19ij/h6Vnjvp9i4txT42tinDZvtd7PB9+qiipmuE+rJjug+2sNvMQEO+6PDVgt8vBJAy92", - "/v/mmq9WoE/w9Qb9dPX4NEgjp+985oT3+76dxpEhp+86CSaKAz1D5MOhJqfvQk3s/QN26iH7mLOow0RA", - "9zU7XWAdrKlNIV7d+FJQjTGn71AQH/391FtT0h9RIaKTdhoStYy0pCf56Y8dFL6zW7eQ/cO5NtF4Obf5", - "uq5O3+F/8NBEK6LUvad2K0/RgXz6roMI/3mAiO7vbfe4BWalDMCp5ZIKie/7fPqO/o0mgm0FWjhpFLPq", - "+F8p+9kp1pPcDX/eSe/uLCGVs+YnaYC05VBKZCfz9ulbw0fOi9D4YifzIDaHmEjkDo8fPqTpn+B/Zr7e", - 
"Wi+zy6k/zzO6zw8abTrJcpH39ux1Dbz0wA/syQxhePThYDiXFAfpmDFdGu/ns88+JBbOpZNveMmwJU3/", - "6QfcBNBXIgf2CjaV0lyLcsd+kk0oZ1T9OkWBl1JdywC5kzjqzYbrHUryG3UFhvnC2hFxMg1OdqJwDwwB", - "aGkYrzzu+MjrWVUvSpHP5pQa+S1KazYluAQj0nCmYEBrB++eim8Ononpu9CVh/ekrJkE54FkBjR8Iqno", - "YH/D3vddsDTVvdQGzf7JCP7JCO6QEdhay9EjGt1fmHcNKv/ENef5Gvbxg+FtGV3ws0qlEktc7GEWvmzR", - "GK+46PKKNtRw9vT1tKqe3utBBu0CjDvMJ0GZcZJ6q2vohiOFM48+12iv/QJmT1PV0N7+Ie73Z1yG89zZ", - "cXJrcl0K0A0VcDmsJPVPLvD/DRegknic9nXOLJSlic++VXj2yQPk02lK8sxN5AOd7KetMN35+TTYLVI6", - "aLflu86fXb3KrGtbqOtoFrT4k7tqqGW4j7Xp/316zYXNlkr7pJt8aUEPO1vg5akvndX7ta1WMfiCJTii", - "H+PnpMlfT7lXN1LfkNeNdRzow6mvXuUbaRSioMPn1uoWW7GQzzb2q9dvHZczoK8CC26NMk9PT/FZzFoZ", - "ezp7P3/XM9jEH982hBWKAs8qLa6weMnb+WybKS1WQvIy81aNtv7f7PHJw9n7/xcAAP//vH26bRYNAQA=", + "H4sIAAAAAAAC/+x9a5MbN5LgX0FwN0KWjmRLsuwZ62Jir2350WvZUqjbnttV62ywKkliugjUACg2aV3/", + "9wskHoWqQpFFNiXbF/NJahYeiUQikcjn+1EmVqXgwLUaPX8/KqmkK9Ag8S+a5xIU/jcHlUlWaib46Pno", + "nBOaZaLimpTVrGAZuYHtdDQeMfO1pHo5Go84XcHoeRhkPJLwz4pJyEfPtaxgPFLZElbUTqs1SNP37fnk", + "vx9Pvnj3/rO/3o3GI70tzRhKS8YXo/FoM1mIiftxRhXL1PTcjX+37ysty4Jl1CxhwvL0ouomhOXANZsz", + "kH0La463a30rxtmqWo2ePw5LYlzDAmTPmsryguew6VtU9JkqBbp3PebjgJX4MU66BjPozlU0GmRUZ8tS", + "MK4TKyH4ldjPySVE3XctYi7kiup2+4j8kPaejJ88vvu3QIpPxp99miZGWiyEpDyfhHG/CuOSS9vu7oCG", + "/msbAV8JPmeLSoIit0vQS5BEL4FIUKXgCoiY/QMyTZgi/3n56kciJPkBlKILeE2zGwI8EznkU3IxJ1xo", + "UkqxZjnkY5LDnFaFVkQL7Bno458VyG2NXQdXjEnghhbejv6hBB+NRyu1KGl2M3rXRtPd3XhUsBVLrOoH", + "ujEURXi1moEkYm4W5MGRoCvJ+wCyI8bw7CTJinH9+bM2Hda/ruimC96VrHhGNeQRgFpSrmhmWiCUOVNl", + "QbeI2hXd/O3x2AGuCC0KUgLPGV8QveGqbylm7pMthMMmgeirJRDzhZR0ARGep+QnBUhJ+FWLG+CBOshs", + "i59KCWsmKhU69awDp04sJKIDKSqeYlQEPzg09/Ao2/eUDOoNjni3+5tiC/epDfUlW1xtSyBzVpj7kvyj", + "UjoQcKVw25dAVAmZ4b05McMY5Cu24FRXEp5f80fmLzIhl5rynMrc/LKyP/1QFZpdsoX5qbA/vRQLll2y", + "Rc8OBFhT51Rht5X9x4yXPqp6k7xLXgpxU5XxgrL4LBhauXjRRxl2zH7SSDPI8yA34P64sa42Fy/6WOru", + "HnoTNrIHyF7cldQ0vIGtBAMtzeb4z2aOpEXn8reRFS9Mb13OU6g15O/YNQpU51Z+Oq+FiDfus/maCa7B", + "XoWRmHGGzPb5+1hykqIEqZkdlJblpBAZLSZKU40j/buE+ej56N/OakHvzHZXZ9HkL02vS+xkLmMJhvFN", + "aFkeMMZrIzyiqNVz0A0fskd9LiS5XbJsSfSSKcK43USUuwynKWBNuZ6ODjrJdzF3eOuAqLfCXpJ2K1oM", + "qHcviG04A4W074TeB6ohKSLGCWKcUJ6TRSFm4YdPzsuyRi5+Py9Li6oxYXMCDO9z2DCl1UPEDK0PWTzP", + "xYsp+TYe+5YVBRG82JIZuHsHcjOm5duOjzsB3CAW11CP+EAR3Gkhp2bXPBqMXHYKYkSpcikKcwXuJSPT", + "+DvXNqZA8/ugzn966ovR3k93KNE7pCI12V/qhxv5pEVUXZrCHoaaztt9j6MoM8oOWlIXNYJPTVf4C9Ow", + "UnuJJIIoIjS3PVRKuvUS1AQloS4F/aTAEk9JF4wjtGMjkHOyojd2PwTi3RACqCBpWzKz4tUt08ta5Aqo", + "n3beF39uQk7tOTEbTpmRjUnBlDbCEG6mIksoUOCkQbEQU9FRRDOAFnYsIsB8K2lpydx9sXIc44SG95eF", + "9Z43+cBLNglzrLao8Y5QHc3M9zLcJCRW4dCE4ctCZDffUbU8weGf+bG6xwKnIUugOUiypGqZOFMt2q5H", + "G0LfpiHSLJlFU03DEl+KhTrBEgtxCFcry69oUZipu9ystVoceNBBLgpiGhNYMW0ewIzjCViwNXDLeqbk", + "a5otjTBBMloU41ovIcpJAWsoiJCEcQ5yTPSS6vrw48j+oYTnSIHhgxpItBqn05iSqyVImAuJD1UJZEXx", + "clqZ51FZNPsE5qroClqyE16WotIGxujlcvHCrw7WwJEnhaER/LBGfPDHg0/N3O4TzsyFXRyVgIoWxrOi", + "ymv8BX7RANq0rq9aXk8hZI6KHqrNb0ySTEg7hL383eTmP0Bl3dlS5yelhIkbQtI1SEULs7rWoh4G8j3V", + "6dxzMnOqaXQyHRWmX3SWc2A/FApBJrQbr/A/tCDmsxFwDCXV1MNQTkGZJuwH3tkGVXYm08DwLS3IyurN", + "SEmzm4Og/KqePM1mBp28r62qzm2hW0TYoasNy9WptgkH69ur5gmxOh/Pjjpiyk6mE801BAFXoiSWfbRA", + "sJwCR7MIEZuTX2tfik0Kpi/FpnOliQ2cZCfMOIOZ/Zdi88JBJuR+zOPYQ5BuFsjpChTebg0ziJmlVlWf", + "z4Q8TpromCZqBTyhZtRImBq3kIRNq3LizmZCPW4btAYiQb20WwhoD5/CWAMLl5p+ACwoM+opsNAc6NRY", + "EKuSFXAC0l8mhbgZVfDpU3L53flnT57+8vSzzw1JllIsJF2R2VaDIp84PR9RelvAw+TDCaWL9OifP/MG", + "kea4qXGUqGQGK1p2h7KGFvswts2IadfFWhPNuOoA4CCOCOZqs2gnb2y/u/HoBcyqxSVobR7Br6WYn5wb", + 
"dmZIQYeNXpfSCBaqaZRy0tJZbpqcwUZLelZiS+C5Nb2ZdTBl3oCr2UmIqm/j83qWnDiM5rD3UBy6TfU0", + "23ir5FZWp9B8gJRCJq/gUgotMlFMjJzHREJ38dq1IK6F366y/buFltxSRczcaACreN6jotAbPvz+skNf", + "bXiNm503mF1vYnVu3iH70kR+/QopQU70hhOkzobmZC7FilCSY0eUNb4FbeUvtoJLTVflq/n8NDpSgQMl", + "VDxsBcrMRGwLI/0oyATP1V5tjrcGtpDpphqCsza2vC1L90Pl0HS55RmqkU5xlvu1X87UR9SWZ5EqzMBY", + "QL5o0OoHVXn1YcpC8UAlIDWYeomf0SLwAgpNvxHyqhZ3v5WiKk/OzttzDl0OdYtxNofc9PUaZcYXBTQk", + "9YWBfZpa4++yoK+C0sGuAaFHYn3JFksdvS9fS/EB7tDkLClA8YNVLhWmT1fF9KPIDfPRlTqB6FkPVnNE", + "Q7cxH6QzUWlCCRc54OZXKi2U9njtmIOaVVIC17Gci/oMpsgMDHVltDKrrUqiRep+qTtOaGZP6ARRo3rc", + "HIKrhm1lp1vSNRBaSKD5lswAOBEzs+jaywEXSRUpjezsxDonEg/ltw1gSykyUAryidNn74XXt7P3j96B", + "PFwNriLMQpQgcyo/zApu1nuBv4HtZE2Lyojn3/+sHv5RFqGFpsWeLcA2qY1oq++6S7kHTLuIuA1RTMpW", + "W2hPghGxDdMpQEMfsu+Pvd7tb4PZIYIPhMA1SPSo+aBHy0/yAYgywP+BD9YHWUJVTowY2Kt+MJKr2W9O", + "ufCy4Z4ZwgQFVXqy70oxjRp6E7PUiIunbhEcuEeefEmVRjGQMJ6j/tZehTiPlS3NFKMDncpwyt7XmJn0", + "Z/8Q606bmeudq0qFV5mqylJIDXlqeWiz7p3rR9iEucQ8Gjs8/bQglYJ9I/chMBrf4dEpAvAPqoOF2tm8", + "u4tDrwMjvmwPxXIDvhpHu2C89K0ixMdOtT0wMlXvgSU3plr0NhOiAIoqU6VFWRoOpScVD/36MHhpW5/r", + "n+q2XZK0ZiArqeQCFJqYXHsH+a1FukJb15Iq4uDw/gmo8LIucl2YzbGeKMYzmOw6L/gINq3ig3PUca/K", + "haQ5THIo6DbhbWE/E/v5QMLwYyOB1PoDoWEyQ2timkbqM+H9TY+bVeBUKiV4E/xCMnPOzTOqJjXX+/hJ", + "c8BpU3zTEeuDMAuCkaQDPx4iy9JTYkS8+9dCG7JyRIercbfSPdfSg70w6wdBII47qRUB7dn/C5SbOwhg", + "J51/C6pv4fXUp1p2j/of7/bGhdm6ylq3TfKK6OXLexhjHw/qsUW8plKzjJX4XP0etid/vbcnSPpKkBw0", + "ZQXkJPpgX/Jl3J9YN+T2mMe95gepW7vgd/StieV4z6wm8DewRbXJaxvREGmrTqGOSIxqLlzKCQLqvebN", + "iyduAhua6WJrBFu9hC25BQlEVTPrtdI1oWlRTuIB0jFT/TM6g3zSHL7TQ+ASh4qWl/I8tK+t3fBdtZ5c", + "DXS4V1YpRJHQf7ZPfAcZSQgGuQuRUphdZ7QotkSHsBlPSQ0g3QWB3hhBnnmgGmjGFZD/EhXJKMcXbqUh", + "CGlCouSDwrKZwYibYU7nqlpjCApYgX3N45dHj9oLf/TI7TlTZA631uWGY8M2Oh49QlXca6F043CdQNtt", + "jttF4tJBW6W5ZN2rrc1T9ju5uZGH7OTr1uDBwGnOlFKOcM3y780AWidzM2TtMY0Mc/DDcQeZ75ouYZ11", + "475fslVVUH0KQyWsaTERa5CS5bCXk7uJmeBfr2nxKnS7G49gA5mh0QwmGUYJDhwLrkwfG1hoxmGcmQNs", + "A0eGAgQXttel7bTnpV37LbPVCnJGNRRbUkrIwEbJGSlVhaVOiQ2ZyJaUL/AFJEW1cK7Odhxk+JWymjBZ", + "8c4Qh4piesMnaMJQyTA1NFv6aEsjhAE1L9u2/cM+1m5pAMVeRoMu7Wh72vagpMl0POp9+Bt8r+uHv8Vb", + "M2T0WGNiQz6MkFZDM9B6hvg0slIXifE2msNniOHDWGnqoVNQdieOnMLrj31+4ZdVWRbbEwhJdiAioZSg", + "8EqL1YDKfhVz8gPLpDgvFiLceWqrNKy6xhvb9Zee4/rmmBew4AXjMFkJDokn/Sv8+gN+HKx2tNdwz4go", + "EB00YPvh00BCawHNyYeQ9H03CUmmffbblk71jZCnsrLbAQe/KQZYrve6dbgpj7Wv06JImKSt+qHDRdQ4", + "OIUzSahSImMoKF7kauy8z60V27q1t9D/OoRGneAAt8dt2V6jMCyryIeiJJRkBUM1v+BKyyrT15yipi9a", + "asJZ0CsH+tXCX/kmaT10Qk3shrrmFB1Fg/4v6Rg0h4Qe6hsArx1W1WIBSrceWHOAa+5aMU4qzjTOtTLH", + "ZWLPSwkSPfamtuWKbsnc0IQW5DeQgswq3XxyrCqlidKsKJwh2ExDxPyaU00KoEqTHxi/2uBw3o/EH1kO", + "+lbIm4CF6XDGtQAOiqlJ2tPxW/sVg0ocTpYuwARjLexn7/Fc54YYmbU3klb8n0/+4/nb88l/08lvjydf", + "/I+zd++f3T181Pnx6d3f/vZ/mz99eve3h//x76nt87CngsEd5Bcv3Bv94gU+xKI4kTbsfwSDzIrxSZIo", + "Y4eiFi2STzBfhiO4h029n17CNdcbbghvTQuWG150MvJpX1OdA22PWIvKGhvXUuN5BBz4HLoHqyIJTtXi", + "rx9EnmtPsNPhJt7yVoyB44zq5AC6gVNwtedMudU++PbrK3LmCEE9QGJxQ0epBRIvGBfB2PDyMbsUB3Zd", + "82v+Aub4HhT8+TXPqaZn9jSdVQrkl7SgPIPpQpDnPijyBdX0mneuod4EUlFQc5RBKsUp6Cq9luvrt7RY", + "iOvrdx0/hK5s5aaKuag7Z101mZ9yYuQGUemJS+IykXBLZcoW4lN8uGho7L0TDiuTiMoqsXySGDf+dCiU", + "ZanayR66KCrLwqAoIlXl8hWYbSVKixA4Zpi5i701NPCjcE4lkt76J2+lQJFfV7R8y7h+RybX1ePHn2II", + "Xp3i4FfHAw3dbksY/PDtTUbRfu/iwq1cjk7lk5IuUjaT6+u3GmiJFIICxwpfmkVBsFsjPNBHAuBQ9QJC", + "LPIBW2IhOziuF5d7aXv5tF7pReEn3NRm7PS9djCKij96A/dE1tNKLyeGIyRXpcwx8HvlEwzQhblyvAeB", + "Ygt8AKilqMySgWRLyG5cZitYlXo7bnT3ji7uLvYMhynUGbngwDkz+MsoNwNWZU6dIEP5tp3iRtlgCBz0", + 
"DdzA9krY7tOB2cGibHRRihXVd3SRdqO71pBvfJDdGO3Nd35XPkbUpSPBuEtPFs8DXfg+/UfbCgAnONYp", + "omjk+ehDBJUJRFji70HBEQs1492L9FPLYzwDrtkaJlCwBZsVCTb9965dw8NqqFJCBmzto3rDgIqwOTGv", + "o5m9jt2LSVK+AHOpm4tYKFqg0/40aehH6XAJVOoZUL1TX8vjNBMeOhTIbzFoGpUmY7ME2Jj9ZhqVIBxu", + "zQMP3962jXMknh7lTmXXBPmRoPrudZD09JhHhEN4Ip+dv+/DnoT3gvNPi6kTQbbfVwaHCyluzW4aAIVP", + "3YgJXqJ7qlJ0AUOvo4apaGBKjIYFCAfZJ/0k5R0xb4s1HRlj4CJs94nBS5I7gPli2AOaAVoujn5ua0J0", + "VoVXvNh6pM4KFKiDg6glHSobdja+OAzYNBsDyWth1QPWxFp89JdU+aOfjyOOfqS0+PukktmVP+8i8r6j", + "upsdz1/TbdY+tvqcGRDBTQ+fRc+nzvP58kbjg3LfjUcuxCG1d4KjFJ1DAQuLE9vY01mdn6neTQPHq/kc", + "md4k5cgXKSMjycTNAeYh9ogQqzEng0dInYIIbLSs48DkRxEfdr44BEju8ktRPzbeXdHfkA4WtN74RkoW", + "pbn1WY/VKvMsxaW3qEWeloszDkMYHxPDSde0MJzUBZ7Wg3RyteHbp5WZzfl2POx7Ew08aG6NKJ0ctEor", + "zxyzvljw9stIvwoOWsNMbCY2Mjr5tJptZuZMJOMVME47dXht5rwHiszEBn2K8IazDu4HQ9cPmQcscgPZ", + "MIVUjv36xEYL3mGA7BbkU9SskPScXi2QXZ8kexwwPeJ0H9l9EqXQOxFILQVmnQbcaXT26lma0lZXEqmv", + "23HIDhvC1FKspu9wJneyB6Nd5Wkz1913dbrD/uRo/qx+lCR/XaXcffIy2s6lzbV4SFrGNjk0gNiB1ddt", + "ITaJ1qbjUhOvEdZSLMkw+q6xq4s2BQWgJmDSkKsnNymz9PX1WwUoM1z6bpGeE3eP8u3DyBtOwoIpDbVx", + "wTu5fHzbD6oTzWNLzPtXp0s5N+t7I0QQNKw5Fjs2lvnRV4Cu63MmlZ6gZSa5BNPoG4WatG9M07Qg3PS3", + "Y8qaeg6WgxGiG9hOclZUaVJ2IH3/wkD0Y7i5VDXDi5Jx6200w1T4SQfdA2yTCI917N6JoJcWQS/px8DP", + "sINlmhqYpKG85vR/kiPW4oW7OEuCllPE1N3QXpTu4LVRLH2X0UZCdOR2Md1l8+mcy9yPvdcby0f09wkR", + "dqTkWqKMiOkAQrFYQO4zvbmgUJv1yuXTKwRf1LkEze870gdOic3ih0n4duTvc+7p0Oec3ignglUxktDH", + "jxmEvI6uw9yDOMkCuM3cMjq83kiRRFzsGI8tIs3ox+XtHbf5pOvwVctduPbptXsYNhu3pwCau2eVAr++", + "3Ye2u10OdeM+p+NGitjdBwwHRIpjWkUCTIdoejg3LUuWb1qGPzvq9AiSGCjudTPBt3CGbMkNtgc/Tcfi", + "PbV6HpjbEds7Y8cZPvPPzCPT+jM7j1xzNmjmsg3klURrUsNbuJtPPzw0B679+58vtZB0Ac4iOLEg3WsI", + "XM4haIhS0iuimXWQztl8DrElTB1jxWkA17F35AMIu4cEu+ay8LbcSZ9dIttDW/UK9iM0TU8JSunzubjq", + "2iP9wyPSrYXLJtq4I4yKyYQC38N28jMtKvMSYlLVvqnOQNi81g+gifXqe9jiyHtdPg1ge3YFVXFvACk0", + "ZV0Jn1SUJfyBalRfwDdwYwsP2Knz9C6daGtcKY3+o1HfUI16Es2lfLhjU7vIGEiH7NVl2uvEnC1obkub", + "0PdtEcv3yz7REySeiqH3xjGXXMi0sde7DGjhCR8XO7obj+7n75G6J92Ie3bidbiak7uA3pjW/t9w+jpw", + "Q2hZSrGmxcT5yfQJHVKsndCBzb1bzUd+X6VPxdXX5y9fO/DvxqOsAConQdXRuypsV/5pVmVLcOy+hmw6", + "dqfbtaqwaPNDyuzYk+YWU6+3tGmdWje131R0UJ1nzTztKb6XbzoXL7vEHa5eUAZPr9oibR29ms5ddE1Z", + "4Q2/HtqhWna73GHVlZJ8Ih7g3k5ikfffvcfqjRO4vn679pit7SnWUSqkxE/40qkjPZ07vCZ9Vmta38Mh", + "cZ2vMJNp+t3FXZ5TZIzO4YyeXA78RsjGReWiGpMOax9OQDSPCYvHtFH+ylnhO2LhlFgR8tfFr4Y3PHoU", + "H/xHj8bk18J9iADE32fud3xHPXqUNAwnVX2GZaEmj9MVPAxxEb0b8XHVEBxuh4kL5+tVkJFFPxkGCrWe", + "Zx7dtw57t5I5fObulxwKMD9Nh6gq4k236I6BGXKCLvuiEoPz88qW81RE8HYMPkbJGtLCq8dV8LB29u4R", + "4tUK7c4TVbAs7fTDZ8qwJG5dek1jgo0H25DNHBXr8SvnFYtGN83UUSbP1kKiWZMIV8lMwDV+Z8KxgIqz", + "f1YQlfXFm7h1OfunEI7aEbDT+kU3cLtq8OiYgr/3NxF6rdouhdFOk+uLYAb0iEjVmTow3iGescP8d8Qq", + "OIry1ycGti2d6/Beytr5zttdBNqZgT37dBbX/geSK4dpN/PFkJ1majKX4jdIyw5oJEyk7vDWbYYK+N+A", + "p3xU24wseA7UBavr2fcRyHDdQh+p3FuX4BcdquYdc4Wn+cRhG32g0iDa7361gUqnF3eb0PdQjR1PmoE0", + "PcwMD2zkFo61fLy7G+X2hNq8Fo3Is/Q5jwNFz+z49Tl3MHeCawt6O6OpQkfmvWhgira/4ZinBfGd/Qap", + "kJrBzk6iWIbQltlkfyXI2nrUTZV85NvPTjv41Vc/8pDi4ufd2PqqFEokhqn4LeXoR4j9LAd0vRVYPwzT", + "61ZITPCp0j6EOWRslVSGX1+/zbOu51fOFsyWFK8UEDrXLs+jG8gWlbdU5Kp5h1wkDjUXc/J4XJ9Zvxs5", + "WzPFZgVgiye2xYwqvKCDT0ToYpYHXC8VNn86oPmy4rmEXC+VRawSJLzPUfQMnrAz0LcAnDzGdk++IJ+g", + "w7Bia3iYvmCcsDZ6/uSL8a7K2YhxLBK/i8nnyOV9IEOastGr2o5h2KobNR2ZMJcAv0H/fbLjfNmuQ04X", + "tnRX0P7TtaKcGoSkYFrtgcn2xf1FV44WXri1zoDSUmwJ0+n5QVPDsXqiyQ1DtGCQTKxWTK+cp6gSK0Nh", + "dRlyO6kfDuvr+TJoHi7/EV2wy8Qb/3d4btFVT4QjetX/iPb2GK1jQm3G1oLV8Re+Qi258JmpsS5cKAdn", + 
"cWPmMktHeRXDMeaklIxr1BpVej75q3m+S5oZhjjtA3cy+/xZor5aswQRPwzwj453CQrkOo162UP2Xspx", + "fcknXPDJynCU/GGd0iE6lb2+4mn/3j63456h7y1dm3EnvQRYNQiQRtz8XqTIdwx4T+IM6zmIQg9e2Uen", + "1UqmCYZWZod+evPSSSIrIVOVLmoG4KQSCVoyWGN8aXqTzJj33AtZDNqF+0D/+3q3ebE0Et386U4+FiKr", + "cuKdFtIqGUn/5x/q/Pho3LZxuy3tpZAJPa3TOH5kt9TD9IVtG7p1B8RvPZgbjDYcpYuVnnAPG88R+vwe", + "/l5tkOyeN1SlT34l0rzjUdZ/9AiBfvRo7ETlX582P1v2/ujRcJfZtL7Q/JpAzXF3TTt7pemb2uovRUJ7", + "56t4Br8xl6okoWFN3mXmSp25McakWSrx48sdp4lXPNgNOX2APGrwcxs3vzN/xc2sI2D6+UOzemySfPLw", + "PYqhoORLsRlKRK1ry9PTHwBFPSgZqBXElXSq4yY9Jfa6+URka0adQSHMSzUugDXYa+VPtAsGNeMde1Gx", + "Iv+5tkK3biZJebZMOpXPTMdf7DMgahBpMLIl5RyKZG/7Wv7Fv6oT7/5/iJ5hV4ynP7ULMVvYW5DWYDWB", + "8FP68Q2umC7MBDGKmgm5QoqTYiFygvPUlUtq1titaJ6qJJuI8cdhV5V2XsmYPMEVFJmzAt1o0/ZwbDmR", + "VPdwVSz770tcmXGwCr+yagk7OkhC2QqvbUVXZQF4CNcg6QK7Cg6t7pixDUeOypIQVZpP2BKTvwiiK8mJ", + "mM+jZQDXTEKxHZOSKmUHeWyWBRuce/T8yePHj4cZGRFfA9Zu8eoX/qpe3JMzbGK/uMpftmDCQeAfA/1d", + "TXWHbH6XuFz51X9WoHSKxeIHG5CNFmJzr9vSq6FM8JR8i/nJDKE3SgSgUtRnWG7mBK3KQtB8jEmhr74+", + "f0nsrLaPBEQdln5doAaweUSSRp7hOVJ9/rWe3FXDx9mdOsesWulJKMqayqRoWtS1ZFnL+wl1gzF2puSF", + "VcsGxx47CcHU4nIFeVQD1qoBkDjMf7Sm2RL1ndPRTpVyTzWg4SWMPQeszUVR3GsomIUc3CzDVTG2RYzH", + "ROglyFumAPNOwBqaCRtDtlOnkPcJHJurlRXnlnCmB0ivoTzWobvggbOir/evSELW2od72/7qTB5Y5PzQ", + "Ys+X2Csdt9OqHN3ye7AlMza+6MaU/OCMHRnlgrMMi02kRHBMxTjMrDqgLkfa3qlG7iwnjmGyXnUIUHdY", + "7K1g7VmmQ1zXqSH6avbbEo79U8PGFQFcgFaOB0I+9uXjnYGOcQWuAJqhr5ijCplw/UqGxQQXkhO6pI9H", + "mE2tR9f6jfn2o9PNY86YG8ZR5+aQ6l6C1sBWKIZ2dk6YJgsByq22GRem3po+06sNRxDeTV+KBcsu2QLH", + "sK6IBinWC7g71Ln3CXY+uKbtV6atq10Qfm641NlJ/brfJVmICvufqrnei/6U75d3pImQG8aPR9tBjDtd", + "/fFeNmQIa/T8gxLv8w7ZhPL1zVG+Nk9WS2/YgtjI3WTaYMYTYLxk3Bt803mwsuRdghuDp7mnn8ok1fbR", + "MYjjXQEtesJhMKjeegzcd6h2JQaDElyjn6N/G+vK+z1sJTSoXxeUb4k/FIa6I6HkK1oEZ/hEHX2Uzpww", + "Zp2FW5X1U2zFsPWJD81toGtvIGjojtVQDr2n+rKNzqp8AXpC8zyVd+5L/Erwqw8ohA1kVSgCFuJMm+na", + "u9TmJsoEV9Vqx1y+wT2ny5miSsFqViRcb1+Ej5CHHcZEVLMt/puqgNW/M87p/eDob+/hnh9Wo6AbzZ6S", + "ng1NTxRbTIZjAu+U+6Ojnvo4Qq/7n5TSfeD3HyKuu8Xl4j1K8bevzcURp+nu+PjbqyVk0UZ/eoHffT6w", + "kMm1yZXwKuvUeUOPDNy8xJa1gPcNk4CvadGTcSG22tj71Voy+vIuZL1pRah22es0JTVPGKLC6M//ZT2w", + "W5ahrnmzz8faulh/SOOJw8dOpPdbGr9v2BWt11vNUHrticeZ/GoiONTm50oxdPWltChENpgzuGHOTaf+", + "VL1itXKZ7xNeeeuVyOOzEHtzAaQZm3VYToRW4MM2+Q2fVskv8jY9WkM/EohmaNYyRKNbwtgGZnrwPDB2", + "6niiSGXrMEu+YQUWh/rPy1c/jvo3MtqB7pa61NlJFXbfxoRItTZ5LEQDHzt4gOBFWv+telTqmBsqfRpc", + "deLkh2+sgnAISDZP0iGtXw4dvEMAC2GrQqXqZnSz04zq7fDIj6ih3l7LUWLqSFFFu9pS4u1jlZ51ExIK", + "kQ4qTNqQkYYUd0rVEXIvBa+BtReNy0dniyt16jJ1GOiLIcJhBx9349FFfpD4lKpFNbKjpBjsS7ZY6i8L", + "kd18BzQHaeuJpJ6TtprICswzVC1Zie+fUihW1wMuzGAukfcSh5sODc25WoLLCuOTBHTG8g7Ua8g01oeu", + "3UAlwHA/hzK9RAOBNyhik9/BFUQC5FDq5U5hyTp3l3pZlw0FF3nGFJmBM12sgY8Jm8K0HayW10mhSAF0", + "7pWwUgg9oK5uCFtCNMZAp+irU6N5txjYyfkWpTS0pXSnw4uwnIeYABtoeUtVnTmqlUZhcLj2fA4ZJrzf", + "mX7v70vgUT62sVfdISzzKBsfC+GCWLLhpBrtGtZdifB2ghrVpPqQkPYlxLiB7QNFGjSUrAgcImyPyQCP", + "yLF2XF9UoM+04RwjmQr0hAjyfvAuAX9dY+mYIgBRdsojwfA0bq6nOmPlcdB4ieYIMEzX6b2K9tfp8FAw", + "7cvu162u3v9SfoHF7JVzKqUh3XysTyIX3XLMty5dPSZaDNZCn7gelP/NJ2i1sxTsxlWoQYRZ2+wtlblv", + "cZI0efbeZGmg52FmVgdGdb18DvXLsRGKWSGMADTpCwxtRioFF94Hyvpa10nLEOo5SAl5sAkWQsFECx9m", + "dUDyTxc+uQN71sv8KLy1PPoPCBm2K+qtofCmLiSB5SAp1kygzvk8xgqRsKIGehkVd0irQfft0Ff2u88p", + "4sv77Vav9uE9nIv9FbJ96B1THczHp2tOnHBwMPdqJCI5QjPLOAc58UbcdmkH3kyTiXmV8yqzokp8NoP2", + "enDasR3cLKnUzLqrbD2hoqwcN7A9s2ofX3Xc73gMtJUhLehRQukWUZxUV61ScC9OAt7vm76zFKKY9FgG", + "L7r1KNqH4YZlN4CJWUNkipGCHzSPjZmEfIIGqeAzcrvc+moLZQkc8odTQs65jQ707iPNCqStyfkDvWv+", + 
"Dc6aV7bCjNNAT695OswKK73Ie3I/P8wOntfHmxQYfnnP+e0gR8yuN7zPR+4WS8I06wRPh6o3uv4dLREq", + "Ij8LRUqAurSG4K+QJSTeUQSzs0RphNA/gBJnQCaqECkv/GMyyJih0piKJ0OANPABz9UaCjd4EgHOyW5P", + "Vlb32ecdFXMiofbNODYBq8tpapm46lONtGcOszQ541xIiGdEP1ObqDlEtmGeY/zPjGlJ5faYNKlNVKXU", + "UL1Y3ustGRwl64XUzpJdHBaFuJ0gW5uE6kopdYBpp5rXtq9TWvczR30GkdslVU5E3JIlzUkmpIQs7pEO", + "8bZQrYSESSHQCzPl2DHX5pGwwrhOTgqxIKLMRA62EFqagvrmqjinKHtB5MqWRIGlHUwZYPtEdDxwSnP7", + "WvPsBOW1vYU2/OZfmT42fUWd/s4uemJdBHriC0C5dHcOQ7ZxF14kHJuRqa2UTYvIc7ZBugGZOvJzomUF", + "Y+JatKvwu4NPJZAVU8qCEmjplhUFZo9gm8ihIfgDpVHbIztfoB/0mqHDWzOTiBWpS3M7hvQrMQ+4jDOy", + "Eb2Uoloso/oAAU7/dJeVe9jHo/ykKvRJxBBRM8UzshJKu2exHalecu0C+kkmuJaiKJqKPCvnL5zR9we6", + "Oc8y/VKImxnNbh7iI5wLHVaaj31Khbbvbj2TbOVgHPZS0Bs+QfJQ+9Os23bo1eroeTDvbHG/juFhnyY/", + "AvPdfua6365x3l1Ye11NPpt+C51zQrVYsSx93P5c3q+9Pqsp7pXMtGirENssNNgM+UB8jwV3JuSeXTQD", + "p8kyqufE8Qjn1oGcyPwXxfj2uGQOjgf13KFdvuMErEnWKwa2AEBIbSIEXUlbujgW0gLDEQubOAWdUtqA", + "Drxw0PfvfrCZEU4OlIZ7AdXxRg4AfmI1GGObEdN6Ns/Exn9/WKfMPAr4u91U3mAefU6VlzVpSetW6RNZ", + "9XCEdAGCnR6IV5gEYzbUDzGUoh94+UcA9HsmNmAY5J94KBhzygrIJ6kqxRdBBzaOnusuxjIa3ddztJw8", + "o5WvBGzGriS4xEpW+pdNc2JJDSmJ0LyrEec5bMDGaP0GUtg6vuPInAWFLfPb0iiIclLAGhoOmy7bU4VS", + "KFuD76tCZ5IDlGjxbSvaUp6IcZXAlvbFrX0S+bINwW5SHWMRa3eK7NG1JDVDGz6xx0QNPUoGojXLK9rA", + "nzpU5GjqEs1RTqCq83yY+Cfm0Gl+siO88QOc+/4pUcZj4t0wPnQwC0qjbhcD2uuZXKm+U8/TjslxKrNg", + "KMLZ8mDXtiRe8w1V0lver9Xsknz9Ehu4T0zwCLFfbyBDqcY9hSB3j6Eey4nLgYTUzgFy+2AwXRLa/CVw", + "wkVU8/iWqvCKqbO6+h/sxNiIcffQPsJGX/sP339nCQ5GVCvZYrpEaSDr++n4f5eTuPMg9o6XohEFLpR3", + "h2rMU7d7dmADURU54WY/jeyPNYLdLea4+JjMKj9QUYhbW8Q4fqK+AG/PtdTnTUxOLGfhWvZ+0mOXcLit", + "BWFRhMiKbomQ+I95kP6zogWbb5HPWPB9N6KW1JCQMyBbLwrnd20m3i1ejT1gXhEj/FR23WzomNFwWzNK", + "BLS5yH3ZNkFW9AbibUAHEcs/M20Yp6pmqNQwV3ZrO7tYcIv36ZlWNI+VAJhodtvgDj7huen9P+uw1Xgq", + "n/+xLGjmS1a74nNNPoNV7T1x6SWsdoc5d/maJ4FQKb8mWunTZORHaFMPZF2pmJ++4lgNsDslwDt1we61", + "jIFK4VaNox0B4oOWcupdOE0MZ2dJcanffYuLKx9/nN1JZojuW8YQ8P9Au9Jwr+hEtqUrqMfrscXSP8Iu", + "NBLxJGC1avCZ2EwkzNU+RxqrB5+JTQ2wCrpbxjMJVFm/o4tX7tlaJ0Bm3DyjrdduMKuGUXKYM16zWsbL", + "SideQZgHmW8jhMXWBERrj22uT8YwouiaFq/WICXL+zbOnB5bGjgu0uMtKK5vQgESbuTuAEzVL0CMp671", + "83Ezc/3bAoPWd1ZpynMq87g54yQDaaQGcku36nhTVbA67DNW0UgWamYLicxWSNoWkGLrrM33NCQFAOkJ", + "LUoDLEHopJ2wAlnFkBY9hp8uDH8KS9CKbiaFWGDUb8+BcHmu0XRoH5CCoxLdSnfD1u3nUew32D0NliJx", + "jEgLnHXIFLvP/SvcSnyE/sSZ3nnyrYazHYZtPZ3twfRI5Ys6PMMSS/c8piLnXWKmOHrei6o+TYmnPYg2", + "MekS3dGq9+wi+le4tAuxCn14scqmC0cqPt/qFSaob1A7AjBA1XEFNHMeYl1FXEdRYZEydtkNDtTTWe2+", + "v5d6wENFinJnvTltcNAx4xxS4XN3PoNJKcpJNsS31VYryp2RwUHahLGHPiITQs+6g9+NCvW7GjnRGoW8", + "Di1y2ltIbJ+trMx2qQz6lEw9HL1pwBBz5GV4hK1qDWOtgipm7B/n3tjdVKIFJkEokZBVEpXMt3S7v/Bj", + "T/b5y+/OP3vy9Jenn31OTAOSswWouqZBq3Bi7ZrIeFtr9HGdETvL0+lN8NlCLOK89dKHvYVNcWfNcltV", + "JyPulI08RDuduABSwbndEnlH7RWOU4dF/LG2K7XIk+9YCgUffs+kKIp0TZkgVyXML6ndigww5gVSglRM", + "acMIm/ZTpmunbLVE5SJmDV/b3FCCZ+C1z44KmO7x5UotpM+nF/kZ5mJwNicCm7JwvMraiXaty73TrH4P", + "hUZ0t5kBKUXpRHs2JymIMGZLVhD06k5tivr0yE03MFvrsJsiROf8nia9c+5ewmJOdnP7Ziluneb0ZhMT", + "4oU/lEeQZp91oz/PyDGcpDYM/GH4RyJxysm4Rljuh+AVyffBjqjw847XREgaMgi0boKMBHkgAD3x0I2g", + "1SjILspNLq2NAa0R3vzcFj9+qM3SeyNTEBLfYQ94cSxz3S4EUzhwfufE3j8EpERLeddHCY3l7wuP9qw3", + "XCTRFjmlidagLFsSXbEwCohXX4U4855XSSccXQqhiXmZFkUijN3qcfBMxYRjngRyTYuPzzW+YVLpc8QH", + "5G/6A7fisOUYyRaV6uQJOV/SQWBFIcofBSr+GmPr/w5mZ5O3o5vFGf47dyCqhGhhvb3nwQIOnNzimNax", + "68nnZObK/ZQSMqbaDgW3XqQJ8bYg2dz518JGt2N/710m6Geh73Ec5t4fiPwYGdmC54CDuT7qvzNz6uEA", + "ydOSItUOoSTwl+J1cVH1PdfOPUvDHJfKKUrceGAqp265+KHLw3Xg5VUp6K5z8K3fwG3iwq/XNjRX2eAK", + 
"M9fXb/VsSEKxdDUY0x1znJ2kLMz9i8J8lARnFpVuDAdJkrBqkXtf9pqWv2SUp6G5i0bc7ykgv7ToN6Ph", + "o2BecTteKICKseKerYv5OHgxCG66PSfX/BFRS+rfFu7Pp599PhqPgFcrs/j6+2g8cl/fpV5q+SYZV1on", + "0un4iLpqAg8UKel2SDD73tQ5SfzWmYI+vkijNJul33TfmT3Dh6sLQLjgyOqRvdgb1OXP+VcCoJ3E0Dqs", + "4cRYkqzTA4Wt2Jcp6Oe+tPg29XtPtY8W961YsddJrlGI5W48WtgkZVid5BdXq+7jbruHoCdfoFv6fdKA", + "WcQk1tqYPJoqSuo2oCCL65aokIGR11klmd5eGvx7tTv75SaVDOrbkJ7J5fwKFngn+2pxA9z7mNXJnCrl", + "petvBS1Q+rSOAdzInKKYkq9thRB3Lf7twewv8Olfn+WPP33yl9lfH3/2OINnn33x+DH94hl98sWnT+Dp", + "Xz979hiezD//YvY0f/rs6ezZ02eff/ZF9umzJ7Nnn3/xlweG0g3IFlBf+ef56H9PzouFmJy/vphcGWBr", + "nNCSfQ9mb1DDNscEhYjUDK9YWFFWjJ77n/6XvyinmVjVw/tfR64e5Gipdamen53d3t5O4y5nC8yBMtGi", + "ypZnfh7MZdl4r7y+CHFB1vcPd7S2OeGmhvx+5tubry+vyPnri2lNMKPno8fTx9MnmE+xBE5LNno++hR/", + "wtOzxH0/wyzaZ8oV4zkLoaN34863srSlesynRUgDav5aAi2QRZo/VqAly/wnCTTfuv+rW7pYgJxixJj9", + "af30zL89zt67vDJ3u76dxd5oZ+8byXnyPT29P9W+Jmfvfbn/3QM2Srk7P9eow0BAdzU7m2HdvaFNIV5d", + "/1JQ2lBn7/GN3vv7mbuv0x9RjWJP2pkXQnpa2lwi6Y8NFL7XG7OQ3cOZNtF4GdXZsirP3uN/8NBEK7J5", + "vM/0hp+h28nZ+wYi3OcOIpq/193jFph+1gMn5nOF3jG7Pp+9t/9GE8GmBMnM2xNTnLlfbVbLMyxzu+3+", + "vOXOSaKAVCqwn7gCq2Pz9Ym2PKsjcQMfuch948stz/wj2fthI3d4+vixnf4Z/mfk6ju2smKdufM8svf5", + "XlVvI3M28t6Wlj/Aa+ONjUCMMDz5eDBccOt7bZixvTTuxqPPPiYWLriRb2hBsKWd/tOPuAkg1ywDcgWr", + "UkgqWbElP/HgPh6V6U9R4A0Xt9xDbiSOarWicotS80qsQRFXrikiTiLByE72rYLCcE3DeOVRw0fejspq", + "VrBsNLZ50t+htKZTgotXPXdn8mr3evDmqfh275kYvgtNeXhHGq5BcB6fus/OnEgp3Nl6TxZtnw4LxYPU", + "3o3+xSP+xSNOyCN0JXnv6Y2uNsx0CaWLuM9otoRdrKJ7kUZ3/6gUqRQ4lzv4iKuL1sdGLptspPZdHj1/", + "2w1Nd9SMWoGpf8sYQb1+asjAkPy5RkeNaD8HV8FrW1H6v737QwgFX1HuT3qDFqwHBZUFAxnog/JuEbt/", + "8Yf/b/iDLc5J7b6OiYaiUDFX0AK5glXAuVzJ3DoBDOQQjazXtQTe+PnMKztSD9dmy/eNP5uPMbWsdC5u", + "o1nQTGgt492niflYqfbfZ7eU6clcSJc2mc41yG5nDbQ4c6X5Wr/W9W46X7CIT/RjHPee/PWMujdK6hty", + "wb6OnUd06qt7J/Y08gEX/nOtqotVX8iBg9Lr7TvD5RTItWfOtSbn+dkZxu8thdJno7vx+5aWJ/74LhCW", + "r1Y+KiVbY/mjd4bHCskWjNNi4lQhdenR0dPp49Hd/wsAAP//auagyh4NAQA=", } // GetSwagger returns the content of the embedded swagger specification file @@ -345,16 +341,16 @@ var swaggerSpec = []string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -372,7 +368,7 @@ func decodeSpecCached() func() ([]byte, error) { // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -386,12 +382,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. 
func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/data/data_routes.yml b/daemon/algod/api/server/v2/generated/data/routes.yml similarity index 85% rename from daemon/algod/api/server/v2/generated/data/data_routes.yml rename to daemon/algod/api/server/v2/generated/data/routes.yml index 3055eda1e0..a6235909f4 100644 --- a/daemon/algod/api/server/v2/generated/data/data_routes.yml +++ b/daemon/algod/api/server/v2/generated/data/routes.yml @@ -14,9 +14,9 @@ output-options: - nonparticipating # do not exclude endpoints tagged data/experimental # - experimental - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index 715a4fcac4..f2e2c342f2 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -1,6 +1,6 @@ // Package experimental provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package experimental import ( @@ -14,16 +14,17 @@ import ( "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" + "github.com/algorand/go-algorand/data/basics" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. type ServerInterface interface { // Get a list of assets held by an account, inclusive of asset params. // (GET /v2/accounts/{address}/assets) - AccountAssetsInformation(ctx echo.Context, address string, params AccountAssetsInformationParams) error + AccountAssetsInformation(ctx echo.Context, address basics.Address, params AccountAssetsInformationParams) error // Returns OK if experimental API is enabled. 
// (GET /v2/experimental) ExperimentalCheck(ctx echo.Context) error @@ -41,14 +42,14 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) AccountAssetsInformation(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params AccountAssetsInformationParams @@ -66,7 +67,7 @@ func (w *ServerInterfaceWrapper) AccountAssetsInformation(ctx echo.Context) erro return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter next: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AccountAssetsInformation(ctx, address, params) return err } @@ -75,9 +76,9 @@ func (w *ServerInterfaceWrapper) AccountAssetsInformation(ctx echo.Context) erro func (w *ServerInterfaceWrapper) ExperimentalCheck(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.ExperimentalCheck(ctx) return err } @@ -86,9 +87,9 @@ func (w *ServerInterfaceWrapper) ExperimentalCheck(ctx echo.Context) error { func (w *ServerInterfaceWrapper) RawTransactionAsync(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.RawTransactionAsync(ctx) return err } @@ -130,235 +131,231 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuNk7g8TvZ2bd8LRLYkvKEAPgCckeLz", - "d79CN0CCJChRMxM7qbq/7BHxo9FoNPoXut/PcrWplARpzezp+1nFNd+ABY1/8TxXtbSZKNxfBZhci8oK", - "JWdPwzdmrBZyNZvPhPu14nY9m88k30DbxvWfzzT8sxYaitlTq2uYz0y+hg13A9td5Vo3I22zlcr8EGc0", - "xPnz2Yc9H3hRaDBmCOVPstwxIfOyLoBZzaXhuftk2LWwa2bXwjDfmQnJlASmlsyuO43ZUkBZmJOwyH/W", - "oHfRKv3k40v60IKYaVXCEM5narMQEgJU0ADVbAizihWwxEZrbpmbwcEaGlrFDHCdr9lS6QOgEhAxvCDr", - "zezpm5kBWYDG3cpBXOF/lxrgN8gs1yuws3fz1OKWFnRmxSaxtHOPfQ2mLq1h2BbXuBJXIJnrdcJ+qI1l", - "C2BcslffPmOff/75V24hG24tFJ7IRlfVzh6vibrPns4KbiF8HtIaL1dKc1lkTftX3z7D+S/8Aqe24sZA", - "+rCcuS/s/PnYAkLHBAkJaWGF+9ChftcjcSjanxewVBom7gk1vtNNief/pLuSc5uvKyWkTewLw6+MPid5", - "WNR9Hw9rAOi0rxymtBv0zcPsq3fvH80fPfzwL2/Osv/2f37x+YeJy3/WjHsAA8mGea01yHyXrTRwPC1r", - "Lof4eOXpwaxVXRZsza9w8/kGWb3vy1xfYp1XvKwdnYhcq7NypQzjnowKWPK6tCxMzGpZOjblRvPUzoRh", - "lVZXooBi7rjv9Vrka5ZzQ0NgO3YtytLRYG2gGKO19Or2HKYPMUocXDfCBy7oj4uMdl0HMAFb5AZZXioD", - "mVUHrqdw43BZsPhCae8qc9xlxV6vgeHk7gNdtog76Wi6LHfM4r4WjBvGWbia5kws2U7V7Bo3pxSX2N+v", - 
"xmFtwxzScHM696g7vGPoGyAjgbyFUiVwicgL526IMrkUq1qDYddrsGt/52kwlZIGmFr8A3Lrtv0/Ln76", - "kSnNfgBj+Ape8vySgcxVAcUJO18yqWxEGp6WEIeu59g6PFypS/4fRjma2JhVxfPL9I1eio1IrOoHvhWb", - "esNkvVmAdlsarhCrmAZbazkGEI14gBQ3fDuc9LWuZY77307bkeUctQlTlXyHCNvw7V8fzj04hvGyZBXI", - "QsgVs1s5Kse5uQ+Dl2lVy2KCmGPdnkYXq6kgF0sBBWtG2QOJn+YQPEIeB08rfEXghEFGwWlmOQCOhG2C", - "Ztzpdl9YxVcQkcwJ+9kzN/xq1SXIhtDZYoefKg1XQtWm6TQCI069XwKXykJWaViKBI1deHQ4BkNtPAfe", - "eBkoV9JyIaFwzBmBVhaIWY3CFE24X98Z3uILbuDLJ2N3fPt14u4vVX/X9+74pN3GRhkdycTV6b76A5uW", - "rDr9J+iH8dxGrDL6ebCRYvXa3TZLUeJN9A+3fwENtUEm0EFEuJuMWEluaw1P38oH7i+WsQvLZcF14X7Z", - "0E8/1KUVF2LlfirppxdqJfILsRpBZgNrUuHCbhv6x42XZsd2m9QrXih1WVfxgvKO4rrYsfPnY5tMYx5L", - "mGeNthsrHq+3QRk5tofdNhs5AuQo7iruGl7CToODludL/Ge7RHriS/2b+6eqStfbVssUah0d+ysZzQfe", - "rHBWVaXIuUPiK//ZfXVMAEiR4G2LU7xQn76PQKy0qkBbQYPyqspKlfMyM5ZbHOlfNSxnT2f/ctraX06p", - "uzmNJn/hel1gJyeykhiU8ao6YoyXTvQxe5iFY9D4CdkEsT0UmoSkTXSkJBwLLuGKS3vSqiwdftAc4Dd+", - "phbfJO0Qvnsq2CjCGTVcgCEJmBreMyxCPUO0MkQrCqSrUi2aHz47q6oWg/j9rKoIHyg9gkDBDLbCWHMf", - "l8/bkxTPc/78hH0Xj42iuJLlzl0OJGq4u2Hpby1/izW2Jb+GdsR7huF2Kn3itiagwYn5d0FxqFasVemk", - "noO04hr/zbeNycz9Pqnzn4PEYtyOExcqWh5zpOPgL5Fy81mPcoaE4809J+ys3/dmZONG2UMw5rzF4l0T", - "D/4iLGzMQUqIIIqoyW8P15rvZl5IzFDYG5LJzwaIQiq+EhKhnTv1SbINv6T9UIh3RwhgGr2IaIkkyMaE", - "6mVOj/qTgZ3lT0CtqY0NkqiTVEthLOrV2JitoUTBmctA0DGp3IgyJmz4nkU0MF9rXhEt+y8kdgmJ+jw1", - "IlhvefFOvBOTMEfsPtpohOrGbPkg60xCglyjB8PXpcov/8bN+g5O+CKMNaR9nIatgReg2ZqbdeLg9Gi7", - "HW0KfbuGSLNsEU110izxhVqZO1hiqY5hXVX1jJelm3rIsnqrxYEnHeSyZK4xg41Ag7lXHMnCTvoX+4bn", - "aycWsJyX5bw1FakqK+EKSqe0CylBz5ldc9sefhw56DV4jgw4ZmeBRavxZiY0senGFqGBbTjeQBunzVRl", - "t0/DQQ3fQE8KwhtR1WhFiBSN8+dhdXAFEnlSMzSC36wRrTXx4Cdubv8JZ5aKFkcWQBvcdw3+Gn7RAdq1", - "bu9T2U6hdEE2a+t+E5rlStMQdMP7yd1/gOu2M1HnZ5WGzA+h+RVow0u3ut6i7jfke1en88DJLLjl0cn0", - "VJhWwIhzYD8U70AnrDQ/4X94ydxnJ8U4SmqpR6AwoiJ3akEXs0MVzeQaoL1VsQ2ZMlnF88ujoHzWTp5m", - "M5NO3jdkPfVb6BfR7NDrrSjMXW0TDja2V90TQrarwI4GsshephPNNQUBr1XFiH30QCBOgaMRQtT2zq+1", - "r9U2BdPXaju40tQW7mQn3DiTmf3XavvcQ6b0Yczj2FOQ7hYo+QYM3m4yZpxultYvd7ZQ+mbSRO+Ckaz1", - "NjLuRo2EqXkPSdi0rjJ/NhMeC2rQG6gN8NgvBPSHT2Gsg4ULy38HLBg36l1goTvQXWNBbSpRwh2Q/jop", - "xC24gc8fs4u/nX3x6PHfH3/xpSPJSquV5hu22Fkw7DNvlmPG7kq4n9SOULpIj/7lk+Cj6o6bGseoWuew", - "4dVwKPJ9kfZLzZhrN8RaF8246gbASRwR3NVGaGfk1nWgPYdFvboAa52m+1Kr5Z1zw8EMKeiw0ctKO8HC", - "dP2EXlo6LVyTU9hazU8rbAmyoDgDtw5hnA64WdwJUY1tfNHOUjCP0QIOHopjt6mdZhdvld7p+i7MG6C1", - "0skruNLKqlyVmZPzhEoYKF76Fsy3CNtV9X8naNk1N8zNjd7LWhYjdgi7ldPvLxr69Va2uNl7g9F6E6vz", - "807Zly7yWy2kAp3ZrWRInR3zyFKrDeOswI4oa3wHluQvsYELyzfVT8vl3Vg7FQ6UsOOIDRg3E6MWTvox", - "kCtJwXwHTDZ+1Cno6SMmeJnsOAAeIxc7maOr7C6O7bg1ayMk+u3NTuaRacvBWEKx6pDl7U1YY+igqe6Z", - "BDgOHS/wM9rqn0Np+bdKv27F1++0qqs7Z8/9Oacuh/vFeG9A4foGM7CQq7IbQLpysJ+k1vhJFvSsMSLQ", - "GhB6pMgXYrW2kb74Uqvf4U5MzpICFD+Qsah0fYYmox9V4ZiJrc0diJLtYC2Hc3Qb8zW+ULVlnElVAG5+", - "bdJC5kjIIcY6YYiWjeVWtE8IwxbgqCvntVttXTEMQBrcF23HjOd0QjNEjRkJv2jiZqgVTUfhbKUGXuzY", - "AkAytfAxDj76AhfJMXrKBjHNi7gJftGBq9IqB2OgyLwp+iBooR1dHXYPnhBwBLiZhRnFllzfGtjLq4Nw", - "XsIuw1g/wz77/hdz/xPAa5Xl5QHEYpsUevv2tCHU06bfR3D9yWOyI0sdUa0Tbx2DKMHCGAqPwsno/vUh", - "Guzi7dFyBRpDSn5Xig+T3I6AGlB/Z3q/LbR1NRLB7tV0J+G5DZNcqiBYpQYrubHZIbbsGnVsCW4FESdM", - "cWIceETwesGNpTAoIQu0adJ1gvOQEOamGAd4VA1xI/8SNJDh2Lm7B6WpTaOOmLqqlLZQpNaAHtnRuX6E", - "bTOXWkZjNzqPVaw2cGjkMSxF43tkeQ0Y/+C28b96j+5wcehTd/f8LonKDhAtIvYBchFaRdiNo3hHABGm", - "RTQRjjA9ymlCh+czY1VVOW5hs1o2/cbQdEGtz+zPbdshcZGTg+7tQoFBB4pv7yG/JsxS/PaaG+bhCC52", - "NOdQvNYQZncYMyNkDtk+ykcVz7WKj8DBQ1pXK80LyAoo+S4RHECfGX3eNwDueKvuKgsZBeKmN72l5BD3", - 
"uGdoheOZlPDI8AvL3RF0qkBLIL73gZELwLFTzMnT0b1mKJwruUVhPFw2bXViRLwNr5R1O+7pAUH2HH0K", - "wCN4aIa+OSqwc9bqnv0p/guMn6CRI46fZAdmbAnt+EctYMQW7N84Reelx957HDjJNkfZ2AE+MnZkRwzT", - "L7m2IhcV6jrfw+7OVb/+BEnHOSvAclFCwaIPpAZWcX9GIaT9MW+mCk6yvQ3BHxjfEssJYTpd4C9hhzr3", - "S3qbEJk67kKXTYzq7icuGQIaIp6dCB43gS3Pbblzgppdw45dgwZm6gWFMAz9KVZVWTxA0j+zZ0bvnU36", - "Rve6iy9wqGh5qVgz0gn2w/e6pxh00OF1gUqpcoKFbICMJASTYkdYpdyuC//8KTyACZTUAdIzbXTNN9f/", - "PdNBM66A/ZeqWc4lqly1hUamURoFBRQg3QxOBGvm9MGJLYaghA2QJolfHjzoL/zBA7/nwrAlXIc3g65h", - "Hx0PHqAd56UytnO47sAe6o7beeL6QMeVu/i8FtLnKYcjnvzIU3byZW/wxtvlzpQxnnDd8m/NAHoncztl", - "7TGNTIv2wnEn+XK68UGDdeO+X4hNXXJ7F14ruOJlpq5Aa1HAQU7uJxZKfnPFy5+abvgeEnJHozlkOb7i", - "mzgWvHZ96OGfG0dI4Q4wBf1PBQjOqdcFdTqgYraRqmKzgUJwC+WOVRpyoPduTnI0zVJPGEXC52suV6gw", - "aFWvfHArjYMMvzZkmtG1HAyRFKrsVmZo5E5dAD5MLTx5dOIUcKfS9S3kpMBc82Y+/8p1ys0c7UHfY5B0", - "ks1noxqvQ+pVq/EScrrvNidcBh15L8JPO/FEVwqizsk+Q3zF2+IOk9vc38dk3w6dgnI4cRTx234cC/p1", - "6na5uwOhhwZiGioNBq+o2Exl6Ktaxm+0Q6jgzljYDC351PXvI8fv1ai+qGQpJGQbJWGXTEsiJPyAH5PH", - "Ca/Jkc4osIz17esgHfh7YHXnmUKNt8Uv7nb/hPY9VuZbpe/KJUoDThbvJ3ggD7rb/ZQ39ZPysky4Fv0L", - "zj4DMPMmWFdoxo1RuUCZ7bwwcx8VTN5I/9yzi/6XzbuUOzh7/XF7PrQ4OQDaiKGsGGd5KdCCrKSxus7t", - "W8nRRhUtNRHEFZTxcavls9AkbSZNWDH9UG8lxwC+xnKVDNhYQsJM8y1AMF6aerUCY3u6zhLgrfSthGS1", - "FBbn2rjjktF5qUBjJNUJtdzwHVs6mrCK/QZasUVtu9I/PlA2VpSld+i5aZhavpXcshK4sewHIV9vcbjg", - "9A9HVoK9VvqywUL6dl+BBCNMlg42+46+Yly/X/7ax/hjuDt9DkGnbcaEmVtmJ0nK//ns35++Ocv+m2e/", - "Pcy++h+n794/+XD/weDHxx/++tf/2/3p8w9/vf/v/5raqQB76vmsh/z8udeMz5+j+hOF6vdh/2j2/42Q", - "WZLI4miOHm2xzzBVhCeg+13jmF3DW2m30hHSFS9F4XjLTcihf8MMziKdjh7VdDaiZwwLaz1SqbgFl2EJ", - "JtNjjTeWoobxmemH6uiU9G/P8bwsa0lbGaRveocZ4svUct4kI6A8ZU8ZvlRf8xDk6f98/MWXs3n7wrz5", - "PpvP/Nd3CUoWxTaVR6CAbUpXjB9J3DOs4jsDNs09EPZkKB3FdsTDbmCzAG3Wovr4nMJYsUhzuPBkyduc", - "tvJcUoC/Oz/o4tx5z4lafny4rQYooLLrVP6ijqCGrdrdBOiFnVRaXYGcM3ECJ32bT+H0RR/UVwJfhsBU", - "rdQUbag5B0RogSoirMcLmWRYSdFP73mDv/zNnatDfuAUXP05UxG997775jU79QzT3KOUFjR0lIQgoUr7", - "x5OdgCTHzeI3ZW/lW/kclmh9UPLpW1lwy08X3IjcnNYG9Ne85DKHk5ViT8N7zOfc8rdyIGmNJlaMHk2z", - "ql6UImeXsULSkiclyxqO8PbtG16u1Nu37waxGUP1wU+V5C80QeYEYVXbzKf6yTRcc53yfZkm1QuOTLm8", - "9s1KQraqyUAaUgn58dM8j1eV6ad8GC6/qkq3/IgMjU9o4LaMGaua92hOQPFPet3+/qj8xaD5dbCr1AYM", - "+3XDqzdC2ncse1s/fPg5vuxrcyD86q98R5O7CiZbV0ZTUvSNKrhwUisxVj2r+CrlYnv79o0FXuHuo7y8", - "QRtHWTLs1nl1GB4Y4FDtAponzqMbQHAc/TgYF3dBvUJax/QS8BNuYfcB9q32K3o/f+PtOvAGn9d2nbmz", - "nVyVcSQedqbJ9rZyQlaIxjBihdqqT4y3AJavIb/0GctgU9ndvNM9BPx4QTOwDmEolx29MMRsSuigWACr", - "q4J7UZzLXT+tjaEXFTjoK7iE3WvVJmM6Jo9NN62KGTuoSKmRdOmINT62foz+5vuosvDQ1GcnwcebgSye", - "NnQR+owfZBJ57+AQp4iik/ZjDBFcJxBBxD+Cghss1I13K9JPLU/IHKQVV5BBKVZikUrD+59Df1iA1VGl", - "zzzoo5CbAQ0TS+ZU+QVdrF6911yuwF3P7kpVhpeUVTUZtIH60Bq4tgvgdq+dX8YJKQJ0qFJe48trtPDN", - "3RJg6/ZbWLTYSbh2WgUaiqiNj14+GY8/I8ChuCE8oXurKZyM6roedYmMg+FWbrDbqLU+NC+mM4SLvm8A", - "U5aqa7cvDgrls21SUpfofqkNX8GI7hJ77ybmw+h4/HCQQxJJUgZRy76oMZAEkiBT48ytOXmGwX1xhxjV", - "zF5AZpiJHMTeZ4RJtD3CFiUKsE3kKu091x0vKmUFHgMtzVpAy1YUDGB0MRIfxzU34ThivtTAZSdJZ79j", - "2pd9qenOo1jCKClqk3gu3IZ9DjrQ+32CupCVLqSii5X+CWnlnO6FzxdS26EkiqYFlLCihVPjQChtwqR2", - "gxwcPy2XyFuyVFhiZKCOBAA/BzjN5QFj5Bthk0dIkXEENgY+4MDsRxWfTbk6BkjpEz7xMDZeEdHfkH7Y", - "R4H6ThhVlbtcxYi/MQ8cwKeiaCWLXkQ1DsOEnDPH5q546dic18XbQQYZ0lCh6OVD86E398cUjT2uKbry", - "j1oTCQk3WU0szQag06L2HogXapvRC+WkLrLYLhy9J98u4Hvp1MGkXHT3DFuoLYZz4dVCsfIHYBmHI4AR", - "2V62wiC9Yr8xOYuA2Tftfjk3RYUGScYbWhtyGRP0pkw9IluOkctnUXq5GwHQM0O1tRq8WeKg+aArngwv", - "8/ZWm7dpU8OzsNTxHztCyV0awd/QPtZNCPe3NvHfeHKxcKI+Sia8oWXpNhkKqXNFWQePSVDYJ4cOEHuw", - 
"+rIvBybR2o316uI1wlqKlTjmO3RKDtFmoARUgrOOaJpdpiIFnC4PeI9fhG6RsQ53j8vd/SiAUMNKGAut", - "0yjEBX0KczzH9MlKLcdXZyu9dOt7pVRz+ZPbHDt2lvnRV4AR+Euhjc3Q45Zcgmv0rUEj0reuaVoC7YYo", - "UrEBUaQ5Lk57CbusEGWdplc/7/fP3bQ/NheNqRd4iwlJAVoLLI6RDFzeMzXFtu9d8Ata8At+Z+uddhpc", - "UzexduTSneNPci56DGwfO0gQYIo4hrs2itI9DDJ6cD7kjpE0GsW0nOzzNgwOUxHGPhilFp69j938NFJy", - "LVEawPQLQbVaQRHSmwV/mIySyJVKrqIqTlW1L2feCaPUdZh5bk/SOh+GD2NB+JG4nwlZwDYNfawVIOTt", - "yzpMuIeTrEBSupK0WSiJmjjEH1tEtrqP7AvtPwBIBkG/7jmz2+hk2qVmO3EDSuCF10kMhPXtP5bDDfGo", - "m4+FT3cyn+4/Qjgg0pSwUWGTYRqCEQbMq0oU257jiUYdNYLxo6zLI9IWshY/2AEMdIOgkwTXSaXtQ629", - "gf0Udd5Tp5VR7LUPLHb0zXP/AL+oNXowOpHNw7ztja42ce3f/3JhleYr8F6ojEC61RC4nGPQEGVFN8wK", - "CicpxHIJsffF3MRz0AFuYGMvJpBugsjSLppaSPvlkxQZHaCeFsbDKEtTTIIWxnzyr4deriDTR6ak5kqI", - "tuYGrqrkc/3vYZf9wsvaKRlCmzY817udupfvEbt+tfkedjjywahXB9iBXUHL0ytAGkxZ+ptPJkpgfc90", - "UvyjetnZwiN26iy9S3e0Nb4owzjxt7dMp2hBdym3ORhtkISDZcpuXKRjE9zpgS7i+6R8aBNEcVgGieT9", - "eCphQgnL4VXU5KI4RLuvgZeBeHE5sw/z2e0iAVK3mR/xAK5fNhdoEs8YaUqe4U5gz5Eo51Wl1RUvMx8v", - "MXb5a3XlL39sHsIrPrImk6bs19+cvXjpwf8wn+UlcJ01loDRVWG76k+zKirjsP8qoWzf3tBJlqJo85uM", - "zHGMxTVm9u4ZmwZFUdr4mego+piLZTrg/SDv86E+tMQ9IT9QNRE/rc+TAn66QT78iosyOBsDtCPB6bi4", - "aZV1klwhHuDWwUJRzNetxxp93PD27ZurgMfWTUABM01+9UQElZlgIO8zkfQhbIn4AOvDJf2EGTDTio30", - "+TGR4/kYI37nQtq3SnfuGP8AMhmj9PtJb06WJzyOhISHMpl9me2EkXz36+pXd+gfPIhP9IMHc/Zr6T9E", - "AOLvC/87qjEPHiSdlElrmeNFaAyTfAP3m8ccoxvxcfV8CdfT5ICzq00jwKpxMmwolIKNArqvPfautfD4", - "LPwvBZTgfjqZYguIN53QHQMz5QRdjD14bGJZN1SZ0zAl+6Hb+NbWkRbeKb7yA/l8h0dI1hv0k2amFHk6", - "gkQujOM+kmI2XWOGjUeMwm7EWoyEAMtaRGO5ZlNSs/aAjOZIItMks8O2uFsof7xrKf5ZAxOFU56WAjRe", - "n70bNeggOOpA7k2b3/zA5A5rh7+NuWWPWyuYnPbZWva6CZ83rquw0FRtoSMDzeMZB4x7T5C4p49wy+Gj", - "uXU30nOaujSlQntgdN4nODJHsuK6MNlSq98gfWGjmyqRbyP4VwVak38DmQoQ7LOUxnfdFo5vZz+03dNV", - "8LGNv7XKHRbdFDe7yWWaPtXHbeRNdGuTzgrtkTym68WBDN0XCCOsBY9XFHOL1VZCkBOXdJ4o2UTnIVv6", - "VMZPRk9p/PZUepgHz2xLfr3gqVI0TuVyMEXb2wnHsoqFzmEDTJNKgWZnUaB401ZQwroKdOvqGCa/vaH6", - "RNNOVpxaPQkpKtaQ5hQNURqVGKaW11xSsXLXj/iV722APP2u17XSmG7SpCPHCsjFJmn1ffv2TZEPo4QK", - "sRJUh7s2EBV69gMxymmJVOSLZTcJQjxqzpfs4TyqNu93oxBXwohFCdjiEbVYcIPXZeN1b7q45YG0a4PN", - "H09ovq5loaGwa0OINYo1Ki4KeU384wLsNYBkD7Hdo6/YZxj5acQV3HdY9ELQ7OmjrzBuh/54mLplfR31", - "fSy7QJ4dYsLTdIyhrzSGY5J+1HSQ91ID/Abjt8Oe00Rdp5wlbOkvlMNnacMlX0H6GcjmAEzUF3cTowZ6", - "eJHkdABjtdoxYdPzg+WOP408LXfsj8BgudpshN34+ECjNo6e2irONGkYDuudhbJUAa7wEcNsq4Sa/AnU", - "GL4ZeRqGwdA/ois4RuucccoxWoo2AD6UBWXnIYUx1ulqynMRbtxcbukoS2I8/JJVWkiLZpbaLrO/OLVY", - "89yxv5MxcLPFl08S9a66JWHkcYB/dLxrMKCv0qjXI2QfZBbfl30mlcw2jqMU99tUDtGpHI0HTkd+joWf", - "7h96quTrRslGya3ukBuPOPWtCE/uGfCWpNis5yh6PHplH50ya50mD167Hfr51QsvZWyUTtUlaI+7lzg0", - "WC3gCh/mpTfJjXnLvdDlpF24DfSfNswqiJyRWBbOclIRiByn+97kOyn+lx/aBOvov6UHjz0boNIJa6e3", - "233koMbjrG59NzHFpeG3EcxNRhuOMsTKSJA/RfE3fT5FWFIfJNrzjsHx0a9MOx0c5fgHDxDoBw/mXgz+", - "9XH3M7H3Bw/SeY6TJjf3a4uF22jE2De1h1+rhAEsFEds4pZ8GoaEAXLsknIfHBNc+KHmrFuI7uNLEXfz", - "jCwd1Jo+BW/fvsEvAQ/4Rx8Rn5hZ4ga2jyHGD3u3EGeSZIrmexROz9nXajuVcHp3UCCePwCKRlAy0TyH", - "KxkUGk1GBRwMS4lo1I26gFI5JTOuPRTb8/88eHaLn+/Bdi3K4pfWy9q7SDSX+ToZjLxwHf9OMnrnCiZW", - "mSxnsuZSQpkcjnTbvwcdOKGl/0NNnWcj5MS2/UK3tNze4lrAu2AGoMKEDr3Clm6CGKvd7FxN9odypQqG", - "87S1M1rmOKwYnarUmXhGjcNuauvDY/HJuc9rtBQlRnum/cbYMtPcjuTpwrLqoYyRGwernBsyM9DooBkX", - "G7yYDd9UJeDJvALNV9hVSeh1x0xtOHJUGIOZyn3ClpgXQzFba8nUchktA6QVGsrdnFXcGBrkoVsWbHHu", - "2dNHDx8mzV6InQkrJSyGZf7ULuXRKTahL76WE1UcOArYw7B+aCnqmI0dEo4vXfnPGoxN8VT8QA9k0Uvq", - "bm0qW9mUWD1h32GCJUfEnYz6aK4MuYq7eTvrqlS8mGMO5dffnL1gNCv1oUr1VDZzhda6Lvkn3SvT85iG", - 
"BFIjCXqmj7M/Y4hbtbFZU+UylQLRtWjrcIpeaA/a8WLsnLDnZEJt4lhoEoaZuPUGiqioJinxSBzuP9by", - "fI22yY4ENM4rp9d7Deys9dxEjxybIkvIsB3cvuQrVXydM2XXoK+FAXz4D1fQzbrYpCD1tvGQhbG7PF1L", - "SZRycoQw2pRUOhbtATiSZENQQRKyHuKPtExR2edjy99eYK/0k49eLd2e1z/k8AuZvNkP3rmQc6mkyLHi", - "QkqSxgxx09yUE4pTpP2LZuZPaOJwJSv4Nk+OPRZHa/oGRugRN3T5R1/dphJ10J8Wtr6y2wqs8ZwNinko", - "qO0dYkIa8EWzHBHFfFLpRFBT8r1FE0BxJBlh8qcRC+e37tuP3v6NuTcuhURLl0eb18/IZVUagZ5pyYRl", - "KwXGr6f7aMi8cX1OMBlkAdt3Jy/USuQXYoVjUBidWzaFpg6HOguBqj4w1LV95tr6FP3Nz51wMJr0rKr8", - "pOPl1pOCpN3KUQSn4pZCIEmE3Gb8eLQ95LY3whzvU0docIVRa1DhPTwgjKZkd3eUb5xuSRSFLRg93Ezm", - "6RUyAcYLIYMLNX1B5MkrATcGz+tIP5Nrbkl3mMTTXgMvR95Z4ENo8sHfdqh+gQKHElxjmGN8G9tq4yOM", - "o2nQSvxc7lg4FI66I2HiGS+bCO1E7XCUqrwQRTGtvWriKcbhGHcWXmZ20HXwlWDTHYt+HHsTjaVCXNTF", - "CmzGiyKVQetr/Mrwa3iLBlvI66bWVfMIsZsKfUhtfqJcSVNv9swVGtxyuqg8f4Iamo9QNDuMCX0WO/w3", - "VehpfGd8bPbRj39DIHZxXP7/4WPmlNTraDozYpVNxwTeKbdHRzv1zQi97X+nlB5eBf8hHv32uFy8Ryn+", - "9o27OOL8wIP4dLpamvS9GAuu8HvIq9QknuxyJbzKBuXMMOoBNy+xZT3gQ8Mk4Fe8HHlwH/tK6H4l/8HY", - "s/t8NEsEtz4LmOVsLwsazaxEscI978vQhTgWH0zhwXfntfBr3YvQcd/d9x1PHcWItcxi1EN3Mydau8HH", - "etF8XYOhSZOXpconn3o/zJnrNJ41VG02Pp12IobtaqOKmM7jaCiANNOi8NxEyD/qnslvqBglv+jr9Ggd", - "m8WxplJCo1/CnF4CBvACMDR1PFFkIvWYZd+KEush/cfFTz/Oxjcy2oHhlvosvkmj8tjGNI+l+uSxUh18", - "1OO2EyXLlBIxn5kRIzfm6kmfBl/0NvnhWzLaTQGJUtoc0/rF1MEHBLBSqST1w5Qjs3YjAtojOmg3lnhJ", - "TBcpevj+aizFSqjzg9/jekI+PG/uy0jAlVB1CKsMjxuCrYd+9Sm8OnWDRhhb8snQp3ZHjjpPX/v617RM", - "b2z7/hcKr2Agrd79AVypg03vF6VKqLFkd26bsKZ06qRSqh1xd0oNrFS5Ja/0BSM4yQwdWhqUrxqQ1fMp", - "cv4AHx/ms/PiKEk4VbJrRqOk7tMXYrW2WPHjb8AL0C8PVDRpq5jgEauUEW0F49IN5lNIr3G4k6mviBwB", - "i7giy3CsEF1+BbnFstVt1KwGOKY+i5sseHP/f2WT8bugeWzlC5rsq2IyrFV9QHgfJF6LkgdSnd+T6TU7", - "zpq3EfS085qbNt1TL+fC5JffyyXkmFV9b6K7/1yDjJKozYPBFWFZRnnvRPNAEesCHO9OaAHal4duLzxR", - "fa5bgzOWB+MSdvcM61BDsvBw8zr3JonHEQPk2w456Mc8RD4cVJiGMhALIdbfp3Jvi+uM5oyP0jbecK5A", - "ku7iaFM57pkyyI83mMt1PSptLMr6Y7nwhjXXxw0Lz7HEvfGRr7xJXB6b39j5sPDWtU98jmkJG6doSIEO", - "JvwWcpDSLKW49PVHECvkgr7muggt7iSpHN1NIg30splZtC+zhtFLiVIu+MgxL5UTI7Kxl6Ldx1BNJPE9", - "QyHfbQIwhGsJWkPR+DpLZSCzKrzk2gfHPlRQXPuNkGBGy6cRcKOp81+1tQGwjCTHVPnch7PHC2QaNtxB", - "p6MM/uNz7kP2M/oekniEMoIHTccNvR6uZx3e5AkzQGJM9Uvmb8vDyUFuYkUWUoLOgku5n85fdjM6Yt7e", - "os7pgo4PRmNpn5x7aw8rSRpg8+EqezpClP3iEnanpASFQuBhB2OgSXIi0KOExb1NvlO7uknBvboT8D5t", - "HspKqTIb8WKeD2sQ9Cn+UuSXgDlEm7crTva71z0bbhL2GTrPmjCV6/Uu5NyvKpBQ3D9h7EzSa8EQsdIt", - "T9qbXN6z++bf4qxFTWVBvLX85K1MP7vCgh36ltwsDLOfhxlwrO6WU9EgBzLcb+VYLN01FvfoVgE+maqV", - "D2NIelJJRFQERUomuSBX9DM86CnDEeY2iZLwYIQCZ96FzUypUkH6N8m/4oZKYyqeDAGyIKekAWmg8IMn", - "EeDD8w6kFPWfQ9JMtWQa2uiQm2YP9Qk5iTWbMY2+P3MzS5ffLZWGeEaMPqVMwc2LNkzDi/9ZCKu53t0k", - "x2cXVSnrySiWD8ZZNiGW7ULaMMshDstSXWfIrLKmTk5KtXXtTPcyDkUb237uVC8gCtjkxgtqO7bmBcuV", - "1pDHPdIPuQmqjdKQlQrjN1OhJUvr5O4Nvt6UrFQrpqpcFUD1ptIUNDZXLSVHsQmicLkkCoh2MA0A9Yno", - "eOKU7k4lB3GGotbB8gxh81+7PpSSos0KR4vOKEhh5CkCGJ8FzmOIGg/hRcKhfEZ9W2KaNy/FFukGdOrI", - "L5nVNcyZb9Gvse8PPtfANsIYAqWhpWtRlpgRQmyjkIomIimN2hGx9xzjpa8EBtV1s4OQNFy5O69JmRLz", - "gIs4nxmza63q1TpKUN/AGVReXXuFOB7lZ1Nj3CM+DXVTPGEbZazXNGmkdsltLOlnuZJWq7LsGqVIRF95", - "S/sPfHuW5/aFUpcLnl/eR71WKtustJiHxAn9qN92Jt1LTdi9gDOkAXM41Te1wxhYT7STGWSPxQ2M4oes", - "zBGY7w5z0MM297Phwvrr6jLTtBpzJhm3aiPy9Jn6c4XRjga/plhUMhkh1Wal9DHYDA97fFk1UVPIIodo", - "BsmTxSXPmGcEPnoE2Y37L0rg/XHZEjyjGbkoh8zFS1FZPirr9QBASCmnga01FXSNJbGGq6gV5UDB2Jc+", - "oBNvFQwxvB1sboQ7B8rCrYAahDU3AH5Gxoc5JY2kEOmF2obv99uskjcC/sN+Ku8wj7HYzYuWtDRFb4YM", - "VCMcIZ0if2+g42vMZ7GYGu7YFN+eeMNHAIwHQHZgmBQGeSwYSy5KKLJU7dbzxkY1jzRt/+ayW7se72Xi", - 
"5DmvQ+lUN3atwWdEIhFfd/1fFXekpJrmQ0uyLGAL9GDrN9CKaqLOI/8LlFQytWcMUFVWwhV04kJ9mqYa", - "RU1xBaGvaTqzAqBCb2TfRpYKeIzv8p7hxK89i0LmpmA3aUkhxNJOsQNmkqRRZyszOiZm6lFyEF2JouYd", - "/JljRY6uGdAd5QSqBjpCFvTIqdP8TCO8CgOchf4pUSZg4t00PnQ0C0qjbh8DOhgAXZuxUy/T8c9xDrLG", - "wYKzFY0jlki85Rum4tdy3CA5JPlW3Zq4T0LJCLHfbCFHqcbrO1B4jWfESeHTGSG1S4CCtALXJWFtX4Nk", - "UkUlaq+5aVSVNjlq+IEmxkZCem36Bk7lNkz59jvLcDBmelkSRxUJ3dDpzc3zn+Qk7j2Io+OlaMSAf9e7", - "x/4VqNurHdhA1WXBpNtPJ/tjkVd/i3kuPmeLOgxUluqaas7GeuhzCH5Qor7gAvJiuWiu5RCOPfd5e/um", - "DhE9RNnwHVMa/3Fa5z9rXorlDvkMgR+6MbPmjoS845UiAnx4t5t4v3g1D4AFa4sKU9G6xdQxo+F2bpQI", - "aHeRh+Jgim34JcTbgMEOxD9z6xinqRdouXBXdm87h1jwiw+5lza8iDV9zAC763CHkBPc9f6f7SPXeKqQ", - "uLEqeR4qDPsSZ10+g1XEA3HZNWz2v4Ie8rVAAk1l8pZodUibUdzAZHok60o9LRor39QBe1CxeVC56lbL", - "mGj57dXo2fN+fNJS7noXpkbdDICO67weAj8ue/tx8J9Mzjy2jCng/1HwPlLoOoaXalp/BCx3UuskYCVr", - "9UJtMw1LcyjAhMzVTp3XbVKeYGIVMtfADUXcnP/kFc8297CQThGmmNDGp9mMUsBSyJZZClnVNqHHYApi", - "uYsQFhv9Ea0jLrQxKcEJk1e8/OkKtBbF2Ma500ElYeMSM8HR4fsmTBjNnTocQJhWh8OH160ZPW7mLnAq", - "YkfhmsZyWXBdxM2FZDlod++za74zN/coNc6BQz4lHkkz3XQgkXcJSZsAKXfeKXxLf08DIL9Dx88Ehw3G", - "BSecNWTasWrEPzOE4U/hsNnwbVaqFT4PHjkQPuk0evhIBVQSzeAkn01bd5jHiN9g/zRYb8MzIqtw1ilT", - "7D/3P+FWohr5sxR278knG2X/vTbF3dLBDEiVqzb4n4hleB5TT+x9VqX4mX0QNsNTlUB7EG0ijPiHunbx", - "kV3EMAifnyE2gk8vl9iNtEg95CfLQIYWA7MnvB9MG8rOcx+eNTSlDUwNhJS5T4NwpKWN7PPhXhoBD00h", - "xp/17rRNyIwb55gak/sTH2SVqrJ8SswnleQpvJvAQ9qFcYQ+IifAyLqb8BjTFKnqJDTrVKs6tszmaLWs", - "Q96uKt+n9I+ZiUY4etcFoZbIy/AIk3EMX/I0xpR5/41Z1wzWMAnGmYa81mgmvua7w2ULR1LBX/zt7ItH", - "j//++IsvmWvACrEC05YT6JX9a+MChezbfT5uJOBgeTa9CSGtCCEu+B/Do6pmU/xZI25r2lzBg6KHx9iX", - "ExdA6qXvsA7cjfYKx2lD+/9Y25Va5J3vWAoFv/+eaVWW6XIujVyVcKCkdityoTgNpAJthLGOEXY9oMK2", - "EdFmjeZBTOp9RWmilMwh2I89FQg7EnKVWshYQC3yM0za4L1GDLZV6XkVeXr2rcvraWShQ6ERo2IWwCpV", - "edFeLFkKInxBpKOXtd7wiRbxKEa2YbYULZsiRB95nia9uOD+fm7fLQZt05zebWJCvAiH8gakOeafGE9I", - "chNO0pr2/zD8I5Fh5c64RrPc34NXJPWDPW+OzwZxD012kUmgDbNtJMgDARh5bdt5Jxk9FIsyjGvyEqA/", - "ITiQ++LHD61j+eCzEIQkdDgAXvx8tm3XvGTw4HziVN0/NEiJlvJujBI6yz/0Ijew3uYiibbIG02sBUNs", - "SQ3Fwui5tXnWvGIe0UoGj521UpY5zbQsE4+kyY6DZyomHKcS6Ctefnyu8a3Qxp4hPqB4Nf40Kn4pGyOZ", - "UGluloDzBZ80d/Qq9u6mli/xYfZ/gtuj5D3nh/JO+MFthsYdXlJ49bLxRoNk1zgmBVk9+pItfBWdSkMu", - "TN+5fx2Ek+ZhKGix9AGtsLUHXqIeWucvyt6CjJchEof9GLm3Gp+9h7A9op+YqYyc3CSVp6hvQBYJ/KV4", - "VFzc+8B1ccuKKzfL5xRlZjwyn9OwbPnU5VFqE3fp1AaG65x8W3dwm7io27VNTUY2uXDL27dv7GJKDrF0", - "kRXXHZOY3Um1laNqrfwO6csIR34MP2+KYn4ZS2hNSZtHku739qMW5cGAlU4JhQ/z2YoyGGGRgL/7olAf", - "9y4NEIykEfNLv026GEJMYq2dyaOpooxPE+oi+G6JZPb4qjGvtbA7LAgeDGji78l8TN81uT18bpjGl+bv", - "PqsuQYZ4jzYTSG3C7fqd4iXeR+Tik+4WUuUJ+4ZS9/uD8td7i3+Dz//ypHj4+aN/W/zl4RcPc3jyxVcP", - "H/KvnvBHX33+CB7/5YsnD+HR8suvFo+Lx08eL548fvLlF1/lnz95tHjy5Vf/ds/xIQcyARpqdjyd/e/s", - "rFyp7OzlefbaAdvihFfie3B7g7ryEvOWIVJzPImw4aKcPQ0//a9wwk5ytWmHD7/OfOG12drayjw9Pb2+", - "vj6Ju5yu8Ol/ZlWdr0/DPJjiriOvvDxvYvQpDgd3tLUe46Y2yb/ct1ffXLxmZy/PT1qCmT2dPTx5ePLI", - "16yXvBKzp7PP8Sc8PWvc91NMnHtqfE2M0+at1of54FtVUcUM92nVZAd0f62Bl5hgx/2xAatFHj5p4MXO", - "/99c89UK9Am+3qCfrh6fBmnk9L3PnPBh37fTODLk9H0nwURxoGcT+ZD0Sb5Q6hJd4kE+umd6cRwnccn9", - "88Khn1pi8IU5bxlhqJuOPufZ0zcp24uPoazqRSlyRtc30q/bnIi8mrQhLftAQ9vMNPX8W2boGNzD7Kt3", - "77/4y4eUkNUH5AfvEGw9ID4kF1954QOFkwDXP2vQuxYw9NbPYjCG7sJ09rStZZWvaOJnO2E/+0gH/Eo8", - "pYkI9Y/CmsRzodMIYG6IFFwNFt5h8U4M/UNyePzwYTj5Xq6OyOrUU2uM7q7vYRAXdEw6g05F+4RQ5BaT", - "IT6GFPuzoZRLDptCcoqqx3DbDb8krwsG1DHt3816jPoYXURy837Eb0tg7r9jrbIJj7JppkRyxCG3HDmB", - "IZQ2NoyVgsx+PrwpVZT+w3z25Ehq2Gug6iQGToD/Ay8dyFCEtDEEwaOPB8G5pIhPd+3Q9fhhPvviY+Lg", - 
"XDrmxUuGLaO62gmKl5dSXcvQ0sky9WbD9Q4lFTtlj32WI/QlhnZE93SxcneG38yILWOFoQq0cAojL2fv", - "Phy6Xk7f+xQ/By6jTi19H68cdZh4ye1rdrrAGopTm4KJGo8vBU1g5vQ9ntDR30+9JT79EY1pJKWdhiRf", - "Iy0pnUv6YweF7+3WLWT/cK5NNF7Obb6uq9P3+B8UuKIVUdr3U7uVpxh8dPq+gwj/eYCI7u9t97gFZjQO", - "wKnl0qA8su/z6Xv6N5qoQ5itUNMVUL6JGj1bQ345S999vZoYUS9G8ihflFAQc3oyoYNUNu50owP9CsUP", - "w376noklg/4UwoQZjji3lFj0FEs171pchp93Mk/+ONzmTlLFkZ9PgzqUEm27Ld93/uweObOubaGuo1nQ", - "kEhW8CFk7mNt+n+fXnNhs6XSPpcfX1rQw84WeHnqK/L0fm2T4A++YGb/6Mf4lVry11PuUT2rlEmQ7St+", - "HXn/zrAxSQhg7NcKNYqx22mbLYRECopvqNZ+QB+HsvHgXnJyDQbKBRfMMA8PJgPRihc5N05sZW0G7660", - "/iF57D62tPE1L1jIoZKxVvY481pqZ2l/DEkkyW6ewxWUjmKY0uwQ7/nEsswXDz//eNNfgL4SObDXsKmU", - "5lqUO/azbB7g3JgVf4vkrXl+iTJ+Q/IUnan5dfdNj05nlehWfwtJRoDZLVtzWZT+Hb6qsaylo010uqoo", - "7MddYaH6YaU0AkDZJ6GgQAhzwi6aMBEMuqiDmlQQ2aBXBHMq0yQcQ0jIjTjhKpnPtpnjByuQmedI2UIV", - "O183bKb5td3S2/oB2yM5c4QnDqTA1Fcv6Iw0CnHj4XNrp4ztfmiQaCx+b945hdiAvgq2itaM9fT0FB8S", - "rZWxpzOnz3dNXPHHdw3mQhnlWaXFFZZ7QaQpLZyaWmbeDtRWTJw9Pnk4+/D/AgAA//+TZLcgSA4BAA==", + "H4sIAAAAAAAC/+x9+3MbN5Pgv4LibpVjH4eyHTv7xVdf7SlxHto4sctSsrdr+RJwpkni0xCYD8BIZHz6", + "36/QeAxmBkMOKdlJqu4nWxw8Go1Go9HPD5NcrCvBgWs1efFhUlFJ16BB4l+0KCQo/G8BKpes0kzwyYvJ", + "KSc0z0XNNanqeclycgXb2WQ6YeZrRfVqMp1wuobJizDIdCLhnzWTUExeaFnDdKLyFaypnVZrkKbvu9Ps", + "vx9nX77/8Pxvt5PpRG8rM4bSkvHlZDrZZEuRuR/nVLFczU7d+Lf7vtKqKllOzRIyVqQX1TQhrACu2YKB", + "HFpYe7xd61szztb1evLicVgS4xqWIAfWVFVnvIDN0KKiz1Qp0IPrMR9HrMSPca9rMIPuXEWrQU51vqoE", + "4zqxEoJfif2cXELUfdciFkKuqe62j8gPae/J9Mnj238JpPhk+vzzNDHScikk5UUWxv06jEvObbvbAxr6", + "r10EfC34gi1rCYrcrECvQBK9AiJBVYIrIGL+D8g1YYr8x/nrn4iQ5EdQii7hDc2vCPBcFFDMyNmCcKFJ", + "JcU1K6CYkgIWtC61Ilpgz0Af/6xBbhvsOrhiTAI3tPBu8g8l+GQ6WatlRfOryfsumm5vp5OSrVliVT/S", + "jaEowuv1HCQRC7MgD44EXUs+BJAdMYZnJ0nWjOsvnnXpsPl1TTd98C5kzXOqoYgA1JJyRXPTAqEsmKpK", + "ukXUrunm74+nDnBFaFmSCnjB+JLoDVdDSzFz39tCOGwSiL5YATFfSEWXEOF5Rn5WgJSEX7W4Ah6og8y3", + "+KmScM1ErUKngXXg1ImFRHQgRc1TjIrgB4fmAR5l+94ng3qLI97u/qbY0n3qQn3OlhfbCsiClea+JP+o", + "lQ4EXCvc9hUQVUFueG9BzDAG+YotOdW1hBeX/JH5i2TkXFNeUFmYX9b2px/rUrNztjQ/lfanV2LJ8nO2", + "HNiBAGvqnCrstrb/mPHSR1VvknfJKyGu6ipeUB6fBUMrZy+HKMOOOUwaaQZ5GuQG3B831sXm7OUQS93d", + "Q2/CRg4AOYi7ipqGV7CVYKCl+QL/2SyQtOhC/j6x4oXpratFCrWG/B27RoHq1MpPp40Q8dZ9Nl9zwTXY", + "qzASM06Q2b74EEtOUlQgNbOD0qrKSpHTMlOaahzpXyUsJi8m/3LSCHontrs6iSZ/ZXqdYydzGUswjC+j", + "VXXAGG+M8Iii1sBBN3zIHvWFkORmxfIV0SumCON2E1HuMpymhGvK9Wxy0Em+jbnDOwdEsxX2krRb0WFA", + "g3tBbMM5KKR9J/Q+UC1JETFOEOOE8oIsSzEPP3x2WlUNcvH7aVVZVE0JWxBgeJ/DhimtHiJmaHPI4nnO", + "Xs7Id/HYN6wsieDllszB3TtQmDEt33Z83AngBrG4hmbEB4rgTgs5M7vm0WDksvsgRpQqV6I0V+BeMjKN", + "v3dtYwo0v4/q/Jenvhjtw3SHEr1DKlKT/aV5uJHPOkTVpynsYajptNv3OIoyo+ygJXXWIPi+6Qp/YRrW", + "ai+RRBBFhOa2h0pJt16CylAS6lPQzwos8VR0yThCOzUCOSdremX3QyDeDSGACpK2JTMrXt0wvWpEroD6", + "We998dcm5NSeE7PhlBnZmJRMaSMM4WYqsoISBU4aFAsxFR1FNCNoYcciAsw3klaWzN0XK8cxTmh4f1lY", + "73iTj7xkkzDHaosG7wjV0cx8L8NNQmIVDm0YvipFfvU9Vat7OPxzP1b/WOA0ZAW0AElWVK0SZ6pD281o", + "Y+jbNESaJfNoqllY4iuxVPewxFIcwtWq6mtalmbqPjfrrBYHHnWQy5KYxgTWTJsHMON4ApbsGrhlPTPy", + "Dc1XRpggOS3LaaOXEFVWwjWUREjCOAc5JXpFdXP4cWT/UMJzpMDwQQ0kWo3TaczIxQokLITEh6oEsqZ4", + "Oa3N86gq230Cc1V0DR3ZCS9LUWsDY/RyOXvpVwfXwJEnhaER/LBGfPDHg8/M3O4TzsyFXRyVgIoWxvOy", + "Lhr8BX7RAtq0bq5a3kwhZIGKHqrNb0ySXEg7hL383eTmP0Bl09lS52eVhMwNIek1SEVLs7rOoh4G8r2v", + "07nnZBZU0+hkOipMv+gs58B+KBSCTGg3XuN/aEnMZyPgGEpqqIehnIIyTdgPvLMNquxMpoHhW1qQtdWb", + "kYrmVwdB+XUzeZrNjDp531hVndtCt4iwQxcbVqj72iYcbGiv2ifE6nw8O+qJKTuZTjTXGARciIpY9tEB", + "wXIKHM0iRGzu/Vr7SmxSMH0lNr0rTWzgXnbCjDOa2X8lNi8dZELuxzyOPQbpZoGcrkHh7dYyg5hZGlX1", + 
"6VzI46SJnmmiUcATakaNhKlpB0nYtK4ydzYT6nHboDMQCeql3UJAd/gUxlpYONf0I2BBmVHvAwvtge4b", + "C2JdsRLugfRXSSFuThV8/pScf3/6/MnTX58+/8KQZCXFUtI1mW81KPKZ0/MRpbclPEw+nFC6SI/+xTNv", + "EGmPmxpHiVrmsKZVfyhraLEPY9uMmHZ9rLXRjKsOAI7iiGCuNot28tb2u51OXsK8Xp6D1uYR/EaKxb1z", + "w94MKeiw0ZtKGsFCtY1STlo6KUyTE9hoSU8qbAm8sKY3sw6mzBtwPb8Xohra+KKZpSAOowXsPRSHblMz", + "zTbeKrmV9X1oPkBKIZNXcCWFFrkoMyPnMZHQXbxxLYhr4ber6v5uoSU3VBEzNxrAal4MqCj0ho+/v+zQ", + "Fxve4GbnDWbXm1idm3fMvrSR37xCKpCZ3nCC1NnSnCykWBNKCuyIssZ3oK38xdZwrum6er1Y3I+OVOBA", + "CRUPW4MyMxHbwkg/CnLBC7VXm+OtgR1kuqnG4KyLLW/L0sNQOTSdb3mOaqT7OMvD2i9n6iNqy/NIFWZg", + "LKFYtmj1o6q8hjBloXigEpAaTL3Cz2gReAmlpt8KedGIu99JUVf3zs67c45dDnWLcTaHwvT1GmXGlyW0", + "JPWlgX2WWuMfsqCvg9LBrgGhR2J9xZYrHb0v30jxEe7Q5CwpQPGDVS6Vpk9fxfSTKAzz0bW6B9GzGazh", + "iIZuYz5I56LWhBIuCsDNr1VaKB3w2jEHNa+lBK5jORf1GUyRORjqymltVltXRIvU/dJ0zGhuT2iGqFED", + "bg7BVcO2stOt6DUQWkqgxZbMATgRc7PoxssBF0kVqYzs7MQ6JxKP5bctYCspclAKiszps/fC69vZ+0fv", + "QB6uBlcRZiFKkAWVH2cFV9d7gb+CbXZNy9qI5z/8oh7+WRahhablni3ANqmN6Krv+ku5A0y7iLgLUUzK", + "VltoT4IRsQ3TKUHDELLvjr3B7e+C2SOCj4TAa5DoUfNRj5af5CMQZYD/Ix+sj7KEusqMGDiofjCSq9lv", + "TrnwsuGeGcIEJVU623elmEYtvYlZasTFU7cIDjwgT76iSqMYSBgvUH9rr0Kcx8qWZorJgU5lOOXga8xM", + "+ot/iPWnzc31zlWtwqtM1VUlpIYitTy0WQ/O9RNswlxiEY0dnn5akFrBvpGHEBiN7/DoFAH4B9XBQu1s", + "3v3FodeBEV+2h2K5BV+Do10wnvtWEeJjp9oBGJlq9sCSG1MdepsLUQJFlanSoqoMh9JZzUO/IQye29an", + "+uembZ8krRnISiqFAIUmJtfeQX5jka7Q1rWiijg4vH8CKrysi1wfZnOsM8V4Dtmu84KPYNMqPjhHHfe6", + "WkpaQFZASbcJbwv7mdjPBxKGHxsJpNEfCA3ZHK2JaRppzoT3Nz1uVoFTqZTgTfALyc05N8+ohtRc7+Mn", + "LQCnTfFNR6wPwiwIRpIO/HiILEtPiRHx7r8W2pCVIzpcjbuV7riWAeyFWT8KAnHcrFEEdGf/L1Bu7iCA", + "3ev8W1BDC2+mvq9lD6j/8W5vXZidq6xz2ySviEG+vIcxDvGgAVvEGyo1y1mFz9UfYHvvr/fuBElfCVKA", + "pqyEgkQf7Eu+ivsT64bcHfO41/wodWsf/J6+NbEc75nVBv4Ktqg2eWMjGiJt1X2oIxKjmguXcoKAeq95", + "8+KJm8CG5rrcGsFWr2BLbkACUfXceq30TWhaVFk8QDpmanhGZ5BPmsN3egic41DR8lKeh/a1tRu+i86T", + "q4UO98qqhCgT+s/uie8hIwnBKHchUgmz64yW5ZboEDbjKakFpLsg0BsjyDMPVAvNuALyX6ImOeX4wq01", + "BCFNSJR8UFg2MxhxM8zpXFUbDEEJa7Cvefzy6FF34Y8euT1niizgxrrccGzYRcejR6iKeyOUbh2ue9B2", + "m+N2lrh00FZpLln3auvylP1Obm7kMTv5pjN4MHCaM6WUI1yz/DszgM7J3IxZe0wj4xz8cNxR5ru2S1hv", + "3bjv52xdl1Tfh6ESrmmZiWuQkhWwl5O7iZng31zT8nXodjudwAZyQ6M5ZDlGCY4cCy5MHxtYaMZhnJkD", + "bANHxgIEZ7bXue2056Xd+C2z9RoKRjWUW1JJyMFGyRkpVYWlzogNmchXlC/xBSRFvXSuznYcZPi1spow", + "WfPeEIeKYnrDMzRhqGSYGpotfbSlEcKAmpdt1/5hH2s3NIBiL6NRl3a0PV17UNJkOp0MPvwNvq+bh7/F", + "Wztk9FhjYks+jJDWQDPSeob4NLJSH4nxNprDZ4jh41hpmqFTUPYnjpzCm49DfuHndVWV23sQkuxAREIl", + "QeGVFqsBlf0qFuRHlktxWi5FuPPUVmlY9403tuuvA8f17TEvYMFLxiFbCw6JJ/1r/PojfhytdrTX8MCI", + "KBAdNGD34dNCQmcB7cnHkPRdNwlJpnv2u5ZO9a2Q92VltwOOflOMsFzvdetwUx5rX6dlmTBJW/VDj4uo", + "aXAKZ5JQpUTOUFA8K9TUeZ9bK7Z1a++g/00IjbqHA9wdt2N7jcKwrCIfyopQkpcM1fyCKy3rXF9yipq+", + "aKkJZ0GvHBhWC3/tm6T10Ak1sRvqklN0FA36v6Rj0AISeqhvAbx2WNXLJSjdeWAtAC65a8U4qTnTONfa", + "HJfMnpcKJHrszWzLNd2ShaEJLcjvIAWZ17r95FjXShOlWVk6Q7CZhojFJaealECVJj8yfrHB4bwfiT+y", + "HPSNkFcBC7PxjGsJHBRTWdrT8Tv7FYNKHE5WLsAEYy3sZ+/x3OSGmJi1t5JW/J/P/v3Fu9Psv2n2++Ps", + "y/9x8v7Ds9uHj3o/Pr39+9//b/unz2///vDf/zW1fR72VDC4g/zspXujn73Eh1gUJ9KF/c9gkFkzniWJ", + "MnYo6tAi+QzzZTiCe9jW++kVXHK94YbwrmnJCsOL7o18utdU70DbI9ahstbGddR4HgEHPofuwKpIglN1", + "+OtHkee6E+x0uIm3vBNj4DijuncA3cApuLpzptxqH3z3zQU5cYSgHiCxuKGj1AKJF4yLYGx5+ZhdigO7", + "LvklfwkLfA8K/uKSF1TTE3uaTmoF8itaUp7DbCnICx8U+ZJqesl719BgAqkoqDnKIJXiFHSdXsvl5Tta", + "LsXl5fueH0JftnJTxVzUnbO+msxPmRm5QdQ6c0lcMgk3VKZsIT7Fh4uGxt474bAyiaitEssniXHjz8ZC", + "WVWqm+yhj6KqKg2KIlJVLl+B2VaitAiBY4aZu9hbQwM/CedUIumNf/LWChT5bU2rd4zr9yS7rB8//hxD", + 
"8JoUB785HmjodlvB6IfvYDKK7nsXF27lcnQqzyq6TNlMLi/faaAVUggKHGt8aZYlwW6t8EAfCYBDNQsI", + "scgHbImF7OC4Xlzuue3l03qlF4WfcFPbsdN32sEoKv7oDdwTWU9rvcoMR0iuSplj4PfKJxigS3PleA8C", + "xZb4AFArUZslA8lXkF+5zFawrvR22uruHV3cXewZDlOoM3LBgQtm8JdTbgasq4I6QYbybTfFjbLBEDjo", + "W7iC7YWw3Wcjs4NF2eiiFCtq6Ogi7UZ3rSHf+CC7Mbqb7/yufIyoS0eCcZeeLF4EuvB9ho+2FQDu4Vin", + "iKKV52MIEVQmEGGJfwAFRyzUjHcn0k8tj/EcuGbXkEHJlmxeJtj0f/btGh5WQ5UScmDXPqo3DKgIWxDz", + "Oprb69i9mCTlSzCXurmIhaIlOu3PkoZ+lA5XQKWeA9U79bU8TjPhoUOB/AaDplFpMjVLgI3Zb6ZRCcLh", + "xjzw8O1t2zhH4tlR7lR2TVAcCarv3gRJz455RDiEJ/LZ+fs+7El4Lzj/tJg6EWT7fW1wuJTixuymAVD4", + "1I2Y4CW6p2pFlzD2OmqZikamxGhZgHCQfdJPUt4Ri65Y05MxRi7Cds8MXpLcAcwXwx7QDNBxcfRzWxOi", + "syq85uXWI3VeokAdHEQt6VDZsrPx5WHAptkYSN4Iqx6wNtbio7+iyh/9Yhpx9COlxT8mlcyu/Hlnkfcd", + "1f3seP6a7rL2qdXnzIEIbnr4LHo+dZ7PlzeZHpT7bjpxIQ6pvRMcpegCSlhanNjGns6a/EzNbho4Xi8W", + "yPSylCNfpIyMJBM3B5iH2CNCrMacjB4hdQoisNGyjgOTn0R82PnyECC5yy9F/dh4d0V/QzpY0HrjGylZ", + "VObWZwNWq9yzFJfeohF5Oi7OOAxhfEoMJ72mpeGkLvC0GaSXqw3fPp3MbM634+HQm2jkQXNrROnkoFVa", + "eeaY9cWCt19G+lVw0BrmYpPZyOjk02q+mZszkYxXwDjt1OG1mfMeKDIXG/QpwhvOOrgfDN0wZB6wyA1k", + "wxRSOfYbEhsteIcBsluQT1GzQtJzerVAdkOS7HHADIjTQ2T3WZRC755A6igwmzTgTqOzV8/Slrb6kkhz", + "3U5DdtgQppZiNUOHM7mTAxjtK0/bue6+b9IdDidH82f1kyT56yvl7pKX0XaubK7FQ9IydsmhBcQOrL7p", + "CrFJtLYdl9p4jbCWYkmG0feNXX20KSgBNQFZS67OrlJm6cvLdwpQZjj33SI9J+4e5duHkTechCVTGhrj", + "gndy+fS2H1QnmseWWAyvTldyYdb3VoggaFhzLHZsLfOTrwBd1xdMKp2hZSa5BNPoW4WatG9N07Qg3Pa3", + "Y8qaeg6WgxGiK9hmBSvrNCk7kH54aSD6Kdxcqp7jRcm49TaaYyr8pIPuAbZJhMc6du9E0CuLoFf0U+Bn", + "3MEyTQ1M0lBee/q/yBHr8MJdnCVByyli6m/oIEp38Noolr7PaCMhOnK7mO2y+fTOZeHH3uuN5SP6h4QI", + "O1JyLVFGxHQAoVguofCZ3lxQqM165fLplYIvm1yC5vcd6QNnxGbxwyR8O/L3Ofd0GHJOb5UTwaoYSejj", + "xwxC3kTXYe5BnGQJ3GZumRxeb6RMIi52jMcWkWb00/L2ntt80nX4ouMu3Pj02j0Mm43bUwIt3LNKgV/f", + "7kPb3y6HuumQ03ErRezuA4YDIsUxrSIBpkc0A5ybVhUrNh3Dnx11dgRJjBT3+pngOzhDtuQG24OftmPx", + "nlo9D8ztiO2dseMEn/kn5pFp/ZmdR645GzR32QaKWqI1qeUt3M+nHx6aI9f+wy/nWki6BGcRzCxIdxoC", + "l3MIGqKU9IpoZh2kC7ZYQGwJU8dYcVrA9ewdxQjCHiDBvrksvC130mefyPbQVrOC/QhN01OCUoZ8Li76", + "9kj/8Ih0a+GyiTbuCKNiMqHAD7DNfqFlbV5CTKrGN9UZCNvX+gE0cb3+AbY48l6XTwPYnl1BVdxbQApN", + "WVfCJxVlCX+gWtUX8A3c2sIDduo0vUv3tDWulMbw0WhuqFY9ifZSPt6xaVxkDKRj9uo87XVizha0t6VL", + "6Pu2iBX7ZZ/oCRJPxdB745hLLmTa2OtdBrT0hI+LndxOJ3fz90jdk27EPTvxJlzNyV1Ab0xr/285fR24", + "IbSqpLimZeb8ZIaEDimundCBzb1bzSd+X6VPxcU3p6/eOPBvp5O8BCqzoOoYXBW2q/4yq7IlOHZfQzYd", + "u9PtWlVYtPkhZXbsSXODqdc72rRerZvGbyo6qM6zZpH2FN/LN52Ll13iDlcvqIKnV2ORto5ebecuek1Z", + "6Q2/HtqxWna73HHVlZJ8Ih7gzk5ikfffnccajBO4vHx37THb2FOso1RIiZ/wpVNHejr3eE36rDa0vodD", + "4jpfYybT9LuLuzynyBidwxm9dznwWyFbF5WLakw6rH08AdE8Jiwe00b5C2eF74mFM2JFyN+Wvxne8OhR", + "fPAfPZqS30r3IQIQf5+73/Ed9ehR0jCcVPUZloWaPE7X8DDERQxuxKdVQ3C4GScunF6vg4wshskwUKj1", + "PPPovnHYu5HM4bNwvxRQgvlpNkZVEW+6RXcMzJgTdD4UlRicn9e2nKcigndj8DFK1pAWXj2ugoe1s/eP", + "EK/XaHfOVMnytNMPnyvDkrh16TWNCTYebUM2c9RswK+c1ywa3TRTR5k8OwuJZk0iXCUzATf4nQvHAmrO", + "/llDVNYXb+LO5eyfQjhqT8BO6xfdwN2qwZNjCv7e3UTotWq7FEY7Ta4vgxnQIyJVZ+rAeId4xh7z3xGr", + "4CjKX58Y2LZyrsN7KWvnO293EWhnBvbs01lchx9Irhym3cyXY3aaqWwhxe+Qlh3QSJhI3eGt2wwV8L8D", + "T/modhlZ8BxoClY3s+8jkPG6hSFSubMuwS86VM075gpP84nDNvpApUG038NqA5VOL+42YeihGjuetANp", + "BpgZHtjILRxr+Xh3N8rtCbV5LVqRZ+lzHgeKntjxm3PuYO4F15b0Zk5ThY7Me9HAFG1/yzFPC+I7+w1S", + "ITWDnZ1EsQyhLbPJ/iqQjfWonyr5yLefnXb0q6955CHFxc+7qfVVKZVIDFPzG8rRjxD7WQ7oeiuwfhim", + "142QmOBTpX0IC8jZOqkMv7x8V+R9z6+CLZktKV4rIHShXZ5HN5AtKm+pyFXzDrlIHGrOFuTxtDmzfjcK", + "ds0Um5eALZ7YFnOq8IIOPhGhi1kecL1S2PzpiOarmhcSCr1SFrFKkPA+R9EzeMLOQd8AcPIY2z35knyG", + 
"DsOKXcPD9AXjhLXJiydfTndVzkaMY5H4XUy+QC7vAxnSlI1e1XYMw1bdqOnIhIUE+B2G75Md58t2HXO6", + "sKW7gvafrjXl1CAkBdN6D0y2L+4vunJ08MKtdQaUlmJLmE7PD5oajjUQTW4YogWD5GK9ZnrtPEWVWBsK", + "a8qQ20n9cFhfz5dB83D5j+iCXSXe+H/Ac4uuByIc0av+J7S3x2idEmoztpasib/wFWrJmc9MjXXhQjk4", + "ixszl1k6yqsYjrEglWRco9ao1ovsb+b5LmluGOJsCNxs/sWzRH21dgkifhjgnxzvEhTI6zTq5QDZeynH", + "9SWfccGzteEoxcMmpUN0Kgd9xdP+vUNuxwND31m6NuNmgwRYtwiQRtz8TqTIdwx4R+IM6zmIQg9e2Sen", + "1VqmCYbWZod+fvvKSSJrIVOVLhoG4KQSCVoyuMb40vQmmTHvuBeyHLULd4H+j/Vu82JpJLr50518LERW", + "5cQ7LaRVMpL+Lz82+fHRuG3jdjvaSyETelqncfzEbqmH6Qu7NnTrDojfBjA3Gm04Sh8rA+EeNp4j9Pkj", + "/L26INk9b6lKn/xGpHnHo6z/6BEC/ejR1InKvz1tf7bs/dGj8S6zaX2h+TWBmuPumm72StM3tdVfiYT2", + "zlfxDH5jLlVJQsOavMvMlTp3Y0xJu1Tip5c77ide8WA35PQB8qjBz13c/MH8FTeziYAZ5g/t6rFJ8inC", + "9yiGgpKvxGYsEXWuLU9PfwIUDaBkpFYQV9Krjpv0lNjr5hORrRl1DqUwL9W4ANZor5W/0C4Y1Ex37EXN", + "yuKXxgrduZkk5fkq6VQ+Nx1/tc+AqEGkwchXlHMok73ta/lX/6pOvPv/IQaGXTOe/tQtxGxh70DagNUG", + "wk/pxze4Yro0E8QoaifkCilOyqUoCM7TVC5pWGO/onmqkmwixh+HXdfaeSVj8gRXUGTBSnSjTdvDsWUm", + "qR7gqlj235e4MuNgFX5l1RJ2dJCEsjVe24quqxLwEF6DpEvsKjh0umPGNhw5KktCVGU+YUtM/iKIriUn", + "YrGIlgFcMwnldkoqqpQd5LFZFmxw7smLJ48fPx5nZER8jVi7xatf+OtmcU9OsIn94ip/2YIJB4F/DPS3", + "DdUdsvl94nLlV/9Zg9IpFosfbEA2WojNvW5Lr4YywTPyHeYnM4TeKhGASlGfYbmdE7SuSkGLKSaFvvjm", + "9BWxs9o+EhB1WPp1iRrA9hFJGnnG50j1+dcGcleNH2d36hyzaqWzUJQ1lUnRtGhqybKO9xPqBmPszMhL", + "q5YNjj12EoKpxeUaiqgGrFUDIHGY/2hN8xXqO2eTnSrlgWpA40sYew7YmIuiuNdQMAs5uFmGq2JsixhP", + "idArkDdMAeadgGtoJ2wM2U6dQt4ncGyvVtacW8KZHSC9hvJYh+6CB86Kvt6/IglZZx/ubPtrMnlgkfND", + "iz2fY6903E6ncnTH78GWzNj4ohsz8qMzduSUC85yLDaREsExFeM4s+qIuhxpe6eauLOcOIbJetUhQN1h", + "cbCCtWeZDnF9p4boq9lvSzj2Tw0bVwRwCVo5HgjF1JePdwY6xhW4AmiGvmKOKmTC9SsZFhNcSO7RJX06", + "wWxqA7rWb823n5xuHnPGXDGOOjeHVPcStAa2UjG0s3PCNFkKUG617bgw9c70mV1sOILwfvZKLFl+zpY4", + "hnVFNEixXsD9oU69T7DzwTVtvzZtXe2C8HPLpc5O6tf9PslCVNj/VM31QfSnfL+8I02E3DB+PNoOYtzp", + "6o/3siFDuEbPP6jwPu+RTShf3x7lG/NktfSGLYiN3E2mDWY8AcYrxr3BN50HK0/eJbgxeJoH+qlcUm0f", + "HaM43gXQciAcBoPqrcfAXYfqVmIwKME1+jmGt7GpvD/AVkKD5nVB+Zb4Q2GoOxJKvqZlcIZP1NFH6cwJ", + "Y9ZZuFNZP8VWDFvPfGhuC117A0FDd6yGcug9NZRtdF4XS9AZLYpU3rmv8CvBrz6gEDaQ16EIWIgzbadr", + "71ObmygXXNXrHXP5BnecrmCKKgXreZlwvX0ZPkIRdhgTUc23+G+qAtbwzjin94Ojv72He3FYjYJ+NHtK", + "ejY0nSm2zMZjAu+Uu6Ojmfo4Qm/63yul+8DvP0Vcd4fLxXuU4m/fmIsjTtPd8/G3V0vIoo3+9AK/+3xg", + "IZNrmyvhVdar84YeGbh5iS3rAO8bJgG/puVAxoXYamPvV2vJGMq7kA+mFaHaZa/TlDQ8YYwKYzj/l/XA", + "7liG+ubNIR9r62L9MY0nDh87kT5safyhZVe0Xm8NQxm0Jx5n8muI4FCbnyvF0NeX0rIU+WjO4IY5NZ2G", + "U/WK9dplvk945V2vRRGfhdibCyDN2KzDciK0Ah+2yW/4tEp+kTfp0Vr6kUA0Y7OWIRrdEqY2MNOD54Gx", + "U8cTRSpbh1nyLSuxONR/nL/+aTK8kdEO9LfUpc5OqrCHNiZEqnXJYyla+NjBAwQv0/pvNaBSx9xQ6dPg", + "qhMnP3xrFYRjQLJ5kg5p/Wrs4D0CWApbFSpVN6OfnWbSbIdHfkQNzfZajhJTR4oqutWWEm8fq/RsmpBQ", + "iHRUYdKWjDSmuFOqjpB7KXgNrL1oXD46W1ypV5epx0BfjhEOe/i4nU7OioPEp1QtqokdJcVgX7HlSn9V", + "ivzqe6AFSFtPJPWctNVE1mCeoWrFKnz/VEKxph5waQZzibxXONxsbGjOxQpcVhifJKA3lnegvoZcY33o", + "xg1UAoz3c6jSSzQQeIMiNvkDXEEkQAGVXu0Ulqxzd6VXTdlQcJFnTJE5ONPFNfApYTOYdYPViiYpFCmB", + "LrwSVgqhR9TVDWFLiMYY6BR99Wo07xYDeznfopSGtpTubHwRltMQE2ADLW+oajJHddIojA7XXiwgx4T3", + "O9Pv/ecKeJSPbepVdwjLIsrGx0K4IJZsuFeNdgPrrkR4O0GNalJ9TEiHEmJcwfaBIi0aSlYEDhG2x2SA", + "R+RYO64vKjBk2nCOkUwFekIEeT94l4C/qbF0TBGAKDvlkWB4GjfXU5Ox8jhovERzBBim6+xORfubdHgo", + "mA5l9+tXVx9+Kb/EYvbKOZXSkG4+1ieRs3455huXrh4TLQZroU9cD8r/5hO02llKduUq1CDCrG32hsrC", + "t7iXNHn23mRpoBdhZtYERvW9fA71y7ERinkpjACUDQWGtiOVggvvA2V9rZukZQj1AqSEItgES6Eg08KH", + "WR2Q/NOFT+7AnvUyPwpvHY/+A0KG7YoGayi8bQpJYDlIijUTqHM+j7FCJKypgV5GxR3SatB9O/S1/e5z", + 
"ivjyfrvVq0N4D+dif4VsH3rHVA/z8elaECccHMy9WolIjtDMMs5BZt6I2y3twNtpMjGvclHnVlSJz2bQ", + "Xo9OO7aDmyWVmnl/lZ0nVJSV4wq2J1bt46uO+x2PgbYypAU9SijdIYp71VWrFNzLewHvj03fWQlRZgOW", + "wbN+PYruYbhi+RVgYtYQmWKk4AftY2MmIZ+hQSr4jNystr7aQlUBh+LhjJBTbqMDvftIuwJpZ3L+QO+a", + "f4OzFrWtMOM00LNLng6zwkov8o7czw+zg+cN8SYFhl/ecX47yBGz6w0f8pG7wZIw7TrBs7Hqjb5/R0eE", + "isjPQpESoM6tIfhrZAmJdxTB7CxRGiH0D6DEGZCJKkXKC/+YDDJmqDSm4skQIA18xHO1gcINnkSAc7Lb", + "k5XVffZ5R8WCSGh8M45NwOpymlomroZUI92ZwyxtzrgQEuIZ0c/UJmoOkW2Y5xj/M2daUrk9Jk1qG1Up", + "NdQglvd6SwZHyWYhjbNkH4dlKW4yZGtZqK6UUgeYdqp9bfs6pU0/c9TnELldUuVExC1Z0YLkQkrI4x7p", + "EG8L1VpIyEqBXpgpx46FNo+ENcZ1clKKJRFVLgqwhdDSFDQ0V805RdkLIle2JAos7WDKANsnouORU5rb", + "15pnM5TX9hba8Jt/YfrY9BVN+ju76My6CAzEF4By6e4chmzjPrxIODYjU1cpmxaRF2yDdAMydeQXRMsa", + "psS16FbhdwefSiBrppQFJdDSDStLzB7BNpFDQ/AHSqN2QHY+Qz/oa4YOb+1MIlakrsztGNKvxDzgPM7I", + "RvRKinq5iuoDBDj9013W7mEfj/KzqtEnEUNEzRTPyFoo7Z7FdqRmyY0L6Ge54FqKsmwr8qycv3RG3x/p", + "5jTP9SshruY0v3qIj3AudFhpMfUpFbq+u81MspODcdxLQW94huSh9qdZt+3Qq9XR82je2eF+PcPDPk1+", + "BOb7/cx1v13jtL+w7rrafDb9FjrlhGqxZnn6uP21vF8HfVZT3CuZadFWIbZZaLAZ8oH4HgvuTMg9+2gG", + "TpNlVE+J4xHOrQM5kfkvivHdcckCHA8auEP7fMcJWFk+KAZ2AEBIbSIEXUtbujgW0gLDEUubOAWdUrqA", + "jrxw0PfvbrCZEe4dKA13AqrnjRwA/MxqMKY2I6b1bJ6Ljf/+sEmZeRTwt7upvMU8hpwqzxvSktat0iey", + "GuAI6QIEOz0QLzAJxnysH2IoRT/y8o8AGPZMbMEwyj/xUDAWlJVQZKkqxWdBBzaNnusuxjIa3ddztJw8", + "p7WvBGzGriW4xEpW+pdtc2JFDSmJ0LyvEecFbMDGaP0OUtg6vtPInAWlLfPb0SiIKivhGloOmy7bU41S", + "KLsG31eFzqQAqNDi21W0pTwR4yqBHe2LW3sW+bKNwW5SHWMRa3eK7NG1JDVDG57ZY6LGHiUD0TUratrC", + "nzpU5GjrEs1RTqCq93zI/BNz7DQ/2xHe+gFOff+UKOMx8X4cHzqYBaVRt4sB7fVMrtXQqedpx+Q4lVkw", + "FOFsRbBrWxJv+Iaq6A0f1mr2Sb55iY3cJyZ4hNhvNpCjVOOeQlC4x9CA5cTlQEJq5wCFfTCYLglt/go4", + "4SKqeXxDVXjFNFld/Q92YmzEuHtoH2Gjb/yH776zBAcjqpNsMV2iNJD13XT8f8hJ3HkQB8dL0YgCF8q7", + "QzXmqds9O7CBqMuCcLOfRvbHGsHuFnNcfErmtR+oLMWNLWIcP1FfgrfnWurzJiYnlrNwLXs/6alLONzV", + "grAoQmRNt0RI/Mc8SP9Z05IttshnLPi+G1ErakjIGZCtF4XzuzYT7xavph4wr4gRfiq7bjZ2zGi4rRkl", + "Atpc5L5smyBregXxNqCDiOWfuTaMU9VzVGqYK7uznX0suMX79ExrWsRKAEw0u21xB5/w3PT+n03YajyV", + "z/9YlTT3Jatd8bk2n8Gq9p649ArWu8Oc+3zNk0ColN8QrfRpMoojtKkHsq5UzM9QcawW2L0S4L26YHda", + "xkilcKfG0Y4A8VFLue9duJ8Yzt6S4lK/+xYXVz7+NLuTzBA9tIwx4P+JdqXlXtGLbEtXUI/XY4ulf4Jd", + "aCXiScBq1eBzsckkLNQ+RxqrB5+LTQOwCrpbxnMJVFm/o7PX7tnaJEBm3DyjrdduMKuGUQpYMN6wWsar", + "WideQZgHmW8jhMXWBETrgG1uSMYwoug1LV9fg5SsGNo4c3psaeC4SI+3oLi+CQVIuJH7AzDVvAAxnrrR", + "z8fNzPVvCwxa31mlKS+oLOLmjJMcpJEayA3dquNNVcHqsM9YRSNZqJ0tJDJbIWlbQMqtszbf0ZAUAKT3", + "aFEaYQlCJ+2EFcgqhrQYMPz0YfhLWILWdJOVYolRvwMHwuW5RtOhfUAKjkp0K92NW7efR7HfYfc0WIrE", + "MSItcNYxU+w+969xK/ER+jNneufJtxrObhi29XS2B9MjlS+b8AxLLP3zmIqcd4mZ4uh5L6r6NCWe9iDa", + "xKRLdE+rPrCL6F/h0i7EKvTxxSrbLhyp+HyrV8hQ36B2BGCAauIKaO48xPqKuJ6iwiJl6rIbHKins9p9", + "fy8NgIeKFOXOenva4KBjxjmkwufufAZZJaosH+PbaqsVFc7I4CBtwzhAH5EJYWDdwe9GhfpdrZxorUJe", + "hxY5HSwkts9WVuW7VAZDSqYBjt42YIgF8jI8wla1hrFWQRUz9Y9zb+xuK9ECkyCUSMhriUrmG7rdX/hx", + "IPv8+fenz588/fXp8y+IaUAKtgTV1DToFE5sXBMZ72qNPq0zYm95Or0JPluIRZy3Xvqwt7Ap7qxZbqua", + "ZMS9spGHaKcTF0AqOLdfIu+ovcJxmrCIP9d2pRZ57zuWQsHH3zMpyjJdUybIVQnzS2q3IgOMeYFUIBVT", + "2jDCtv2U6cYpW61QuYhZw69tbijBc/DaZ0cFTA/4cqUWMuTTi/wMczE4mxOBTVU6XmXtRLvW5d5pVr+H", + "QiO628yBVKJyoj1bkBREGLMlawh6dac2RX165KYbmK112E0RonN+T5PeKXcvYbEgu7l9uxS3TnN6s4kJ", + "8cIfyiNIc8i6MZxn5BhO0hgG/jT8I5E45d64Rljux+AVyffBjqjw057XREgaMgq0foKMBHkgAAPx0K2g", + "1SjILspNLq2NAa0R3vzcFT9+bMzSeyNTEBLfYQ94cSxz0y4EUzhw/uDE3j8GpERLeT9ECa3l7wuP9qw3", + "XCTRFjmlidagLFsSfbEwCohXX4c484FXSS8cXQqhiXmZlmUijN3qcfBMxYRjngTympafnmt8y6TSp4gP", + 
"KN4OB27FYcsxki0q1b0n5HxFR4EVhSh/Eqj4G4yt/08wO5u8Hd0szvDfuwNRJURL6+29CBZw4OQGx7SO", + "XU++IHNX7qeSkDPVdSi48SJNiLcFyRbOvxY2uhv7e+cyQb8IfYfjsPD+QOSnyMgWPAcczM1R/4OZ0wAH", + "SJ6WFKn2CCWBvxSvi4uq77l27lga5rhUTlHixgNTOfXLxY9dHq4DL69aQX+do2/9Fm4TF36ztrG5ykZX", + "mLm8fKfnYxKKpavBmO6Y4+xeysLcvSjMJ0lwZlHpxnCQJAmrEbn3Za/p+EtGeRrau2jE/YEC8iuLfjMa", + "PgoWNbfjhQKoGCvu2bpYTIMXg+Cm2wtyyR8RtaL+beH+fPr8i8l0Arxem8U33yfTifv6PvVSKzbJuNIm", + "kU7PR9RVE3igSEW3Y4LZ96bOSeK3yRT06UUapdk8/ab73uwZPlxdAMIZR1aP7MXeoC5/zv9PALSTGDqH", + "NZwYS5JNeqCwFfsyBf0ylBbfpn4fqPbR4b41K/c6ybUKsdxOJ0ubpAyrk/zqatV92m33EAzkC3RLv0sa", + "MIuYxFpbk0dTRUndRhRkcd0SFTIw8jqvJdPbc4N/r3Znv16lkkF9F9IzuZxfwQLvZF8troB7H7MmmVOt", + "vHT9naAlSp/WMYAbmVOUM/KNrRDirsW/P5j/G3z+t2fF48+f/Nv8b4+fP87h2fMvHz+mXz6jT778/Ak8", + "/dvzZ4/hyeKLL+dPi6fPns6fPX32xfMv88+fPZk/++LLf3tgKN2AbAH1lX9eTP53dlouRXb65iy7MMA2", + "OKEV+wHM3qCGbYEJChGpOV6xsKasnLzwP/0vf1HOcrFuhve/Tlw9yMlK60q9ODm5ubmZxV1OlpgDJdOi", + "zlcnfh7MZdl6r7w5C3FB1vcPd7SxOeGmhvx+5tvbb84vyOmbs1lDMJMXk8ezx7MnmE+xAk4rNnkx+Rx/", + "wtOzwn0/wSzaJ8oV4zkJoaO30963qrKlesynZUgDav5aAS2RRZo/1qAly/0nCbTYuv+rG7pcgpxhxJj9", + "6frpiX97nHxweWVud307ib3RTj60kvMUe3oGf6qkJ8MrIa7Qkca/hh6ojneYQW/YhrPCoN+2RLcnddYw", + "QkSx91SZvHiX0tg6v+2qnpcsN8L1zBOw2Z2IvkLOpYZ/oH5+Yvknms0DNzQc7nH25fsPz/92m3TU7vts", + "Nc6OO7921/Cj80Bo7jEXQYDxqhhPFVb0zxrktlkSugdN4gWMFHeSvybtwObtWrl6TQ6uGfnZOWHhV8u4", + "gqu7C4StJFwzUavQaWAJZojUCsLr9T2WMkafZqS5p48fe/binuoR7Z64IxFvadss2nNpPCTZS+xymHpn", + "mcVkiI/+sfhZ2dR8BpuMUxsuhHEEa3plDcLoKUykyxXgMOqCDxDJITDObYu/QT5iHca75TmzQCTyr/a5", + "9QAH8OEDsTq/ZNZY4Zw2V1Da7ES8yV9yO508O5BQdqrVW1nKE+D/SEsDMhQ+qZeF4Mmng+CMWy93c+3Z", + "6/l2Onn+KXFwxg3vpCXBlvZCxrj2xGHgV1zccN/SyFL1ek3lFiUlPWaPXYY69IDw7eyRsBc7Ncf73cRe", + "C1hIrQLJ1sCxfvHtvuvt5IPLtbbnMoxNeycuRiPqMPKS3dXsZI41Y8c2BRU1Hl4KvpTVyQc8oYO/n7i3", + "ZvojmgCslHjiH9ADLW0erPTHFgo/6I1ZyO7hTJtovJzqfFVXJx/wPyjwRSuyNShO9IafoMvkyYcWItzn", + "HiLavzfd4xaYOt0DJxYLhfLQrs8nH+y/0UQtwmyEqraA9E3U6OsV5FeT9LXYKdAT9SJWHqbzEgrLnJ6N", + "6MCFjjsddaDfogyjyOsfCFsQ6E7BlJ/hgHNrE1afYAX7bYNL//OW58kf+9vcyss78POJf46lROt2yw+t", + "P9tHTq1qXYibaBY0ZFjbXR8y87FW3b9PbijT2UJIl9iVLjTIfmcNtDxxxcM6vzYVOXpfsMxI9GMcmZv8", + "9YQ6VE8qoRJk+5beRErMU2xsJQRQ+iuBL5qh22mTzRlHCopvqEZ/YT/2zR29ewlrQG91YzjupyXD3EhS", + "0CKnyki0pCkV0H4s3CaP3aeWNr6iBfEppTLSyB6n7pXcWtqfQxJJspuXcA2loRgiJNnHe/5gWeb5488/", + "3fTnIK9ZDuQC1pWQVLJyS37mIejwaFb8LZK3pE4tHEje+pRLetOOY5TpTDrtGpY+5xIQvSEryovS5R4R", + "NdbzNbSJriIiclY0V5gv6VoJiQDYzMFQWPctNSPnwbkNXcVq/4IqLNmgDRZz9dtJKDq+WeeHEVeJecYY", + "frAEnjmOlM1FsXVFDCeS3uiNzSfSY3tWzhzgiT0pMPXVCToDjXy0i//c6EljvSMqRILG8d1781ZWIK+9", + "rqRRo704OcHgyZVQ+gSf+m0VW/zxfcCcLxU/qSS7xtpTiDQhmXnBlpnTQzV1XydPZ48nt/8vAAD//1hz", + "GYWbDgEA", } // GetSwagger returns the content of the embedded swagger specification file @@ -366,16 +363,16 @@ var swaggerSpec = []string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -393,7 +390,7 @@ func decodeSpecCached() func() ([]byte, error) { // 
Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -407,12 +404,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/experimental/experimental_routes.yml b/daemon/algod/api/server/v2/generated/experimental/routes.yml similarity index 83% rename from daemon/algod/api/server/v2/generated/experimental/experimental_routes.yml rename to daemon/algod/api/server/v2/generated/experimental/routes.yml index d62923b4bf..3df6d0c7ea 100644 --- a/daemon/algod/api/server/v2/generated/experimental/experimental_routes.yml +++ b/daemon/algod/api/server/v2/generated/experimental/routes.yml @@ -12,9 +12,9 @@ output-options: - participating - nonparticipating - data - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go index 57b30b5ac9..46a3d600e9 100644 --- a/daemon/algod/api/server/v2/generated/model/types.go +++ b/daemon/algod/api/server/v2/generated/model/types.go @@ -1,13 +1,13 @@ // Package model provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package model import ( "encoding/json" - "time" - openapi_types "github.com/algorand/oapi-codegen/pkg/types" + "github.com/algorand/go-algorand/data/basics" + openapi_types "github.com/oapi-codegen/runtime/types" ) const ( @@ -21,11 +21,10 @@ const ( AccountSigTypeSig AccountSigType = "sig" ) -// Defines values for AddressRole. +// Defines values for TransactionProofHashtype. const ( - AddressRoleFreezeTarget AddressRole = "freeze-target" - AddressRoleReceiver AddressRole = "receiver" - AddressRoleSender AddressRole = "sender" + TransactionProofHashtypeSha256 TransactionProofHashtype = "sha256" + TransactionProofHashtypeSha512256 TransactionProofHashtype = "sha512_256" ) // Defines values for Format. @@ -52,10 +51,10 @@ const ( TxTypeStpf TxType = "stpf" ) -// Defines values for TransactionProofResponseHashtype. +// Defines values for AccountInformationParamsExclude. 
const ( - TransactionProofResponseHashtypeSha256 TransactionProofResponseHashtype = "sha256" - TransactionProofResponseHashtypeSha512256 TransactionProofResponseHashtype = "sha512_256" + AccountInformationParamsExcludeAll AccountInformationParamsExclude = "all" + AccountInformationParamsExcludeNone AccountInformationParamsExclude = "none" ) // Defines values for AccountInformationParamsFormat. @@ -64,12 +63,6 @@ const ( AccountInformationParamsFormatMsgpack AccountInformationParamsFormat = "msgpack" ) -// Defines values for AccountInformationParamsExclude. -const ( - AccountInformationParamsExcludeAll AccountInformationParamsExclude = "all" - AccountInformationParamsExcludeNone AccountInformationParamsExclude = "none" -) - // Defines values for AccountApplicationInformationParamsFormat. const ( AccountApplicationInformationParamsFormatJson AccountApplicationInformationParamsFormat = "json" @@ -189,10 +182,10 @@ type Account struct { IncentiveEligible *bool `json:"incentive-eligible,omitempty"` // LastHeartbeat The round in which this account last went online, or explicitly renewed their online status. - LastHeartbeat *uint64 `json:"last-heartbeat,omitempty"` + LastHeartbeat *basics.Round `json:"last-heartbeat,omitempty"` // LastProposed The round in which this account last proposed the block. - LastProposed *uint64 `json:"last-proposed,omitempty"` + LastProposed *basics.Round `json:"last-proposed,omitempty"` // MinBalance MicroAlgo balance required by the account. // @@ -212,7 +205,7 @@ type Account struct { Rewards uint64 `json:"rewards"` // Round The round for which this information is relevant. - Round uint64 `json:"round"` + Round basics.Round `json:"round"` // SigType Indicates what type of signature is used by this account, must be one of: // * sig @@ -277,13 +270,13 @@ type AccountParticipation struct { StateProofKey *[]byte `json:"state-proof-key,omitempty"` // VoteFirstValid \[voteFst\] First round for which this participation is valid. - VoteFirstValid uint64 `json:"vote-first-valid"` + VoteFirstValid basics.Round `json:"vote-first-valid"` // VoteKeyDilution \[voteKD\] Number of subkeys in each batch of participation keys. VoteKeyDilution uint64 `json:"vote-key-dilution"` // VoteLastValid \[voteLst\] Last round for which this participation is valid. - VoteLastValid uint64 `json:"vote-last-valid"` + VoteLastValid basics.Round `json:"vote-last-valid"` // VoteParticipationKey \[vote\] root participation public key (if any) currently registered for this round. VoteParticipationKey []byte `json:"vote-participation-key"` @@ -300,7 +293,7 @@ type AccountStateDelta struct { // AppCallLogs The logged messages from an app call along with the app ID and outer transaction ID. Logs appear in the same order that they were emitted. type AppCallLogs struct { // ApplicationIndex The application from which the logs were generated - ApplicationIndex uint64 `json:"application-index"` + ApplicationIndex basics.AppIndex `json:"application-index"` // Logs An array of logs Logs [][]byte `json:"logs"` @@ -312,7 +305,7 @@ type AppCallLogs struct { // Application Application index and its parameters type Application struct { // Id \[appidx\] application index. - Id uint64 `json:"id"` + Id basics.AppIndex `json:"id"` // Params Stores the global information associated with an application. Params ApplicationParams `json:"params"` @@ -330,7 +323,7 @@ type ApplicationInitialStates struct { AppLocals *[]ApplicationKVStorage `json:"app-locals,omitempty"` // Id Application index. 
- Id uint64 `json:"id"` + Id basics.AppIndex `json:"id"` } // ApplicationKVStorage An application's global/local/box state. @@ -348,13 +341,13 @@ type ApplicationLocalReference struct { Account string `json:"account"` // App Application ID of the local state application. - App uint64 `json:"app"` + App basics.AppIndex `json:"app"` } // ApplicationLocalState Stores local state associated with an application. type ApplicationLocalState struct { // Id The application which this local state is for. - Id uint64 `json:"id"` + Id basics.AppIndex `json:"id"` // KeyValue Represents a key-value store for use in an application. KeyValue *TealKeyValueStore `json:"key-value,omitempty"` @@ -420,7 +413,7 @@ type ApplicationStateSchema struct { // Asset Specifies both the unique identifier and the parameters for an asset type Asset struct { // Index unique asset identifier - Index uint64 `json:"index"` + Index basics.AssetIndex `json:"index"` // Params AssetParams specifies the parameters for an asset. // @@ -439,8 +432,8 @@ type AssetHolding struct { // Amount \[a\] number of units held. Amount uint64 `json:"amount"` - // AssetId Asset ID of the holding. - AssetID uint64 `json:"asset-id"` + // AssetID Asset ID of the holding. + AssetID basics.AssetIndex `json:"asset-id"` // IsFrozen \[f\] whether or not the holding is frozen. IsFrozen bool `json:"is-frozen"` @@ -452,7 +445,7 @@ type AssetHoldingReference struct { Account string `json:"account"` // Asset Asset ID of the holding. - Asset uint64 `json:"asset"` + Asset basics.AssetIndex `json:"asset"` } // AssetParams AssetParams specifies the parameters for an asset. @@ -530,13 +523,13 @@ type AvmValue struct { // Box Box name and its content. type Box struct { - // Name \[name\] box name, base64 encoded + // Name The box name, base64 encoded Name []byte `json:"name"` // Round The round for which this information is relevant - Round uint64 `json:"round"` + Round basics.Round `json:"round"` - // Value \[value\] box value, base64 encoded. + // Value The box value, base64 encoded. Value []byte `json:"value"` } @@ -549,7 +542,7 @@ type BoxDescriptor struct { // BoxReference References a box of an application. type BoxReference struct { // App Application ID which this box belongs to - App uint64 `json:"app"` + App basics.AppIndex `json:"app"` // Name Base64 encoded box name Name []byte `json:"name"` @@ -558,11 +551,11 @@ type BoxReference struct { // BuildVersion defines model for BuildVersion. type BuildVersion struct { Branch string `json:"branch"` - BuildNumber uint64 `json:"build_number"` + BuildNumber int `json:"build_number"` Channel string `json:"channel"` CommitHash string `json:"commit_hash"` - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` + Major int `json:"major"` + Minor int `json:"minor"` } // DebugSettingsProf algod mutex and blocking profiling state. @@ -580,25 +573,25 @@ type DryrunRequest struct { Apps []Application `json:"apps"` // LatestTimestamp LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to. - LatestTimestamp uint64 `json:"latest-timestamp"` + LatestTimestamp int64 `json:"latest-timestamp"` // ProtocolVersion ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in. ProtocolVersion string `json:"protocol-version"` // Round Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to. 
- Round uint64 `json:"round"` + Round basics.Round `json:"round"` Sources []DryrunSource `json:"sources"` Txns []json.RawMessage `json:"txns"` } // DryrunSource DryrunSource is TEAL source text that gets uploaded, compiled, and inserted into transactions or application state. type DryrunSource struct { - AppIndex uint64 `json:"app-index"` + AppIndex basics.AppIndex `json:"app-index"` // FieldName FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex]. FieldName string `json:"field-name"` Source string `json:"source"` - TxnIndex uint64 `json:"txn-index"` + TxnIndex int `json:"txn-index"` } // DryrunState Stores the TEAL eval step data @@ -607,10 +600,10 @@ type DryrunState struct { Error *string `json:"error,omitempty"` // Line Line number - Line uint64 `json:"line"` + Line int `json:"line"` // Pc Program counter - Pc uint64 `json:"pc"` + Pc int `json:"pc"` Scratch *[]TealValue `json:"scratch,omitempty"` Stack []TealValue `json:"stack"` } @@ -621,10 +614,10 @@ type DryrunTxnResult struct { AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"` // BudgetAdded Budget added during execution of app call transaction. - BudgetAdded *uint64 `json:"budget-added,omitempty"` + BudgetAdded *int `json:"budget-added,omitempty"` // BudgetConsumed Budget consumed during execution of app call transaction. - BudgetConsumed *uint64 `json:"budget-consumed,omitempty"` + BudgetConsumed *int `json:"budget-consumed,omitempty"` // Disassembly Disassembled program line by line. Disassembly []string `json:"disassembly"` @@ -676,7 +669,7 @@ type Genesis struct { Network string `json:"network"` Proto string `json:"proto"` Rwd string `json:"rwd"` - Timestamp uint64 `json:"timestamp"` + Timestamp int64 `json:"timestamp"` } // GenesisAllocation defines model for GenesisAllocation. @@ -685,7 +678,7 @@ type GenesisAllocation struct { Comment string `json:"comment"` State struct { Algo uint64 `json:"algo"` - Onl *uint64 `json:"onl,omitempty"` + Onl int `json:"onl"` Sel *string `json:"sel,omitempty"` Stprf *string `json:"stprf,omitempty"` Vote *string `json:"vote,omitempty"` @@ -695,15 +688,6 @@ type GenesisAllocation struct { } `json:"state"` } -// KvDelta A single Delta containing the key, the previous value and the current value for a single round. -type KvDelta struct { - // Key The key, base64 encoded. - Key *[]byte `json:"key,omitempty"` - - // Value The new value of the KV store entry, base64 encoded. - Value *[]byte `json:"value,omitempty"` -} - // LedgerStateDelta Ledger StateDelta object type LedgerStateDelta = map[string]interface{} @@ -723,7 +707,7 @@ type LightBlockHeaderProof struct { Proof []byte `json:"proof"` // Treedepth Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root. - Treedepth uint64 `json:"treedepth"` + Treedepth int `json:"treedepth"` } // ParticipationKey Represents a participation key used by the node. @@ -732,10 +716,10 @@ type ParticipationKey struct { Address string `json:"address"` // EffectiveFirstValid When registered, this is the first round it may be used. - EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"` + EffectiveFirstValid *basics.Round `json:"effective-first-valid,omitempty"` // EffectiveLastValid When registered, this is the last round it may be used. 
- EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"` + EffectiveLastValid *basics.Round `json:"effective-last-valid,omitempty"` // Id The key's ParticipationID. Id string `json:"id"` @@ -744,34 +728,34 @@ type ParticipationKey struct { Key AccountParticipation `json:"key"` // LastBlockProposal Round when this key was last used to propose a block. - LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"` + LastBlockProposal *basics.Round `json:"last-block-proposal,omitempty"` // LastStateProof Round when this key was last used to generate a state proof. - LastStateProof *uint64 `json:"last-state-proof,omitempty"` + LastStateProof *basics.Round `json:"last-state-proof,omitempty"` // LastVote Round when this key was last used to vote. - LastVote *uint64 `json:"last-vote,omitempty"` + LastVote *basics.Round `json:"last-vote,omitempty"` } // PendingTransactionResponse Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details. type PendingTransactionResponse struct { // ApplicationIndex The application index if the transaction was found and it created an application. - ApplicationIndex *uint64 `json:"application-index,omitempty"` + ApplicationIndex *basics.AppIndex `json:"application-index,omitempty"` // AssetClosingAmount The number of the asset's unit that were transferred to the close-to address. AssetClosingAmount *uint64 `json:"asset-closing-amount,omitempty"` // AssetIndex The asset index if the transaction was found and it created an asset. - AssetIndex *uint64 `json:"asset-index,omitempty"` + AssetIndex *basics.AssetIndex `json:"asset-index,omitempty"` // CloseRewards Rewards in microalgos applied to the close remainder to account. - CloseRewards *uint64 `json:"close-rewards,omitempty"` + CloseRewards *int `json:"close-rewards,omitempty"` // ClosingAmount Closing amount for the transaction. ClosingAmount *uint64 `json:"closing-amount,omitempty"` // ConfirmedRound The round where this transaction was confirmed, if present. - ConfirmedRound *uint64 `json:"confirmed-round,omitempty"` + ConfirmedRound *basics.Round `json:"confirmed-round,omitempty"` // GlobalStateDelta Application state delta. GlobalStateDelta *StateDelta `json:"global-state-delta,omitempty"` @@ -804,7 +788,7 @@ type ScratchChange struct { NewValue AvmValue `json:"new-value"` // Slot The scratch slot written. - Slot uint64 `json:"slot"` + Slot int `json:"slot"` } // SimulateInitialStates Initial states of resources that were accessed during simulation. @@ -828,13 +812,13 @@ type SimulateRequest struct { ExecTraceConfig *SimulateTraceConfig `json:"exec-trace-config,omitempty"` // ExtraOpcodeBudget Applies extra opcode budget during simulation for each transaction group. - ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"` + ExtraOpcodeBudget *int `json:"extra-opcode-budget,omitempty"` // FixSigners If true, signers for transactions that are missing signatures will be fixed during evaluation. FixSigners *bool `json:"fix-signers,omitempty"` // Round If provided, specifies the round preceding the simulation. State changes through this round will be used to run this simulation. Usually only the 4 most recent rounds will be available (controlled by the node config value MaxAcctLookback). If not specified, defaults to the latest available round. - Round *uint64 `json:"round,omitempty"` + Round *basics.Round `json:"round,omitempty"` // TxnGroups The transaction groups to simulate. 
TxnGroups []SimulateRequestTransactionGroup `json:"txn-groups"` @@ -864,13 +848,13 @@ type SimulateTraceConfig struct { // SimulateTransactionGroupResult Simulation result for an atomic transaction group type SimulateTransactionGroupResult struct { // AppBudgetAdded Total budget added during execution of app calls in the transaction group. - AppBudgetAdded *uint64 `json:"app-budget-added,omitempty"` + AppBudgetAdded *int `json:"app-budget-added,omitempty"` // AppBudgetConsumed Total budget consumed during execution of app calls in the transaction group. - AppBudgetConsumed *uint64 `json:"app-budget-consumed,omitempty"` + AppBudgetConsumed *int `json:"app-budget-consumed,omitempty"` // FailedAt If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions. - FailedAt *[]uint64 `json:"failed-at,omitempty"` + FailedAt *[]int `json:"failed-at,omitempty"` // FailureMessage If present, indicates that the transaction group failed and specifies why that happened FailureMessage *string `json:"failure-message,omitempty"` @@ -885,7 +869,7 @@ type SimulateTransactionGroupResult struct { // SimulateTransactionResult Simulation result for an individual transaction type SimulateTransactionResult struct { // AppBudgetConsumed Budget used during execution of an app call transaction. This value includes budged used by inner app calls spawned by this transaction. - AppBudgetConsumed *uint64 `json:"app-budget-consumed,omitempty"` + AppBudgetConsumed *int `json:"app-budget-consumed,omitempty"` // ExecTrace The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way. ExecTrace *SimulationTransactionExecTrace `json:"exec-trace,omitempty"` @@ -894,7 +878,7 @@ type SimulateTransactionResult struct { FixedSigner *string `json:"fixed-signer,omitempty"` // LogicSigBudgetConsumed Budget used during execution of a logic sig transaction. - LogicSigBudgetConsumed *uint64 `json:"logic-sig-budget-consumed,omitempty"` + LogicSigBudgetConsumed *int `json:"logic-sig-budget-consumed,omitempty"` // TxnResult Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details. TxnResult PendingTransactionResponse `json:"txn-result"` @@ -912,19 +896,19 @@ type SimulateUnnamedResourcesAccessed struct { AppLocals *[]ApplicationLocalReference `json:"app-locals,omitempty"` // Apps The unnamed applications that were referenced. The order of this array is arbitrary. - Apps *[]uint64 `json:"apps,omitempty"` + Apps *[]basics.AppIndex `json:"apps,omitempty"` // AssetHoldings The unnamed asset holdings that were referenced. The order of this array is arbitrary. AssetHoldings *[]AssetHoldingReference `json:"asset-holdings,omitempty"` // Assets The unnamed assets that were referenced. The order of this array is arbitrary. - Assets *[]uint64 `json:"assets,omitempty"` + Assets *[]basics.AssetIndex `json:"assets,omitempty"` // Boxes The unnamed boxes that were referenced. The order of this array is arbitrary. Boxes *[]BoxReference `json:"boxes,omitempty"` // ExtraBoxRefs The number of extra box references used to increase the IO budget. This is in addition to the references defined in the input transaction group and any referenced to unnamed boxes. 
- ExtraBoxRefs *uint64 `json:"extra-box-refs,omitempty"` + ExtraBoxRefs *int `json:"extra-box-refs,omitempty"` } // SimulationEvalOverrides The set of parameters and limits override during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways. @@ -936,34 +920,34 @@ type SimulationEvalOverrides struct { AllowUnnamedResources *bool `json:"allow-unnamed-resources,omitempty"` // ExtraOpcodeBudget The extra opcode budget added to each transaction group during simulation - ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"` + ExtraOpcodeBudget *int `json:"extra-opcode-budget,omitempty"` // FixSigners If true, signers for transactions that are missing signatures will be fixed during evaluation. FixSigners *bool `json:"fix-signers,omitempty"` // MaxLogCalls The maximum log calls one can make during simulation - MaxLogCalls *uint64 `json:"max-log-calls,omitempty"` + MaxLogCalls *int `json:"max-log-calls,omitempty"` // MaxLogSize The maximum byte number to log during simulation - MaxLogSize *uint64 `json:"max-log-size,omitempty"` + MaxLogSize *int `json:"max-log-size,omitempty"` } // SimulationOpcodeTraceUnit The set of trace information and effect from evaluating a single opcode. type SimulationOpcodeTraceUnit struct { // Pc The program counter of the current opcode being evaluated. - Pc uint64 `json:"pc"` + Pc int `json:"pc"` // ScratchChanges The writes into scratch slots. ScratchChanges *[]ScratchChange `json:"scratch-changes,omitempty"` // SpawnedInners The indexes of the traces for inner transactions spawned by this opcode, if any. - SpawnedInners *[]uint64 `json:"spawned-inners,omitempty"` + SpawnedInners *[]int `json:"spawned-inners,omitempty"` // StackAdditions The values added by this opcode to the stack. StackAdditions *[]AvmValue `json:"stack-additions,omitempty"` // StackPopCount The number of deleted stack values by this opcode. - StackPopCount *uint64 `json:"stack-pop-count,omitempty"` + StackPopCount *int `json:"stack-pop-count,omitempty"` // StateChanges The operations against the current application's states. StateChanges *[]ApplicationStateOperation `json:"state-changes,omitempty"` @@ -1017,10 +1001,10 @@ type StateProofMessage struct { BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"` // FirstAttestedRound The first round the message attests to. - FirstAttestedRound uint64 `json:"FirstAttestedRound"` + FirstAttestedRound basics.Round `json:"FirstAttestedRound"` // LastAttestedRound The last round the message attests to. - LastAttestedRound uint64 `json:"LastAttestedRound"` + LastAttestedRound basics.Round `json:"LastAttestedRound"` // LnProvenWeight An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof. LnProvenWeight uint64 `json:"LnProvenWeight"` @@ -1052,6 +1036,31 @@ type TealValue struct { Uint uint64 `json:"uint"` } +// TransactionProof Proof of transaction in a block. +type TransactionProof struct { + // Hashtype The type of hash function used to create the proof, must be one of: + // * sha512_256 + // * sha256 + Hashtype TransactionProofHashtype `json:"hashtype"` + + // Idx Index of the transaction in the block's payset. + Idx uint64 `json:"idx"` + + // Proof Proof of transaction membership. + Proof []byte `json:"proof"` + + // Stibhash Hash of SignedTxnInBlock for verifying proof. 
+ Stibhash []byte `json:"stibhash"` + + // Treedepth Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root. + Treedepth uint64 `json:"treedepth"` +} + +// TransactionProofHashtype The type of hash function used to create the proof, must be one of: +// * sha512_256 +// * sha256 +type TransactionProofHashtype string + // Version algod version information. type Version struct { Build BuildVersion `json:"build"` @@ -1060,62 +1069,32 @@ type Version struct { Versions []string `json:"versions"` } -// AccountID defines model for account-id. -type AccountID = string - // Address defines model for address. -type Address = string - -// AddressRole defines model for address-role. -type AddressRole string +type Address = basics.Address -// AfterTime defines model for after-time. -type AfterTime = time.Time +// ApplicationId defines model for application-id. +type ApplicationId = basics.AppIndex -// AssetID defines model for asset-id. -type AssetID uint64 - -// BeforeTime defines model for before-time. -type BeforeTime = time.Time +// AssetId defines model for asset-id. +type AssetId = basics.AssetIndex // Catchpoint defines model for catchpoint. type Catchpoint = string -// CurrencyGreaterThan defines model for currency-greater-than. -type CurrencyGreaterThan uint64 - -// CurrencyLessThan defines model for currency-less-than. -type CurrencyLessThan uint64 - -// ExcludeCloseTo defines model for exclude-close-to. -type ExcludeCloseTo = bool - // Format defines model for format. type Format string // Limit defines model for limit. -type Limit uint64 +type Limit = uint64 // Max defines model for max. -type Max uint64 - -// MaxRound defines model for max-round. -type MaxRound uint64 - -// MinRound defines model for min-round. -type MinRound uint64 +type Max = uint64 // Next defines model for next. type Next = string -// NotePrefix defines model for note-prefix. -type NotePrefix = string - // Round defines model for round. -type Round uint64 - -// RoundNumber defines model for round-number. -type RoundNumber uint64 +type Round = basics.Round // SigType defines model for sig-type. type SigType string @@ -1135,7 +1114,7 @@ type AccountApplicationResponse struct { CreatedApp *ApplicationParams `json:"created-app,omitempty"` // Round The round for which this information is relevant. - Round uint64 `json:"round"` + Round basics.Round `json:"round"` } // AccountAssetResponse defines model for AccountAssetResponse. @@ -1155,7 +1134,7 @@ type AccountAssetResponse struct { CreatedAsset *AssetParams `json:"created-asset,omitempty"` // Round The round for which this information is relevant. - Round uint64 `json:"round"` + Round basics.Round `json:"round"` } // AccountAssetsInformationResponse defines model for AccountAssetsInformationResponse. @@ -1166,7 +1145,7 @@ type AccountAssetsInformationResponse struct { NextToken *string `json:"next-token,omitempty"` // Round The round for which this information is relevant. - Round uint64 `json:"round"` + Round basics.Round `json:"round"` } // AccountResponse Account information at a given round. @@ -1266,7 +1245,7 @@ type GetBlockTimeStampOffsetResponse struct { // GetSyncRoundResponse defines model for GetSyncRoundResponse. type GetSyncRoundResponse struct { // Round The minimum sync round for the ledger. 
- Round uint64 `json:"round"` + Round basics.Round `json:"round"` } // LedgerStateDeltaForTransactionGroupResponse Ledger StateDelta object @@ -1308,13 +1287,13 @@ type NodeStatusResponse struct { CatchpointVerifiedKvs *uint64 `json:"catchpoint-verified-kvs,omitempty"` // CatchupTime CatchupTime in nanoseconds - CatchupTime uint64 `json:"catchup-time"` + CatchupTime int64 `json:"catchup-time"` // LastCatchpoint The last catchpoint seen by the node LastCatchpoint *string `json:"last-catchpoint,omitempty"` // LastRound LastRound indicates the last round seen - LastRound uint64 `json:"last-round"` + LastRound basics.Round `json:"last-round"` // LastVersion LastVersion indicates the last consensus version supported LastVersion string `json:"last-version"` @@ -1323,7 +1302,7 @@ type NodeStatusResponse struct { NextVersion string `json:"next-version"` // NextVersionRound NextVersionRound is the round at which the next consensus version will apply - NextVersionRound uint64 `json:"next-version-round"` + NextVersionRound basics.Round `json:"next-version-round"` // NextVersionSupported NextVersionSupported indicates whether the next consensus version is supported by this node NextVersionSupported bool `json:"next-version-supported"` @@ -1332,31 +1311,31 @@ type NodeStatusResponse struct { StoppedAtUnsupportedRound bool `json:"stopped-at-unsupported-round"` // TimeSinceLastRound TimeSinceLastRound in nanoseconds - TimeSinceLastRound uint64 `json:"time-since-last-round"` + TimeSinceLastRound int64 `json:"time-since-last-round"` // UpgradeDelay Upgrade delay - UpgradeDelay *uint64 `json:"upgrade-delay,omitempty"` + UpgradeDelay *basics.Round `json:"upgrade-delay,omitempty"` // UpgradeNextProtocolVoteBefore Next protocol round - UpgradeNextProtocolVoteBefore *uint64 `json:"upgrade-next-protocol-vote-before,omitempty"` + UpgradeNextProtocolVoteBefore *basics.Round `json:"upgrade-next-protocol-vote-before,omitempty"` // UpgradeNoVotes No votes cast for consensus upgrade - UpgradeNoVotes *uint64 `json:"upgrade-no-votes,omitempty"` + UpgradeNoVotes *basics.Round `json:"upgrade-no-votes,omitempty"` // UpgradeNodeVote This node's upgrade vote UpgradeNodeVote *bool `json:"upgrade-node-vote,omitempty"` // UpgradeVoteRounds Total voting rounds for current upgrade - UpgradeVoteRounds *uint64 `json:"upgrade-vote-rounds,omitempty"` + UpgradeVoteRounds *basics.Round `json:"upgrade-vote-rounds,omitempty"` // UpgradeVotes Total votes cast for consensus upgrade - UpgradeVotes *uint64 `json:"upgrade-votes,omitempty"` + UpgradeVotes *basics.Round `json:"upgrade-votes,omitempty"` // UpgradeVotesRequired Yes votes required for consensus upgrade - UpgradeVotesRequired *uint64 `json:"upgrade-votes-required,omitempty"` + UpgradeVotesRequired *basics.Round `json:"upgrade-votes-required,omitempty"` // UpgradeYesVotes Yes votes cast for consensus upgrade - UpgradeYesVotes *uint64 `json:"upgrade-yes-votes,omitempty"` + UpgradeYesVotes *basics.Round `json:"upgrade-yes-votes,omitempty"` } // ParticipationKeyResponse Represents a participation key used by the node. @@ -1371,7 +1350,7 @@ type PendingTransactionsResponse struct { TopTransactions []map[string]interface{} `json:"top-transactions"` // TotalTransactions Total number of transactions in the pool. - TotalTransactions uint64 `json:"total-transactions"` + TotalTransactions int `json:"total-transactions"` } // PostParticipationResponse defines model for PostParticipationResponse. 
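
A note on the recurring pattern in the hunks above and below: bare uint64 fields in the generated models are replaced with named types such as basics.Round, basics.AppIndex and basics.AssetIndex. The following is a minimal sketch of why this is a wire-compatible change, assuming basics.Round is a uint64-based named type (as defined in go-algorand's data/basics package); the local Round type and the main scaffolding here are illustrative stand-ins, not part of the generated code.

package main

import (
	"encoding/json"
	"fmt"
)

// Round stands in for basics.Round: a named type whose underlying kind is uint64,
// so encoding/json still emits it as a plain JSON number.
type Round uint64

// GetSyncRoundResponse mirrors the generated model shown in the diff above.
type GetSyncRoundResponse struct {
	// Round The minimum sync round for the ledger.
	Round Round `json:"round"`
}

func main() {
	out, err := json.Marshal(GetSyncRoundResponse{Round: 1234})
	if err != nil {
		panic(err)
	}
	// Prints {"round":1234}, identical to the encoding of the old uint64 field.
	fmt.Println(string(out))
}

The practical effect is that handler code gains type safety (a round number can no longer be silently swapped with an unrelated counter), while the JSON wire format and existing clients are unaffected.
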
@@ -1398,7 +1377,7 @@ type SimulateResponse struct { InitialStates *SimulateInitialStates `json:"initial-states,omitempty"` // LastRound The round immediately preceding this simulation. State changes through this round were used to run this simulation. - LastRound uint64 `json:"last-round"` + LastRound basics.Round `json:"last-round"` // TxnGroups A result object for each transaction group that was simulated. TxnGroups []SimulateTransactionGroupResult `json:"txn-groups"` @@ -1413,7 +1392,7 @@ type StateProofResponse = StateProof // SupplyResponse Supply represents the current supply of MicroAlgos in the system type SupplyResponse struct { // CurrentRound Round - CurrentRound uint64 `json:"current_round"` + CurrentRound basics.Round `json:"current_round"` // OnlineMoney OnlineMoney OnlineMoney uint64 `json:"online-money"` @@ -1447,56 +1426,34 @@ type TransactionParametersResponse struct { GenesisId string `json:"genesis-id"` // LastRound LastRound indicates the last round seen - LastRound uint64 `json:"last-round"` + LastRound basics.Round `json:"last-round"` // MinFee The minimum transaction fee (not per byte) required for the // txn to validate for the current network protocol. MinFee uint64 `json:"min-fee"` } -// TransactionProofResponse defines model for TransactionProofResponse. -type TransactionProofResponse struct { - // Hashtype The type of hash function used to create the proof, must be one of: - // * sha512_256 - // * sha256 - Hashtype TransactionProofResponseHashtype `json:"hashtype"` - - // Idx Index of the transaction in the block's payset. - Idx uint64 `json:"idx"` - - // Proof Proof of transaction membership. - Proof []byte `json:"proof"` - - // Stibhash Hash of SignedTxnInBlock for verifying proof. - Stibhash []byte `json:"stibhash"` - - // Treedepth Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root. - Treedepth uint64 `json:"treedepth"` -} - -// TransactionProofResponseHashtype The type of hash function used to create the proof, must be one of: -// * sha512_256 -// * sha256 -type TransactionProofResponseHashtype string +// TransactionProofResponse Proof of transaction in a block. +type TransactionProofResponse = TransactionProof // VersionsResponse algod version information. type VersionsResponse = Version // AccountInformationParams defines parameters for AccountInformation. type AccountInformationParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. - Format *AccountInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` - // Exclude When set to `all` will exclude asset holdings, application local state, created asset parameters, any created application parameters. Defaults to `none`. Exclude *AccountInformationParamsExclude `form:"exclude,omitempty" json:"exclude,omitempty"` -} -// AccountInformationParamsFormat defines parameters for AccountInformation. -type AccountInformationParamsFormat string + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. + Format *AccountInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` +} // AccountInformationParamsExclude defines parameters for AccountInformation. type AccountInformationParamsExclude string +// AccountInformationParamsFormat defines parameters for AccountInformation. 
+type AccountInformationParamsFormat string + // AccountApplicationInformationParams defines parameters for AccountApplicationInformation. type AccountApplicationInformationParams struct { // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. @@ -1550,11 +1507,11 @@ type GetApplicationBoxesParams struct { // GetBlockParams defines parameters for GetBlock. type GetBlockParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. - Format *GetBlockParamsFormat `form:"format,omitempty" json:"format,omitempty"` - // HeaderOnly If true, only the block header (exclusive of payset or certificate) may be included in response. HeaderOnly *bool `form:"header-only,omitempty" json:"header-only,omitempty"` + + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. + Format *GetBlockParamsFormat `form:"format,omitempty" json:"format,omitempty"` } // GetBlockParamsFormat defines parameters for GetBlock. @@ -1580,7 +1537,7 @@ type GetTransactionProofParamsFormat string // StartCatchupParams defines parameters for StartCatchup. type StartCatchupParams struct { // Min Specify the minimum number of blocks which the ledger must be advanced by in order to start the catchup. This is useful for simplifying tools which support fast catchup, they can run the catchup unconditionally and the node will skip the catchup if it is not needed. - Min *uint64 `form:"min,omitempty" json:"min,omitempty"` + Min *basics.Round `form:"min,omitempty" json:"min,omitempty"` } // GetLedgerStateDeltaForTransactionGroupParams defines parameters for GetLedgerStateDeltaForTransactionGroup. @@ -1616,15 +1573,15 @@ type GenerateParticipationKeysParams struct { Dilution *uint64 `form:"dilution,omitempty" json:"dilution,omitempty"` // First First round for participation key. - First uint64 `form:"first" json:"first"` + First basics.Round `form:"first" json:"first"` // Last Last round for participation key. - Last uint64 `form:"last" json:"last"` + Last basics.Round `form:"last" json:"last"` } // ShutdownNodeParams defines parameters for ShutdownNode. type ShutdownNodeParams struct { - Timeout *uint64 `form:"timeout,omitempty" json:"timeout,omitempty"` + Timeout *int `form:"timeout,omitempty" json:"timeout,omitempty"` } // TealCompileTextBody defines parameters for TealCompile. diff --git a/daemon/algod/api/server/v2/generated/model/model_types.yml b/daemon/algod/api/server/v2/generated/model/types.yml similarity index 82% rename from daemon/algod/api/server/v2/generated/model/model_types.yml rename to daemon/algod/api/server/v2/generated/model/types.yml index ad39740b12..c9f0e86fcb 100644 --- a/daemon/algod/api/server/v2/generated/model/model_types.yml +++ b/daemon/algod/api/server/v2/generated/model/types.yml @@ -2,8 +2,6 @@ package: model generate: models: true output-options: - type-mappings: - integer: uint64 skip-prune: true output: ./server/v2/generated/model/types.go compatibility: diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index a8fd685c10..20a728cccc 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -1,6 +1,6 @@ // Package private provides primitives to interact with the openapi HTTP API. 
// -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package private import ( @@ -14,9 +14,9 @@ import ( "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. @@ -50,9 +50,9 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetConfig(ctx) return err } @@ -61,9 +61,9 @@ func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetDebugSettingsProf(ctx) return err } @@ -72,9 +72,9 @@ func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { func (w *ServerInterfaceWrapper) PutDebugSettingsProf(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.PutDebugSettingsProf(ctx) return err } @@ -85,14 +85,14 @@ func (w *ServerInterfaceWrapper) AbortCatchup(ctx echo.Context) error { // ------------- Path parameter "catchpoint" ------------- var catchpoint string - err = runtime.BindStyledParameterWithLocation("simple", false, "catchpoint", runtime.ParamLocationPath, ctx.Param("catchpoint"), &catchpoint) + err = runtime.BindStyledParameterWithOptions("simple", "catchpoint", ctx.Param("catchpoint"), &catchpoint, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AbortCatchup(ctx, catchpoint) return err } @@ -103,12 +103,12 @@ func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error { // ------------- Path parameter "catchpoint" ------------- var catchpoint string - err = runtime.BindStyledParameterWithLocation("simple", false, "catchpoint", runtime.ParamLocationPath, ctx.Param("catchpoint"), &catchpoint) + err = runtime.BindStyledParameterWithOptions("simple", "catchpoint", ctx.Param("catchpoint"), &catchpoint, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params StartCatchupParams @@ -119,7 
+119,7 @@ func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter min: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.StartCatchup(ctx, catchpoint, params) return err } @@ -128,7 +128,7 @@ func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error { func (w *ServerInterfaceWrapper) ShutdownNode(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params ShutdownNodeParams @@ -139,7 +139,7 @@ func (w *ServerInterfaceWrapper) ShutdownNode(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter timeout: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.ShutdownNode(ctx, params) return err } @@ -184,236 +184,231 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3Mbt5LoX0Fxt8qP5UjyI9kT3zq1V/EjRxsndllK9u5avgk40yRxNATmABiKjK//", - "+y00HoOZwZBDSbGT2v1ki4NHo9Fo9AvdHye5WFWCA9dq8uzjpKKSrkCDxL9onoua64wV5q8CVC5ZpZng", - "k2f+G1FaMr6YTCfM/FpRvZxMJ5yuoGlj+k8nEv5RMwnF5JmWNUwnKl/CipqB9bYyrcNIm2whMjfEqR3i", - "7MXk044PtCgkKNWH8g0vt4TxvKwLIFpSrmhuPilyzfSS6CVTxHUmjBPBgYg50ctWYzJnUBbqyC/yHzXI", - "bbRKN/nwkj41IGZSlNCH87lYzRgHDxUEoMKGEC1IAXNstKSamBkMrL6hFkQBlfmSzIXcA6oFIoYXeL2a", - "PHs/UcALkLhbObA1/ncuAX6DTFO5AD35ME0tbq5BZpqtEks7c9iXoOpSK4JtcY0LtgZOTK8j8kOtNJkB", - "oZy8e/WcPHny5BuzkBXVGgpHZIOramaP12S7T55NCqrBf+7TGi0XQlJeZKH9u1fPcf5zt8CxrahSkD4s", - "p+YLOXsxtADfMUFCjGtY4D60qN/0SByK5ucZzIWEkXtiG9/ppsTzf9FdyanOl5VgXCf2heBXYj8neVjU", - "fRcPCwC02lcGU9IM+v4k++bDx0fTRyef/un9afZf7s+vnnwaufznYdw9GEg2zGspgefbbCGB4mlZUt7H", - "xztHD2op6rIgS7rGzacrZPWuLzF9Letc07I2dMJyKU7LhVCEOjIqYE7rUhM/Mal5adiUGc1RO2GKVFKs", - "WQHF1HDf6yXLlySnyg6B7cg1K0tDg7WCYojW0qvbcZg+xSgxcN0IH7igPy4ymnXtwQRskBtkeSkUZFrs", - "uZ78jUN5QeILpbmr1GGXFblYAsHJzQd72SLuuKHpstwSjftaEKoIJf5qmhI2J1tRk2vcnJJdYX+3GoO1", - "FTFIw81p3aPm8A6hr4eMBPJmQpRAOSLPn7s+yvicLWoJilwvQS/dnSdBVYIrIGL2d8i12fZ/P3/zIxGS", - "/ABK0QW8pfkVAZ6LAoojcjYnXOiINBwtIQ5Nz6F1OLhSl/zflTA0sVKLiuZX6Ru9ZCuWWNUPdMNW9Yrw", - "ejUDabbUXyFaEAm6lnwIIDviHlJc0U1/0gtZ8xz3v5m2JcsZamOqKukWEbaim7+eTB04itCyJBXwgvEF", - "0Rs+KMeZufeDl0lR82KEmKPNnkYXq6ogZ3MGBQmj7IDETbMPHsYPg6cRviJw/CCD4IRZ9oDDYZOgGXO6", - "zRdS0QVEJHNEfnLMDb9qcQU8EDqZbfFTJWHNRK1CpwEYcerdEjgXGrJKwpwlaOzcocMwGNvGceCVk4Fy", - "wTVlHArDnBFoocEyq0GYogl36zv9W3xGFXz9dOiOb76O3P256O76zh0ftdvYKLNHMnF1mq/uwKYlq1b/", - "EfphPLdii8z+3NtItrgwt82clXgT/d3sn0dDrZAJtBDh7ybFFpzqWsKzS/7Q/EUycq4pL6gszC8r+9MP", - "danZOVuYn0r702uxYPk5WwwgM8CaVLiw28r+Y8ZLs2O9SeoVr4W4qqt4QXlLcZ1tydmLoU22Yx5KmKdB", - "240Vj4uNV0YO7aE3YSMHgBzEXUVNwyvYSjDQ0nyO/2zmSE90Ln8z/1RVaXrrap5CraFjdyWj+cCZFU6r", - "qmQ5NUh85z6br4YJgFUkaNPiGC/UZx8jECspKpCa2UFpVWWlyGmZKU01jvTPEuaTZ5N/Om7sL8e2uzqO", - "Jn9tep1jJyOyWjEoo1V1wBhvjeijdjALw6DxE7IJy/ZQaGLcbqIhJWZYcAlryvVRo7K0+EE4wO/dTA2+", - "rbRj8d1RwQYRTmzDGSgrAduG9xSJUE8QrQTRigLpohSz8MP906pqMIjfT6vK4gOlR2AomMGGKa0e4PJp", - "c5Liec5eHJHv4rFRFBe83JrLwYoa5m6Yu1vL3WLBtuTW0Ix4TxHcTiGPzNZ4NBgx/y4oDtWKpSiN1LOX", - "Vkzjv7m2MZmZ30d1/nOQWIzbYeJCRcthzuo4+Euk3NzvUE6fcJy554icdvvejGzMKDsIRp01WLxr4sFf", - 
"mIaV2ksJEUQRNbntoVLS7cQJiRkKe30y+UmBpZCKLhhHaKdGfeJkRa/sfgjEuyEEUEEvsrRkJchgQnUy", - "p0P9Uc/O8ieg1tTGeknUSKolUxr1amxMllCi4Ey5J+iYVG5EGSM2fMciAszXklaWlt0XK3Yxjvq8bWRh", - "veXFO/JOTMIcsftooxGqG7PlvawzCQlyjQ4M35Yiv/obVcs7OOEzP1af9nEasgRagCRLqpaJg9Oh7Wa0", - "MfRtGiLNklk01VFY4muxUHewxFIcwrqq6jktSzN1n2V1VosDjzrIZUlMYwIrhgZzpzhaC7vVv8hLmi+N", - "WEByWpbTxlQkqqyENZRGaWecg5wSvaS6Ofw4stdr8BwpMMxOA4lW48xMaGKTwRYhgawo3kAro81UZbtP", - "4KCKrqAjBeGNKGq0IkSKxtkLvzpYA0eeFIZG8MMa0VoTD35k5nafcGYu7OKsBVB7913AX+AXLaBN6+Y+", - "5c0UQhbWZq3Nb0ySXEg7hL3h3eTmP0Bl09lS5/1KQuaGkHQNUtHSrK6zqAeBfO/qdO45mQXVNDqZjgrT", - "CpjlHNgPxTuQCSvNG/wPLYn5bKQYQ0kN9TAURkTkTi3sxWxQZWcyDdDeKsjKmjJJRfOrg6B83kyeZjOj", - "Tt5Laz11W+gWEXboYsMKdVfbhIMN7VX7hFjblWdHPVlkJ9OJ5hqDgAtREcs+OiBYToGjWYSIzZ1fa9+K", - "TQqmb8Wmd6WJDdzJTphxRjP7b8XmhYNMyP2Yx7HHIN0skNMVKLzdeMw4zSyNX+50JuTNpInOBcNJ420k", - "1IwaCVPTDpKwaV1l7mwmPBa2QWegJsBjtxDQHT6FsRYWzjX9HbCgzKh3gYX2QHeNBbGqWAl3QPrLpBA3", - "owqePCbnfzv96tHjXx5/9bUhyUqKhaQrMttqUOS+M8sRpbclPEhqRyhdpEf/+qn3UbXHTY2jRC1zWNGq", - "P5T1fVnt1zYjpl0fa20046oDgKM4IpirzaKdWLeuAe0FzOrFOWhtNN23UszvnBv2ZkhBh43eVtIIFqrt", - "J3TS0nFhmhzDRkt6XGFL4IWNMzDrYMrogKvZnRDV0MYXzSwFcRgtYO+hOHSbmmm28VbJrazvwrwBUgqZ", - "vIIrKbTIRZkZOY+JhIHirWtBXAu/XVX3dwstuaaKmLnRe1nzYsAOoTd8/P1lh77Y8AY3O28wu97E6ty8", - "Y/aljfxGC6lAZnrDCVJnyzwyl2JFKCmwI8oa34G28hdbwbmmq+rNfH431k6BAyXsOGwFysxEbAsj/SjI", - "BbfBfHtMNm7UMejpIsZ7mfQwAA4j51ueo6vsLo7tsDVrxTj67dWW55Fpy8BYQrFokeXtTVhD6LBT3VMJ", - "cAw6XuNntNW/gFLTV0JeNOLrd1LU1Z2z5+6cY5dD3WKcN6Awfb0ZmPFF2Q4gXRjYj1Jr/CILeh6MCHYN", - "CD1S5Gu2WOpIX3wrxe9wJyZnSQGKH6yxqDR9+iajH0VhmImu1R2Iks1gDYczdBvzNToTtSaUcFEAbn6t", - "0kLmQMghxjphiJaO5Va0TzBFZmCoK6e1WW1dEQxA6t0XTceM5vaEZogaNRB+EeJmbCs7nQ1nKyXQYktm", - "AJyImYtxcNEXuEiK0VPai2lOxE3wixZclRQ5KAVF5kzRe0Hz7ezVoXfgCQFHgMMsRAkyp/LWwF6t98J5", - "BdsMY/0Uuf/9z+rBF4BXC03LPYjFNin0du1pfajHTb+L4LqTx2RnLXWWao14axhECRqGUHgQTgb3rwtR", - "bxdvj5Y1SAwp+V0p3k9yOwIKoP7O9H5baOtqIILdqelGwjMbxikXXrBKDVZSpbN9bNk0atkSzAoiTpji", - "xDjwgOD1miptw6AYL9Cmaa8TnMcKYWaKYYAH1RAz8s9eA+mPnZt7kKtaBXVE1VUlpIYitQb0yA7O9SNs", - "wlxiHo0ddB4tSK1g38hDWIrGd8hyGjD+QXXwvzqPbn9x6FM39/w2icoWEA0idgFy7ltF2I2jeAcAYapB", - "tCUcpjqUE0KHpxOlRVUZbqGzmod+Q2g6t61P9U9N2z5xWSeHvbcLAQodKK69g/zaYtbGby+pIg4O72JH", - "c46N1+rDbA5jphjPIdtF+ajimVbxEdh7SOtqIWkBWQEl3SaCA+xnYj/vGgB3vFF3hYbMBuKmN72hZB/3", - "uGNogeOplPBI8AvJzRE0qkBDIK73npELwLFTzMnR0b0wFM6V3CI/Hi7bbnViRLwN10KbHXf0gCA7jj4G", - "4AE8hKFvjgrsnDW6Z3eK/wTlJghyxOGTbEENLaEZ/6AFDNiC3Run6Lx02HuHAyfZ5iAb28NHho7sgGH6", - "LZWa5axCXed72N656tedIOk4JwVoykooSPTBqoFV3J/YENLumDdTBUfZ3vrg94xvieX4MJ028FewRZ37", - "rX2bEJk67kKXTYxq7ifKCQLqI56NCB43gQ3Ndbk1gppewpZcgwSi6pkNYej7U7SosniApH9mx4zOO5v0", - "je50F5/jUNHyUrFmVifYDd9FRzFoocPpApUQ5QgLWQ8ZSQhGxY6QSphdZ+75k38A4ympBaRj2uiaD9f/", - "PdVCM66A/KeoSU45qly1hiDTCImCAgqQZgYjgoU5XXBigyEoYQVWk8QvDx92F/7wodtzpsgcrv2bQdOw", - "i46HD9GO81Yo3Tpcd2APNcftLHF9oOPKXHxOC+nylP0RT27kMTv5tjN48HaZM6WUI1yz/FszgM7J3IxZ", - "e0wj46K9cNxRvpx2fFBv3bjv52xVl1TfhdcK1rTMxBqkZAXs5eRuYib4yzUt34Ru+B4SckOjOWQ5vuIb", - "ORZcmD724Z8Zh3FmDrAN+h8LEJzZXue20x4Vs4lUZasVFIxqKLekkpCDfe9mJEcVlnpEbCR8vqR8gQqD", - "FPXCBbfacZDh18qaZmTNe0MkhSq94RkauVMXgAtT808ejTgF1Kh0XQu5VWCuaZjPvXIdczNHe9D1GCSd", - "ZNPJoMZrkLpuNF6LnPa7zRGXQUvei/DTTDzSlYKoM7JPH1/xtpjDZDb39zHZN0OnoOxPHEX8Nh+Hgn6N", - "ul1u70DosQMRCZUEhVdUbKZS9quYx2+0fajgVmlY9S35tusvA8fv3aC+KHjJOGQrwWGbTEvCOPyAH5PH", - "Ca/Jgc4osAz17eogLfg7YLXnGUONt8Uv7nb3hHY9VuqVkHflErUDjhbvR3gg97rb3ZQ39ZPSsky4Ft0L", - "zi4DUNMQrMskoUqJnKHMdlaoqYsKtt5I99yzjf634V3KHZy97rgdH1qcHABtxFBWhJK8ZGhBFlxpWef6", - 
"klO0UUVLTQRxeWV82Gr53DdJm0kTVkw31CWnGMAXLFfJgI05JMw0rwC88VLViwUo3dF15gCX3LVinNSc", - "aZxrZY5LZs9LBRIjqY5syxXdkrmhCS3IbyAFmdW6Lf3jA2WlWVk6h56Zhoj5JaealECVJj8wfrHB4bzT", - "3x9ZDvpayKuAhfTtvgAOiqksHWz2nf2Kcf1u+UsX44/h7vazDzptMiZMzDJbSVL+7/1/e/b+NPsvmv12", - "kn3zL8cfPj799OBh78fHn/761//X/unJp78++Ld/Tu2Uhz31fNZBfvbCacZnL1D9iUL1u7B/Nvv/ivEs", - "SWRxNEeHtsh9TBXhCOhB2ziml3DJ9YYbQlrTkhWGt9yEHLo3TO8s2tPRoZrWRnSMYX6tByoVt+AyJMFk", - "OqzxxlJUPz4z/VAdnZLu7Tmel3nN7VZ66du+w/TxZWI+DckIbJ6yZwRfqi+pD/J0fz7+6uvJtHlhHr5P", - "phP39UOCklmxSeURKGCT0hXjRxL3FKnoVoFOcw+EPRlKZ2M74mFXsJqBVEtWfX5OoTSbpTmcf7LkbE4b", - "fsZtgL85P+ji3DrPiZh/fri1BCig0stU/qKWoIatmt0E6ISdVFKsgU8JO4Kjrs2nMPqiC+orgc59YKoU", - "Yow2FM6BJTRPFRHW44WMMqyk6KfzvMFd/urO1SE3cAqu7pypiN573728IMeOYap7NqWFHTpKQpBQpd3j", - "yVZAkuFm8ZuyS37JX8AcrQ+CP7vkBdX0eEYVy9VxrUB+S0vKczhaCPLMv8d8QTW95D1JazCxYvRomlT1", - "rGQ5uYoVkoY8bbKs/giXl+9puRCXlx96sRl99cFNleQvdoLMCMKi1plL9ZNJuKYy5ftSIdULjmxzee2a", - "1QrZorYGUp9KyI2f5nm0qlQ35UN/+VVVmuVHZKhcQgOzZURpEd6jGQHFPek1+/ujcBeDpNferlIrUOTX", - "Fa3eM64/kOyyPjl5gi/7mhwIv7or39DktoLR1pXBlBRdowou3KqVGKueVXSRcrFdXr7XQCvcfZSXV2jj", - "KEuC3VqvDv0DAxyqWUB44jy4ARaOgx8H4+LObS+f1jG9BPyEW9h+gH2r/Yrez994u/a8wae1XmbmbCdX", - "pQyJ+50J2d4WRsjy0RiKLVBbdYnxZkDyJeRXLmMZrCq9nba6+4AfJ2h61sGUzWVnXxhiNiV0UMyA1FVB", - "nShO+bab1kbZFxU46Du4gu2FaJIxHZLHpp1WRQ0dVKTUSLo0xBofWzdGd/NdVJl/aOqyk+DjTU8WzwJd", - "+D7DB9mKvHdwiFNE0Ur7MYQIKhOIsMQ/gIIbLNSMdyvSTy2P8Ry4ZmvIoGQLNkul4f2Pvj/Mw2qo0mUe", - "dFHIYUBF2JwYVX5mL1an3kvKF2CuZ3OlCkVLm1U1GbSB+tASqNQzoHqnnZ/HCSk8dKhSXuPLa7TwTc0S", - "YGP2m2m02HG4NloFGopsGxe9fDQcf2YBh+KG8PjujaZwNKjrOtQlMg76WzlgN6i1LjQvpjOEy35fAaYs", - "FddmXwwUwmXbtEldovulVnQBA7pL7L0bmQ+j5fHDQfZJJEkZRMy7okZPEkiCbBtnZs3JMwzmiznEqGZ2", - "AjL9TNZB7HxGmETbIWxWogAbIlft3lPZ8qLarMBDoKVZC0jeiIIejDZG4uO4pMofR8yX6rnsKOnsd0z7", - "sis13VkUSxglRQ2J5/xt2OWgPb3fJajzWel8KrpY6R+RVs7oXvh8IbUdgqNoWkAJC7tw29gTSpMwqdkg", - "A8eb+Rx5S5YKS4wM1JEA4OYAo7k8JMT6RsjoEVJkHIGNgQ84MPlRxGeTLw4BkruET9SPjVdE9DekH/bZ", - "QH0jjIrKXK5swN+Yew7gUlE0kkUnohqHIYxPiWFza1oaNud08WaQXoY0VCg6+dBc6M2DIUVjh2vKXvkH", - "rckKCTdZTSzNeqDTovYOiGdik9kXykldZLaZGXpPvl3A99Kpg2lz0d1TZCY2GM6FV4uNld8DyzAcHozI", - "9rJhCukV+w3JWRaYXdPulnNTVKiQZJyhNZDLkKA3ZuoB2XKIXO5H6eVuBEDHDNXUanBmib3mg7Z40r/M", - "m1tt2qRN9c/CUsd/6Agld2kAf337WDsh3N+axH/DycX8ifosmfD6lqXbZCi0nSubdfCQBIVdcmgBsQOr", - "b7tyYBKt7VivNl4jrKVYiWG+fadkH20KSkAlOGuJptlVKlLA6PKA9/i57xYZ63D3KN8+iAIIJSyY0tA4", - "jXxc0Jcwx1NMnyzEfHh1upJzs753QoTL37rNsWNrmZ99BRiBP2dS6Qw9bsklmEavFBqRXpmmaQm0HaJo", - "iw2wIs1xcdor2GYFK+s0vbp5v39hpv0xXDSqnuEtxrgN0JphcYxk4PKOqW1s+84Fv7YLfk3vbL3jToNp", - "aiaWhlzac/xJzkWHge1iBwkCTBFHf9cGUbqDQUYPzvvcMZJGo5iWo13eht5hKvzYe6PU/LP3oZvfjpRc", - "S5QGMP1CUCwWUPj0Zt4fxqMkcqXgi6iKU1Xtypl3RGzqOsw8tyNpnQvDh6Eg/EjczxgvYJOGPtYKEPLm", - "ZR0m3MNJFsBtupK0WSiJmjjEH1tEtrrP7AvtPgBIBkFfdJzZTXSy3aWwnbgBJdDC6SQK/Pp2H8v+hjjU", - "TYfCp1uZT3cfIRwQaYrpqLBJPw3BAAOmVcWKTcfxZEcdNILRg6zLA9IWshY32B4MtIOgkwTXSqXtQq2d", - "gf0Ydd5jo5XZ2GsXWGzom+buAX5RS/RgtCKb+3nbg642cu3f/3yuhaQLcF6ozIJ0qyFwOYegIcqKrohm", - "NpykYPM5xN4XdRPPQQu4no29GEG6CSJLu2hqxvXXT1NktId6Ghj3oyxNMQlaGPLJX/S9XF6mj0xJ4UqI", - "tuYGrqrkc/3vYZv9TMvaKBlMqiY817md2pfvAbu+Xn0PWxx5b9SrAWzPrqDl6R0gDaYs/eGTihJY31Ot", - "FP+oXra28ICdOk3v0h1tjSvKMEz8zS3TKlrQXsptDkYTJGFgGbMb5+nYBHN6oI34Linv2wRW7JdBInk/", - "noopX8KyfxWFXBT7aPcCaOmJF5cz+TSd3C4SIHWbuRH34PptuECTeMZIU+sZbgX2HIhyWlVSrGmZuXiJ", - "octfirW7/LG5D6/4zJpMmrIvXp6+fuvA/zSd5CVQmQVLwOCqsF31p1mVLeOw+yqx2b6dodNaiqLNDxmZ", - "4xiLa8zs3TE29YqiNPEz0VF0MRfzdMD7Xt7nQn3sEneE/EAVIn4an6cN+GkH+dA1ZaV3NnpoB4LTcXHj", - 
"KuskuUI8wK2DhaKYr1uPNfi44fLy/drjsXET2ICZkF89EUGlRhjIu0wkfQgbIt7D+nBJbzADZlqx4S4/", - "JnI8F2NE71xIeyVk645xDyCTMUq/n/RmZHmLx4GQcF8msyuzHREr3/26+NUc+ocP4xP98OGU/Fq6DxGA", - "+PvM/Y5qzMOHSSdl0lpmeBEawzhdwYPwmGNwIz6vns/hepwccLpeBQFWDJNhoFAbbOTRfe2wdy2Zw2fh", - "fimgBPPT0RhbQLzpFt0xMGNO0PnQg8cQy7qylTkVEbwbuo1vbQ1p4Z3iKj9Yn2//CPF6hX7STJUsT0eQ", - "8Jky3IfbmE3TmGDjAaOwGbFmAyHAvGbRWKbZmNSsHSCjOZLIVMnssA3uZsId75qzf9RAWGGUpzkDiddn", - "50b1OgiO2pN70+Y3N7B1hzXD38bcssOt5U1Ou2wtO92EL4Lryi80VVvowEDzeMYe494RJO7ow99y+Ghu", - "2Y70HKcujanQ7hmd8wkOzJGsuM5UNpfiN0hf2OimSuTb8P5Vhtbk34CnAgS7LCX4rpvC8c3s+7Z7vAo+", - "tPG3Vrn9okNxs5tcpulTfdhG3kS3Vums0A7JQ7peHMjQfoEwwFrweEUxt1htxQc5UW7Pk0020XrIlj6V", - "8ZPRYzt+cyodzL1ntiW9ntFUKRqjchmYou1thWNpQXxnvwEqpFKws5MoUDy0ZTZhXQWycXX0k9/eUH2y", - "045WnBo9CSkq1pCmNhqiVCIxTM2vKbfFyk0/y69cbwXW0296XQuJ6SZVOnKsgJytklbfy8v3Rd6PEirY", - "gtk63LWCqNCzG4jYnJZIRa5YdkgQ4lBzNicn06javNuNgq2ZYrMSsMUj22JGFV6XweseupjlAddLhc0f", - "j2i+rHkhodBLZRGrBAkqLgp5If5xBvoagJMTbPfoG3IfIz8VW8MDg0UnBE2ePfoG43bsHyepW9bVUd/F", - "sgvk2T4mPE3HGPpqxzBM0o2aDvKeS4DfYPh22HGabNcxZwlbugtl/1laUU4XkH4GstoDk+2Lu4lRAx28", - "cOt0AKWl2BKm0/ODpoY/DTwtN+zPgkFysVoxvXLxgUqsDD01VZztpH44rHfmy1J5uPxHDLOtEmryF1Bj", - "6GrgaRgGQ/+IruAYrVNCbY7RkjUB8L4sKDnzKYyxTlcoz2VxY+YyS0dZEuPh56SSjGs0s9R6nv3FqMWS", - "5ob9HQ2Bm82+fpqod9UuCcMPA/yz412CArlOo14OkL2XWVxfcp8Lnq0MRykeNKkcolM5GA+cjvwcCj/d", - "PfRYydeMkg2SW90iNxpx6lsRHt8x4C1JMaznIHo8eGWfnTJrmSYPWpsd+undaydlrIRM1SVojruTOCRo", - "yWCND/PSm2TGvOVeyHLULtwG+i8bZuVFzkgs82c5qQhEjtNdb/KNFP/zD02CdfTf2gePHRugkAlrp7Pb", - "feagxsOsbl03sY1Lw28DmBuNNhylj5WBIH8bxR/6fImwpC5Ids9bBsdHvxJpdHCU4x8+RKAfPpw6MfjX", - "x+3Plr0/fJjOc5w0uZlfGyzcRiPGvqk9/FYkDGC+OGKIW3JpGBIGyKFLynwwTHDmhpqSdiG6zy9F3M0z", - "snRQa/oUXF6+xy8eD/hHFxFfmFniBjaPIYYPe7sQZ5JkivA9Cqen5FuxGUs4nTvIE88fAEUDKBlpnsOV", - "9AqNJqMC9oalRDRqRp1BKYySGdceiu35fx48m8VPd2C7ZmXxc+Nl7VwkkvJ8mQxGnpmOv1gZvXUFW1aZ", - "LGeypJxDmRzO6ra/eB04oaX/XYydZ8X4yLbdQrd2uZ3FNYC3wfRA+QkNepkuzQQxVtvZuUL2h3IhCoLz", - "NLUzGubYrxidqtSZeEaNw65q7cJj8cm5y2s0ZyVGe6b9xtgyk1QP5OnCsuq+jJEZB6ucK2tmsKODJJSt", - "8GJWdFWVgCdzDZIusKvg0OmOmdpw5KgwBlGV+YQtMS+GILqWnIj5PFoGcM0klNspqahSdpATsyzY4NyT", - "Z49OTpJmL8TOiJVaLPplvmmW8ugYm9gvrpaTrThwELD7Yf3UUNQhG9snHFe68h81KJ3iqfjBPpBFL6m5", - "tW3ZylBi9Yh8hwmWDBG3MuqjudLnKm7n7ayrUtBiijmUL16eviZ2VtvHVqq3ZTMXaK1rk3/SvTI+j6lP", - "IDWQoGf8OLszhphVK52FKpepFIimRVOHk3VCe9COF2PniLywJtQQx2InIZiJW66giIpqWiUeicP8R2ua", - "L9E22ZKAhnnl+Hqvnp01npvokWMosoQM28DtSr7aiq9TIvQS5DVTgA//YQ3trIshBamzjfssjO3lyZpz", - "SylHBwijoaTSoWj3wFlJ1gcVJCHrIP5Ay5Qt+3xo+dtz7JV+8tGppdvx+vscfj6TN/nBORdyygVnOVZc", - "SEnSmCFunJtyRHGKtH9RTdwJTRyuZAXf8OTYYXGwpq9nhA5xfZd/9NVsqqUO+6eGjavstgCtHGeDYuoL", - "ajuHGOMKXNEsQ0QxnxQyEdSUfG8RAigOJCNM/jRg4Xxlvv3o7N+Ye+OKcbR0ObQ5/cy6rErF0DPNCdNk", - "IUC59bQfDan3ps8RJoMsYPPh6LVYsPycLXAMG0Znlm1DU/tDnfpAVRcYato+N21div7wcysczE56WlVu", - "0uFy60lBUm/4IIJTcUs+kCRCbhg/Hm0Hue2MMMf71BAarDFqDSq8h3uEEUp2t0d5aXRLS1HYgtiHm8k8", - "vYwnwHjNuHehpi+IPHkl4MbgeR3op3JJtdUdRvG0C6DlwDsLfAhtffC3HapboMCgBNfo5xjexqba+ADj", - "CA0aiZ/yLfGHwlB3JEw8p2WI0E7UDkepyglRNqa1U008xTgM4878y8wWuva+EgzdsejHoTfRUCrEWV0s", - "QGe0KFIZtL7FrwS/+rdosIG8DrWuwiPEdir0PrW5iXLBVb3aMZdvcMvpovL8CWoIH6EIO4wJfWZb/DdV", - "6Gl4Z1xs9sGPf30gdnFY/v/+Y+aU1GtoOlNskY3HBN4pt0dHM/XNCL3pf6eU7l8F/yEe/Xa4XLxHKf72", - "0lwccX7gXny6vVpC+l6MBRf43edVCokn21wJr7JeOTOMesDNS2xZB3jfMAn4mpYDD+5jX4m9X63/YOjZ", - "fT6YJYJqlwVMU7KTBQ1mVrKxwh3vS9+FOBQfbMOD785r4da6E6HDvrvvW546GyPWMItBD93NnGjNBh/q", - "RXN1DfomTVqWIh996t0wp6bTcNZQsVq5dNqJGLb1ShQxncfRUABppmXDcxMh/6h7Jr+hYpT8Iq/To7Vs", - 
"FoeaSi0a3RKm9iWgB88DY6eOJ4pMpA6z5BUrsR7Sv5+/+XEyvJHRDvS31GXxTRqVhzYmPJbqksdCtPBR", - "D9tOBC9TSsR0ogaM3JirJ30aXNHb5IdX1mg3BiSb0uaQ1q/HDt4jgIVIJanvpxyZNBvh0R7RQbOxlpfE", - "dJGih+/XQylWfJ0f/B7XE3LheVNXRgLWTNQ+rNI/bvC2HvurS+HVqhs0wNiST4a+tDty0Hl64epf22U6", - "Y9v3P9vwCgJcy+0fwJXa2/RuUaqEGmvtzk0TEkqnjiql2hJ3x9TASpVbckqfN4JbmaFFS73yVT2yejFG", - "zu/h49N0clYcJAmnSnZN7Cip+/Q1Wyw1Vvz4G9AC5Ns9FU2aKiZ4xCqhWFPBuDSDuRTSSxzuaOwrIkPA", - "LK7I0h/LR5evIddYtrqJmpUAh9RnMZN5b+7/VDYZvgvCYytX0GRXFZN+reo9wnsv8VqUPNDW+T0aX7Pj", - "NLyNsE87r6lq0j11ci6Mfvk9n0OOWdV3Jrr7jyXwKIna1BtcEZZ5lPeOhQeKWBfgcHdCA9CuPHQ74Ynq", - "c90anKE8GFewvadIixqShYfD69ybJB5HDFjfts9BP+QhcuGgTAXKQCz4WH+Xyr0prjOYMz5K23jDuTxJ", - "moujSeW4Y0ovP95gLtP1oLSxKOsP5cLr11wfNiy8wBL3ykW+0pC4PDa/kbN+4a1rl/gc0xIGp6hPgQ7K", - "/+ZzkNpZSnbl6o8gVqwL+prKwre4k6Ry9m5iaaDnYWbWvMzqRy8lSrngI8e8FEaMyIZeirYfQ4VI4nvK", - "hnw3CcAQrjlICUXwdZZCQaaFf8m1C45dqLBx7TdCghosn2aBG0yd/66pDYBlJCmmyqcunD1eIJGwogY6", - "GWXwH55zF7Kf2+8+iYcvI7jXdBzodX89a/8mj6keEmOqnxN3W+5PDnITKzLjHGTmXcrddP68ndER8/YW", - "dW4v6PhgBEv76NxbO1hJ0gCb91fZ0RGi7BdXsD22SpAvBO53MAbaSk4W9ChhcWeT79SurlJwL+4EvC+b", - "h7ISoswGvJhn/RoEXYq/YvkVYA7R8HbFyH732mfDTELuo/MshKlcL7c+535VAYfiwREhp9y+FvQRK+3y", - "pJ3J+T29a/4NzlrUtiyIs5YfXfL0syss2CFvyc38MLt5mALD6m45lR1kT4b7DR+KpbvG4h7tKsBHY7Xy", - "fgxJRyqJiMpCkZJJzq0r+jke9JThCHObREl4MEKBEufCJqoUqSD9m+RfMUOlMRVPhgBp4GPSgAQo3OBJ", - "BLjwvD0pRd1nnzRTzImEJjrkptlDXUJOy5rVkEbfnTnM0uZ3cyEhnhGjT22m4PCiDdPw4n9mTEsqtzfJ", - "8dlGVcp6MojlvXGWIcSyWUgTZtnHYVmK6wyZVRbq5KRUW9NOtS9jX7Sx6WdO9QyigE2qnKC2JUtakFxI", - "CXncI/2Q20K1EhKyUmD8Ziq0ZK6N3L3C15uclGJBRJWLAmy9qTQFDc1Vc05RbIIoXC6JAks7mAbA9ono", - "eOSU5k61DuIMRa295Rn85l+YPjYlRZMVzi46s0EKA08RQLkscA5DtnEfXiQcm8+oa0tM8+Y52yDdgEwd", - "+TnRsoYpcS26NfbdwacSyIopZUEJtHTNyhIzQrBNFFIRIpLSqB0Qe88wXnrNMKiunR3ESsOVufNCypSY", - "B5zH+cyIXkpRL5ZRgvoAp1d5Ze0U4niUn1SNcY/4NNRM8ZSshNJO07QjNUtuYknv54JrKcqybZSyIvrC", - "Wdp/oJvTPNevhbia0fzqAeq1XOiw0mLqEyd0o36bmWQnNWH7As6QBtT+VN+2HcbAOqIdzSA7LK5nFN9n", - "ZY7A/LCfg+63uZ/2F9ZdV5uZptWYU06oFiuWp8/UnyuMdjD4NcWikskIbW1Wmz4Gm+Fhjy+rEDWFLLKP", - "ZuA0WVzylDhG4KJHkN2Y/6IE3h2XzMExmoGLss9cnBSV5YOyXgcAhNTmNNC1tAVdY0kscBWxsDlQMPal", - "C+jIWwVDDG8HmxnhzoHScCugemHNAcD71vgwtUkjbYj0TGz89wdNVskbAf9pN5W3mMdQ7OZ5Q1rSRm/6", - "DFQDHCGdIn9noOMF5rOYjQ13DMW3R97wEQDDAZAtGEaFQR4KxpyyEoosVbv1LNioppGm7d5ctmvX471s", - "OXlOa1861YxdS3AZkayIL9v+r4oaUhKhed+SzAvYgH2w9RtIYWuiTiP/C5S2ZGrHGCCqrIQ1tOJCXZqm", - "GkVNtgbfV4XOpACo0BvZtZGlAh7ju7xjOHFrz6KQuTHYTVpSLGLtTpE9ZpKkUWfDM3tM1NijZCBas6Km", - "LfypQ0WOthnQHOUEqno6Qub1yLHT/GRHeOcHOPX9U6KMx8SHcXzoYBaURt0uBrQ3ALpWQ6eep+Of4xxk", - "wcGCsxXBEWtJvOEbqqLXfNgg2Sf5Rt0auU9M8AixLzeQo1Tj9B0onMYz4KRw6YyQ2jlAYbUC0yVhbV8C", - "J1xEJWqvqQqqSpMc1f9gJ8ZGjDtt+gZO5SZM+fY7S3AwojpZEgcVCRno9Obm+S9yEncexMHxUjSiwL3r", - "3WH/8tTt1A5sIOqyINzsp5H9sciru8UcF5+SWe0HKktxbWvOxnroC/B+UEt93gXkxHIWrmUfjj11eXu7", - "pg4WPURZ0S0REv8xWuc/alqy+Rb5jAXfdyNqSQ0JOcerjQhw4d1m4t3i1dQD5q0twk9l183GjhkNtzWj", - "RECbi9wXBxNkRa8g3gYMdrD8M9eGcap6hpYLc2V3trOPBbd4n3tpRYtY08cMsNsWd/A5wU3v/9U8co2n", - "8okbq5LmvsKwK3HW5jNYRdwTl17Cavcr6D5f8yQQKpM3RCt92oziBibTA1lX6mnRUPmmFti9is29ylW3", - "WsZIy2+nRs+O9+OjlnLXuzA26qYHdFzndR/4cdnbz4P/ZHLmoWWMAf+PgveBQtcxvLam9WfAciu1TgJW", - "a62eiU0mYa72BZhYc7VR52WTlMebWBnPJVBlI27O3jjFs8k9zLhRhG1MaPBphlEKmDPeMEvGq1on9BhM", - "Qcy3EcJioz+idcCFNiQlGGFyTcs3a5CSFUMbZ06HLQkbl5jxjg7XN2HCCHdqfwCmGh0OH143ZvS4mbnA", - "bRE7G66pNOUFlUXcnHGSgzT3PrmmW3Vzj1JwDuzzKdFImmmnA4m8S0jaFpBy65zCt/T3BADpHTp+Rjhs", - "MC444ayxph0tBvwzfRj+FA6bFd1kpVjg8+CBA+GSTqOHz6qAgqMZ3Mpn49bt51HsN9g9DdbbcIxIC5x1", - 
"zBS7z/0b3EpUI3/iTO88+dZG2X2vbeNu7cH0SOWLJvjfEkv/PKae2LusSvEzey9s+qcqnvYg2kQY8A+1", - "7eIDu4hhEC4/Q2wEH18usR1pkXrIby0DGVoM1I7wflBNKDvNXXhW35TWMzVYpExdGoQDLW3WPu/vpQHw", - "0BSi3FlvTxtCZsw4h9SY3J34IKtEleVjYj5tSZ7CuQkcpG0YB+gjcgIMrDuEx6hQpKqV0KxVrerQMpuD", - "1bL2ebuqfJfSP2QmGuDobReEmCMvwyNsjWP4kicYU6bdN2ZtM1hgEoQSCXkt0Ux8Tbf7yxYOpII//9vp", - "V48e//L4q6+JaUAKtgDVlBPolP1r4gIZ79p9Pm8kYG95Or0JPq2IRZz3P/pHVWFT3Fmz3FY1uYJ7RQ8P", - "sS8nLoDUS99+Hbgb7RWO04T2/7G2K7XIO9+xFAp+/z2ToizT5VyCXJVwoKR2K3KhGA2kAqmY0oYRtj2g", - "TDcR0WqJ5kFM6r22aaIEz8Hbjx0VMD0QcpVayFBALfIzTNrgvEYENlXpeJX19Oxal9PTrIUOhUaMipkB", - "qUTlRHs2JymI8AWRjF7WOsMnWsSjGNnAbG20bIoQXeR5mvTigvu7uX27GLROc3qziQnxwh/KG5DmkH9i", - "OCHJTThJY9r/w/CPRIaVO+MaYbm/B69I6gc73hyf9uIeQnaRUaD1s20kyAMBGHht23onGT0UizKMS+sl", - "QH+CdyB3xY8fGsfy3mchCInvsAe8+Pls0y68ZHDgfOFU3T8EpERL+TBECa3l73uR61lvuEiiLXJGE61B", - "WbYk+mJh9NxaPQ+vmAe0kt5jZymEJkYzLcvEI2lrx8EzFROOUQnkmpafn2u8YlLpU8QHFO+Gn0bFL2Vj", - "JFtUqpsl4HxNR80dvYq9u6n5W3yY/R9g9ih5z7mhnBO+d5uhcYeWNrx6HrzRwMk1jmmDrB59TWauik4l", - "IWeq69y/9sJJeBgKks1dQCts9J6XqPvW+bPQtyDjuY/EIT9G7q3gs3cQNkf0CzOVgZObpPIU9fXIIoG/", - "FI+Ki3vvuS5uWXHlZvmcosyMB+Zz6pctH7s8m9rEXDq1gv46R9/WLdwmLupmbWOTkY0u3HJ5+V7PxuQQ", - "SxdZMd0xidmdVFs5qNbK75C+zOLIjeHmTVHMz0MJrW3S5oGk+539qFm5N2ClVULh03SysBmMsEjAL64o", - "1Oe9Sz0EA2nE3NJvky7GIiax1tbk0VRRxqcRdRFct0Qye3zVmNeS6S0WBPcGNPZLMh/TdyG3h8sNE3xp", - "7u7T4gq4j/doMoHUyt+u3wla4n1kXXzc3EKiPCIvbep+d1D+em/2r/DkL0+LkyeP/nX2l5OvTnJ4+tU3", - "Jyf0m6f00TdPHsHjv3z19AQezb/+Zva4ePz08ezp46dff/VN/uTpo9nTr7/513uGDxmQLaC+Zsezyf/J", - "TsuFyE7fnmUXBtgGJ7Ri34PZG9SV55i3DJGa40mEFWXl5Jn/6X/7E3aUi1UzvP914gqvTZZaV+rZ8fH1", - "9fVR3OV4gU//My3qfHns58EUdy155e1ZiNG3cTi4o431GDc1JP8y3969PL8gp2/PjhqCmTybnBydHD1y", - "Nes5rdjk2eQJ/oSnZ4n7foyJc4+Vq4lx3LzVSvrt3mHIuhfO5QIKcj+8uvmX4LlVD/zjnblLTvd3ZYkx", - "rOKsQOJyxYcnWE4Rg7EQrMcnJ34vnKQTXTjH+Prj2ceJCgXtu8JED6kXDcBJyJpirv1F/8SvuLjmBLN8", - "2gNUr1ZUbu0KWtiIBsdtoguFRnbJ1pi2zfTu4ryqXCWSIZRj+br2KfedkUBCKQtzwmyFC1dPRKVQ3q+C", - "ckvs78z62psssTvY6K2B2afPCZlSnUPI4Qx9xhZh4YxYs0MP0dNJVSfQ+RIf1qhdOJtG1TUsNKIsAsZ7", - "GH1b/zfBqCHdRcgKav5aAi0xsZb5Y2UINfefJNBi6/6vruliAfLIrdP8tH587LWQ448uY8qnXd+O44iw", - "44+txDLFnp4+4mlfk+OPvhb+7gFbddBdrGnUYSSgu5odz7D+3dimEK9ueClI8+r4Iyrgg78fOytq+iMa", - "QuwNe+wTNA20tKk40h9bKPyoN2Yhu4czbaLxcqrzZV0df8T/INl+sqe9hFQmJ1t7h5Km+ZQwTehMSCyt", - "rvOl4Qa+pjNTUcvekT81vZ5bCPA29eFFk2fv+++/cCDiR0IRxdy/jQTRmqkREtGdEjGFIAK32jeC8PuT", - "7JsPHx9NH518+icj6Lo/v3ryaWT0/PMwLjkPUuzIhh9uyfF6NptmkXaTAgPrKxmOFobf97it6gxEAjL2", - "FG7tDJ/I2Wq6PL1DHt9OKJ7g79/Sgvg0CTj3o8839xm3MeJGULUC9afp5KvPufozbkiell4ku6HwdmoP", - "f8wUiNvslPA2nXDBo2SKfGHFDJFKVTHAb5SmN+A356bX//CbVsOelw/f4Vlr64pxDHNr4nrsZRKKVILP", - "MOvfFtBiTXnuH2M1ryNwv6zk7QgjBODWCuZ16dOQVCWbb60fQpR+IlVXleE4c6oCZbknGUZhtlkUwtCk", - "5rngNnQKX794BzBmQ0AnsrpiVasLmxuqwlRK/iXWkd/0f9Qgt82ur5jRfHs6UxPc93uycIvHO2Dh7YHu", - "mIU/PpCN/vlX/N/70np68pfPB4FPXnTBViBq/We9NM/tDXarS9PJ8LawzrHe8GMM7z7+2FJX3OeeutL+", - "veket8CaEV6FEPO5QtPKrs/HH+2/0USwqUCyFXCNOW/dr/bmODa8vdz2f97yPPljfx2tvMwDPx97i2pK", - "S263/Nj6s635qWWtC3FtSz8k5RW8PmlJVpTThX3EH4yQ5h50AzQpo8mbKlxU7u0uoVhXU9S6sRLbpyzu", - "QX/w4+ONFqK5FozjBOiQxVno3HSl0QXuStv2bYjnDrIfRQF92Sh1EToYW5dhOAqpIrIf7sY6GTHeT4cd", - "FHQc26iHPhmZj7Xq/n18TZk2EpTL3YwY7XfWQMtjV4Gx82tT9Kj3BSs5RT/GWQmSvx7T9rloW1DMlg11", - "7JlXUl+dBWGgkX9M4z83zpvYGYLkEtwg7z+YXVcg156SGtv+s+NjfF25FEofoyTatvvHHz+Ejfa15cOG", - "m2+bTEi2YJyWmTOSNWVkJ4+PTiaf/n8AAAD//2smK19dEwEA", + "H4sIAAAAAAAC/+x9a3Mbt5LoX0Fxt8qPJSnZsbMnvnVqr2InOdo4sctSsnfX8k3AmSaJoyEwB8BIZHz1", + 
"32+h8RjMDIYcUrST1O4nWxw8Go1Go9HPj6NMrErBgWs1evFxVFJJV6BB4l80zyUo/G8OKpOs1Ezw0YvR", + "GSc0y0TFNSmrWcEycg2b6Wg8YuZrSfVyNB5xuoLRizDIeCThHxWTkI9eaFnBeKSyJayonVZrkKbv+7PJ", + "f51Ovvrw8flf7kbjkd6UZgylJeOL0Xi0nizExP04o4planrmxr/b9ZWWZcEyapYwYXl6UXUTwnLgms0Z", + "yL6FNcfbtr4V42xVrUYvTsOSGNewANmzprI85zms+xYVfaZKge5dj/k4YCV+jKOuwQy6dRWNBhnV2bIU", + "jOvESgh+JfZzcglR922LmAu5orrdPiI/pL0n4yend/8USPHJ+PkXaWKkxUJIyvNJGPdlGJdc2HZ3ezT0", + "X9sIeCn4nC0qCYrcLkEvQRK9BCJBlYIrIGL2d8g0YYr8+8WbH4mQ5AdQii7gLc2uCfBM5JBPyfmccKFJ", + "KcUNyyEfkxzmtCq0Ilpgz0Af/6hAbmrsOrhiTAI3tPB+9Hcl+Gg8WqlFSbPr0Yc2mu7uxqOCrVhiVT/Q", + "taEowqvVDCQRc7MgD44EXUneB5AdMYZnK0lWjOsvn7XpsP51Rddd8C5lxTOqIY8A1JJyRTPTAqHMmSoL", + "ukHUruj6r6djB7gitChICTxnfEH0mqu+pZi5j7YQDusEoi+XQMwXUtIFRHiekp8UICXhVy2ugQfqILMN", + "fiol3DBRqdCpZx04dWIhER1IUfEUoyL4waG5h0fZvsdkUO9wxLvt3xRbuE9tqC/Y4nJTApmzwtyX5O+V", + "0oGAK4XbvgSiSsgM782JGcYgX7EFp7qS8OKKPzZ/kQm50JTnVObml5X96Yeq0OyCLcxPhf3ptViw7IIt", + "enYgwJo6pwq7rew/Zrz0UdXr5F3yWojrqowXlMVnwdDK+as+yrBj9pNGmkGeBbkB98eNdbk+f9XHUrf3", + "0OuwkT1A9uKupKbhNWwkGGhpNsd/1nMkLTqXv42seGF663KeQq0hf8euUaA6s/LTWS1EvHOfzddMcA32", + "KozEjBNkti8+xpKTFCVIzeygtCwnhchoMVGaahzpnyXMRy9G/3RSC3ontrs6iSZ/bXpdYCdzGUswjG9C", + "y3KPMd4a4RFFrZ6DbviQPepzIcntkmVLopdMEcbtJqLcZThNATeU6+lor5N8F3OH9w6IeivsJWm3osWA", + "eveC2IYzUEj7Tuh9oBqSImKcIMYJ5TlZFGIWfnh4VpY1cvH7WVlaVI0JmxNgeJ/DmimtHiFmaH3I4nnO", + "X03Jd/HYt6woiODFhszA3TuQmzEt33Z83AngBrG4hnrEB4rgTgs5Nbvm0WDksmMQI0qVS1GYK3AnGZnG", + "f3NtYwo0vw/q/Kenvhjt/XSHEr1DKlKT/aV+uJGHLaLq0hT2MNR01u57GEWZUbbQkjqvEXxsusJfmIaV", + "2kkkEUQRobntoVLSjZegJigJdSnoJwWWeEq6YByhHRuBnJMVvbb7IRDvhhBABUnbkpkVr26ZXtYiV0D9", + "tPO++HMTcmrPidlwyoxsTAqmtBGGcDMVWUKBAicNioWYig4imgG0sGURAeZbSUtL5u6LleMYJzS8vyys", + "97zJB16ySZhjtUWNd4TqYGa+k+EmIbEKhyYMXxciu/4bVcsjHP6ZH6t7LHAasgSagyRLqpaJM9Wi7Xq0", + "IfRtGiLNklk01TQs8bVYqCMssRD7cLWyfEmLwkzd5Wat1eLAgw5yURDTmMCKafMAZhxPwILdALesZ0q+", + "odnSCBMko0UxrvUSopwUcAMFEZIwzkGOiV5SXR9+HNk/lPAcKTB8UAOJVuN0GlNyuQQJcyHxoSqBrChe", + "TivzPCqLZp/AXBVdQUt2wstSVNrAGL1czl/51cENcORJYWgEP6wRH/zx4FMzt/uEM3NhF0cloKKF8ayo", + "8hp/gV80gDat66uW11MImaOih2rzG5MkE9IOYS9/N7n5D1BZd7bU+bCUMHFDSHoDUtHCrK61qEeBfI91", + "OneczJxqGp1MR4XpF53lHNgPhUKQCe3GG/wPLYj5bAQcQ0k19TCUU1CmCfuBd7ZBlZ3JNDB8Swuysnoz", + "UtLsei8oX9aTp9nMoJP3jVXVuS10iwg7dLlmuTrWNuFgfXvVPCFW5+PZUUdM2cp0ormGIOBSlMSyjxYI", + "llPgaBYhYn30a+1rsU7B9LVYd640sYaj7IQZZzCz/1qsXznIhNyNeRx7CNLNAjldgcLbrWEGMbPUquqz", + "mZCHSRMd00StgCfUjBoJU+MWkrBpVU7c2Uyox22D1kAkqJe2CwHt4VMYa2DhQtNPgAVlRj0GFpoDHRsL", + "YlWyAo5A+sukEDejCr54Si7+dvb8ydNfnj7/0pBkKcVC0hWZbTQo8tDp+YjSmwIeJR9OKF2kR//ymTeI", + "NMdNjaNEJTNY0bI7lDW02IexbUZMuy7WmmjGVQcAB3FEMFebRTt5Z/vdjUevYFYtLkBr8wh+K8X86Nyw", + "M0MKOmz0tpRGsFBNo5STlk5y0+QE1lrSkxJbAs+t6c2sgynzBlzNjkJUfRuf17PkxGE0h52HYt9tqqfZ", + "xFslN7I6huYDpBQyeQWXUmiRiWJi5DwmErqLt64FcS38dpXt3y205JYqYuZGA1jF8x4VhV7z4feXHfpy", + "zWvcbL3B7HoTq3PzDtmXJvLrV0gJcqLXnCB1NjQncylWhJIcO6Ks8R1oK3+xFVxouirfzOfH0ZEKHCih", + "4mErUGYmYlsY6UdBJniudmpzvDWwhUw31RCctbHlbVm6HyqHposNz1CNdIyz3K/9cqY+ojY8i1RhBsYC", + "8kWDVj+pyqsPUxaKByoBqcHUa/yMFoFXUGj6rZCXtbj7nRRVeXR23p5z6HKoW4yzOeSmr9coM74ooCGp", + "Lwzs09Qaf5cFvQxKB7sGhB6J9TVbLHX0vnwrxSe4Q5OzpADFD1a5VJg+XRXTjyI3zEdX6giiZz1YzREN", + "3cZ8kM5EpQklXOSAm1+ptFDa47VjDmpWSQlcx3Iu6jOYIjMw1JXRyqy2KokWqful7jihmT2hE0SN6nFz", + "CK4atpWdbklvgNBCAs03ZAbAiZiZRddeDrhIqkhpZGcn1jmReCi/bQBbSpGBUpBPnD57J7y+nb1/9Bbk", + "4WpwFWEWogSZU/lpVnB9sxP4a9hMbmhRGfH8+5/Voz/KIrTQtNixBdgmtRFt9V13KfeAaRsRtyGKSdlq", + "C+1JMCK2YToFaOhD9v2x17v9bTA7RPCJEHgDEj1qPunR8pN8AqIM8H/ig/VJllCVEyMG9qofjORq9ptT", + 
"LrxsuGOGMEFBlZ7sulJMo4bexCw14uKpWwQH7pEnX1OlUQwkjOeov7VXIc5jZUszxWhPpzKcsvc1Zib9", + "2T/EutNm5nrnqlLhVaaqshRSQ55aHtqse+f6EdZhLjGPxg5PPy1IpWDXyH0IjMZ3eHSKAPyD6mChdjbv", + "7uLQ68CIL5t9sdyAr8bRNhgvfKsI8bFTbQ+MTNV7YMmNqRa9zYQogKLKVGlRloZD6UnFQ78+DF7Y1mf6", + "p7ptlyStGchKKrkAhSYm195BfmuRrtDWtaSKODi8fwIqvKyLXBdmc6wnivEMJtvOCz6CTav44Bx03Kty", + "IWkOkxwKukl4W9jPxH7ekzD82Eggtf5AaJjM0JqYppH6THh/08NmFTiVSgneBL+QzJxz84yqSc31PnzS", + "HHDaFN90xPogzIJgJOnAj4fIsvSUGBHv/huhDVk5osPVuFvpnmvpwV6Y9ZMgEMed1IqA9uz/CcrNHQSw", + "o86/AdW38HrqYy27R/2Pd3vjwmxdZa3bJnlF9PLlHYyxjwf12CLeUqlZxkp8rn4Pm6O/3tsTJH0lSA6a", + "sgJyEn2wL/ky7k+sG3J7zMNe84PUrV3wO/rWxHK8Z1YT+GvYoNrkrY1oiLRVx1BHJEY1Fy7lBAH1XvPm", + "xRM3gTXNdLExgq1ewobcggSiqpn1Wuma0LQoJ/EA6Zip/hmdQT5pDt/qIXCBQ0XLS3ke2tfWdvguW0+u", + "BjrcK6sUokjoP9snvoOMJASD3IVIKcyuM1oUG6JD2IynpAaQ7oJAb4wgzzxQDTTjCsh/iopklOMLt9IQ", + "hDQhUfJBYdnMYMTNMKdzVa0xBAWswL7m8cvjx+2FP37s9pwpModb63LDsWEbHY8foyrurVC6cbiOoO02", + "x+08cemgrdJcsu7V1uYpu53c3MhDdvJta/Bg4DRnSilHuGb592YArZO5HrL2mEaGOfjhuIPMd02XsM66", + "cd8v2KoqqD6GoRJuaDERNyAly2EnJ3cTM8G/uaHFm9DtbjyCNWSGRjOYZBglOHAsuDR9bGChGYdxZg6w", + "DRwZChCc214XttOOl3btt8xWK8gZ1VBsSCkhAxslZ6RUFZY6JTZkIltSvsAXkBTVwrk623GQ4VfKasJk", + "xTtD7CuK6TWfoAlDJcPU0Gzpoy2NEAbUvGzb9g/7WLulARR7GQ26tKPtaduDkibT8aj34W/wfVM//C3e", + "miGjhxoTG/JhhLQamoHWM8SnkZW6SIy30Rw+QwyfxkpTD52Csjtx5BRef+zzC7+oyrLYHEFIsgMRCaUE", + "hVdarAZU9quYkx9YJsVZsRDhzlMbpWHVNd7Yrr/0HNd3h7yABS8Yh8lKcEg86d/g1x/w42C1o72Ge0ZE", + "gWivAdsPnwYSWgtoTj6EpO+7SUgy7bPftnSqb4U8lpXdDjj4TTHAcr3TrcNNeah9nRZFwiRt1Q8dLqLG", + "wSmcSUKVEhlDQfE8V2PnfW6t2NatvYX+tyE06ggHuD1uy/YahWFZRT4UJaEkKxiq+QVXWlaZvuIUNX3R", + "UhPOgl450K8WfumbpPXQCTWxG+qKU3QUDfq/pGPQHBJ6qG8BvHZYVYsFKN16YM0BrrhrxTipONM418oc", + "l4k9LyVI9Nib2pYruiFzQxNakN9ACjKrdPPJsaqUJkqzonCGYDMNEfMrTjUpgCpNfmD8co3DeT8Sf2Q5", + "6FshrwMWpsMZ1wI4KKYmaU/H7+xXDCpxOFm6ABOMtbCfvcdznRtiZNbeSFrxfx/+24v3Z5P/opPfTidf", + "/cvJh4/P7h497vz49O6vf/1/zZ++uPvro3/759T2edhTweAO8vNX7o1+/gofYlGcSBv2P4JBZsX4JEmU", + "sUNRixbJQ8yX4QjuUVPvp5dwxfWaG8K7oQXLDS86Gvm0r6nOgbZHrEVljY1rqfE8AvZ8Dt2DVZEEp2rx", + "108iz7Un2OpwE295K8bAcUZ1dADdwCm42nOm3GoffPfNJTlxhKAeILG4oaPUAokXjItgbHj5mF2KA7uu", + "+BV/BXN8Dwr+4ornVNMTe5pOKgXya1pQnsF0IcgLHxT5imp6xTvXUG8CqSioOcogleIUdJVey9XVe1os", + "xNXVh44fQle2clPFXNSds66azE85MXKDqPTEJXGZSLilMmUL8Sk+XDQ09t4Kh5VJRGWVWD5JjBt/OhTK", + "slTtZA9dFJVlYVAUkapy+QrMthKlRQgcM8zcxd4aGvhROKcSSW/9k7dSoMivK1q+Z1x/IJOr6vT0CwzB", + "q1Mc/Op4oKHbTQmDH769ySja711cuJXL0al8UtJFymZydfVeAy2RQlDgWOFLsygIdmuEB/pIAByqXkCI", + "Rd5jSyxke8f14nIvbC+f1iu9KPyEm9qMnb7XDkZR8Qdv4I7Ielrp5cRwhOSqlDkGfq98ggG6MFeO9yBQ", + "bIEPALUUlVkykGwJ2bXLbAWrUm/Gje7e0cXdxZ7hMIU6IxccOGcGfxnlZsCqzKkTZCjftFPcKBsMgYO+", + "g2vYXArbfTowO1iUjS5KsaL6ji7SbnTXGvKND7Ibo735zu/Kx4i6dCQYd+nJ4kWgC9+n/2hbAeAIxzpF", + "FI08H32IoDKBCEv8PSg4YKFmvHuRfmp5jGfANbuBCRRswWZFgk3/R9eu4WE1VCkhA3bjo3rDgIqwOTGv", + "o5m9jt2LSVK+AHOpm4tYKFqg0/40aehH6XAJVOoZUL1VX8vjNBMeOhTIbzFoGpUmY7MEWJv9ZhqVIBxu", + "zQMP3962jXMknh7kTmXXBPmBoPrudZD09JBHhEN4Ip+dv+/DnoT3gvNPi6kTQbbfVwaHCyluzW4aAIVP", + "3YgJXqJ7qlJ0AUOvo4apaGBKjIYFCAfZJf0k5R0xb4s1HRlj4CJs94nBS5I7gPli2AOaAVoujn5ua0J0", + "VoU3vNh4pM4KFKiDg6glHSobdja+2A/YNBsDyWth1QPWxFp89JdU+aOfjyOOfqC0+PukktmWP+888r6j", + "upsdz1/TbdY+tvqcGRDBTQ+fRc+nzvP58kbjvXLfjUcuxCG1d4KjFJ1DAQuLE9vY01mdn6neTQPHm/kc", + "md4k5cgXKSMjycTNAeYh9pgQqzEng0dInYIIbLSs48DkRxEfdr7YB0ju8ktRPzbeXdHfkA4WtN74RkoW", + "pbn1WY/VKvMsxaW3qEWeloszDkMYHxPDSW9oYTipCzytB+nkasO3Tyszm/PteNT3Jhp40NwaUTrZa5VW", + "njlkfbHg7ZeRfhXstYaZWE9sZHTyaTVbz8yZSMYrYJx26vDazHkPFJmJNfoU4Q1nHdz3hq4fMg9Y5Aay", + 
"ZgqpHPv1iY0WvP0A2S7Ip6hZIek5vVoguz5J9jBgesTpPrJ7GKXQOxJILQVmnQbcaXR26lma0lZXEqmv", + "23HIDhvC1FKspu9wJneyB6Nd5Wkz193f6nSH/cnR/Fn9LEn+ukq5++RltJ1Lm2txn7SMbXJoALEFq2/b", + "QmwSrU3HpSZeI6ylWJJh9F1jVxdtCgpATcCkIVdPrlNm6aur9wpQZrjw3SI9J+4e5ZtHkTechAVTGmrj", + "gndy+fy2H1QnmseWmPevTpdybtb3ToggaFhzLHZsLPOzrwBd1+dMKj1By0xyCabRtwo1ad+apmlBuOlv", + "x5Q19ewtByNE17CZ5Kyo0qTsQPr+lYHox3BzqWqGFyXj1ttohqnwkw66e9gmER7r2L0VQa8tgl7Tz4Gf", + "YQfLNDUwSUN5zen/JEesxQu3cZYELaeIqbuhvSjdwmujWPouo42E6MjtYrrN5tM5l7kfe6c3lo/o7xMi", + "7EjJtUQZEdMBhGKxgNxnenNBoTbrlcunVwi+qHMJmt+3pA+cEpvFD5Pwbcnf59zToc85vVFOBKtiJKGP", + "HzMIeR1dh7kHcZIFcJu5ZbR/vZEiibjYMR5bRJrRz8vbO27zSdfhy5a7cO3Ta/cwbDZuTwE0d88qBX59", + "2w9td7sc6sZ9TseNFLHbDxgOiBTHtIoEmA7R9HBuWpYsX7cMf3bU6QEkMVDc62aCb+EM2ZIbbAd+mo7F", + "O2r1PDC3I7Z3xo4TfOafmEem9Wd2HrnmbNDMZRvIK4nWpIa3cDeffnhoDlz79z9faCHpApxFcGJButcQ", + "uJx90BClpFdEM+sgnbP5HGJLmDrEitMArmPvyAcQdg8Jds1l4W25lT67RLaDtuoV7EZomp4SlNLnc3HZ", + "tUf6h0ekWwuXTbRxBxgVkwkFvofN5GdaVOYlxKSqfVOdgbB5re9BEzer72GDI+90+TSA7dgVVMW9A6TQ", + "lHUlfFJRlvAHqlF9Ad/AjS3cY6fO0rt0pK1xpTT6j0Z9QzXqSTSX8umOTe0iYyAdslcXaa8Tc7aguS1t", + "Qt+1RSzfLftET5B4KobeG4dcciHTxk7vMqCFJ3xc7OhuPLqfv0fqnnQj7tiJt+FqTu4CemNa+3/D6WvP", + "DaFlKcUNLSbOT6ZP6JDixgkd2Ny71Xzm91X6VFx+c/b6rQP/bjzKCqByElQdvavCduWfZlW2BMf2a8im", + "Y3e6XasKizY/pMyOPWluMfV6S5vWqXVT+01FB9V51szTnuI7+aZz8bJL3OLqBWXw9Kot0tbRq+ncRW8o", + "K7zh10M7VMtulzusulKST8QD3NtJLPL+u/dYvXECV1fvbzxma3uKdZQKKfETvnTqQE/nDq9Jn9Wa1ndw", + "SFznG8xkmn53cZfnFBmjczijR5cDvxWycVG5qMakw9qnExDNY8LiMW2Uv3RW+I5YOCVWhPx18avhDY8f", + "xwf/8eMx+bVwHyIA8feZ+x3fUY8fJw3DSVWfYVmoyeN0BY9CXETvRnxeNQSH22HiwtnNKsjIop8MA4Va", + "zzOP7luHvVvJHD5z90sOBZifpkNUFfGmW3THwAw5QRd9UYnB+Xlly3kqIng7Bh+jZA1p4dXjKnhYO3v3", + "CPFqhXbniSpYlnb64TNlWBK3Lr2mMcHGg23IZo6K9fiV84pFo5tm6iCTZ2sh0axJhKtkJuAavzPhWEDF", + "2T8qiMr64k3cupz9UwhH7QjYaf2iG7hdNXh0SMHf+5sIvVZtm8Joq8n1VTADekSk6kztGe8Qz9hh/lti", + "FRxF+esTA9uWznV4J2VtfedtLwLtzMCefTqLa/8DyZXDtJv5ashOMzWZS/EbpGUHNBImUnd46zZDBfxv", + "wFM+qm1GFjwH6oLV9ey7CGS4bqGPVO6tS/CLDlXzDrnC03xiv43eU2kQ7Xe/2kCl04u7Teh7qMaOJ81A", + "mh5mhgc2cgvHWj7e3Y1ye0JtXotG5Fn6nMeBoid2/PqcO5g7wbUFvZ3RVKEj8140MEXb33DM04L4zn6D", + "VEjNYGcnUSxDaMtssr8SZG096qZKPvDtZ6cd/OqrH3lIcfHzbmx9VQolEsNU/JZy9CPEfpYDut4KrB+G", + "6XUrJCb4VGkfwhwytkoqw6+u3udZ1/MrZwtmS4pXCgida5fn0Q1ki8pbKnLVvEMuEoea8zk5Hddn1u9G", + "zm6YYrMCsMUT22JGFV7QwScidDHLA66XCps/HdB8WfFcQq6XyiJWCRLe5yh6Bk/YGehbAE5Osd2Tr8hD", + "dBhW7AYepS8YJ6yNXjz5arytcjZiHIvEb2PyOXJ5H8iQpmz0qrZjGLbqRk1HJswlwG/Qf59sOV+265DT", + "hS3dFbT7dK0opwYhKZhWO2CyfXF/0ZWjhRdurTOgtBQbwnR6ftDUcKyeaHLDEC0YJBOrFdMr5ymqxMpQ", + "WF2G3E7qh8P6er4MmofLf0QX7DLxxv8dnlt01RPhiF71P6K9PUbrmFCbsbVgdfyFr1BLzn1maqwLF8rB", + "WdyYuczSUV7FcIw5KSXjGrVGlZ5P/mKe75JmhiFO+8CdzL58lqiv1ixBxPcD/LPjXYICeZNGvewhey/l", + "uL7kIRd8sjIcJX9Up3SITmWvr3jav7fP7bhn6HtL12bcSS8BVg0CpBE3vxcp8i0D3pM4w3r2otC9V/bZ", + "abWSaYKhldmhn969dpLISshUpYuaATipRIKWDG4wvjS9SWbMe+6FLAbtwn2g/32927xYGolu/nQnHwuR", + "VTnxTgtplYyk//MPdX58NG7buN2W9lLIhJ7WaRw/s1vqfvrCtg3dugPitx7MDUYbjtLFSk+4h43nCH1+", + "D3+vNkh2zxuq0ie/Emne8SjrP36MQD9+PHai8q9Pm58te3/8eLjLbFpfaH5NoOawu6advdL0TW311yKh", + "vfNVPIPfmEtVktCwJu8yc6XO3Bhj0iyV+PnljuPEK+7thpw+QB41+LmNm9+Zv+Jm1hEw/fyhWT02ST55", + "+B7FUFDytVgPJaLWteXp6Q+Aoh6UDNQK4ko61XGTnhI73XwisjWjzqAQ5qUaF8Aa7LXyJ9oFg5rxlr2o", + "WJH/XFuhWzeTpDxbJp3KZ6bjL/YZEDWINBjZknIORbK3fS3/4l/ViXf/30XPsCvG05/ahZgt7C1Ia7Ca", + "QPgp/fgGV0wXZoIYRc2EXCHFSbEQOcF56solNWvsVjRPVZJNxPjjsKtKO69kTJ7gCorMWYFutGl7OLac", + "SKp7uCqW/fclrsw4WIVfWbWEHR0koWyF17aiq7IAPIQ3IOkCuwoOre6YsQ1HjsqSEFWaT9gSk78IoivJ", + 
"iZjPo2UA10xCsRmTkiplBzk1y4I1zj168eT09HSYkRHxNWDtFq9+4W/qxT05wSb2i6v8ZQsm7AX+IdDf", + "1VS3z+Z3icuVX/1HBUqnWCx+sAHZaCE297otvRrKBE/Jd5ifzBB6o0QAKkV9huVmTtCqLATNx5gU+vKb", + "s9fEzmr7SEDUYenXBWoAm0ckaeQZniPV51/ryV01fJztqXPMqpWehKKsqUyKpkVdS5a1vJ9QNxhjZ0pe", + "WbVscOyxkxBMLS5XkEc1YK0aAInD/Edrmi1R3zkdbVUp91QDGl7C2HPA2lwUxb2GglnIwc0yXBVjW8R4", + "TIRegrxlCjDvBNxAM2FjyHbqFPI+gWNztbLi3BLOdA/pNZTH2ncXPHBW9PX+FUnIWvtwb9tfnckDi5zv", + "W+z5Anul43ZalaNbfg+2ZMbaF92Ykh+csSOjXHCWYbGJlAiOqRiHmVUH1OVI2zvVyJ3lxDFM1qsOAeoO", + "i70VrD3LdIjrOjVEX81+W8Kxf2pYuyKAC9DK8UDIx758vDPQMa7AFUAz9BVzVCETrl/JsJjgQnJEl/Tx", + "CLOp9ehavzXffnS6ecwZc8046twcUt1L0BrYCsXQzs4J02QhQLnVNuPC1HvTZ3q55gjCh+lrsWDZBVvg", + "GNYV0SDFegF3hzrzPsHOB9e0fWnautoF4eeGS52d1K/7Q5KFqLD/qZrrvehP+X55R5oIuWH8eLQtxLjV", + "1R/vZUOGcIOef1Difd4hm1C+vjnKN+bJaukNWxAbuZtMG8x4AozXjHuDbzoPVpa8S3Bj8DT39FOZpNo+", + "OgZxvEugRU84DAbVW4+B+w7VrsRgUIJr9HP0b2Ndeb+HrYQG9euC8g3xh8JQdySUvKRFcIZP1NFH6cwJ", + "Y9ZZuFVZP8VWDFuf+NDcBrp2BoKG7lgNZd97qi/b6KzKF6AnNM9Teee+xq8Ev/qAQlhDVoUiYCHOtJmu", + "vUttbqJMcFWttszlG9xzupwpqhSsZkXC9fZV+Ah52GFMRDXb4L+pClj9O+Oc3veO/vYe7vl+NQq60ewp", + "6dnQ9ESxxWQ4JvBOuT866qkPI/S6/1Ep3Qd+/yHiultcLt6jFH/7xlwccZrujo+/vVpCFm30pxf43ecD", + "C5lcm1wJr7JOnTf0yMDNS2xZC3jfMAn4DS16Mi7EVht7v1pLRl/ehaw3rQjVLnudpqTmCUNUGP35v6wH", + "dssy1DVv9vlYWxfrT2k8cfjYivR+S+P3Dbui9XqrGUqvPfEwk19NBPva/Fwphq6+lBaFyAZzBjfMmenU", + "n6pXrFYu833CK+9mJfL4LMTeXABpxmYdlhOhFfiwTX7Dp1Xyi7xNj9bQjwSiGZq1DNHoljC2gZkePA+M", + "nTqeKFLZOsySb1mBxaH+/eLNj6P+jYx2oLulLnV2UoXdtzEhUq1NHgvRwMcWHiB4kdZ/qx6VOuaGSp8G", + "V504+eFbqyAcApLNk7RP69dDB+8QwELYqlCpuhnd7DSjejs88iNqqLfXcpSYOlJU0a62lHj7WKVn3YSE", + "QqSDCpM2ZKQhxZ1SdYTcS8FrYO1F4/LR2eJKnbpMHQb6aohw2MHH3Xh0nu8lPqVqUY3sKCkG+5otlvrr", + "QmTXfwOag7T1RFLPSVtNZAXmGaqWrMT3TykUq+sBF2Ywl8h7icNNh4bmXC7BZYXxSQI6Y3kH6hvINNaH", + "rt1AJcBwP4cyvUQDgTcoYpPfwRVEAuRQ6uVWYck6d5d6WZcNBRd5xhSZgTNd3AAfEzaFaTtYLa+TQpEC", + "6NwrYaUQekBd3RC2hGiMgU7RV6dG83YxsJPzLUppaEvpTocXYTkLMQE20PKWqjpzVCuNwuBw7fkcMkx4", + "vzX93n8sgUf52MZedYewzKNsfCyEC2LJhqNqtGtYtyXC2wpqVJPqU0LalxDjGjYPFGnQULIicIiwPSQD", + "PCLH2nF9UYE+04ZzjGQq0BMiyPvBuwT8dY2lQ4oARNkpDwTD07i5nuqMlYdB4yWaA8AwXaf3Ktpfp8ND", + "wbQvu1+3unr/S/kVFrNXzqmUhnTzsT6JnHfLMd+6dPWYaDFYC33ielD+N5+g1c5SsGtXoQYRZm2zt1Tm", + "vsVR0uTZe5OlgZ6HmVkdGNX18tnXL8dGKGaFMALQpC8wtBmpFFx4Hyjra10nLUOo5yAl5MEmWAgFEy18", + "mNUeyT9d+OQW7Fkv84Pw1vLo3yNk2K6ot4bCu7qQBJaDpFgzgTrn8xgrRMKKGuhlVNwhrQbdtUMv7Xef", + "U8SX99uuXu3DezgXuytk+9A7pjqYj0/XnDjhYG/u1UhEcoBmlnEOcuKNuO3SDryZJhPzKudVZkWV+GwG", + "7fXgtGNbuFlSqZl1V9l6QkVZOa5hc2LVPr7quN/xGGgrQ1rQo4TSLaI4qq5apeBeHAW83zd9ZylEMemx", + "DJ5361G0D8M1y64BE7OGyBQjBT9oHhszCXmIBqngM3K73PhqC2UJHPJHU0LOuI0O9O4jzQqkrcn5A71t", + "/jXOmle2wozTQE+veDrMCiu9yHtyPz/MFp7Xx5sUGH55z/ntIAfMrte8z0fuFkvCNOsET4eqN7r+HS0R", + "KiI/C0VKgLqwhuCXyBIS7yiC2VmiNELoH0CJMyATVYiUF/4hGWTMUGlMxZMhQBr4gOdqDYUbPIkA52S3", + "Iyur++zzjoo5kVD7ZhyagNXlNLVMXPWpRtozh1manHEuJMQzop+pTdQcItswzzH+Z8a0pHJzSJrUJqpS", + "aqheLO/0lgyOkvVCamfJLg6LQtxOkK1NQnWllDrAtFPNa9vXKa37maM+g8jtkionIm7IkuYkE1JCFvdI", + "h3hbqFZCwqQQ6IWZcuyYa/NIWGFcJyeFWBBRZiIHWwgtTUF9c1WcU5S9IHJlS6LA0g6mDLB9IjoeOKW5", + "fa15doLy2s5CG37zL00fm76iTn9nFz2xLgI98QWgXLo7hyHbuAsvEo7NyNRWyqZF5DlbI92ATB35OdGy", + "gjFxLdpV+N3BpxLIiillQQm0dMuKArNHsHXk0BD8gdKo7ZGdz9EP+oahw1szk4gVqUtzO4b0KzEPuIgz", + "shG9lKJaLKP6AAFO/3SXlXvYx6P8pCr0ScQQUTPFM7ISSrtnsR2pXnLtAvowE1xLURRNRZ6V8xfO6PsD", + "XZ9lmX4txPWMZteP8BHOhQ4rzcc+pULbd7eeSbZyMA57Keg1nyB5qN1p1m079Gp19DyYd7a4X8fwsEuT", + "H4H5YTdz3W3XOOsurL2uJp9Nv4XOOKFarFiWPm5/Lu/XXp/VFPdKZlq0VYhtFhpshnwgvseCOxNyzy6a", + 
"gdNkGdUz4niEc+tATmT+i2J8e1wyB8eDeu7QLt9xAtYk6xUDWwAgpDYRgq6kLV0cC2mB4YiFTZyCTilt", + "QAdeOOj7dz/YzAhHB0rDvYDqeCMHAB9aDcbYZsS0ns0zsfbfH9UpMw8C/m47lTeYR59T5UVNWtK6VfpE", + "Vj0cIV2AYKsH4iUmwZgN9UMMpegHXv4RAP2eiQ0YBvkn7gvGnLIC8kmqSvF50IGNo+e6i7GMRvf1HC0n", + "z2jlKwGbsSsJLrGSlf5l05xYUkNKIjTvasR5DmuwMVq/gRS2ju84MmdBYcv8tjQKopwUcAMNh02X7alC", + "KZTdgO+rQmeSA5Ro8W0r2lKeiHGVwJb2xa19EvmyDcFuUh1jEWt3iuzQtSQ1Q2s+scdEDT1KBqIblle0", + "gT+1r8jR1CWao5xAVef5MPFPzKHT/GRHeOcHOPP9U6KMx8SHYXxobxaURt02BrTTM7lSfaeepx2T41Rm", + "wVCEs+XBrm1JvOYbqqS3vF+r2SX5+iU2cJ+Y4BFiv1lDhlKNewpB7h5DPZYTlwMJqZ0D5PbBYLoktPlL", + "4ISLqObxLVXhFVNndfU/2ImxEePuoX2Ajb72H77/zhIcjKhWssV0idJA1vfT8f8uJ3HrQewdL0UjClwo", + "7xbVmKdu9+zABqIqcsLNfhrZH2sEu1vMcfExmVV+oKIQt7aIcfxEfQXenmupz5uYnFjOwrXs/aTHLuFw", + "WwvCogiRFd0QIfEf8yD9R0ULNt8gn7Hg+25ELakhIWdAtl4Uzu/aTLxdvBp7wLwiRvip7LrZ0DGj4TZm", + "lAhoc5H7sm2CrOg1xNuADiKWf2baME5VzVCpYa7s1nZ2seAW79MzrWgeKwEw0eymwR18wnPT+3/VYavx", + "VD7/Y1nQzJesdsXnmnwGq9p74tJLWG0Pc+7yNU8CoVJ+TbTSp8nID9Cm7sm6UjE/fcWxGmB3SoB36oLd", + "axkDlcKtGkdbAsQHLeXYu3CcGM7OkuJSv7sWF1c+/jy7k8wQ3beMIeD/gXal4V7RiWxLV1CP12OLpX+G", + "XWgk4knAatXgM7GeSJirXY40Vg8+E+saYBV0t4xnEqiyfkfnb9yztU6AzLh5Rluv3WBWDaPkMGe8ZrWM", + "l5VOvIIwDzLfRAiLrQmI1h7bXJ+MYUTRG1q8uQEpWd63ceb02NLAcZEeb0FxfRMKkHAjdwdgqn4BYjx1", + "rZ+Pm5nr3xYYtL6zSlOeU5nHzRknGUgjNZBbulGHm6qC1WGXsYpGslAzW0hktkLStoAUG2dtvqchKQBI", + "j2hRGmAJQifthBXIKoa06DH8dGH4U1iCVnQ9KcQCo357DoTLc42mQ/uAFByV6Fa6G7ZuP49iv8H2abAU", + "iWNEWuCsQ6bYfu7f4FbiI/QnzvTWk281nO0wbOvpbA+mRypf1OEZlli65zEVOe8SM8XR815U9WlKPO1B", + "tIlJl+iOVr1nF9G/wqVdiFXow4tVNl04UvH5Vq8wQX2D2hKAAaqOK6CZ8xDrKuI6igqLlLHLbrCnns5q", + "9/291AMeKlKUO+vNaYODjhlnnwqf2/MZTEpRTrIhvq22WlHujAwO0iaMPfQRmRB61h38blSo39XIidYo", + "5LVvkdPeQmK7bGVltk1l0Kdk6uHoTQOGmCMvwyNsVWsYaxVUMWP/OPfG7qYSLTAJQomErJKoZL6lm92F", + "H3uyz1/87ez5k6e/PH3+JTENSM4WoOqaBq3CibVrIuNtrdHndUbsLE+nN8FnC7GI89ZLH/YWNsWdNctt", + "VZ2MuFM2ch/tdOICSAXndkvkHbRXOE4dFvHH2q7UIo++YykUfPo9k6Io0jVlglyVML+kdisywJgXSAlS", + "MaUNI2zaT5munbLVEpWLmDX8xuaGEjwDr312VMB0jy9XaiF9Pr3IzzAXg7M5EViXheNV1k60bV3unWb1", + "eyg0orvNDEgpSifaszlJQYQxW7KCoFd3alPUp0duuoHZWofdFCE65/c06Z1x9xIWc7Kd2zdLces0pzeb", + "mBAv/KE8gDT7rBv9eUYO4SS1YeAPwz8SiVOOxjXCcj8Fr0i+D7ZEhZ91vCZC0pBBoHUTZCTIAwHoiYdu", + "BK1GQXZRbnJpbQxojfDm57b48UNtlt4ZmYKQ+A47wItjmet2IZjCgfM7J/b+ISAlWsqHPkpoLH9XeLRn", + "veEiibbIKU20BmXZkuiKhVFAvHoZ4sx7XiWdcHQphCbmZVoUiTB2q8fBMxUTjnkSyBtafH6u8S2TSp8h", + "PiB/1x+4FYctx0i2qFRHT8j5mg4CKwpR/ixQ8bcYW/8fYHY2eTu6WZzhv3MHokqIFtbbex4s4MDJLY5p", + "HbuefElmrtxPKSFjqu1QcOtFmhBvC5LNnX8trHU79vfeZYJ+Fvoex2Hu/YHIj5GRLXgOOJjro/47M6ce", + "DpA8LSlS7RBKAn8pXhcXVd9x7dyzNMxhqZyixI17pnLqlosfujxcB15elYLuOgff+g3cJi78em1Dc5UN", + "rjBzdfVez4YkFEtXgzHdMcfZUcrC3L8ozGdJcGZR6cZwkCQJqxa5d2WvaflLRnkamrtoxP2eAvJLi34z", + "Gj4K5hW344UCqBgr7tm6mI+DF4PgptsLcsUfE7Wk/m3h/nz6/MvReAS8WpnF199H45H7+iH1UsvXybjS", + "OpFOx0fUVRN4oEhJN0OC2Xemzknit84U9PlFGqXZLP2m+5vZM3y4ugCEc46sHtmLvUFd/pz/SQC0lRha", + "hzWcGEuSdXqgsBW7MgX93JcW36Z+76n20eK+FSt2Osk1CrHcjUcLm6QMq5P84mrVfd5t9xD05At0S79P", + "GjCLmMRaG5NHU0VJ3QYUZHHdEhUyMPI6qyTTmwuDf692Z79cp5JBfRfSM7mcX8EC72RfLa6Bex+zOplT", + "pbx0/Z2gBUqf1jGAG5lTFFPyja0Q4q7Fvz6Y/St88Zdn+ekXT/519pfT56cZPHv+1ekp/eoZffLVF0/g", + "6V+ePzuFJ/Mvv5o9zZ8+ezp79vTZl8+/yr549mT27Muv/vWBoXQDsgXUV/55Mfo/k7NiISZnb88nlwbY", + "Gie0ZN+D2RvUsM0xQSEiNcMrFlaUFaMX/qf/7S/KaSZW9fD+15GrBzlaal2qFycnt7e307jLyQJzoEy0", + "qLLliZ8Hc1k23itvz0NckPX9wx2tbU64qSG/n/n27puLS3L29nxaE8zoxeh0ejp9gvkUS+C0ZKMXoy/w", + "Jzw9S9z3E8yifaJcMZ6TOnQ0ae1/h2Ey/kkvF5CThyEI8F+Cv4d65GMJ5y4L5d+VJcawivMcicvVTR9h", + 
"3Vd0AEWwnp6e+r1w75pIvDzBiLMXH0eWf6TS4XaQelkDnISsrjrdXfRP/JqLW04w5a89QNVqReXGrqCB", + "jWhw3Ca6UGiak+wGMzOa3m2cl6UrgdSHcqyq2TzlvjMSSKiPY06YLZvjChmpFMq75Zfuif2tKaA7kyV2", + "Bxu9NTD7NGchbbK7CR3O0NPEIiycEaus7CB6PCqrBDq/wWA+tQ1n46hkj4VGFHnAeAejb6v/Jhg1pLsI", + "6X/NX0ugBYpG5o+VIdTMf5JA8437v7qliwXIqVun+enm6YnXOZx8dPmk7rZ9O4m9UE8+NpJy5Tt6ej/K", + "XU1OPro8VTsGjM0iJ86/PeowENBtzU5mWG9zaFOIV9e/FKR5dfIRdXO9v584OT39EdWn9oY98Y+PnpY2", + "h1D6YwOFH/XaLGT7cKZNNF5GdbasypOP+B8k2zt72gtIJcizBb0oqZuPCdOEzoTUyv5quIEvPs9U1LJz", + "5M9Mr5cWArxNvVPi6MX7bswpDkT8SCiimPu3liAaM9VCIhphI6YQROBG+1oQfn86+erDxyfjJ6d3/2QE", + "Xffn8y/uBkbsvAzjkosgxQ5s+OGeHK+js60XaTcpMLDuI8PRQn9Moduq1kAkIGNHPenW8Im0zKbLsyPy", + "+GZ1gQR//5rmxGdtwbmffL65z7mNSzGCqhWo78aj559z9efckDwtvEh2oPB2Zg9/zBSI2+yU8DYeccGj", + "fLh8YcUMkcqc08NvlKYH8JsL0+t/+E2jYcc3AGN/rbXF1QKMVCz2MgmFcMFnDveaQJrfUJ75ANA6Igv3", + "y0rejjCC236lYF4VPitSWThFlXnc+olUVZaG48ypCpTlwsDMg9kmdQlDk4pngluHS4y4824jmJwFXU/U", + "NSsbXdjcUBXmgPPRn1O/6f+oQG7qXV8x8/LtvJmGpWTp//YpGb/F/hEYf3OgIzP+p3sy3z//iv97X3XP", + "Tv/y+SDwGdgu2QpEpf+sV+2FvffuddU6yd/W5jrRa36CoSQnHxuPHPe588hp/l53j1tgSRn/8BDzuUKF", + "zLbPJx/tv9FEsC5BshVwjWnL3a/2vjkxN0Kx6f684Vnyx+46Ggn5e34+8XrY1Nu62fJj48/me1EtK52L", + "W1sZJinl4KVLC7KinC5supGgujS3pxugrhVA3pThenNZBgjFEr+i0rVu2YbNudQjwWcI78HgObpgHCdA", + "Nw6chc5NVxpd+64Sd1fzeOEg+1Hk0JWoUteng7FxhYajcJqIsPlwHJ1mxHjv9jso6G5iPay6ZGQ+Vqr9", + "98ktZdrIXS79PmK021kDLU5cidfWr3XdtM4XLAYX/RjnT0n+ekKb56KpdzFb1texo5RJfXV6h55GPnDP", + "f65NPrEJBcklGE/efzC7rkDeeEqqLQIvTk4wDnwplD5B+bVpLYg/fggb/dGTn99w8209EZItGKfFxKnW", + "6hLWo6fT09Hd/w8AAP//E+Why2YTAQA=", } // GetSwagger returns the content of the embedded swagger specification file @@ -421,16 +416,16 @@ var swaggerSpec = []string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -448,7 +443,7 @@ func decodeSpecCached() func() ([]byte, error) { // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -462,12 +457,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. 
func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.yml similarity index 83% rename from daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml rename to daemon/algod/api/server/v2/generated/nonparticipating/private/routes.yml index a17facb0a4..c2bee5eac8 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.yml @@ -11,9 +11,9 @@ output-options: - participating - data - common - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index bc32700ebe..1739a5dd27 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -1,6 +1,6 @@ // Package public provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package public import ( @@ -14,61 +14,62 @@ import ( "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" + "github.com/algorand/go-algorand/data/basics" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. type ServerInterface interface { // Get account information. // (GET /v2/accounts/{address}) - AccountInformation(ctx echo.Context, address string, params AccountInformationParams) error + AccountInformation(ctx echo.Context, address basics.Address, params AccountInformationParams) error // Get account information about a given app. // (GET /v2/accounts/{address}/applications/{application-id}) - AccountApplicationInformation(ctx echo.Context, address string, applicationId uint64, params AccountApplicationInformationParams) error + AccountApplicationInformation(ctx echo.Context, address basics.Address, applicationId basics.AppIndex, params AccountApplicationInformationParams) error // Get account information about a given asset. // (GET /v2/accounts/{address}/assets/{asset-id}) - AccountAssetInformation(ctx echo.Context, address string, assetId uint64, params AccountAssetInformationParams) error + AccountAssetInformation(ctx echo.Context, address basics.Address, assetId basics.AssetIndex, params AccountAssetInformationParams) error // Get application information. 
// (GET /v2/applications/{application-id}) - GetApplicationByID(ctx echo.Context, applicationId uint64) error + GetApplicationByID(ctx echo.Context, applicationId basics.AppIndex) error // Get box information for a given application. // (GET /v2/applications/{application-id}/box) - GetApplicationBoxByName(ctx echo.Context, applicationId uint64, params GetApplicationBoxByNameParams) error + GetApplicationBoxByName(ctx echo.Context, applicationId basics.AppIndex, params GetApplicationBoxByNameParams) error // Get all box names for a given application. // (GET /v2/applications/{application-id}/boxes) - GetApplicationBoxes(ctx echo.Context, applicationId uint64, params GetApplicationBoxesParams) error + GetApplicationBoxes(ctx echo.Context, applicationId basics.AppIndex, params GetApplicationBoxesParams) error // Get asset information. // (GET /v2/assets/{asset-id}) - GetAssetByID(ctx echo.Context, assetId uint64) error + GetAssetByID(ctx echo.Context, assetId basics.AssetIndex) error // Get the block for the given round. // (GET /v2/blocks/{round}) - GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error + GetBlock(ctx echo.Context, round basics.Round, params GetBlockParams) error // Get the block hash for the block on the given round. // (GET /v2/blocks/{round}/hash) - GetBlockHash(ctx echo.Context, round uint64) error + GetBlockHash(ctx echo.Context, round basics.Round) error // Gets a proof for a given light block header inside a state proof commitment // (GET /v2/blocks/{round}/lightheader/proof) - GetLightBlockHeaderProof(ctx echo.Context, round uint64) error + GetLightBlockHeaderProof(ctx echo.Context, round basics.Round) error // Get all of the logs from outer and inner app calls in the given round // (GET /v2/blocks/{round}/logs) - GetBlockLogs(ctx echo.Context, round uint64) error + GetBlockLogs(ctx echo.Context, round basics.Round) error // Get a proof for a transaction in a block. // (GET /v2/blocks/{round}/transactions/{txid}/proof) - GetTransactionProof(ctx echo.Context, round uint64, txid string, params GetTransactionProofParams) error + GetTransactionProof(ctx echo.Context, round basics.Round, txid string, params GetTransactionProofParams) error // Get the top level transaction IDs for the block on the given round. // (GET /v2/blocks/{round}/txids) - GetBlockTxids(ctx echo.Context, round uint64) error + GetBlockTxids(ctx echo.Context, round basics.Round) error // Get a LedgerStateDelta object for a given transaction group // (GET /v2/deltas/txn/group/{id}) GetLedgerStateDeltaForTransactionGroup(ctx echo.Context, id string, params GetLedgerStateDeltaForTransactionGroupParams) error // Get a LedgerStateDelta object for a given round // (GET /v2/deltas/{round}) - GetLedgerStateDelta(ctx echo.Context, round uint64, params GetLedgerStateDeltaParams) error + GetLedgerStateDelta(ctx echo.Context, round basics.Round, params GetLedgerStateDeltaParams) error // Get LedgerStateDelta objects for all transaction groups in a given round // (GET /v2/deltas/{round}/txn/group) - GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round uint64, params GetTransactionGroupLedgerStateDeltasForRoundParams) error + GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round basics.Round, params GetTransactionGroupLedgerStateDeltasForRoundParams) error // Returns the timestamp offset. Timestamp offsets can only be set in dev mode. 
// (GET /v2/devmode/blocks/offset) GetBlockTimeStampOffset(ctx echo.Context) error @@ -80,13 +81,13 @@ type ServerInterface interface { GetSupply(ctx echo.Context) error // Get a state proof that covers a given round // (GET /v2/stateproofs/{round}) - GetStateProof(ctx echo.Context, round uint64) error + GetStateProof(ctx echo.Context, round basics.Round) error // Gets the current node status. // (GET /v2/status) GetStatus(ctx echo.Context) error // Gets the node status after waiting for a round after the given round. // (GET /v2/status/wait-for-block-after/{round}) - WaitForBlock(ctx echo.Context, round uint64) error + WaitForBlock(ctx echo.Context, round basics.Round) error // Compile TEAL source code to binary, produce its hash // (POST /v2/teal/compile) TealCompile(ctx echo.Context, params TealCompileParams) error @@ -113,32 +114,32 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) AccountInformation(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params AccountInformationParams - // ------------- Optional query parameter "format" ------------- + // ------------- Optional query parameter "exclude" ------------- - err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format) + err = runtime.BindQueryParameter("form", true, false, "exclude", ctx.QueryParams(), &params.Exclude) if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter exclude: %s", err)) } - // ------------- Optional query parameter "exclude" ------------- + // ------------- Optional query parameter "format" ------------- - err = runtime.BindQueryParameter("form", true, false, "exclude", ctx.QueryParams(), &params.Exclude) + err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format) if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter exclude: %s", err)) + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AccountInformation(ctx, address, params) return err } @@ -147,22 +148,22 @@ func (w *ServerInterfaceWrapper) AccountInformation(ctx echo.Context) error { func (w *ServerInterfaceWrapper) AccountApplicationInformation(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = 
runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } // ------------- Path parameter "application-id" ------------- - var applicationId uint64 + var applicationId basics.AppIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId) + err = runtime.BindStyledParameterWithOptions("simple", "application-id", ctx.Param("application-id"), &applicationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params AccountApplicationInformationParams @@ -173,7 +174,7 @@ func (w *ServerInterfaceWrapper) AccountApplicationInformation(ctx echo.Context) return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AccountApplicationInformation(ctx, address, applicationId, params) return err } @@ -182,22 +183,22 @@ func (w *ServerInterfaceWrapper) AccountApplicationInformation(ctx echo.Context) func (w *ServerInterfaceWrapper) AccountAssetInformation(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } // ------------- Path parameter "asset-id" ------------- - var assetId uint64 + var assetId basics.AssetIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "asset-id", runtime.ParamLocationPath, ctx.Param("asset-id"), &assetId) + err = runtime.BindStyledParameterWithOptions("simple", "asset-id", ctx.Param("asset-id"), &assetId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params AccountAssetInformationParams @@ -208,7 +209,7 @@ func (w *ServerInterfaceWrapper) AccountAssetInformation(ctx echo.Context) error return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AccountAssetInformation(ctx, address, 
assetId, params) return err } @@ -217,16 +218,16 @@ func (w *ServerInterfaceWrapper) AccountAssetInformation(ctx echo.Context) error func (w *ServerInterfaceWrapper) GetApplicationByID(ctx echo.Context) error { var err error // ------------- Path parameter "application-id" ------------- - var applicationId uint64 + var applicationId basics.AppIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId) + err = runtime.BindStyledParameterWithOptions("simple", "application-id", ctx.Param("application-id"), &applicationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetApplicationByID(ctx, applicationId) return err } @@ -235,14 +236,14 @@ func (w *ServerInterfaceWrapper) GetApplicationByID(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetApplicationBoxByName(ctx echo.Context) error { var err error // ------------- Path parameter "application-id" ------------- - var applicationId uint64 + var applicationId basics.AppIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId) + err = runtime.BindStyledParameterWithOptions("simple", "application-id", ctx.Param("application-id"), &applicationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetApplicationBoxByNameParams @@ -253,7 +254,7 @@ func (w *ServerInterfaceWrapper) GetApplicationBoxByName(ctx echo.Context) error return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter name: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetApplicationBoxByName(ctx, applicationId, params) return err } @@ -262,14 +263,14 @@ func (w *ServerInterfaceWrapper) GetApplicationBoxByName(ctx echo.Context) error func (w *ServerInterfaceWrapper) GetApplicationBoxes(ctx echo.Context) error { var err error // ------------- Path parameter "application-id" ------------- - var applicationId uint64 + var applicationId basics.AppIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId) + err = runtime.BindStyledParameterWithOptions("simple", "application-id", ctx.Param("application-id"), &applicationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all 
parameters from the context var params GetApplicationBoxesParams @@ -280,7 +281,7 @@ func (w *ServerInterfaceWrapper) GetApplicationBoxes(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetApplicationBoxes(ctx, applicationId, params) return err } @@ -289,16 +290,16 @@ func (w *ServerInterfaceWrapper) GetApplicationBoxes(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetAssetByID(ctx echo.Context) error { var err error // ------------- Path parameter "asset-id" ------------- - var assetId uint64 + var assetId basics.AssetIndex - err = runtime.BindStyledParameterWithLocation("simple", false, "asset-id", runtime.ParamLocationPath, ctx.Param("asset-id"), &assetId) + err = runtime.BindStyledParameterWithOptions("simple", "asset-id", ctx.Param("asset-id"), &assetId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetAssetByID(ctx, assetId) return err } @@ -307,32 +308,32 @@ func (w *ServerInterfaceWrapper) GetAssetByID(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetBlockParams - // ------------- Optional query parameter "format" ------------- + // ------------- Optional query parameter "header-only" ------------- - err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format) + err = runtime.BindQueryParameter("form", true, false, "header-only", ctx.QueryParams(), &params.HeaderOnly) if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter header-only: %s", err)) } - // ------------- Optional query parameter "header-only" ------------- + // ------------- Optional query parameter "format" ------------- - err = runtime.BindQueryParameter("form", true, false, "header-only", ctx.QueryParams(), &params.HeaderOnly) + err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format) if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter header-only: %s", err)) + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for 
parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetBlock(ctx, round, params) return err } @@ -341,16 +342,16 @@ func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetBlockHash(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetBlockHash(ctx, round) return err } @@ -359,16 +360,16 @@ func (w *ServerInterfaceWrapper) GetBlockHash(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetLightBlockHeaderProof(ctx, round) return err } @@ -377,16 +378,16 @@ func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) erro func (w *ServerInterfaceWrapper) GetBlockLogs(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetBlockLogs(ctx, round) return err } @@ -395,9 +396,9 @@ func (w *ServerInterfaceWrapper) GetBlockLogs(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, 
"round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } @@ -405,12 +406,12 @@ func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error { // ------------- Path parameter "txid" ------------- var txid string - err = runtime.BindStyledParameterWithLocation("simple", false, "txid", runtime.ParamLocationPath, ctx.Param("txid"), &txid) + err = runtime.BindStyledParameterWithOptions("simple", "txid", ctx.Param("txid"), &txid, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetTransactionProofParams @@ -428,7 +429,7 @@ func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetTransactionProof(ctx, round, txid, params) return err } @@ -437,16 +438,16 @@ func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetBlockTxids(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetBlockTxids(ctx, round) return err } @@ -457,12 +458,12 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDeltaForTransactionGroup(ctx echo // ------------- Path parameter "id" ------------- var id string - err = runtime.BindStyledParameterWithLocation("simple", false, "id", runtime.ParamLocationPath, ctx.Param("id"), &id) + err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetLedgerStateDeltaForTransactionGroupParams @@ -473,7 +474,7 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDeltaForTransactionGroup(ctx echo return 
echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetLedgerStateDeltaForTransactionGroup(ctx, id, params) return err } @@ -482,14 +483,14 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDeltaForTransactionGroup(ctx echo func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetLedgerStateDeltaParams @@ -500,7 +501,7 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetLedgerStateDelta(ctx, round, params) return err } @@ -509,14 +510,14 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetTransactionGroupLedgerStateDeltasForRoundParams @@ -527,7 +528,7 @@ func (w *ServerInterfaceWrapper) GetTransactionGroupLedgerStateDeltasForRound(ct return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetTransactionGroupLedgerStateDeltasForRound(ctx, round, params) return err } @@ -536,9 +537,9 @@ func (w *ServerInterfaceWrapper) GetTransactionGroupLedgerStateDeltasForRound(ct func (w *ServerInterfaceWrapper) GetBlockTimeStampOffset(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetBlockTimeStampOffset(ctx) return err } @@ -549,14 +550,14 @@ func (w 
*ServerInterfaceWrapper) SetBlockTimeStampOffset(ctx echo.Context) error // ------------- Path parameter "offset" ------------- var offset uint64 - err = runtime.BindStyledParameterWithLocation("simple", false, "offset", runtime.ParamLocationPath, ctx.Param("offset"), &offset) + err = runtime.BindStyledParameterWithOptions("simple", "offset", ctx.Param("offset"), &offset, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter offset: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.SetBlockTimeStampOffset(ctx, offset) return err } @@ -565,9 +566,9 @@ func (w *ServerInterfaceWrapper) SetBlockTimeStampOffset(ctx echo.Context) error func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetSupply(ctx) return err } @@ -576,16 +577,16 @@ func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetStateProof(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetStateProof(ctx, round) return err } @@ -594,9 +595,9 @@ func (w *ServerInterfaceWrapper) GetStateProof(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetStatus(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetStatus(ctx) return err } @@ -605,16 +606,16 @@ func (w *ServerInterfaceWrapper) GetStatus(ctx echo.Context) error { func (w *ServerInterfaceWrapper) WaitForBlock(ctx echo.Context) error { var err error // ------------- Path parameter "round" ------------- - var round uint64 + var round basics.Round - err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round) + err = runtime.BindStyledParameterWithOptions("simple", "round", ctx.Param("round"), &round, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the 
unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.WaitForBlock(ctx, round) return err } @@ -623,7 +624,7 @@ func (w *ServerInterfaceWrapper) WaitForBlock(ctx echo.Context) error { func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params TealCompileParams @@ -634,7 +635,7 @@ func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter sourcemap: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.TealCompile(ctx, params) return err } @@ -643,9 +644,9 @@ func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error { func (w *ServerInterfaceWrapper) TealDisassemble(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.TealDisassemble(ctx) return err } @@ -654,9 +655,9 @@ func (w *ServerInterfaceWrapper) TealDisassemble(ctx echo.Context) error { func (w *ServerInterfaceWrapper) TealDryrun(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.TealDryrun(ctx) return err } @@ -665,9 +666,9 @@ func (w *ServerInterfaceWrapper) TealDryrun(ctx echo.Context) error { func (w *ServerInterfaceWrapper) TransactionParams(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.TransactionParams(ctx) return err } @@ -676,7 +677,7 @@ func (w *ServerInterfaceWrapper) TransactionParams(ctx echo.Context) error { func (w *ServerInterfaceWrapper) SimulateTransaction(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params SimulateTransactionParams @@ -687,7 +688,7 @@ func (w *ServerInterfaceWrapper) SimulateTransaction(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.SimulateTransaction(ctx, params) return err } @@ -753,312 +754,305 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9/XfbtrIo+q9g6d618nFFOUnTnt281XWfm4/Wp2mSFbvd95ymr4VISMI2BXADoCy1", - "L//7XZgBSJAEJcqW7aT1T4lFEhgMBjOD+fxzlMplIQUTRo+e/TkqqKJLZpiCv2iaylKYhGf2r4zpVPHC", - "cClGz/wzoo3iYj4aj7j9taBmMRqPBF2y+h37/Xik2L9Lrlg2emZUycYjnS7YktqBzaawb1cjrZO5TNwQ", - "xzjEyYvRxy0PaJYppnUXyrci3xAu0rzMGDGKCk1T+0iTC24WxCy4Ju5jwgWRghE5I2bReJnMOMszPfGL", - "/HfJ1CZYpZu8f0kfaxATJXPWhfO5XE65YB4qVgFVbQgxkmRsBi8tqCF2Bgurf9FIohlV6YLMpNoBKgIR", - 
"wstEuRw9+2WkmciYgt1KGV/Bf2eKsT9YYqiaMzP6dRxb3MwwlRi+jCztxGFfMV3mRhN4F9Y45ysmiP1q", - "Qn4stSFTRqgg7189J1988cXXdiFLagzLHJH1rqqePVwTfj56NsqoYf5xl9ZoPpeKiiyp3n//6jnMf+oW", - "OPQtqjWLH5Zj+4ScvOhbgP8wQkJcGDaHfWhQv/0icijqn6dsJhUbuCf48kE3JZz/VnclpSZdFJILE9kX", - "Ak8JPo7ysODzbTysAqDxfmExpeygvzxKvv71z8fjx48+/o9fjpP/dn9++cXHgct/Xo27AwPRF9NSKSbS", - "TTJXjMJpWVDRxcd7Rw96Ics8Iwu6gs2nS2D17ltiv0XWuaJ5aemEp0oe53OpCXVklLEZLXND/MSkFLll", - "U3Y0R+2Ea1IoueIZy8aW+14seLogKdU4BLxHLnieWxosNcv6aC2+ui2H6WOIEgvXpfABC/p0kVGvawcm", - "2Bq4QZLmUrPEyB3iyUscKjISCpRaVun9hBU5WzACk9sHKGwBd8LSdJ5viIF9zQjVhBIvmsaEz8hGluQC", - "Nifn5/C9W43F2pJYpMHmNOSoPbx96OsgI4K8qZQ5owKQ589dF2VixuelYppcLJhZOJmnmC6k0IzI6b9Y", - "auy2/+fp2zdEKvIj05rO2TuanhMmUpmxbEJOZkRIE5CGoyXAof2ybx0OrpiQ/5eWliaWel7Q9Dwu0XO+", - "5JFV/UjXfFkuiSiXU6bslnoRYiRRzJRK9AGEI+4gxSVddyc9U6VIYf/raRu6nKU2roucbgBhS7r+5tHY", - "gaMJzXNSMJFxMSdmLXr1ODv3bvASJUuRDVBzjN3TQLDqgqV8xllGqlG2QOKm2QUPF/vBUytfATh+kF5w", - "qll2gCPYOkIz9nTbJ6SgcxaQzIT85JgbPDXynImK0Ml0A48KxVZclrr6qAdGmHq7Bi6kYUmh2IxHaOzU", - "ocMyGHzHceCl04FSKQzlgmWWOQPQ0jBkVr0wBRNuv+90pfiUavbV0z4ZXz8duPsz2d71rTs+aLfhpQSP", - "ZER02qfuwMY1q8b3A+6H4dyazxP8ubORfH5mpc2M5yCJ/mX3z6Oh1MAEGojwsknzuaCmVOzZB/HQ/kUS", - "cmqoyKjK7C9L/OnHMjf8lM/tTzn+9FrOeXrK5z3IrGCNXrjgsyX+Y8eLs2Ozjt4rXkt5XhbhgtLGxXW6", - "IScv+jYZx9yXMI+r22548Thb+8vIvl+YdbWRPUD24q6g9sVztlHMQkvTGfyzngE90Zn6w/5TFLn92hSz", - "GGotHTuRDOYDZ1Y4Loqcp9Qi8b17bJ9aJsDwIkHrN45AoD77MwCxULJgynAclBZFksuU5ok21MBI/1Ox", - "2ejZ6H8c1faXI/xcHwWTv7ZfncJHVmVFNSihRbHHGO+s6qO3MAvLoOERsAlke6A0cYGbaEmJWxacsxUV", - "ZlJfWRr8oDrAv7iZanyjtoP4bl3BehFO8MUp06gB44v3NAlQTwCtBNAKCuk8l9Pqh/vHRVFjEJ4fFwXi", - "A7RHxkExY2uujX4Ay6f1SQrnOXkxId+FY4MqLkW+scIBVQ0rG2ZOajkpVtmW3BrqEe9pAtsp1cRujUeD", - "VfMPQXFwrVjI3Go9O2nFvvy9ezckM/v7oI8/DxILcdtPXHDRcpjDOw78Elxu7rcop0s4ztwzIcftby9H", - "NnaULQSjT2osHpp44Bdu2FLvpIQAooCa3PZQpehm5JTEBJS9Lpn8pBlSSEHnXAC0Y3t9EmRJz3E/JODd", - "EgLT1b0IaQk1yMqE6nROh/pJx87yGVBrbGO9Jmo11ZxrA/dqeJksWA6KMxWeoENSuRRlDNjwLYuoYL5Q", - "tEBadk9Q7eIC7vP4EsJ6RcE7UCZGYQ7YfbDRANWl2fJO1hmFBLhGC4Zvc5mef0/14gAnfOrH6tI+TEMW", - "jGZMkQXVi8jBadF2PdoQ+rYvAs2SaTDVpFriaznXB1hiLvdhXUXxnOa5nbrLslqrhYEHHeQ8J/ZlwpYc", - "DObu4ogWdrx/kZc0XVi1gKQ0z8e1qUgWSc5WLLeXdi4EU2NiFtTUhx9G9vcaOEeaWWZnGAlW48xMYGJT", - "lS1CMbKkIIGW9jZT5M1vKg6q6ZK1tCCQiLIEK0Jw0Th54VfHVkwAT6qGBvCrNYK1Jhx8Yud2j2BmIXFx", - "aAE03n1X4a/iFw2g7du1PBX1FFJlaLM29jeuSCoVDoES3k1u/8Ooqj9G6rxfKJa4IRRdMaVpblfXWtSD", - "inwPdTp3nMyMGhqcTEeF8QsYcg74DtQ7piJWmrfwH5oT+9hqMZaSaurhoIzIwJ2aoWC2qMKZ7Atgb5Vk", - "iaZMUtD0fC8on9eTx9nMoJP3Eq2nbgvdIqodOlvzTB9qm2Cwvr1qnhC0XXl21NFFtjKdYK4hCDiTBUH2", - "0QIBOQWMhgiR64OLtW/lOgbTt3LdEWlyzQ6yE3acwcz+W7l+4SCTajfmYewhSLcLFHTJNEg3ETJOO0vt", - "lzueSnU5baIlYASpvY2E2lEDZWrcQhK8WhaJO5sRjwW+0BqoDvDYrgS0h49hrIGFU0OvAQvajnoILDQH", - "OjQW5LLgOTsA6S+iStyUavbFE3L6/fGXj5/89uTLryxJFkrOFV2S6cYwTe47sxzRZpOzB9HbEWgX8dG/", - "eup9VM1xY+NoWaqULWnRHQp9X3j7xdeIfa+LtSaaYdUVgIM4IrOiDdFO0K1rQXvBpuX8lBljb7rvlJwd", - "nBt2ZohBBy+9K5RVLHTTT+i0paPMvnLE1kbRowLeZCLDOAO7Dq7tHXA5PQhR9W18Vs+SEYfRjO08FPtu", - "Uz3NJtwqtVHlIcwbTCmpoiK4UNLIVOaJ1fO4jBgo3rk3iHvDb1fR/h2hJRdUEzs3eC9LkfXYIcxaDJdf", - "OPTZWtS42SrBcL2R1bl5h+xLE/n1LaRgKjFrQYA6G+aRmZJLQkkGH4Ku8R0zqH/xJTs1dFm8nc0OY+2U", - "MFDEjsOXTNuZCL5htR/NUikwmG+HycaNOgQ9bcR4L5PpB8Bh5HQjUnCVHeLY9luzllyA315vRBqYtiyM", - "OcvmDbK8ugmrDx041T0dAcei4zU8Blv9C5Yb+kqqs1p9/U7Jsjg4e27POXQ51C3GeQMy+603A3Mxz5sB", - "pHML+yS2xltZ0PPKiIBrAOiBIl/z+cIE98V3Sl6DTIzOEgMUHqCxKLffdE1Gb2RmmYkp9QFUyXqwmsNZ", - "ug35Gp3K0hBKhMwYbH6p40pmT8ghxDpBiJYJ9VawT3BNpsxSV0pLu9qyIBCA1JEX9YcJTfGEJoAa3RN+", - 
"UcXN4Fs4HYaz5YrRbEOmjAkipy7GwUVfwCIpRE8Zr6Y5FTfCLxpwFUqmTGuWJc4UvRM0/x6KDrMFTwA4", - "AFzNQrQkM6quDOz5aiec52yTQKyfJvd/+Fk/uAV4jTQ034FYeCeG3rY9rQv1sOm3EVx78pDs0FKHVGvV", - "W8sgcmZYHwr3wknv/rUh6uzi1dGyYgpCSq6V4v0kVyOgCtRrpverQlsWPRHs7ppuNTy7YYIK6RWr2GA5", - "1SbZxZbtSw1bgl1BwAljnBgG7lG8XlNtMAyKiwxsmihOYB5UwuwU/QD3XkPsyD/7G0h37NTKQaFLXV1H", - "dFkUUhmWxdYAHtneud6wdTWXnAVjV3ceI0mp2a6R+7AUjO+Q5W7A8Ac1lf/VeXS7iwOfupXzmygqG0DU", - "iNgGyKl/K8BuGMXbAwjXNaKRcLhuUU4VOjweaSOLwnILk5Si+q4PTaf49rH5qX63S1zo5EC5nUmmwYHi", - "3neQXyBmMX57QTVxcHgXO5hzMF6rC7M9jInmImXJNsqHK559KzwCOw9pWcwVzViSsZxuIsEB+Jjg420D", - "wI7X111pWIKBuPFNrynZxz1uGVrCeDqmPBJ4QlJ7BO1VoCYQ9/WOkTMGY8eYk6Oje9VQMFd0i/x4sGzc", - "6siIIA1X0tgdd/QAIDuOPgTgHjxUQ18eFfBxUt8921P8F9NugkqP2H+SDdN9S6jH32sBPbZgl+MUnJcW", - "e29x4Cjb7GVjO/hI35HtMUy/o8rwlBdw1/mBbQ5+9WtPEHWck4wZynOWkeABXgOL8HuCIaTtMS93FRxk", - "e+uC3zG+RZbjw3SawJ+zDdy532FuQmDqOMRdNjKqlU9UEADURzxbFTx8ha1pavKNVdTMgm3IBVOM6HKK", - "IQxdf4qRRRIOEPXPbJnReWejvtGt7uJTGCpYXizWDO8E2+E7a10MGuhwd4FCynyAhayDjCgEg2JHSCHt", - "rnOX/uQTYDwlNYB0TBtc85X4v6cbaIYVkP+SJUmpgCtXaVil00gFigIokHYGq4JVc7rgxBpDLGdLhjdJ", - "ePLwYXvhDx+6PeeazNiFzxm0L7bR8fAh2HHeSW0ah+sA9lB73E4i4gMcV1bwuVtIm6fsjnhyIw/ZyXet", - "wStvlz1TWjvCtcu/MgNoncz1kLWHNDIs2gvGHeTLacYHddYN+37Kl2VOzSG8VmxF80SumFI8Yzs5uZuY", - "S/FyRfO31WeQD8lSS6MpS1LI4hs4Fjuz32Dinx2HC24PMAb9DwWIneBXp/jRjitmHanKl0uWcWpYviGF", - "YinDfDerOepqqROCkfDpgoo5XBiULOcuuBXHAYZfajTNqFJ0hogqVWYtEjByxwSAC1PzKY9WnWLUXuna", - "FnK8wFzQaj6X5TpEMgd70PYYRJ1k41HvjdcidVXfeBE5zbzNAcKgoe8F+KknHuhKAdRZ3aeLr3Bb7GGy", - "m3s9Jvt66BiU3YmDiN/6YV/Qr71u55sDKD04EFGsUEyDiArNVBqfylmYo+1DBTfasGXXko+f/tZz/N73", - "3helyLlgyVIKtomWJeGC/QgPo8cJxGTPx6Cw9H3bvoM04G+B1ZxnCDVeFb+w2+0T2vZY6VdSHcoligMO", - "Vu8HeCB3utvdlJf1k9I8j7gWXQZnmwHocRWsyxWhWsuUg852kumxiwpGb6RL92yi/12Vl3KAs9cet+VD", - "C4sDgI2Y5QWhJM05WJCl0EaVqfkgKNiogqVGgrj8ZbzfavncvxI3k0asmG6oD4JCAF9luYoGbMxYxEzz", - "ijFvvNTlfM60ad11Zox9EO4tLkgpuIG5lva4JHheCqYgkmqCby7phswsTRhJ/mBKkmlpmto/JChrw/Pc", - "OfTsNETOPghqSM6oNuRHLs7WMJx3+vsjK5i5kOq8wkJcus+ZYJrrJB5s9h0+hbh+t/yFi/GHcHd87INO", - "64oJI7vMRpGU/+/+/372y3Hy3zT541Hy9f86+vXPpx8fPOz8+OTjN9/8/82fvvj4zYP//T9jO+Vhj6XP", - "OshPXrib8ckLuP4Eofpt2G/M/r/kIokSWRjN0aItch9KRTgCetA0jpkF+yDMWlhCWtGcZ5a3XIYc2hKm", - "cxbxdLSoprERLWOYX+uel4orcBkSYTIt1nhpLaobnxlPVAenpMs9h/MyKwVupde+MQ/Tx5fJ2bgqRoB1", - "yp4RyFRfUB/k6f588uVXo3GdYV49H41H7umvEUrm2TpWRyBj69hdMUySuKdJQTeamTj3ANijoXQY2xEO", - "u2TLKVN6wYub5xTa8Gmcw/mUJWdzWosTgQH+9vyAi3PjPCdydvNwG8VYxgqziNUvaihq8Fa9m4y1wk4K", - "JVdMjAmfsEnb5pPZ+6IL6ssZnfnAVCXlkNtQdQ6Q0DxVBFgPFzLIsBKjn1Z6gxP++uDXITdwDK72nLGI", - "3nvfvTwjR45h6ntY0gKHDooQRK7SLnmyEZBkuVmYU/ZBfBAv2AysD1I8+yAyaujRlGqe6qNSM/UtzalI", - "2WQuyTOfj/mCGvpBdDSt3sKKQdI0KcppzlNyHl5IavLEYlndET58+IXmc/nhw6+d2Izu9cFNFeUvOEFi", - "FWFZmsSV+kkUu6Aq5vvSVakXGBlreW2bFZVsWaKB1JcScuPHeR4tCt0u+dBdflHkdvkBGWpX0MBuGdFG", - "VvloVkFxKb12f99IJxgUvfB2lVIzTX5f0uIXLsyvJPlQPnr0BWT21TUQfnci39LkpmCDrSu9JSnaRhVY", - "OF4rIVY9Keg85mL78OEXw2gBuw/68hJsHHlO4LNG1qFPMICh6gVUKc69G4Bw7J0cDIs7xa98Wcf4EuAR", - "bGEzAftK+xXkz196u3bk4NPSLBJ7tqOr0pbE/c5U1d7mVsny0Riaz+G26grjTRlJFyw9dxXL2LIwm3Hj", - "cx/w4xRNzzq4xlp2mGEI1ZTAQTFlpCwy6lRxKjbtsjYaMypg0PfsnG3OZF2MaZ86Ns2yKrrvoAKlBtql", - "Jdbw2Lox2pvvosp8oqmrTgLJm54snlV04b/pP8io8h7gEMeIolH2ow8RVEUQgcTfg4JLLNSOdyXSjy2P", - "i5QJw1csYTmf82msDO8/u/4wD6ulSld50EUhVwNqwmfEXuWnKFjd9V5RMWdWPFuRKjXNsapqNGgD7kML", - "RpWZMmq22vlFWJDCQwdXygvIvAYL39guga3tfnMDFjvBLuytAgxF+I6LXp70x58h4Cy7JDz+8/qmMOm9", - "6zrURSoOeqlcYbe61rrQvJDOAC58vmRQslRe2H2xUEhXbROLugTypdR0znruLqH3bmA9jIbHDwbZpZFE", - 
"dRA5a6saHU0gCjK+nNg1R88ws0/sIYZrZisg08+EDmLnM4Ii2g5h0xwU2CpyFfeeqoYXFasC94EWZy1M", - "iVoV9GA0MRIexwXV/jhCvVTPZQdpZ9dY9mVbabqTIJYwKIpaFZ7z0rDNQTv3flegzlel86Xowkv/gLJy", - "9u4F6Qux7ZACVNOM5WyOC8eXPaHUBZPqDbJwvJ3NgLcksbDEwEAdKABuDmZvLg8JQd8IGTxCjIwDsCHw", - "AQYmb2R4NsV8HyCFK/hE/dggIoK/WTyxDwP1rTIqCytceY+/MfUcwJWiqDWLVkQ1DEO4GBPL5lY0t2zO", - "3cXrQToV0uBC0aqH5kJvHvRdNLa4plDk77UmVBIus5pQm/VAx1XtLRBP5TrBDOXoXWS6nlp6j+YuQL50", - "7GBiLbp7mkzlGsK5QLRgrPwOWPrh8GAEtpc110Cv8F2fnoXAbJt2u54bo0INJOMMrRW59Cl6Q6bu0S37", - "yOV+UF7uUgC0zFB1rwZnlthpPmiqJ11hXku1cV021aeFxY5/3xGK7lIP/rr2sWZBuO/rwn/9xcX8ibqR", - "Snhdy9JVKhTixwVWHdynQGGbHBpAbMHqu7YeGEVrM9aridcAazFWYplv1ynZRZtmOYNLcNJQTZPzWKSA", - "vcszkOOn/rPAWAe7R8XmQRBAqNica8Nqp5GPC7oNczyF8slSzvpXZwo1s+t7L2Ul/NFtDh82lnnjK4AI", - "/BlX2iTgcYsuwb70SoMR6ZV9Na6BNkMUsdkAz+IcF6Y9Z5sk43kZp1c37w8v7LRvKkGjyylIMS4wQGsK", - "zTGigctbpsbY9q0Lfo0Lfk0Ptt5hp8G+aidWllyac3wm56LFwLaxgwgBxoiju2u9KN3CIIOE8y53DLTR", - "IKZlss3b0DlMmR97Z5SaT3vvk/w4UnQtQRnAeIagnM9Z5subeX+YCIrI5VLMgy5ORbGtZt6EYOk6qDy3", - "pWidC8NnfUH4gbqfcJGxdRz68FYAkNeZdVBwDyaZM4HlSuJmoShqwhB/eCOw1d2wL7SdABANgj5rObPr", - "6GTcpWo7YQNyRjN3J9HMr2/7sexuiEPduC98ulH5dPsRggGBprgJGpt0yxD0MGBaFDxbtxxPOGqvEYzu", - "ZV3u0baAtbjBdmCgGQQdJbhGKW0Xau0M7Edw5z2ytzKMvXaBxZa+aeoS8LNSgQejEdncrdte3dUGrv2H", - "n0+NVHTOnBcqQZCuNAQsZx80BFXRNTEcw0kyPpux0PuiL+M5aADXsbFnA0g3QmRxF03JhfnqaYyMdlBP", - "DeNulMUpJkILfT75s66Xy+v0gSmpEgnB1lzCVRVN1/+BbZKfaV7aSwZXug7PdW6npvDdY9dXyx/YBkbe", - "GfVqAduxK2B5es+ABmOW/uqRDgpY39ONEv9wvWxs4R47dRzfpQNtjWvK0E/8tZRpNC1oLuUqB6MOkrCw", - "DNmN03hsgj09rIn4Ninv2gSe7dZBAn0/nIpr38KyK4qqWhS7aPeM0dwTLyxn9HE8ulokQEyauRF34Ppd", - "JUCjeIZIU/QMNwJ79kQ5LQolVzRPXLxEn/BXcuWEP7zuwytu+CYTp+yzl8ev3znwP45Hac6oSipLQO+q", - "4L3is1kVtnHYLkqw2rczdKKlKNj8qiJzGGNxAZW9W8amTlOUOn4mOIou5mIWD3jfyftcqA8ucUvIDyuq", - "iJ/a54kBP80gH7qiPPfORg9tT3A6LG5YZ50oVwgHuHKwUBDzdeWxepMbPnz4ZeXxWLsJMGCmqq8eiaDS", - "AwzkbSYSP4Q1Ee9gfbCkt1ABM36xEa4+JnA8F2NED66kvZKqIWNcAmQ0Run6tDeryyMee0LCfZvMts42", - "Iajf/T7/3R76hw/DE/3w4Zj8nrsHAYDw+9T9DteYhw+jTsqotczyIjCGCbpkD6pkjt6NuNl7vmAXw/SA", - "49WyUmBlPxlWFIrBRh7dFw57F4o7fGbul4zlzP40GWILCDcd0R0CM+QEnfYlPFaxrEvszKmJFO3Qbci1", - "taQFMsV1fkCfb/cIiXIJftJE5zyNR5CIqbbcR2DMpn2ZwMs9RmE7Ysl7QoBFyYOx7GtDSrO2gAzmiCJT", - "R6vD1ribSne8S8H/XTLCM3t5mnGmQHy2JKq/g8CoHb03bn5zA6M7rB7+KuaWLW4tb3LaZmvZ6iZ8Ubmu", - "/EJjvYX2DDQPZ+ww7i1B4o4+vJSDpLlFM9Jz2HVpSId2z+icT7BnjmjHda6TmZJ/sLjABjdVpN6G969y", - "sCb/wUQsQLDNUirfdd04vp5913YPv4L3bfyVr9x+0VVzs8sI0/ip3m8jL3O31vGq0A7JfXe9MJChmYHQ", - "w1rgeAUxt9BtxQc5UYHnCYtNNBLZ4qcyTBk9wvHrU+lg7qTZ5vRiSmOtaOyVy8IUbG8jHMtI4j/2G6Cr", - "Ugo4OwkCxat3ORasK5iqXR3d4reXvD7htIMvTvU9CSgqvCGNMRoi1zIyTCkuqMBm5fY75Ffua83Q02+/", - "upAKyk3qeORYxlK+jFp9P3z4JUu7UUIZn3Psw11qFjR6dgMRrGkJVOSaZVcFQhxqTmbk0TjoNu92I+Mr", - "rvk0Z/DGY3xjSjWIy8rrXn1il8eEWWh4/cmA1xelyBTLzEIjYrUk1RUXlLwq/nHKzAVjgjyC9x5/Te5D", - "5KfmK/bAYtEpQaNnj7+GuB3841FMyro+6ttYdgY828eEx+kYQl9xDMsk3ajxIO+ZYuwP1i8dtpwm/HTI", - "WYI3nUDZfZaWVNA5i6eBLHfAhN/CbkLUQAsvAp0OTBslN4Sb+PzMUMufelLLLftDMEgql0tuli4+UMul", - "pae6izNO6oeDfme+LZWHyz+EMNsick2+hWsMXfakhkEw9BtwBYdoHROKNUZzXgfA+7ag5MSXMIY+XVV7", - "LsSNncsuHXRJiIefkUJxYcDMUppZ8g97LVY0texv0gduMv3qaaTfVbMljNgP8BvHu2KaqVUc9aqH7L3O", - "4r4l94UUydJylOxBXcohOJW98cDxyM++8NPtQw/VfO0oSS+5lQ1yowGnvhLhiS0DXpEUq/XsRY97r+zG", - "KbNUcfKgpd2hn96/dlrGUqpYX4L6uDuNQzGjOFtBYl58k+yYV9wLlQ/ahatAf7thVl7lDNQyf5ajF4HA", - "cbotJ99q8T//WBdYB/8tJjy2bIBSRaydzm53w0GN+1nd2m5ijEuDZz2YG4w2GKWLlZ4gf4zir765jbCk", - "Nki45w2D4+PfibJ3cNDjHz4EoB8+HDs1+PcnzcfI3h8+jNc5jprc7K81Fq5yI4ZvY3v4rYwYwHxzxCpu", - 
"yZVhiBgg+4SUfWCZ4NQNNSbNRnQ3r0UcJo0sHtQaPwUfPvwCTzwe4I82Im6ZWcIG1skQ/Ye92YgzSjJZ", - "9TwIp6fkW7keSjgtGeSJ5xNAUQ9KBprnYCWdRqPRqICdYSkBjdpRpyyX9pIZ9h4K7fmfD57t4sdbsF3y", - "PPu59rK2BImiIl1Eg5Gn9sPfUEdviGBkldF2JgsqBMujw+Hd9jd/B47c0v8lh86z5GLgu+1Gt7jc1uJq", - "wJtgeqD8hBa93OR2ghCrzepcVfWHfC4zAvPUvTNq5tjtGB3r1BlJo4Zhl6Vx4bGQcu7qGs14DtGecb8x", - "vJkoanrqdEFbdd/GyI4DXc41mhlwdKYI5UsQzJoui5zByVwxRefwqRSs9TlUaoORg8YYRBf2EbwJdTEk", - "MaUSRM5mwTKYMFyxfDMmBdUaB3lkl8XWMPfo2eNHj6JmL8DOgJUiFv0y39ZLeXwEr+AT18sJOw7sBexu", - "WD/WFLXPxnYJx7Wu/HfJtInxVHiACbLgJbVSG9tWVi1WJ+Q7KLBkibhRUR/Mlb5WcbNuZ1nkkmZjqKF8", - "9vL4NcFZ8RvsVI9tM+dgrWuSf9S9MryOqS8g1VOgZ/g42yuG2FVrk1RdLmMlEO0bdR9O3grtATteiJ0J", - "eYEm1CqOBSchUIlbLVkWNNXESzwQh/2PMTRdgG2yoQH188rh/V49O6s9N0GSY9VkCRi2hdu1fMWOr2Mi", - "zYKpC64ZJP6zFWtWXaxKkDrbuK/C2FyeKoVASpnsoYxWLZX2RbsHDjVZH1QQhayF+D0tU9j2ed/2t6fw", - "VTzlo9VLt+X19zX8fCVv8qNzLqRUSMFT6LgQ06ShQtwwN+WA5hRx/6IeuRMaOVzRDr5VyrHDYm9PX88I", - "HeK6Lv/gqd1UpA7807C16+w2Z0Y7zsaysW+o7RxiXGjmmmZZIgr5pFSRoKZovkUVQLEnGUHxpx4L5yv7", - "7I2zf0PtjXMuwNLl0ObuZ+iyyjUHz7Qg3JC5ZNqtp5k0pH+x30ygGGTG1r9OXss5T0/5HMbAMDq7bAxN", - "7Q517ANVXWCoffe5fdeV6K9+boSD4aTHReEm7W+3HlUkzVr0IjgWt+QDSQLkVuOHo20ht60R5iBPLaGx", - "FUStsQLkcIcwqpbdzVFe2rslUhS8QTBxM1qnl4sIGK+58C7UuIBIoyIBNgbOa893OlXU4N1hEE87YzTv", - "ybOARGj0wV91qHaDAosSWKOfo38b627jPYyjeqHW+KnYEH8oLHUHysRzmlcR2pHe4aBVOSUKY1pb3cRj", - "jMMy7sRnZjbQtTNLsPocmn7sK4n6SiFOy2zOTEKzLFZB61t4SuCpz0Vja5aWVa+rKgmxWQq9S21uolQK", - "XS63zOVfuOJ0QXv+CDVUD1lW7TAU9Jlu4N9Yo6f+nXGx2Xsn//pA7Gy/+v/dZOaY1mtpOtF8ngzHBMiU", - "q6OjnvpyhF5/f1BK91nBn0TSb4vLhXsU428vreAI6wN34tNRtFTleyEWXMJzX1epKjzZ5EogyjrtzCDq", - "ATYvsmUt4P2LUcBXNO9JuA99JShf0X/Ql3af9laJoMZVATOUbGVBvZWVMFa45X3puhD74oMxPPhwXgu3", - "1q0I7ffd/dDw1GGMWM0sej10l3Oi1Ru8rxfN9TXomjRpnst08Kl3wxzbj/qrhsrl0pXTjsSwrZYyC+k8", - "jIZiLM60MDw3EvIPd8/oM7gYRZ+oi/hoDZvFvqZSRKNbwhgzAT14HhicOpwoMJE6zJJXPId+SP95+vbN", - "qH8jgx3obqmr4hs1KvdtTJUs1SaPuWzgo+y3nUiRxy4R45HuMXJDrZ74aXBNb6MPXqHRbghIWNJmn7df", - "Dx28QwBzGStS3y05Mqo3wqM9oIN6Y5GXhHQRo4cfVn0lVnyfH3ge9hNy4Xlj10aCrbgsfVilT27wth78", - "1ZXwavQN6mFs0ZSh23ZH9jpPz1z/a1ymM7b98DOGVxAmjNp8Aq7Uzqa3m1JFrrFod65fIVXr1EGtVBvq", - "7pAeWLF2S+7S543gqDM0aKnTvqpDVi+G6PkdfHwcj06yvTThWMuuEY4Sk6ev+XxhoOPH94xmTL3b0dGk", - "7mICR6yQmtcdjHM7mCshvYDhJkOziCwB87AjS3csH12+YqmBttV11KxibJ/+LHYy782962zSLwuqZCvX", - "0GRbF5Nur+odynun8FpQPBD7/E6G9+w4rnIjMLXzguq63FOr5sLgzO/ZjKVQVX1robt/LpgIiqiNvcEV", - "YJkFde94laAIfQH2dyfUAG2rQ7cVnqA/15XB6auDcc429zRpUEO08XCVnXuZwuOAAfRt+xr0fR4iFw7K", - "dUUZgAUf6+9KudfNdXprxgdlGy85lydJKzjqUo5bpvT64yXmsp/uVTYWdP2+Wnjdnuv9hoUX0OJeu8hX", - "WhUuD81v5KTbeOvCFT6HsoSVU9SXQGfa/+ZrkOIsOT93/UcAK+iCvqAq828cpKgcyiYeB3pWzczrzKxu", - "9FKklQskOaa5tGpE0pcp2kyGqiKJ72kM+a4LgAFcM6YUyypfZy41S4z0mVzb4NiGCoxrvxQSdG/7NASu", - "t3T++7o3ALSRpFAqn7pw9nCBRLEltdCpoIJ//5zbkP0cn/siHr6N4E7TcUWvu/tZ+5w8rjtIDKl+Rpy0", - "3F0c5DJWZC4EU4l3KbfL+YtmRUeo25uVKQro8GBUlvbBtbe2sJKoATbtrrJ1RwiqX5yzzRFegnwjcL+D", - "IdCoOSHoQcHi1iYf1K6uY3DPDwLe7dahLKTMkx4v5km3B0Gb4s95es6ghmiVu2J1v3vNs2EnIffBeVaF", - "qVwsNr7mflEwwbIHE0KOBWYL+oiVZnvS1uTintk2/xpmzUpsC+Ks5ZMPIp52BQ071BW5mR9mOw/TzLK6", - "K06Fg+yocL8WfbF0F9Dco9kFeDL0Vt6NIWlpJQFRIRQxneQUXdHP4aDHDEdQ2yQowgMRCpQ4FzbRuYwF", - "6V+m/oodKo6pcDIAyDAxpAxIBYUbPIoAF563o6Soe+yLZsoZUayODrls9VBXkBNZs+670bdnrmZp8ruZ", - "VCycEaJPsVJwldEGZXjhP1NuFFWby9T4bKIqZj3pxfLOOMsqxLJeSB1m2cVhnsuLBJhVUvXJiV1t7Xu6", - "KYx908b6O3uqpywI2KTaKWobsqAZSaVSLA2/iCdyI1RLqViSS4jfjIWWzIzVu5eQvSlILudEFqnMGPab", - "ilNQ31ylEBTUJhaEy0VRgLQDZQDwm4COB05pZSo6iBNQtXa2Z/Cbf2a/wZIUdVU4XHSCQQo9qQhMuypw", - 
"DkP4chdeIBysZ9S2JcZ584yvgW6Yih35GTGqZGPi3mj32HcHnypGllxrBKWipQue51ARgq+DkIoqIimO", - "2h619wTipVccguqa1UFQGy6szKtKpoQ84DSsZ0bMQslyvggK1Fdw+iuvKt2FOBzlJ11C3COkhtopnpKl", - "1MbdNHGkesl1LOn9VAqjZJ43jVKoos+dpf1Huj5OU/NayvMpTc8fwL1WSFOtNBv7wgntqN96JtUqTdgU", - "wAnQgN5d6hvfgxhYR7SDGWSLxXWM4ruszAGYv+7moLtt7sfdhbXX1WSm8WvMsSDUyCVP42fq8wqj7Q1+", - "jbGoaDFC7M2K5WPgNTjsobCqoqaARXbRzASNNpc8Jo4RuOgRYDf2v6CBt8clM+YYTY+g7DIXp0Ulaa+u", - "1wIAIMWaBqZU2NA11MQqriLnWAMFYl/agA6UKhBieDXY7AgHB8qwKwHVCWuuALyPxocxFo3EEOmpXPvn", - "D+qqkpcC/uN2Km8wj77YzdOatBRGb/oKVD0cIV4if2ug4xnUs5gODXesmm8PlPABAP0BkA0YBoVB7gvG", - "jPKcZUmsd+tJZaMaBzdtl3PZ7F0Pchk5eUpL3zrVjl0q5ioioYqvmv6vglpSktXrXUuyyNiaYcLWH0xJ", - "7Ik6DvwvLMeWqS1jgCySnK1YIy7UlWkqQdXkK+a/1dXHJGOsAG9k20YWC3gMZXnLcOLWngQhc0OwG7Wk", - "IGJxp8gOM0nUqLMWCR4TPfQoWYhWPCtpA396X5WjaQa0RzmCqs4dIfH3yKHT/IQjvPcDHPvvY6qMx8Sv", - "w/jQ3iwojrptDGhnAHSp+069iMc/hzXIKgcLzJZVjlgk8Zpv6IJeiH6DZJfk6+vWwH3iUgSIfblmKWg1", - "7r7DMnfj6XFSuHJGQO2CsQxvBfaTiLV9wQQRMmhRe0F1dVWpi6P6H3BieIkLd5u+hFO5DlO++s4SGIzo", - "VpXE3ouEquj08ub5WzmJWw9i73gxGtHM5fVusX956nbXDnhBlnlGhN1Pq/tDk1cnxRwXH5Np6QfKc3mB", - "PWfDe+gL5v2gSH3eBeTUcl6JZR+OPXZ1e9umDh4koizphkgF/9hb579LmvPZBvgMgu8/I3pBLQk5xytG", - "BLjwbjvxdvVq7AHz1hbpp8J186FjBsNt7CgB0FaQ++ZgkizpOQu3AYIdkH+mxjJOXU7BcmFFdms7u1hw", - "i/e1l5Y0C2/6UAF20+AOvia4/fr/qZNcw6l84cYip6nvMOxanDX5DHQR98RlFmy5PQu6y9c8CVSdyWui", - "Vb5sRnYJk+merCuWWtTXvqkBdqdjc6dz1ZWWMdDy2+rRsyV/fNBSDr0LQ6NuOkCHfV53gR+2vb0Z/EeL", - "M/ctYwj4nwreexpdh/BiT+sbwHKjtE4EVrRWT+U6UWymdwWYoLnaXudVXZTHm1i5SBWjGiNuTt66i2dd", - "e5gLexHGmNDKp1mNkrEZFzWz5KIoTeQeAyWIxSZAWGj0B7T2uND6tASrTK5o/nbFlOJZ38bZ04EtYcMW", - "M97R4b6NmDAqmdodgOv6DgeJ17UZPXzNCnBsYofhmtpQkVGVha9zQVKmrNwnF3SjL+9RqpwDu3xKNNBm", - "muVAAu8SkDYCkm+cU/iK/p4KQHpAx88Ahw3EBUecNWjaMbLHP9OF4bNw2CzpOsnlHNKDew6EKzoNHj68", - "AkoBZnDUz4at28+j+R9s+zTQb8MxIiNh1iFTbD/3b2Er4Rr5k+Bm68lHG2U7XxvjbvFgeqSKeR38j8TS", - "PY+xFHtXVSlMs/fKpk9V8bTHgk1kPf6hpl28ZxchDMLVZwiN4MPbJTYjLWKJ/GgZSMBioLeE9zNdh7LT", - "1IVndU1pHVMDImXsyiDsaWlD+7yXSz3ggSlEu7PenLYKmbHj7NNjcnvhg6SQRZIOifnEljyZcxM4SJsw", - "9tBH4AToWXcVHqOrJlWNgmaNblX7ttns7Za1y9tVpNsu/X1moh6O3nRByBnwMjjCaByDTJ7KmDJu55g1", - "zWAVkyCUKJaWCszEF3Szu21hTyn40++Pv3z85LcnX35F7Ask43Om63YCrbZ/dVwgF227z81GAnaWZ+Kb", - "4MuKIOK8/9EnVVWb4s4acltd1wruND3cx74cEQCxTN9uH7hL7RWMU4f2f1rbFVvkwXcshoLr3zMl8zze", - "zqXSqyIOlNhuBS4UewMpmNJcG8sImx5QbuqIaL0A8yAU9V5hmSgpUubtx44KuOkJuYotpC+gFvgZFG1w", - "XiPC1kXueBV6eraty93T0EIHSiNExUwZKWThVHs+IzGIIINIBZm1zvAJFvEgRrZithgtGyNEF3keJ72w", - "4f52bt9sBm3inN5uYkS98IfyEqTZ55/oL0hyGU5Sm/Y/Gf4RqbByMK5RLfc6eEX0frAl5/i4E/dQVRcZ", - "BFq32kaEPACAnmzbRp5kkCgWVBhX6CUAf4J3ILfVjx9rx/LOtBCAxH+wA7wwfbZ+r8pkcODccqnuHyuk", - "BEv5tY8SGsvflZHrWW8lSIItckYTY5hGtiS7amGQbq2fV1nMPbeSTrKzktIQezPN80iSNNpx4EyFhGOv", - "BGpF85vnGq+40uYY8MGy9/2pUWGmbIhkRKW+XAHO13TQ3EFW7OGmFu8gMfufzO5RVM65oZwTviPNwLhD", - "cwyvnlXeaCbIBYyJQVaPvyJT10WnUCzluu3cv/DKSZUYyhSfuYBWtjY7MlF3rfNnaa5AxjMfiUPeBO6t", - "ymfvIKyP6C0zlZ6TG6XyGPV1yCKCvxiPCpt77xAXV+y4crl6TkFlxj3rOXXblg9dHpY2sUKn1Ky7zsHS", - "uoHbiKCu1za0GNngxi0fPvxipkNqiMWbrNjPoYjZQbqt7NVr5RrKlyGO3Bhu3hjF/NxX0BqLNvcU3W/t", - "R8nznQErjRYKH8ejOVYwgiYBv7mmUDcrSz0EPWXE3NKvUi4GERNZa2PyYKqg4tOAvgjus0gxe8hqTEvF", - "zQYagnsDGv8tWo/pu6q2h6sNU/nSnOwz8pwJH+9RVwIptZeu30magzxCF5+wUkjmE/ISS/e7g/LNvel/", - "sC/+8TR79MXj/5j+49GXj1L29MuvHz2iXz+lj7/+4jF78o8vnz5ij2dffT19kj15+mT69MnTr778Ov3i", - "6ePp06++/o97lg9ZkBFQ37Pj2ej/JMf5XCbH706SMwtsjRNa8B+Y3Ru4K8+gbhkgNYWTyJaU56Nn/qf/", - "15+wSSqX9fD+15FrvDZaGFPoZ0dHFxcXk/CTozmk/idGluniyM8DJe4a+sq7kypGH+NwYEdr6zFsalX8", - 
"yz57//L0jBy/O5nUBDN6Nno0eTR57HrWC1rw0bPRF/ATnJ4F7PsRFM490q4nxlGVq/Vx3HlWFNgxwz6a", - "V9UB7V8LRnMosGP/WDKjeOofKUazjfu/vqDzOVMTyN7An1ZPjrw2cvSnq5zw0QIWdRtiA4Wgar4PRCzK", - "ac5TX6OMa7QfY4C9DrtGO8t6qcdkio3FfRCvyCBECasR6LC5/klmEY3fn9TMzvdGB7/y6NkvkXJWPvPD", - "t+wOg86CcLT/PH37hkhF3LXoHU3Pq6wXn+ZUp3aFWU72y4mn+3+XTG1qunQcczzSVd9/JsqlZT4ufWap", - "50WzZHOtjcWsRR1k+5ktOQUHoip0UjM8MA0GkNTs27LkR8nXv/755T8+jgYAAlV3NIMOrr/TPP8dzWts", - "DZG1rcibcV9M1LgunAEf1Ds5BktW9TT4vH6n2engdyEF+71vGxxg0X2geW5flILF9uBX6DEKxAJn9cmj", - "R55BOfU/gO7IHapglkHNPdC7UI3iSeISA3UZGT56XxW9VbTAw3js44c3ReXfwZcmll89PeBCm6V5r7zc", - "9nCdRX9LM6Jc/jIs5fFnu5QTgbGgViCh4Pw4Hn35Ge/NibA8h+YE3gwaeHclzU/iXMgL4d+0SlO5XFK1", - "AZXIVLyw3XGKzjU4VYFF4tkOyq+J+ejXj71i7ygMejz6s1E7KbuSUEQvS6Nf22452cM5YSzMSnM/3D8u", - "Coj5PK2eHxfFO8stNcQRMA7Sj625NvrBhHwXft1wjiAk6BtpJAX49vi+6X7DVx402o0K7UZVgjv5fbvy", - "+7hpJOEZE4bPOCjsMWAap2ArTJ1opasK0G6SUFAjad+A6KrwvVMtEtdUceAYeJwO2DF0QGkUnClaongn", - "o77DXQ/u+tSkAN5KY6rbld4Ma/alditJ0hAZ18i4P3Ol70eaWzoJltvqVXXy4k4Z/Fspg1VJzjlqZ0Vx", - "APXQZ27seuXoT1dm8hBaI1yPB+mL4c07+DYIvr/f4jgPJuS4/c7l2Ior07lTE7Tv3emAn4IOiHVOd2l/", - "jo5vVe8L8772ScNqKCz290Eff+aK3t8YWb2anYV0t053CfbZ0dccs742tvqX1NMc0u40tL+1hlYVz76S", - "jhbGvh65MgSBxnYlA1/bgMdNpYk1C6gHnA3qjUBCPh7hcR3nb1kMBjC70GU99pdH8NTivRI3a9y5WnZV", - "rO9YeIf9dnPyYpd29RmZggY3OI9IgfjeXDcvjXom3t+MZ2IYb3r66OnNQRDuwhtpyCuQ4tfMIa+VpcXJ", - "al8Wto0jHU3lehdXEi22VFWos4e2waOqQqTj4Ll9GwNA7kPKb7Nz1oMJ+da9WpcBcSntc2kZlU8Vo2qO", - "H1leZ5FB7vk/n8H49ybkFSRAGj2GODbIrIAXuTDPHj/54ql7RdELDBNrvzf96umz42++ca8VigsDIQN4", - "z+m8ro16tmB5Lt0HTkZ0x7UPnv2f//rvyWRybydbletvN2+wh/anwlvHsZKHFQH07dZnvkmx27rrbb4T", - "dTfi4f9WrqNSQK7vpNCtSSGL/b+E9Jk2ychdRCtjZ6MZzwGlER6TfeTR2MkfyOKohMmEvJGuL1qZU4UF", - "YqCGribzkioqDGPZxFMqpOBprGSX5hxqByiimVoxlWhe1aouFauqmBSKrSD8vq7y2oBgN6OHIN1Plsn/", - "SNdB3vy0EtNGuiWD2XNJ1wQafRiimRljCbU1+eYb8mhc317y3A6QVIiJMdclXY9u0OpXEdvQukAvHHak", - "2h37C2MPsSDV2k9VYLK+avzdOfdnq7kjubuNPRDn3NvxUzt2QjuC6z621YKAip2Bcsi6LIp8UxfCtVqe", - "V6HiLM7OMNQ48An7CHaapqOX0DZ67w7xnRHgSqykTVB7sg1IaNVHf8K9POQZnXMLCXl/L3dp4DtScumd", - "R5LMmEkXLhe4hfoIe1IuH7GfNy254EsL5aPxAL2rqrNRtVlptG6+D/HmUCkH6uNtoOCWgoJ2fAaFOR74", - "drSuGjUUPKgDsOOoxeETO2lMDas7ChxYDQOy61ZsDpecUSwZMKQhWpBXCh5HpiKn7i38h+Yh0qoWJb7+", - "IqC/wqBrEYvWAmwa7XIUfI5zQRstb3dD+byevKtBAloO4bC9Q/B+CO5w85euPgOeQreIv0IWg7/7JuSN", - "rFPo8cr3l/SVXqcqct0LeiMFw6AAq6ojLd75fys9qRaTvnYKXrjqhmCX1ZmOfM2hrYrT9/alHcrTEHUD", - "6hddu85xDSL8+2hlpoaUsWub7CwMUY82hDnbF7GDQ6gkTW7z2nUr/PQTvIvdBse6GRYDh9TzGacWiMMy", - "HShHhMR8VPjaUX0c6LV9OdDLsELTYG5kZBU3xyJ1kMiU5VLM9afJirZRRxwvESrBqlvYCKaz/snf8Ow+", - "d11ajMuTdrWvNBcpI1ouGVwZrI7uSmgjhP+4OQgNX/pe4yLMx71l7vLloy9ubvpTplY8ZeSMLQupqOL5", - "hvwkqm4sV+F2mlC356H5OsIcuAD3WLNGWhoWdLoCE3S9/uNmbmdor6s8atSrZGmYwvp+raZbvMOkYwZs", - "YBiv7dQH0OdyOf/c1DmP9aFlqZ/TPAd07fKKwcCDwqrzHPeTLbkxdZOLULqSlzRdVHs7rs2RVStCXw19", - "3KqfCSO7vnRYm0Azu8+GkWA1gbWCKWyqbqDTlDOtLcvc8CJvflP16oTeRZG4KaTNsO3ByQu/OvQmy1k9", - "dJt+fe10N/jEzu0ewcxC4uKoYsC7Q/NfaKadNIDGLl4+XjzoveQ6SLnSjFy1amXWwT5FwaiqP0bKv18o", - "lrghFF0xpSkc1taiHtyp6p+Gqr52xZk/EUU96lS9Kq+/vChqhH3/adY8+7hbLw/qG++pknMRqOQhu8Cz", - "dnldfLf7od0L/eRFmFkjqwpgXkHoAcWiaM/ksv81GuizgaoycubuYaVAQH1RTqexurQXORtXgaX2Qipn", - "z8gH8ZDoBfU1o92fT778qs81QvXC1dLr+p3qgexjHGaI8+mzdqUdVuOo8Pvspnd7v00cj3i2jhTGFxlb", - "B71Ymr2anTy8p52vLt5dpIjXh64upuGwS2bFlF7w4uZrEGvDp/Ei7N4SV/X8PxHfVgZZLJRrtYbiNmrP", - "jkdGMZaxwix2lqSGt+rdZK44NdeujRAWDh4TPmETrKtbt3vL5swJJkpyRmdV3zYphyQeBnzGEpqnigDr", - "4UKGaNJR+gGdF4jy5u2kdYIeCjqPvLZSfKtKmLktJSxpaWFNtNyeTgYNKMZBqFihpJGpzDHusywKqUx1", - 
"uvVkkOWB9Sl6DcNDH+FeSZlb80zvdOmcwVsHsAE0KVt/Ni6dM4+mmE8ntqhLFsqt5xrC0s5kQfCC3wLh", - "Vvna3aUyxs9a7p/P3ftjeknvwM6glJp0URZHf8J/oFDwxzrJGFqo6COzFkfQNPPoz63hwMBSc6ubKOy+", - "0jDpdlpwRoN6X8PndaeXV1K125vvDPdtIW3cFvrYABTihiPs8Xpuk3/rS9hW11lrw68eDRIZsXNeqxoa", - "QdvAinaD/kG+LAY2DY2Q8F300qe1oNqfOOMiIzTYxpatSaqaEVyzT/G6F30bLsqbD9n68jM+Z2+kISfL", - "ImdLJgzLrhapT9oczkuPreJ2P8XAif5uOH9X5ocS3ychVbrITgG/x70nKLvE/HRUQR0kK6tvKGr+TpJ/", - "UpL8eeVtDcnwTi5/PnJZ+dSpOxH86YvgLz7b1VxjDNNAkXwJ53BTDNc38T0FckcZcDasluFgm18Zrt7t", - "VepXUvkueXdS/DN1iuJODg7EGmKh2WWJdVMeIuvsk4J+mJ0hzyOWhr6DOq5ivTgUmJQph3ZCJ5keu6Ay", - "NE64U3yn+HzSik+w13d6z53p4TMzPfRoOe7Wn+dDFI19FaDVUmbMO1blbOYKOvdpP80WlpY8taHLguCX", - "k9447DO+ZKf2zbc4xUFFbA12Sy1qgWeRpVkqRaYHRHG4US8rh8DR1A/AjXs2qx3wsLhST5NLk+z7oF5k", - "hxJIG/kaWo/6wtYOGRlbEUuAkwOQ7dGf+C+Y0wqpI6s59QTc2Zj7bluwUjeO2wCQvAMlFEt++6/kjDzC", - "gt2lgCT3usc4FRkxamMVVV+fUDGak7SR3FrB0T05p70nZ+dVoLO6njXF7wKyPqGHjGBoFRb44cYPwHMq", - "HMl3EWQkoUSwOTV8xbzLf3JXPevS0szVrtrCAMeEZhmexnoT2IqpDdHlVFtdRzRzlO7p5nnZg2GwdcEU", - "tyKa5rUDHq8JR1gaa1sc0Sm+cUWh1eJFWJBLNaMWvWR15brkjPzIUyWP87msYuH1Rhu27HTwdp/+1tNg", - "wRsSujGrUuRcsGQpRayv9Ft4+iM8jH0N5cX6Pj6zD/u+bcnbJvwtsJrzDJHJV8XvJ3L6rxTo0lqtYoVU", - "9nY7xXpESP97HiV/aDYi7Z6kjUgDp5Z7GAwUtpNu/Hzk0xEazaWjb/7Z+NOV0HNv6kVpMnkRzAI2AAxn", - "HFI9C5TvPZM8aptbM3uS6+u1ul2ntynAQ+xsVU8jPYPrh/1tg/+mSdjOORMSictpXDGlWxe5u0zsv1Qm", - "9uB934sbY4/8XRyt1IfVXd7IjOG4dTquPfqxri1CZsy18u+qLFVYZDxlyMuv+r1WEkdKy/nCkLIgRsbS", - "ReoPE5oik03wIhSfMKiTjNclmG5BV4zQXDGa2csrE0RO7aJrSQqLpBoqVfucExf8GVWaArgKJVOmNcsS", - "36VmF2j+PQxVN1vwBIADwNUsREsyo+rKwJ6vdsJ5zjYJXIY1uf/Dz/ZqfePwotK4HbFYHzeC3nbadRfq", - "YdNvI7j25CHZYUI3Ui2kyMllkTOXJBdB4V446d2/NkSdXbw6WiCLjF8zxftJrkZAFajXTO9XhbYsEiu/", - "uyA+x6dnfAmamKBCegtkbLCcapPsYsv2pXAt2q4g4IQxTgwD91xNX1Nt3rt86QzKPKI4gXlQx7ZT9ANs", - "pSjeLSIj/4wPY2OnVh4KXWriRvA5UCyLrUGw9Za53rB1NRfUTvFjV0lWaAvcNXIfloLxHbKCVj2EmsDv", - "b4eLLA4sldSZMrqobABRI2IbIKf+rQC7ocO/BxCua0Qj4UDrgZByqjq145E2sigstzBJKarv+tB0im8f", - "m5/qd7vEhbUwUG5nkukwAc5BfoGY1WDKXVBNHBxkSc9djtzctV7twmwPYwJllpJtlA/GXftWeAR2HtKy", - "mCuasSRjOY0YXX7CxwQfbxsAdtyTZ7KShiVTqJES3/SaklWvMakaWsJ4OqY8EnhCUnsE7eW5JhD39Y6R", - "MwZjx5iTo6N71VAwV3SL/HiwbNzqHgOWHcPuuKMHANlx9CEA9+ChGvryqICPk9p80J7iv5h2E1R6xP6T", - "bJjuW0I9/l4LaBv+QgHWkBQt9t7iwFG22cvGdvCRviMbMzV+lm6BdpTTNSbZNU2twQVwcpnL7dEF5SaZ", - "SYWKdEJnhqmdofP/pNw7zn36rnRVVwiM4OSmGweYfNgAz3ERBIE4cWFJxFWSsjKMksdkyUVp8IkszRjL", - "XytG04VV2kMbLI4ELYxdkSbF5lRlObS3nVVyUyos+mRaAh6AjuQjNm/8dt2vpBrUBaBZOpJyQ0pheB50", - "Qqru7Z+e9fLOInFnkbizSNxZJO4sEncWiTuLxJ1F4s4icWeRuLNI3Fkk/r4Widsqk5R4jcNXbBRSJO1g", - "yrtYyr9UVflKVHkDCVgnLig3rq+/r1LQb7fYwxBkGM0BBzxn/dHdGHR69vL4NdGyVCkjqYWQC1Lk1F4N", - "2NpUXaanVLOvnvpUQxSddImt6kG+2he+eEJOvz/2FUcXrjJm8937xxivRrTZ5OyBa4vGRIaaqO+PxoRF", - "umuPRr1I8N2oXW9unkNkvCYv4e0XbMVyWTCFxQyhnWDX4nPGaP7c4WaHweefdnIXavu7He33ccPo5dC2", - "pIVX8/1aqSYUMy7JiyAH8/cZzTX7vS8NE8db0mJAJ0JgJt/KbNM6IXbXjmADm2ejrjvKBVWbSJWobgpE", - "mzSMtOzKEVbXlvXx4NVxu0TbJbNdFBbT1rEMfnz0PiqPloWtNqwzFCbqzlp0MorlmLZroY4qAAcVBoQ0", - "CdwT8h6/u90ygACRO2I1M/9kohibb1ZMA961lwjHej7XXAKP+OjphbM/toSdlSkj3GjiC+zuFi/j0Tqx", - "I82ZSBwDSqYy2yQN9jVqSKGMa6o1W053S6KQf8KJq4SPfbJdTt2OGHkRLG4bTw6JZp04BtzDnTeGDebN", - "FbZgRMeeA4xfN4vuY6MhCMTxp5hRqcX79mV69TSbO8Z3x/iC09jSCLhwBcnbTGRyjYxPbVQp+nneyzVL", - "SwtceJLvg3UeXHJsbRpO1oxNy/nc3ha6PjpoowPjcSluiRXicodywf0oCAev2vtfNUm9PVyXuwR54/d9", - "ZcYHsB1UbMCZsSyo2HiXL0s0X5Y54hCbSh+W0WLN8FiJ6dr212fVfudNfoHt1ona5u+IFnJBNcH9ZRkp", - "ReYynjq1rddieJ0THPpsLWo2vbWmCa43sjo37xAR4Xe5mWquScFUYtYCD1TjMLkOBnhyb7WW9p3YuDmx", - 
"gYnqrIfBdqvx1wzhQNJDBXwNxEfQc6lOzGt0YqLNdMLGM7Bo9Ke4hM2Z8M2DBpZ0hm/Gl9TmFuc/ZXlB", - "KElzDt5VKbRRZWo+CAr+m2Bhk27siTdU9/O+5/6VuAsx4uFzQ30QFIKMKq9OlAfOWMSF8Yoxz2J1OZ8z", - "bfloSEAzxj4I9xYXpBT2FiZnZMlTJRNMrbXny+ouE3xzSTdkBhVNJPmDKUmmVuoHu462ZG14nrtgFzsN", - "kbMPghqSM6oN+ZFbDmyH8+UUqpAzZi6kOq+wEO/VM2eCaa6TuGHmO3wK7XDc8r0BEIyZ+LhuY3GzfXA8", - "7DzrhfzkBcSoQTXmnOuw/2Ib9hvzjS+5SKJEdrZgxIWLtWmL3IcacI6AHjQdR2bBPggr/YwkwPGpuRw5", - "tD1AnbOIp6NFNY2NaDmK/FoHXf8OwmVIhMncuV3+QimkAR14zyZsPNbXb+39ni6Whshl0Bq0TyDjU9c+", - "secld4FoGMlaBW7cG2cNkLf6Lz7/spKHv0t6NB7sNtkdsMuumg3yAG9+w8eE5lLMsa6ivV1K2CcuitJA", - "APh1GvDYiuaJXDGleMb0wJVyKV6uaP62+uzjeMTWLE2MoilL0KIwFGtn9hukU2g0KLjhNE/gVj0UIHaC", - "X53iRzvkcdBtdLlkGaeG5RtSKJayDAuRcU3q+/wECzSQdEHFHES3kuV8ga/hOBdMsaoxo71Ct4eIF4JZ", - "iwSL0nVhPHaNmsO6vYymi0jjGBBw9s7uCSpr9KQauAeNkqN9l/TxqFfRtkhd1aFziJwmmxmgRTT0gQA/", - "9cSHqNF6R/R3RP+5E32spCKgbtayViC+wm25ZrPWdRcQvUEr2a1UF74r0f9XL9HvOZAmlCjauIPEe8NR", - "TbghF1AWacqIlV8lWOddwz13X4dMu+Cou0qb2rXnSxeUC1dTp8prADjslXi55Mb49rTXYthEZgYWTYsO", - "lpaKmw3cWmjBfztn9v+/WrVfM7XyF5pS5aNno4UxxbOjo1ymNF9IbY5GH8fhM916+GsF/5/+LlIovrL3", - "q48AtlR8zoWVuRd0PmeqNiGOnkwejT7+3wAAAP//bWL/qfvDAQA=", + "H4sIAAAAAAAC/+y9a5PbNpMo/FdQOlvly0oa23GyT/xWat+JnctsHNvlmWTPbuyTQGRLwg4F8AHAGSk+", + "/u+n0LgQJEGJmtH4kswne0QSaDQa3Y2+vhtlYlUKDlyr0ZN3o5JKugINEv+ieS5B4X9zUJlkpWaCj56M", + "jjmhWSYqrklZzQqWkXPYTEfjETNPS6qXo/GI0xWMnoRBxiMJ/6yYhHz0RMsKxiOVLWFF7bRagzTf/nY8", + "+e8Hk6/fvvvyH+9H45HelGYMpSXji9F4tJ4sxMT9OKOKZWp67MZ/v+spLcuCZdQsYcLy9KLqVwjLgWs2", + "ZyD7FtYcb9v6VoyzVbUaPXkQlsS4hgXInjWV5QnPYd23qOgxVQp073rMwwEr8WMcdA1m0K2raLyQUZ0t", + "S8G4TqyE4FNiHyeXEH2+bRFzIVdUt9+PyA9p7+H44YP3/yuQ4sPxl1+kiZEWCyEpzydh3KdhXHJq33u/", + "x4v+aRsBTwWfs0UlQZHLJeglSKKXQCSoUnAFRMz+BzJNmCL/cfryBRGS/AxK0QW8otk5AZ6JHPIpOZkT", + "LjQppbhgOeRjksOcVoVWRAv8MtDHPyuQmxq7Dq4Yk8ANLfw2+h8l+Gg8WqlFSbPz0ds2mt6/H48KtmKJ", + "Vf1M14aiCK9WM5BEzM2CPDgSdCV5H0B2xBierSRZMa6/etymw/rXFV13wTuTFc+ohjwCUEvKFc3MGwhl", + "zlRZ0A2idkXX3zwYO8AVoUVBSuA54wui11z1LcXMfbCFcFgnEH22BGKekJIuIMLzlPyiACkJn2pxDjxQ", + "B5lt8FEp4YKJSoWPetaBUycWEtGBFBVPMSqCDxyae3iU/faQDOo1jvh++zPFFu5RG+pTtjjblEDmrDDy", + "kvxPpXQg4Erhti+BqBIyw3tzYoYxyFdswamuJDx5w++bv8iEnGrKcypz88vK/vRzVWh2yhbmp8L+9Fws", + "WHbKFj07EGBNnVOFn63sP2a89FHV66QseS7EeVXGC8ris2Bo5eRZH2XYMftJI80gj4PegPvjxjpbnzzr", + "Y6nbv9DrsJE9QPbirqTmxXPYSDDQ0myO/6znSFp0Lv8cWfXCfK3LeQq1hvwdu0aF6tjqT8e1EvHaPTZP", + "M8E1WFEYqRlHyGyfvIs1JylKkJrZQWlZTgqR0WKiNNU40r9ImI+ejP7XUa3oHdnP1VE0+XPz1Sl+ZISx", + "BMP4JrQs9xjjlVEeUdXqOeiGD9mjPheSXC5ZtiR6yRRh3G4i6l2G0xRwQbmejvY6ye9j7vCbA6LeCisk", + "7Va0GFDvXhD74gwU0r5Teu+ohqaIGCeIcUJ5ThaFmIUf7h6XZY1cfH5clhZVY8LmBBjKc1gzpdU9xAyt", + "D1k8z8mzKfkhHvuSFQURvNiQGTi5A7kZ0/Jtx8edAm4Qi2uoR7yjCO60kFOzax4NRi87BDGiVrkUhRGB", + "O8nIvPyjezemQPP7oI8/e+qL0d5Pd6jRO6QiNdlf6osbudsiqi5N4ReGmo7b316NoswoW2hJndQIPjRd", + "4S9Mw0rtJJIIoojQ3PZQKenGa1AT1IS6FPSLAks8JV0wjtCOjULOyYqe2/0QiHdDCKCCpm3JzKpXl0wv", + "a5UroH7auV983oSc2nNiNpwyoxuTgiltlCHcTEWWUKDCSYNhIaaiKxHNAFrYsogA86WkpSVz98TqcYwT", + "Gu5fFtZrSvKBQjYJc2y2qPGOUF2Zme9kuElIrMGhCcO3hcjOf6RqeYDDP/NjdY8FTkOWQHOQZEnVMnGm", + "WrRdjzaEvs2LSLNkFk01DUt8LhbqAEssxD5crSyf0qIwU3e5WWu1OPCgg1wUxLxMYMW0uQAzjidgwS6A", + "W9YzJd/RbGmUCZLRohjXdglRTgq4gIIISRjnIMdEL6muDz+O7C9KeI4UGD6ogUSrcTaNKTlbgoS5kHhR", + "lUBWFIXTylyPyqL5TWCuiq6gpTuhsBSVNjBGN5eTZ351cAEceVIYGsEPa8QLfzz41MztHuHMXNjFUQlo", + "aGE8K6q8xl/gFw2gzdu1qOX1FELmaOih2vzGJMmEtENY4e8mN/8BKuuPLXXeLSVM3BCSXoBUtDCray3q", + "XiDfQ53OHSczp5pGJ9NRYfpGZzkHfodKIciEdeMl/ocWxDw2Co6hpJp6GOopqNOE/UCZbVBlZzIvGL6l", + 
"BVlZuxkpaXa+F5RP68nTbGbQyfvOmurcFrpFhB06W7NcHWqbcLC+vWqeEGvz8eyoo6ZsZTrRXEMQcCZK", + "YtlHCwTLKXA0ixCxPrhY+1asUzB9K9YdkSbWcJCdMOMMZvbfivUzB5mQuzGPYw9BulkgpytQKN0abhAz", + "S22qPp4JeTVtouOaqA3whJpRI2Vq3EISvlqVE3c2E+Zx+0JrIBLMS9uVgPbwKYw1sHCq6Q1gQZlRD4GF", + "5kCHxoJYlayAA5D+MqnEzaiCLx6R0x+Pv3z46PdHX35lSLKUYiHpisw2GhS56+x8ROlNAfeSFyfULtKj", + "f/XYO0Sa46bGUaKSGaxo2R3KOlrsxdi+Rsx7Xaw10YyrDgAO4ohgRJtFO3ltv3s/Hj2DWbU4Ba3NJfiV", + "FPODc8PODCno8KVXpTSKhWo6pZy2dJSbV45grSU9KvFN4Ll1vZl1MGXugKvZQYiqb+PzepacOIzmsPNQ", + "7LtN9TSbeKvkRlaHsHyAlEImRXAphRaZKCZGz2MiYbt45d4g7g2/XWX7dwstuaSKmLnRAVbxvMdEodd8", + "uPyyQ5+teY2brRLMrjexOjfvkH1pIr++hZQgJ3rNCVJnw3Iyl2JFKMnxQ9Q1fgBt9S+2glNNV+XL+fww", + "NlKBAyVMPGwFysxE7BtG+1GQCZ6rndYc7w1sIdNNNQRnbWx5X5buh8qh6XTDMzQjHeIs91u/nKuPqA3P", + "IlOYgbGAfNGg1Rs1efVhykJxRyUgNZh6jo/RI/AMCk2/F/KsVnd/kKIqD87O23MOXQ51i3E+h9x86y3K", + "jC8KaGjqCwP7NLXGj7Kgp8HoYNeA0COxPmeLpY7ul6+kuAEZmpwlBSg+sMalwnzTNTG9ELlhPrpSB1A9", + "68FqjmjoNuaDdCYqTSjhIgfc/EqlldKeqB1zULNKSuA61nPRnsEUmYGhroxWZrVVSbRIyZf6wwnN7Amd", + "IGpUT5hDCNWwb9nplvQCCC0k0HxDZgCciJlZdB3lgIukipRGd3ZqnVOJh/LbBrClFBkoBfnE2bN3wuvf", + "s/JHb0EergZXEWYhSpA5lTezgvOLncCfw2ZyQYvKqOc//arufSqL0ELTYscW4DupjWib77pLuQZM24i4", + "DVFMytZaaE+CUbEN0ylAQx+yr4+93u1vg9khghtC4AVIjKi50aPlJ7kBogzw3/DBupElVOXEqIG95gej", + "uZr95pQLrxvumCFMUFClJ7tEinmpYTcxS424eEqK4MA9+uRzqjSqgYTxHO23VhTiPFa3NFOM9gwqwyl7", + "b2Nm0l/9Raw7bWbEO1eVCrcyVZWlkBry1PLQZ9071wtYh7nEPBo7XP20IJWCXSP3ITAa3+HRGQLwD6qD", + "h9r5vLuLw6gDo75s9sVyA74aR9tgPPVvRYiPg2p7YGSq3gNLbky16G0mRAEUTaZKi7I0HEpPKh6+68Pg", + "qX37WP9Sv9slSesGsppKLkChi8m97yC/tEhX6OtaUkUcHD4+AQ1eNkSuC7M51hPFeAaTbecFL8Hmrfjg", + "XOm4V+VC0hwmORR0k4i2sI+JfbwnYfixkUBq+4HQMJmhNzFNI/WZ8PGmV5tV4FQqpXgTfEIyc87NNaom", + "Nff11SfNAadN8U1HrHfCLAhGkg78eIgsS0+JEVH2XwhtyMoRHa7GSaVrrqUHe2HWG0EgjjupDQHt2f8L", + "lJs7KGAHnX8Dqm/h9dSHWnaP+R9le0NgtkRZS9okRUQvX97BGPt4UI8v4hWVmmWsxOvqT7A5+O29PUEy", + "VoLkoCkrICfRA3uTL+PviQ1Dbo95tdv8IHNrF/yOvTWxHB+Z1QT+HDZoNnllMxoia9UhzBGJUY3ApZwg", + "oD5q3tx44ldgTTNdbIxiq5ewIZcggahqZqNWui40LcpJPEA6Z6p/RueQT7rDt0YInOJQ0fJSkYf2trUd", + "vrPWlauBDnfLKoUoEvbP9onvICMJwaBwIVIKs+uMFsWG6JA24ympAaQTEBiNEfSZO6qBZlwB+S9RkYxy", + "vOFWGoKSJiRqPqgsmxmMuhnmdKGqNYaggBXY2zw+uX+/vfD7992eM0XmcGlDbji+2EbH/ftoinsllG4c", + "rgNYu81xO0kIHfRVGiHrbm1tnrI7yM2NPGQnX7UGDw5Oc6aUcoRrln9tBtA6mesha49pZFiAH447yH3X", + "DAnrrBv3/ZStqoLqQzgq4YIWE3EBUrIcdnJyNzET/LsLWrwMn70fj2ANmaHRDCYZZgkOHAvOzDc2sdCM", + "wzgzB9gmjgwFCE7sV6f2ox037Tpuma1WkDOqodiQUkIGNkvOaKkqLHVKbMpEtqR8gTcgKaqFC3W24yDD", + "r5S1hMmKd4bYVxXTaz5BF4ZKpqmh29JnWxolDKi52bb9H/aydkkDKFYYDRLa0fa0/UFJl+l41HvxN/i+", + "qC/+Fm/NlNGrOhMb+mGEtBqagd4zxKfRlbpIjLfRHD5DDDfjpamHTkHZnTgKCq8f9sWFn1ZlWWwOoCTZ", + "gYiEUoJCkRabAZV9KubkZ5ZJcVwsRJB5aqM0rLrOG/vp7z3H9fVVbsCCF4zDZCU4JK70L/Hpz/hwsNnR", + "iuGeEVEh2mvA9sWngYTWApqTDyHp624Skkz77Lc9nep7IQ/lZbcDDr5TDPBc7wzrcFNe1b9OiyLhkrbm", + "hw4XUeMQFM4koUqJjKGieJKrsYs+t15sG9beQv+rkBp1gAPcHrfle43SsKwhH4qSUJIVDM38gistq0y/", + "4RQtfdFSE8GC3jjQbxZ+6l9J26ETZmI31BtOMVA02P+SgUFzSNihvgfw1mFVLRagdOuCNQd4w91bjJOK", + "M41zrcxxmdjzUoLEiL2pfXNFN2RuaEIL8idIQWaVbl45VpXSRGlWFM4RbKYhYv6GU00KoEqTnxk/W+Nw", + "Po7EH1kO+lLI84CF6XDGtQAOiqlJOtLxB/sUk0ocTpYuwQRzLexjH/Fc14YYmbU3ilb8n7v//uS348l/", + "08mfDyZf/+vR23eP39+73/nx0ftvvvm/zZ++eP/NvX//l9T2edhTyeAO8pNn7o5+8gwvYlGeSBv2T8Eh", + "s2J8kiTKOKCoRYvkLtbLcAR3r2n300t4w/WaG8K7oAXLDS86GPm0xVTnQNsj1qKyxsa1zHgeAXteh67B", + "qkiCU7X4643oc+0JtgbcxFveyjFwnFEdHEA3cAqu9pypsNo7P3x3Ro4cIag7SCxu6Ki0QOIG4zIYG1E+", + "ZpfixK43/A1/BnO8Dwr+5A3PqaZH9jQdVQrkt7SgPIPpQpAnPinyGdX0De+Iod4CUlFSc1RBKsUp6Cq9", + 
"ljdvfqPFQrx587YTh9DVrdxUMRd156xrJvNTTozeICo9cUVcJhIuqUz5QnyJD5cNjV9vhcPqJKKyRixf", + "JMaNPx0KZVmqdrGHLorKsjAoikhVuXoFZluJ0iIkjhlm7nJvDQ28EC6oRNJLf+WtFCjyx4qWvzGu35LJ", + "m+rBgy8wBa8ucfCH44GGbjclDL749hajaN93ceFWL8eg8klJFymfyZs3v2mgJVIIKhwrvGkWBcHPGumB", + "PhMAh6oXEHKR99gSC9neeb243FP7lS/rlV4UPsJNbeZOX2sHo6z4K2/gjsx6WunlxHCE5KqUOQZ+r3yB", + "AbowIsdHECi2wAuAWorKLBlItoTs3FW2glWpN+PG5z7Qxcliz3CYQpuRSw6cM4O/jHIzYFXm1CkylG/a", + "JW6UTYbAQV/DOWzOhP18OrA6WFSNLiqxovqOLtJuJGsN+cYH2Y3R3nwXd+VzRF05Esy79GTxJNCF/6b/", + "aFsF4ADHOkUUjToffYigMoEIS/w9KLjCQs141yL91PIYz4BrdgETKNiCzYoEm/7Prl/Dw2qoUkIG7MJn", + "9YYBFWFzYm5HMyuO3Y1JUr4AI9SNIBaKFhi0P006+lE7XAKVegZUb7XX8rjMhIcOFfJLTJpGo8nYLAHW", + "Zr+ZRiMIh0tzwcO7t33HBRJPrxROZdcE+RVB9Z/XSdLTq1wiHMIT9ey8vA97Eu4LLj4tpk4E2T5fGRwu", + "pLg0u2kAFL50IxZ4ieRUpegChoqjhqtoYEmMhgcIB9ml/ST1HTFvqzUdHWPgIuznE4OXJHcA88SwB3QD", + "tEIc/dzWhei8Ci95sfFInRWoUIcAUUs6VDb8bHyxH7BpNgaS18qqB6yJtfjoL6nyRz8fRxz9itrixykl", + "s61+3kkUfUd1tzqeF9Nt1j629pwZEMHNF76Kni+d5+vljcZ71b4bj1yKQ2rvBEctOocCFhYn9mVPZ3V9", + "pno3DRwv53NkepNUIF9kjIw0EzcHmIvYfUKsxZwMHiF1CiKw0bOOA5MXIj7sfLEPkNzVl6J+bJRd0d+Q", + "Tha00fhGSxalkfqsx2uVeZbiylvUKk8rxBmHIYyPieGkF7QwnNQlntaDdGq14d2nVZnNxXbc67sTDTxo", + "bo2oney1SqvPXGV9seLtl5G+Fey1hplYT2xmdPJqNVvPzJlI5itgnnbq8NrKeXcUmYk1xhShhLMB7ntD", + "1w+ZBywKA1kzhVSO3/WpjRa8/QDZrsinqFkh6Tm7WiC7Pk32asD0qNN9ZHc3KqF3IJBaBsy6DLiz6Oy0", + "szS1ra4mUovbcagOG9LUUqym73Amd7IHo13jabPW3Y91ucP+4mj+rH6QIn9do9x16jLaj0tba3Gfsoxt", + "cmgAsQWrr9pKbBKtzcClJl4jrKVYkmH0XWdXF20KCkBLwKShV0/OU27pN29+U4A6w6n/LLJz4u5RvrkX", + "RcNJWDCloXYu+CCXD+/7QXOiuWyJef/qdCnnZn2vhQiKhnXH4oeNZX7wFWDo+pxJpSfomUkuwbz0vUJL", + "2vfm1bQi3Iy3Y8q6evbWgxGic9hMclZUaVJ2IP30zED0IkguVc1QUDJuo41mWAo/GaC7h28S4bGB3VsR", + "9Nwi6Dn9EPgZdrDMqwYmaSivOf1ncsRavHAbZ0nQcoqYuhvai9ItvDbKpe8y2kiJjsIuptt8Pp1zmfux", + "d0Zj+Yz+PiXCjpRcS1QRMZ1AKBYLyH2lN5cUaqteuXp6heCLupag+X1L+cApsVX8sAjflvp9Ljwd+oLT", + "G+1EsCtGEvr4MoOQ19l1WHsQJ1kAt5VbRvv3GymSiIsD4/GNyDL6YXl7J2w+GTp81goXrmN67R6Gzcbt", + "KYDm7lqlwK9v+6HtbpdD3bgv6LhRInb7AcMBkeKYVpEC0yGaHs5Ny5Ll65bjz446vQJJDFT3upXgWzhD", + "tuQG24GfZmDxjl49d4x0xPeds+MIr/lH5pJp45ldRK45GzRz1QbySqI3qREt3K2nHy6aA9f+06+nWki6", + "AOcRnFiQrjUELmcfNEQl6RXRzAZI52w+h9gTpq7ixWkA1/F35AMIu4cEu+6ycLfcSp9dIttBW/UKdiM0", + "TU8JSumLuTjr+iP9xSOyrQVhE23cFZyKyYICP8Fm8istKnMTYlLVsanOQdgU63vQxMXqJ9jgyDtDPg1g", + "O3YFTXGvASk05V0Jj1RUJfyOanRfwDtwYwv32Knj9C4daGtcK43+o1FLqEY/ieZSbu7Y1CEyBtIhe3Wa", + "jjoxZwua29Im9F1bxPLduk90BYmnYhi9cRUhFypt7IwuA1p4wsfFjt6PR9eL90jJSTfijp14FURzchcw", + "GtP6/xtBX3tuCC1LKS5oMXFxMn1KhxQXTunA131YzQe+X6VPxdl3x89fOfDfj0dZAVROgqmjd1X4XvnZ", + "rMq24Nguhmw5dmfbtaawaPNDyew4kuYSS6+3rGmdXjd13FR0UF1kzTwdKb6Tb7oQL7vELaFeUIZIr9oj", + "bQO9msFd9IKywjt+PbRDrex2ucO6KyX5RDzAtYPEoui/a4/Vmyfw5s1vFx6ztT/FBkqFkviJWDp1xUjn", + "Dq9Jn9Wa1ndwSFznS6xkmr53cVfnFBmjCzijB9cDvxeyIahcVmMyYO3mFERzmbB4TDvlz5wXvqMWTolV", + "If9Y/GF4w/378cG/f39M/ijcgwhA/H3mfsd71P37Scdw0tRnWBZa8jhdwb2QF9G7ER/WDMHhcpi6cHyx", + "Cjqy6CfDQKE28syj+9Jh71Iyh8/c/ZJDAean6RBTRbzpFt0xMENO0GlfVmIIfl7Zdp6KCN7OwccsWUNa", + "KHpcBw/rZ+8eIV6t0O88UQXL0kE/fKYMS+I2pNe8TPDlwT5kM0fFeuLKecWi0c1r6kouz9ZColmTCFfJ", + "SsA1fmfCsYCKs39WELX1RUncEs7+KoSjdhTstH3RDdzuGjy6SsPf67sIvVVtm8Foq8v1WXADekSk+kzt", + "me8Qz9hh/ltyFRxFefGJiW1LFzq8k7K23vO2N4F2bmDPPp3Htf+C5Nph2s18NmSnmZrMpfgT0roDOgkT", + "pTu8d5uhAf5P4KkY1TYjC5EDdcPqevZdBDLcttBHKte2JfhFh655VxHhaT6x30bvaTSI9rvfbKDS5cXd", + "JvRdVOPAk2YiTQ8zwwMbhYVjLx8f7ka5PaG2rkUj8yx9zuNE0SM7fn3OHcyd5NqCXs5oqtGRuS8amKLt", + "bwTmaUH8x36DVCjNYGcnUS5DeJfZYn8lyNp71C2VfMW7n5128K2vvuQhxcXXu7GNVSmUSAxT8UvKMY4Q", + 
"v7Mc0H2twMZhmK8uhcQCnyodQ5hDxlZJY/ibN7/lWTfyK2cLZluKVwoInWtX59ENZJvKWypy3bxDLRKH", + "mpM5eTCuz6zfjZxdMMVmBeAbD+0bM6pQQIeYiPCJWR5wvVT4+qMBry8rnkvI9VJZxCpBwv0cVc8QCTsD", + "fQnAyQN87+HX5C4GDCt2AffSAsYpa6MnD78eb+ucjRjHJvHbmHyOXN4nMqQpG6Oq7RiGrbpR05kJcwnw", + "J/TLky3ny3465HThm04E7T5dK8qpQUgKptUOmOy3uL8YytHCC7feGVBaig1hOj0/aGo4Vk82uWGIFgyS", + "idWK6ZWLFFViZSisbkNuJ/XDYX893wbNw+UfYgh2mbjjf4TrFl31ZDhiVP0L9LfHaB0Taiu2FqzOv/Ad", + "asmJr0yNfeFCOziLGzOXWTrqq5iOMSelZFyj1ajS88k/zPVd0swwxGkfuJPZV48T/dWaLYj4foB/cLxL", + "UCAv0qiXPWTvtRz3LbnLBZ+sDEfJ79UlHaJT2Rsrno7v7Qs77hn62tq1GXfSS4BVgwBpxM2vRYp8y4DX", + "JM6wnr0odO+VfXBarWSaYGhlduiX18+dJrISMtXpomYATiuRoCWDC8wvTW+SGfOaeyGLQbtwHeg/bnSb", + "V0sj1c2f7uRlIfIqJ+5poayS0fR//bmuj4/ObZu327JeCpmw0zqL4wcOS93PXtj2odtwQHzWg7nBaMNR", + "uljpSfew+Rzhm48R79UGye55w1T68A8izT0edf379xHo+/fHTlX+41HzsWXv9+8PD5lN2wvNrwnUXE3W", + "tKtXmm9TW/2tSFjvfBfPEDfmSpUkLKxJWWZE6syNMSbNVokfXu84TL7i3mHI6QPkUYOP27j5yPwVN7PO", + "gOnnD83usUnyycPzKIeCkm/FeigRtcSWp6dPAEU9KBloFcSVdLrjJiMldob5RGRrRp1BIcxNNW6ANThq", + "5TPaBYOa8Za9qFiR/1p7oVuSSVKeLZNB5TPz4e/2GhC9EFkwsiXlHIrk1/a2/Lu/VSfu/f8jeoZdMZ5+", + "1G7EbGFvQVqD1QTCT+nHN7hiujATxChqFuQKJU6KhcgJzlN3LqlZY7ejeaqTbCLHH4ddVdpFJWPxBNdQ", + "ZM4KDKNN+8PxzYmkuoerYtt/3+LKjINd+JU1S9jRQRLKVii2FV2VBeAhvABJF/ip4ND6HCu24chRWxKi", + "SvMI38TiL4LoSnIi5vNoGcA1k1BsxqSkStlBHphlwRrnHj15+ODBg2FORsTXgLVbvPqFv6wX9/AIX7FP", + "XOcv2zBhL/CvAv37mur22fwucbn2q/+sQOkUi8UHNiEbPcRGrtvWq6FN8JT8gPXJDKE3WgSgUdRXWG7W", + "BK3KQtB8jEWhz747fk7srPYbCYg6bP26QAtg84gknTzDa6T6+ms9tauGj7O9dI5ZtdKT0JQ1VUnRvFH3", + "kmWt6Ce0DcbYmZJn1iwbAnvsJARLi8sV5FEPWGsGQOIw/9GaZku0d05HW03KPd2Ahrcw9hywdhdFea+h", + "YRZycLMM18XYNjEeE6GXIC+ZAqw7ARfQLNgYqp06g7wv4Nhcraw4t4Qz3UN7De2x9t0FD5xVfX18RRKy", + "1j5c2/dXV/LAJuf7Nns+xa/SeTutztGtuAfbMmPtm25Myc/O2ZFRLjjLsNlESgXHUozD3KoD+nKk/Z1q", + "5M5y4hgm+1WHBHWHxd4O1p5lOsR1gxqip2a/LeHYPzWsXRPABWjleCDkY98+3jnoGFfgGqAZ+oo5qpCJ", + "0K9kWkwIITlgSPp4hNXUemyt35tnL5xtHmvGnDOONjeHVHcTtA62QjH0s3PCNFkIUG61zbww9Zv5Znq2", + "5gjC2+lzsWDZKVvgGDYU0SDFRgF3hzr2McEuBte8+9S863oXhJ8bIXV2Ur/ut0kWosL+p3qu96I/Ffvl", + "A2ki5Ibx49G2EOPWUH+Uy4YM4QIj/6BEed4hm9C+vjnKd+bKaukN3yA2czdZNpjxBBjPGfcO33QdrCwp", + "S3Bj8DT3fKcySbW9dAzieGdAi550GEyqtxED1x2q3YnBoATX6Ofo38a6834PWwkv1LcLyjfEHwpD3ZFS", + "8pQWIRg+0UcftTOnjNlg4VZn/RRbMWx94lNzG+jamQgaPsduKPvKqb5qo7MqX4Ce0DxP1Z37Fp8SfOoT", + "CmENWRWagIU802a59i61uYkywVW12jKXf+Ga0+VMUaVgNSsSobfPwkPIww5jIarZBv9NdcDq3xkX9L53", + "9rePcM/361HQzWZPac+GpieKLSbDMYEy5froqKe+GqHX3x+U0n3i9yeR193icvEepfjbd0ZwxGW6OzH+", + "VrSEKtoYTy/wua8HFiq5NrkSirJOnzeMyMDNS2xZC3j/YhLwC1r0VFyIvTZWvlpPRl/dhay3rAjVrnqd", + "pqTmCUNMGP31v2wEdssz1HVv9sVY2xDrm3SeOHxsRXq/p/Gnhl/RRr3VDKXXn3g1l19NBPv6/Fwrhq69", + "lBaFyAZzBjfMsfmov1SvWK1c5ftEVN7FSuTxWYijuQDSjM0GLCdSK/Bim3yGV6vkE3mZHq1hHwlEM7Rq", + "GaLRLWFsEzM9eB4YO3U8UWSydZgl37MCm0P9x+nLF6P+jYx2oLulrnR20oTdtzEhU61NHgvRwMcWHiB4", + "kbZ/qx6TOtaGSp8G1504+eB7ayAcApKtk7TP28+HDt4hgIWwXaFSfTO61WlG9XZ45EfUUG+v5SgxdaSo", + "ot1tKXH3sUbP+hUSGpEOakza0JGGNHdK9RFyNwVvgbWCxtWjs82VOn2ZOgz02RDlsIOP9+PRSb6X+pTq", + "RTWyo6QY7HO2WOpvC5Gd/wg0B2n7iaSuk7abyArMNVQtWYn3n1IoVvcDLsxgrpD3EoebDk3NOVuCqwrj", + "iwR0xvIB1BeQaewPXYeBSoDhcQ5leokGAu9QxFc+QiiIBMih1MutypIN7i71sm4bCi7zjCkyA+e6uAA+", + "JmwK03ayWl4XhSIF0Lk3wkoh9IC+uiFtCdEYA52ir06P5u1qYKfmW1TS0LbSnQ5vwnIccgJsouUlVXXl", + "qFYZhcHp2vM5ZFjwfmv5vf9cAo/qsY296Q5hmUfV+FhIF8SWDQe1aNewbiuEtxXUqCfVTULaVxDjHDZ3", + "FGnQULIjcMiwvUoFeESO9eP6pgJ9rg0XGMlUoCdEkI+DdwX46x5LV2kCEFWnvCIYnsaNeKorVl4NGq/R", + "XAEM8+n0Wk3763J4qJj2Vffrdlfvvyk/w2b2ygWV0lBuPrYnkZNuO+ZLV64eCy0Gb6EvXA/K/+YLtNpZ", + 
"CnbuOtQgwqxv9pLK3L9xkDJ5Vm6yNNDzMDOrE6O6UT77xuXYDMWsEEYBmvQlhjYzlUII7x1lY63romUI", + "9RykhDz4BAuhYKKFT7Pao/inS5/cgj0bZX4lvLUi+vdIGbYr6u2h8LpuJIHtICn2TKAu+DzGCpGwogZ6", + "GTV3SJtBd+3QU/vc1xTx7f22m1f78B7Oxe4O2T71jqkO5uPTNSdOOdibezUKkVzBMss4BznxTtx2awfe", + "LJOJdZXzKrOqSnw2g/V6cNmxLdwsadTMuqtsXaGiqhznsDmyZh/fddzveAy01SEt6FFB6RZRHNRWrVJw", + "Lw4C3sct31kKUUx6PIMn3X4U7cNwzrJzwMKsITPFaMF3msfGTELuokMqxIxcLje+20JZAof83pSQY26z", + "A334SLMDaWtyfkdvm3+Ns+aV7TDjLNDTNzydZoWdXuQ1uZ8fZgvP6+NNCgy/vOb8dpArzK7XvC9G7hJb", + "wjT7BE+Hmje68R0tFSoiPwtFSoE6tY7gp8gSEvcogtVZojJCGB9AiXMgE1WIVBT+VSrImKHSmIonQ4A0", + "8AHX1RoKN3gSAS7IbkdVVvfY1x0VcyKhjs24agFWV9PUMnHVZxppzxxmaXLGuZAQz4hxprZQc8hswzrH", + "+J8Z05LKzVXKpDZRlTJD9WJ5Z7RkCJSsF1IHS3ZxWBTicoJsbRK6K6XMAeY91RTbvk9p/Z056jOIwi6p", + "cirihixpTjIhJWTxF+kUbwvVSkiYFAKjMFOBHXNtLgkrzOvkpBALIspM5GAboaUpqG+uinOKuhdEoWxJ", + "FFjawZIB9puIjgdOaaSvdc9OUF/b2WjDb/6Z+caWr6jL39lFT2yIQE9+AShX7s5hyL7chRcJx1Zkahtl", + "0yrynK2RbkCmjvycaFnBmLg32l343cGnEsiKKWVBCbR0yYoCq0ewdRTQEOKB0qjt0Z1PMA76gmHAW7OS", + "iFWpSyMdQ/mVmAecxhXZiF5KUS2WUX+AAKe/usvKXezjUX5RFcYkYoqomeIxWQml3bXYjlQvuQ4BvZsJ", + "rqUoiqYhz+r5C+f0/Zmuj7NMPxfifEaz83t4CedCh5XmY19SoR27W88kWzUYh90U9JpPkDzU7jLr9j2M", + "anX0PJh3trhfx/Gwy5Ifgfl2N3Pd7dc47i6sva4mn03fhY45oVqsWJY+bp9X9GtvzGqKeyUrLdouxLYK", + "Db6GfCCWYyGcCblnF83AabKN6jFxPMKFdSAnMv9FNb49LpmD40E9MrTLd5yCNcl61cAWAAipLYSgK2lb", + "F8dKWmA4YmELp2BQShvQgQIHY/+uB5sZ4eBAabgWUJ1o5ADgXWvBGNuKmDayeSbW/vm9umTmlYB/v53K", + "G8yjL6jytCYtacMqfSGrHo6QbkCwNQLxDItgzIbGIYZW9AOFfwRAf2RiA4ZB8Yn7gjGnrIB8kupSfBJs", + "YOPouu5yLKPRfT9Hy8kzWvlOwGbsSoIrrGS1f9l0J5bUkJIIr3ct4jyHNdgcrT9BCtvHdxy5s6CwbX5b", + "FgVRTgq4gEbApqv2VKEWyi7Af6vCxyQHKNHj2za0pSIR4y6BLeuLW/skimUbgt2kOcYi1u4U2WFrSVqG", + "1nxij4kaepQMRBcsr2gDf2pflaNpSzRHOYGqzvVh4q+YQ6f5xY7w2g9w7L9PqTIeE2+H8aG9WVAaddsY", + "0M7I5Er1nXqeDkyOS5kFRxHOlge/tiXxmm+okl7yfqtml+Trm9jAfWKCR4j9bg0ZajXuKgS5uwz1eE5c", + "DSSkdg6Q2wuD+SRhzV8CJ1xEPY8vqQq3mLqqq//BTowvMe4u2lfw0dfxw9ffWYKDEdUqtphuURrI+no2", + "/o9yErcexN7xUjSiwKXybjGNeep21w58QVRFTrjZT6P7Y49gJ8UcFx+TWeUHKgpxaZsYx1fUZ+D9uZb6", + "vIvJqeUsiGUfJz12BYfbVhAWZYis6IYIif+YC+k/K1qw+Qb5jAXff0bUkhoScg5kG0Xh4q7NxNvVq7EH", + "zBtihJ/KrpsNHTMabmNGiYA2gty3bRNkRc8h3gYMELH8M9OGcapqhkYNI7Jb29nFglu8L8+0onlsBMBC", + "s5sGd/AFz83X/1+dthpP5es/lgXNfMtq13yuyWewq70nLr2E1fY05y5f8yQQOuXXRCt9mYz8CtbUPVlX", + "KuenrzlWA+xOC/BOX7BrLWOgUbjV42hLgvigpRx6Fw6Tw9lZUtzqd9fi4s7HH2Z3khWi+5YxBPxPaFca", + "4RWdzLZ0B/V4PbZZ+gfYhUYhngSs1gw+E+uJhLnaFUhj7eAzsa4BVsF2y3gmgSobd3Ty0l1b6wLIjJtr", + "tI3aDW7VMEoOc8ZrVst4WenELQjrIPNNhLDYm4Bo7fHN9ekYRhW9oMXLC5CS5X0bZ06PbQ0cN+nxHhT3", + "bcIAEiRydwCm6hsg5lPX9vn4NSP+bYNBGzurNOU5lXn8OuMkA2m0BnJJN+rqrqrgddjlrKKRLtSsFhK5", + "rZC0LSDFxnmbr+lICgDSA3qUBniCMEg74QWyhiEtehw/XRg+C0/Qiq4nhVhg1m/PgXB1rtF1aC+QgqMR", + "3Wp3w9bt51HsT9g+DbYicYxIC5x1yBTbz/1L3Eq8hP7Cmd568q2Fs52GbSOd7cH0SOWLOj3DEkv3PKYy", + "511hpjh73quqvkyJpz2INjEZEt2xqvfsIsZXuLILsQl9eLPKZghHKj/f2hUmaG9QWxIwQNV5BTRzEWJd", + "Q1zHUGGRMnbVDfa001nrvpdLPeChIUW5s96cNgTomHH26fC5vZ7BpBTlJBsS22q7FeXOyeAgbcLYQx+R", + "C6Fn3SHuRoX+XY2aaI1GXvs2Oe1tJLbLV1Zm20wGfUamHo7edGCIOfIyPMLWtIa5VsEUM/aXc+/sbhrR", + "ApMglEjIKolG5ku62d34saf6/OmPx18+fPT7oy+/IuYFkrMFqLqnQatxYh2ayHjbavRhgxE7y9PpTfDV", + "QizivPfSp72FTXFnzXJbVRcj7rSN3Mc6nRAAqeTcbou8K+0VjlOnRXxa25Va5MF3LIWCm98zKYoi3VMm", + "6FUJ90tqtyIHjLmBlCAVU9owwqb/lOk6KFst0biIVcMvbG0owTPw1mdHBUz3xHKlFtIX04v8DGsxOJ8T", + "gXVZOF5l/UTb1uXuada+h0ojhtvMgJSidKo9m5MURJizJSsIdnVnNkV7ehSmG5itDdhNEaILfk+T3jF3", + "N2ExJ9u5fbMVt05zerOJCfXCH8orkGafd6O/zshVOEntGPhk+EeicMrBuEZY7k3wiuT9YEtW+HEnaiIU", + 
"DRkEWrdARoI8EICefOhG0mqUZBfVJpfWx4DeCO9+bqsfP9du6Z2ZKQiJ/2AHeHEuc/1eSKZw4Hzkwt4/", + "B6RES3nbRwmN5e9Kj/asNwiSaIuc0URrUJYtia5aGCXEq6chz7znVtJJR5dCaGJupkWRSGO3dhw8UzHh", + "mCuBvKDFh+ca3zOp9DHiA/LX/YlbcdpyjGSLSnXwgpzP6SCwohTlDwIVf4W59f8JZmeT0tHN4hz/HRmI", + "JiFa2GjvefCAAyeXOKYN7Hr4FZm5dj+lhIypdkDBpVdpQr4tSDZ38bWw1u3c32u3CfpV6Gsch7mPByIv", + "IidbiBxwMNdH/SMzpx4OkDwtKVLtEEoCfyleFzdV3yF2rtka5mqlnKLCjXuWcuq2ix+6PFwHCq9KQXed", + "g6V+A7cJgV+vbWitssEdZt68+U3PhhQUS3eDMZ9jjbODtIW5flOYD1LgzKLSjeEgSRJWrXLvql7TipeM", + "6jQ0d9Go+z0N5JcW/WY0vBTMK27HCw1QMVfcs3UxH4coBsHNZ0/IG36fqCX1dwv356MvvxqNR8CrlVl8", + "/Xw0Hrmnb1M3tXydzCutC+l0YkRdN4E7ipR0MySZfWfpnCR+60pBH16lUZrN0ne6H82e4cXVJSCccGT1", + "yF6sBHX1c24LAG0lhtZhDSfGkmRdHihsxa5KQb/2lcW3pd97un20uG/Fip1Bco1GLO/Ho4UtUobdSX53", + "veo+7LZ7CHrqBbqlX6cMmEVMYq2NyaOpoqJuAxqyuM8SHTIw8zqrJNObU4N/b3Znv5+nikH9EMozuZpf", + "wQPvdF8tzoH7GLO6mFOlvHb9g6AFap82MIAbnVMUU/Kd7RDixOI3d2b/Bl/843H+4IuH/zb7x4MvH2Tw", + "+MuvHzygXz+mD7/+4iE8+seXjx/Aw/lXX88e5Y8eP5o9fvT4qy+/zr54/HD2+Kuv/+2OoXQDsgXUd/55", + "Mvrfk+NiISbHr04mZwbYGie0ZD+B2Ru0sM2xQCEiNUMRCyvKitET/9P/7wXlNBOrenj/68j1gxwttS7V", + "k6Ojy8vLafzJ0QJroEy0qLLlkZ8Ha1k27iuvTkJekI39wx2tfU64qaG+n3n2+rvTM3L86mRaE8zoyejB", + "9MH0IdZTLIHTko2ejL7An/D0LHHfj7CK9pFyzXiOQuro+3HnWVnaVj3m0SKUATV/LYEWyCLNHyvQkmX+", + "kQSab9z/1SVdLEBOMWPM/nTx6MjfPY7euboy7w1gyWAD25Ul6r3hg5/LalawzGiorloWep1sUo+KG+I7", + "f1ylxmRGC8oz8IkDPMewSFt2xWg5AeEnuUG0/f6kZnaIRh+NMnryW8oq2wFv6onU7EBEQ6GuUs0j0AY/", + "sjwSXeOB4xku9mDy9dt3X/7jfTIYuxuXVQc0bn2aLEWmAPs6/0GL4g9rAYc1hs63gufGfUGP47pcD35Q", + "o22MxubwNPq8fqfZnOQPLjj8EdD4zwrkpsajA2wU480rcLQozIuCQ0Jv6y79aZ0seOnajMdxylEE83+c", + "vnxBhCTOFvaKZuchUdInzdaJwnHOrPmybylO4KVW4jIuV2pRNsvvh9W8xT7KCCge80cPHnje5uwEEa6P", + "3HmMZhrUbMi6M8MoHpwrDNTlgfbR61A8W9LSnuNjn+5gVH7nULYvTQ11Pz7gQpslvq+93PZwnUV/S3Mi", + "XSUGXMrDz3YpJ9yGrhtZZmXu+/Hoy894b0644b20IPimFdp4jrtC6hd+zsUl928afatarajcoDalg1Bo", + "d8mjC4VRHCgrLKeKim/yxejt+16JeRTHaB+9a5Ssy68lT61bt9FOcreI7ZEDOJZNonU/3D0uSwxRPw3P", + "j8vyleH9CgOXgCHnhTVTWt2bkh/irxveWAuJdcY2cpgcjnwBzWZwTtQ6PCnvG/VV/lai/7hpumQ5cM3m", + "zFYQTq2jQXNblzO4VVsi1n/741shHlNNJ68yqk23bw5JaOLhlLWJ6zs7cAx7pA/YZ/l6dUwtEMn66jvl", + "yC1a90drn4IXLSXoenUf6A8jVHzd+CADG8LuBkXOZ66u/kwLQ0LRclu9/E6e3aqxfys1NtRwXli9siwP", + "oNj6JLhdrxy9c0WGD6HvoplikKYbW0Cib6M8pbstjnNvSo7b71yNrbjKzTt1WJuU97fTXm1J6Z16q6Oa", + "w2qsjTzIXS/caq396lWcyrtPZm1DpzK/D/r4r6um3uJxL73ULGK3RnoF5t/RNp2ouTGh8JfUMh3SbvXL", + "v7V+GRo/XEvDjJMcjly1mkjfvJZhtW04ZTrokc1+IRHTw7JUWLfFHuFxndBlWIzNVHE5Kmrsr77oXLe3", + "YrtZ487FuKsg/gDxDfzbzcmzIbrh52YVvFFnWP1lUpykN/mmmXLStfT6w7iWhjG5xw8efzgI4l14ITT5", + "3oeOf/kh9+CQvDFNVvvywm2s7Wgm1rvYG2/xt1AR1Rz+BrMLNbHH0XPztg3+uYtFImZUwVeP/f3l3pR8", + "616ty065cMmFMBzPJxdTubAfGaZpkEHu+D+f4Ph3puR7TJnXaowRy5iLhy8yrp88fPTFY/eKpJc2ILj9", + "3uyrx0+Ov/nGvVZKxjWGi9hrT+d1peWTJRSFcB84YdMd1zx48r//67+n0+mdnfxZrL/dvDB89S/IpMep", + "Wr2Bkvq2/TPf7dTlm9sN7t+CDxnr8a1YJ8WJWN+Ks48mzgz2/xJibNYkI3c1DsbjRje8A4o1e0z2EWxj", + "J8gwgTBIpSl5IVx/1Kqg0tYmw+LviiwqKinXAPnUUypmfytbgjUrGJatkUSBvAA5USz0X6gkhAJapYQL", + "zNiqy5M3INgtMTBh468vLX6m6yigfhYUBy0c7tAcuqJrgv2yNFGgx7aI6Jp88w15MK4vZkVhBpgEDKe4", + "9IquRwmmvCtdI/XrYQ2mgb6HVsF75vAo5O6YdRx7iBmt1txCMeb6mvR3Fxaf7a3DHgy3sQdi1nv77mrf", + "XGxMce1Dt5pRrC6psXWAqsqy2NRF441i6bW2NFc1Mwy1kHwunqcbtYygsyB1G2/v1S1HuLWGXIsvtQlq", + "Tx6EyZfq6B0aKGIG1GECmJi4kwE4x5ZVR3rOvnQ56Yc7+KEewpZnvZWeQgexuC4GuYvpFFirDSu0brDk", + "o8SSqmyOpaHu+Rb0rpsCltypI/LTypMdfmImTSlRUUecW894v6KHtNjtnxBvYE5tCZ4hnUuj+gro8wWZ", + "OIov8T+0iEkgNAzz9YyRmAI9uJ701gRiE2JdQpEvDFLSRmf+3VA+rSfv6qiIlkO4zG8RvB+COyz+O1fv", + 
"yPIUt4i/QpKOv9BPyAtRF5ex/P4v6ZK+Sf3kphf0QnCwsRfmMmBp8dbNHpSnWuj7WmT2Sld37ryqInXk", + "6z1s1aZ+tJUIPlON6gZE+o/JKhkNqWMQO91ZMKkebQiz9mU4aEMFnH7Mu9lH4a+f4IXtY3CwD8NybL0e", + "x3ecmsAPy4Sw3J8l5qNQLKePIz03L0d62itXMuVvyp22EUwaVQnCCaWIaKL04vRveJyfurZq2hemsuUm", + "FeMZECVWgLcKo8a7rhUWwn98OAg1W0FORIU1M6OM9I/McL588MWHm/4U5AXLgJzBqhSSSlZsyC88tE+7", + "DgNUhLo9j23o3cNBGEe3YLMsaRbXPrwGXxSLLW5QZ+2vCyu78lSi0iBtSd1Wl0zW4dspKzoyjOdm6luV", + "D7/22zC0NcRTWhSIv12+Ohx4UMR7UdgNhhXTum40FUtg8h3NlmGzx7XtLTQT9h1Jxq0a1jiy6yxry3Uo", + "MBuvgUSriSwcIGEusEskSPDGxVVVaFYWzW9Ct23sPpiIRLPEGlfAO3nmV2fd6mJeD90maN+/xA0+NXO7", + "RzgzF3ZxVAIy89gAGtskpw2gbR9OH8ofdU90PSBdeWQmW/Wq66insgQq648tw7hbSpi4ISS9AKkont7W", + "ou7dqvOfhjq/dg0SPhFlPunqvS7zv7psakTkv9Nrlr/frbt3io7+ddw0Z62ioSfP4qwpEarueb2iZzEG", + "kXsmav7raEClrJuuwJp0IdXVLbuumGGlWm+9S4MZSudsbbvn9ZX0/dCip84ciw86EW2V4KOKIP2xRNCk", + "JYOaaPl4Eglb4Iyj8J1SCi0yUdiovaoshdShILCaDrqIQZ+Ya9zD+mtRX0OUrVmudhrBz/Ct2ytRbQU/", + "83hLmcGb51dtae+9M6KxnmvIXelMlMTed1ogfFRGd6tjpxhcy2L+uRvMdS/pHdh+nlGdLavy6B3+B6sQ", + "v6/TYbGrkzrSa36EfXyP3m2N2UQeW0BuiBE/bZi8Ol2Bk5GXz/HzuvnU90JG+sgP5rvdrLOJtHFbC7A9", + "iTG4M8FUb0ZtvtU2+1wLrQ2/vkM9MWLnvIZqD1En00C7UUszX8DB9jFOkPBtAMintaDa3zJnPCc02sbW", + "pVrImhHcsM/lphf9MVw4Hz7q5cvP+Jy9EJqcrMoCVsA15NeLgCZtDuelx1Zxu59i4ER/N0y6K/Njie8z", + "RYIuslPA/4Usd7cy/pOS8U+DWyom0FuJ/flIbOkP4a1w/vSF8xef7WpuMPpjoLC+ghetKaDrO/qeorqj", + "JjjrVsuksM0Bh5fy9irV90L6Vpy38v0vl49k93hwLMsQq84u662b8hDJPp8U9MNsE0WRsE70HeFxCJdh", + "WD5RZAxbLp3kauzicqxBw53vW5Xok1aJor2+1YhuzRWfmbmiR/9xloKiGKKC7KsaXaxEDt47K+ZzV8m4", + "Ty9q9tQ05Kk0XZXEfjntjW09Yys4NW++tFMcVMTWYLfcki3wDLIUZILn6qrdY91UVxVO6LHqh+qDu0jD", + "tnhYXAmg6ZXp+HVU2bBDHqS9IwobpPpazg4ZOVwQQ5XTA9Dy0Tv7L9rlSqESqzn1VN3ZmLtuW2xxajtu", + "A0DyCjVTW+XafyXm5IGtUV1xTDheMtdHHWMEtdwY7dUXwJNAC5I1Eg0DHN3jdNp7nLbeHM5Sq+tZU/pa", + "Iepje+17xZXKPrXSwX/64EflKeXucHRRqQWhhMOCanYBPspgeltV6crC0NU02sIqx4TmuT239SbABcgN", + "UdVMGVWJN9NG7qjmydqDtcC6BMmMhKdF7fO3t4wjWzJpWyzTqX3jmjKvxbVsoSbZbLbuBbMr4yTm5GeW", + "SXFcLESIRlYbpWHV6UjuPv29pzGBt1DsZTEQvGAcJivBUy20X+LTn/HhYJaBZar6RjwzD/casCXem0ho", + "LaA5+RAV4Lqb9ImwkGsF6LRWK6EU0tywZ7awjj1Ee55Hf/I2POsexw3PImecexgNFPfYbvx85OPFGx23", + "k2++a/zp6rO5N9Wy0rm4jGZBO4SNyxxSTQkvALcptr1EHOEndebC00SX5Pphf6Pkv2nSrXMpxSmVLmXt", + "AqRqXTJvM2//Upm3g/d9Ly5thqzULk5XqcMqRi9EDnbcOtvSHP1UvxQuciDKA9HSh0KYZ7pLk5dr9XsW", + "b0yRGWB9TVotlppUJdGiG/c4jiaY0Myy5om9j6UnjMr42lsbTrekF0BoIYHm5g4NnIiZWXQtYXGRVGFF", + "Zp+85oJZh6tdEbClFBkoBfnEN43ZBa9/z6bL6S3Iw9XgKsIsRAkyp/JmVnB+sRP4c9hM8PauyN2fflX3", + "PpVFWF10+xbYmq6JjWgn5XaXcg2YthFxG6KYlG0OsD0JmB0nVmUBLj8ugezrY693+9tgdojghhB4AZLN", + "2Q0fLT/JDRBlgP+GD9aNLKEqJ0bP6ML91D49YyvUGDnlwhtsd8wQJiio0pNdIsW8FC9amaVGXDwlRXDg", + "njv7c6o06uOE8RyrFlpRiPPYm4OZYt9bPU5plAN7lUpM+qt9mJo2M2Keq0oRN4LPXYM8tTwO6y1zvYB1", + "mAtLgPixQ3KctbTuGrkPgdH4Do9Ryx5CdWjQCMQMl1gc2oGpM//sheUGfDWOtsF46t+KEB+HX/TAyFS9", + "B5bcsBdATG+h9Ox4pLQoS8Oh9KTi4bs+DJ7at4/1L/W7XZK0xR2sppILUHFOo4P80iJdoQ19SRVxcJAV", + "PXdpjwvXcbcLsznWEywkNNl2XtCqbt6KD86VjntVLiTNYZJDQRN2ql/sY2If70kYfmwkEE/okwuhYTLD", + "GiFpGqnPhLyKKS/MKnAqlVK8CT4hmTnncyEjUnNfX33SHHDaFN90xHonzIJgJOnAj4fIsvTUY0Q0Yxiy", + "ckSHq3FS6Zpr6cFemPVGEIjjTmoLUHv2/wLl5g4K2EHn34DqW3g99aGW3bbpxrK9ITBboqwlbZIiopcv", + "72CMfTwoZUX+LN1G7SC6G8z7bFrRozv89Cr2iaNLyvRkLqS9t0zoXIPcmc3xn5T5uAznZNLC1SAiOILT", + "Edw4KLXipn+OY1kQiJN/hkRcrScjlCl5SFaMV9o+EZUe26LWEmi2NHek2LxuR8LW0K6MkoQFlXmBvYHn", + "QREQ0pZl0i1lBoFOpMg2jTZm3d8L+ZkX/H97a3G6tTjdWpxuLU63Fqdbi9OtxenW4nRrcbq1ON1anG4t", + "TrcWp1uL09/V4vSxKrNNvIbma59ywSftYOrbWOq/VKH/IHu9AQytT5eUIQuMCqP026X2MPRpoAXigBXQ", + 
"nwdig87Pvjt+TpSoZAYkMxAyTsqCmksXrHVoeD6jCr567DOVrS5AV2S2MWzFKAzmhS8ekdMfj33t3qXr", + "JNR89+6xDTUlSm8KuOea2QHPrULuu9oBN0h3Te2oFz++MbprE88KzKFR5Dt8+xlcQCFKkLagKra07Fr0", + "zoAWTx1udhj0/tNM7kLt/zCj/TFuGDUd2la09Nciv1aqCLUJ2+RZlML9x5wWCv7oy+K2461oub0b5lvL", + "fUHpb0W+aZ0Qs2tHuIHNsxEa+80Yp3KTKEzXTZZqk4YWhl05wuoaMd8fNMltmex/1SWzXRSWupnYRgTp", + "0fuoPDVOvWGdoWye/7xFJ6NUinosSpe2DZoDcFAtUkyosntCXtvvPm7lUYTIHbGamX8ygcbNNwPTwHfN", + "rcixns81l8gjPnl68eyPDWHnVQaEaUUcxQ0QL0YjNCMtgE8cA5rMRL6ZNNjXqCGFcqaoUrCa7ZZEMf/E", + "ExeEj3myXU59HDHyLFrcNp4cE8164hhwD3feaBjMmwO2cETHniOM3zSL7mOjMQjE8aeUba3F+/ZlevU0", + "m1vGd8v4otPY0ggYd0182kxkeoOMT25kxft53ndryCoDXHyS76LfA72qsNYNJ3oOs2qxMLeFrpsVGxnh", + "eEzwj8QK7XKHcsH9KMgO/tqnwVy3xkV7uC53icpO3PXFYO/hdlC+QY/QqqR8Y3YD80gmiq2qwuLQtgI/", + "LKO1fQtSVe1r62SfBf+VN0pGxmgnapu/W7SQS6qI3V/IScVzl6zYKae/5sPLJNmhz9a8ZtNbSyLZ9SZW", + "5+YdIiL8LjeLUihSgpzoNbcHqnGY0DtGiT25H7V8/63Y+HBiw5a0gB4G2+0IUjOEA0kPGfE1FB9R16s6", + "p7bRC4s2M4Ebz9Ci0Z+FFrfwsW8eNDaoM3wzRKg2tzh/MxQloSQrGHqjBVdaVpl+wyk6pKKFTbvhQ96G", + "3c/7nvpX0u7ShDfTDfWGUwwiC26qJA+cQ8Jd8j2AZ7GqWixAGT4aE9Ac4A13bzFOKm5uYWJOViyTYmKz", + "4s35MrrL1L65ohsyx4JIgvwJUpCZkfrRrltbstKsKFy8kpmGiPkbTjUpgCpNfmaGA5vhfOGVEFII+lLI", + "84CF6XC3/gI4KKYmaWvND/Yp9hR3OPFWQbRw2sd1f532NajuqPB/7v77k9+OJ/9NJ38+mHz9r0dv3z1+", + "f+9+58dH77/55v82f/ri/Tf3/v1fUtvnYWd5L+QnzzAwEavCF0zFbTHbsH8KcQMrxidJojxbAnFxhW1a", + "JHex5KQjuHtN95RewhtupKUWBCUE1Qckn7YbqXOg7RFrUVlj41reJo+AQXfIg7AqkuBUt76bv1CqeEQH", + "3nOKG2/7grT2fk8/TUNuA3Z47ZPq9qnrgtnzkruFNCxtrXpa7o2zBshbnSCff2nbw19IPRoPdiXtDthl", + "V83mn4g3v+FjQgvBF7a2q7miCtwnxstKY5bATVoB4YIWE3EBUrIc1MCVMsG/u6DFy/DZ+/EI1pBNtKQZ", + "TKxZYijWzsw3lk7NOIwzzWgxwav5UIDgxH51aj/aIb/PQogaW60gZ1RDsSGlhAxyW/eQKVIbBaa2EAvJ", + "lpQvUNRLUS2W9jU7ziVICH1SzT28PcS+uoBe84mtmdkF/9i14o4LjgPNloleWCj7LmkABfJGm72B29Oo", + "iNxnBBiPehV5g++LOgzR4q3Jga6qdTT0hwhpNTSHqCt9e0huD8nf7ZCkKsQiPuctk4pFYryNN2x7u+ki", + "yR/QlPdRKqjfNij5qzco8WxJEUokbdxx0j0zqSJMk0ssrzYDYuRdhS4E14jUGQkw3TM66q5wsHJtS7Ml", + "ZdzV5grJKgiHuXKvVkxr38f7Rqyvlpmh2dWgA7JKMr3BWxEt2e/nYP7/1lwrFMgLf2GqZDF6MlpqXT45", + "OipERoulUPoI+4TUz1Tr4dsA/zt/1ykluzD3t/cItpBswbiR0Zd0sQBZ2zlHj6YPRu//XwAAAP//d/EP", + "a0PJAQA=", } // GetSwagger returns the content of the embedded swagger specification file @@ -1066,16 +1060,16 @@ var swaggerSpec = []string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -1093,7 +1087,7 @@ func decodeSpecCached() func() ([]byte, error) { // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -1107,12 +1101,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. 
// Urls can be supported but this task was out of the scope. func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.yml similarity index 83% rename from daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml rename to daemon/algod/api/server/v2/generated/nonparticipating/public/routes.yml index 8566bf764e..84a30800d3 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.yml @@ -12,9 +12,9 @@ output-options: - participating - data - experimental - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go index fdae2f21a9..db801f0c31 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -1,6 +1,6 @@ // Package private provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package private import ( @@ -14,9 +14,10 @@ import ( "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" + "github.com/algorand/go-algorand/data/basics" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. @@ -38,7 +39,7 @@ type ServerInterface interface { AddParticipationKey(ctx echo.Context) error // Generate and install participation keys to the node. 
// (POST /v2/participation/generate/{address}) - GenerateParticipationKeys(ctx echo.Context, address string, params GenerateParticipationKeysParams) error + GenerateParticipationKeys(ctx echo.Context, address basics.Address, params GenerateParticipationKeysParams) error // Delete a given participation key by ID // (DELETE /v2/participation/{participation-id}) DeleteParticipationKeyByID(ctx echo.Context, participationId string) error @@ -59,9 +60,9 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetConfig(ctx) return err } @@ -70,9 +71,9 @@ func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetDebugSettingsProf(ctx) return err } @@ -81,9 +82,9 @@ func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { func (w *ServerInterfaceWrapper) PutDebugSettingsProf(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.PutDebugSettingsProf(ctx) return err } @@ -92,9 +93,9 @@ func (w *ServerInterfaceWrapper) PutDebugSettingsProf(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetParticipationKeys(ctx) return err } @@ -103,9 +104,9 @@ func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error { func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AddParticipationKey(ctx) return err } @@ -114,14 +115,14 @@ func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error { func (w *ServerInterfaceWrapper) GenerateParticipationKeys(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GenerateParticipationKeysParams 
@@ -146,7 +147,7 @@ func (w *ServerInterfaceWrapper) GenerateParticipationKeys(ctx echo.Context) err return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter last: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GenerateParticipationKeys(ctx, address, params) return err } @@ -157,14 +158,14 @@ func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) er // ------------- Path parameter "participation-id" ------------- var participationId string - err = runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId) + err = runtime.BindStyledParameterWithOptions("simple", "participation-id", ctx.Param("participation-id"), &participationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.DeleteParticipationKeyByID(ctx, participationId) return err } @@ -175,14 +176,14 @@ func (w *ServerInterfaceWrapper) GetParticipationKeyByID(ctx echo.Context) error // ------------- Path parameter "participation-id" ------------- var participationId string - err = runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId) + err = runtime.BindStyledParameterWithOptions("simple", "participation-id", ctx.Param("participation-id"), &participationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetParticipationKeyByID(ctx, participationId) return err } @@ -193,14 +194,14 @@ func (w *ServerInterfaceWrapper) AppendKeys(ctx echo.Context) error { // ------------- Path parameter "participation-id" ------------- var participationId string - err = runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId) + err = runtime.BindStyledParameterWithOptions("simple", "participation-id", ctx.Param("participation-id"), &participationId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.AppendKeys(ctx, participationId) return err } @@ -248,239 +249,235 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object 
var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9e3PctpIo/lVQs1vlxw4l23GyJ/7Vqf0pdpKjjRO7LCV7dy3fBEP2zOCIAzAAOI/4", - "+rvfQuNBkARnOJJiJ3Xzl60hHo1Go9EvdL+f5GJVCQ5cq8mz95OKSroCDRL/onkuaq4zVpi/ClC5ZJVm", - "gk+e+W9Eacn4YjKdMPNrRfVyMp1wuoKmjek/nUj4tWYSiskzLWuYTlS+hBU1A+tdZVqHkbbZQmRuiDM7", - "xPmLyYc9H2hRSFCqD+UrXu4I43lZF0C0pFzR3HxSZMP0kuglU8R1JowTwYGIOdHLVmMyZ1AW6sQv8tca", - "5C5apZt8eEkfGhAzKUrow/lcrGaMg4cKAlBhQ4gWpIA5NlpSTcwMBlbfUAuigMp8SeZCHgDVAhHDC7xe", - "TZ69nSjgBUjcrRzYGv87lwC/QaapXICevJumFjfXIDPNVomlnTvsS1B1qRXBtrjGBVsDJ6bXCfm+VprM", - "gFBO3nzznHz22WdfmoWsqNZQOCIbXFUze7wm233ybFJQDf5zn9ZouRCS8iIL7d988xznv3ALHNuKKgXp", - "w3JmvpDzF0ML8B0TJMS4hgXuQ4v6TY/EoWh+nsFcSBi5J7bxnW5KPP8n3ZWc6nxZCcZ1Yl8IfiX2c5KH", - "Rd338bAAQKt9ZTAlzaBvH2Vfvnv/ePr40Yd/eXuW/Y/78/PPPoxc/vMw7gEMJBvmtZTA8122kEDxtCwp", - "7+PjjaMHtRR1WZAlXePm0xWyeteXmL6Wda5pWRs6YbkUZ+VCKEIdGRUwp3WpiZ+Y1Lw0bMqM5qidMEUq", - "KdasgGJquO9myfIlyamyQ2A7smFlaWiwVlAM0Vp6dXsO04cYJQauG+EDF/THRUazrgOYgC1ygywvhYJM", - "iwPXk79xKC9IfKE0d5U67rIil0sgOLn5YC9bxB03NF2WO6JxXwtCFaHEX01TwuZkJ2qywc0p2TX2d6sx", - "WFsRgzTcnNY9ag7vEPp6yEggbyZECZQj8vy566OMz9milqDIZgl66e48CaoSXAERs39Crs22/+fFqx+I", - "kOR7UIou4DXNrwnwXBRQnJDzOeFCR6ThaAlxaHoOrcPBlbrk/6mEoYmVWlQ0v07f6CVbscSqvqdbtqpX", - "hNerGUizpf4K0YJI0LXkQwDZEQ+Q4opu+5NeyprnuP/NtC1ZzlAbU1VJd4iwFd3+/dHUgaMILUtSAS8Y", - "XxC95YNynJn7MHiZFDUvRog52uxpdLGqCnI2Z1CQMMoeSNw0h+Bh/Dh4GuErAscPMghOmOUAOBy2CZox", - "p9t8IRVdQEQyJ+RHx9zwqxbXwAOhk9kOP1US1kzUKnQagBGn3i+Bc6EhqyTMWYLGLhw6DIOxbRwHXjkZ", - "KBdcU8ahMMwZgRYaLLMahCmacL++07/FZ1TBF0+H7vjm68jdn4vuru/d8VG7jY0yeyQTV6f56g5sWrJq", - "9R+hH8ZzK7bI7M+9jWSLS3PbzFmJN9E/zf55NNQKmUALEf5uUmzBqa4lPLviD81fJCMXmvKCysL8srI/", - "fV+Xml2whfmptD+9FAuWX7DFADIDrEmFC7ut7D9mvDQ71tukXvFSiOu6iheUtxTX2Y6cvxjaZDvmsYR5", - "FrTdWPG43Hpl5Ngeehs2cgDIQdxV1DS8hp0EAy3N5/jPdo70ROfyN/NPVZWmt67mKdQaOnZXMpoPnFnh", - "rKpKllODxDfus/lqmABYRYI2LU7xQn32PgKxkqICqZkdlFZVVoqclpnSVONI/yphPnk2+ZfTxv5yarur", - "02jyl6bXBXYyIqsVgzJaVUeM8dqIPmoPszAMGj8hm7BsD4Umxu0mGlJihgWXsKZcnzQqS4sfhAP81s3U", - "4NtKOxbfHRVsEOHENpyBshKwbXhPkQj1BNFKEK0okC5KMQs/3D+rqgaD+P2sqiw+UHoEhoIZbJnS6gEu", - "nzYnKZ7n/MUJ+TYeG0VxwcuduRysqGHuhrm7tdwtFmxLbg3NiPcUwe0U8sRsjUeDEfPvguJQrViK0kg9", - "B2nFNP6HaxuTmfl9VOc/B4nFuB0mLlS0HOasjoO/RMrN/Q7l9AnHmXtOyFm3783Ixoyyh2DUeYPFuyYe", - "/IVpWKmDlBBBFFGT2x4qJd1NnJCYobDXJ5MfFVgKqeiCcYR2atQnTlb02u6HQLwbQgAV9CJLS1aCDCZU", - "J3M61J/07Cx/AmpNbayXRI2kWjKlUa/GxmQJJQrOlHuCjknlRpQxYsP3LCLAvJG0srTsvlixi3HU520j", - "C+stL96Rd2IS5ojdRxuNUN2YLR9knUlIkGt0YPiqFPn1P6ha3sEJn/mx+rSP05Al0AIkWVK1TBycDm03", - "o42hb9MQaZbMoqlOwhJfioW6gyWW4hjWVVXPaVmaqfssq7NaHHjUQS5LYhoTWDE0mDvF0VrYrf5Fvqb5", - "0ogFJKdlOW1MRaLKSlhDaZR2xjnIKdFLqpvDjyN7vQbPkQLD7DSQaDXOzIQmNhlsERLIiuINtDLaTFW2", - "+wQOqugKOlIQ3oiiRitCpGicv/CrgzVw5ElhaAQ/rBGtNfHgJ2Zu9wln5sIuzloAtXffBfwFftEC2rRu", - "7lPeTCFkYW3W2vzGJMmFtEPYG95Nbv4DVDadLXXeryRkbghJ1yAVLc3qOot6EMj3rk7ngZNZUE2jk+mo", - "MK2AWc6B/VC8A5mw0rzC/9CSmM9GijGU1FAPQ2FERO7Uwl7MBlV2JtMA7a2CrKwpk1Q0vz4KyufN5Gk2", - "M+rkfW2tp24L3SLCDl1uWaHuaptwsKG9ap8Qa7vy7Kgni+xlOtFcYxBwKSpi2UcHBMspcDSLELG982vt", - "K7FNwfSV2PauNLGFO9kJM85oZv+V2L5wkAl5GPM49hikmwVyugKFtxuPGaeZpfHLnc2EvJk00blgOGm8", - "jYSaUSNhatpBEjatq8ydzYTHwjboDNQEeOwXArrDpzDWwsKFpr8DFpQZ9S6w0B7orrEgVhUr4Q5If5kU", - "4mZUwWdPyMU/zj5//OTnJ59/YUiykmIh6YrMdhoUue/MckTpXQkPktoRShfp0b946n1U7XFT4yhRyxxW", - "tOoPZX1fVvu1zYhp18daG8246gDgKI4I5mqzaCfWrWtAewGzenEBWhtN97UU8zvnhr0ZUtBho9eVNIKF", - "avsJnbR0Wpgmp7DVkp5W2BJ4YeMMzDqYMjrganYnRDW08UUzS0EcRgs4eCiO3aZmml28VXIn67swb4CU", - "Qiav4EoKLXJRZkbOYyJhoHjtWhDXwm9X1f3dQks2VBEzN3ova14M2CH0lo+/v+zQl1ve4GbvDWbXm1id", 
- "m3fMvrSR32ghFchMbzlB6myZR+ZSrAglBXZEWeNb0Fb+Yiu40HRVvZrP78baKXCghB2HrUCZmYhtYaQf", - "BbngNpjvgMnGjToGPV3EeC+THgbAYeRix3N0ld3FsR22Zq0YR7+92vE8Mm0ZGEsoFi2yvL0Jawgddqp7", - "KgGOQcdL/Iy2+hdQavqNkJeN+PqtFHV15+y5O+fY5VC3GOcNKExfbwZmfFG2A0gXBvaT1Bo/yYKeByOC", - "XQNCjxT5ki2WOtIXX0vxO9yJyVlSgOIHaywqTZ++yegHURhmomt1B6JkM1jD4QzdxnyNzkStCSVcFICb", - "X6u0kDkQcoixThiipWO5Fe0TTJEZGOrKaW1WW1cEA5B690XTMaO5PaEZokYNhF+EuBnbyk5nw9lKCbTY", - "kRkAJ2LmYhxc9AUukmL0lPZimhNxE/yiBVclRQ5KQZE5U/RB0Hw7e3XoPXhCwBHgMAtRgsypvDWw1+uD", - "cF7DLsNYP0Xuf/eTevAJ4NVC0/IAYrFNCr1de1of6nHT7yO47uQx2VlLnaVaI94aBlGChiEUHoWTwf3r", - "QtTbxdujZQ0SQ0p+V4r3k9yOgAKovzO93xbauhqIYHdqupHwzIZxyoUXrFKDlVTp7BBbNo1atgSzgogT", - "pjgxDjwgeL2kStswKMYLtGna6wTnsUKYmWIY4EE1xIz8k9dA+mPn5h7kqlZBHVF1VQmpoUitAT2yg3P9", - "ANswl5hHYwedRwtSKzg08hCWovEdspwGjH9QHfyvzqPbXxz61M09v0uisgVEg4h9gFz4VhF24yjeAUCY", - "ahBtCYepDuWE0OHpRGlRVYZb6Kzmod8Qmi5s6zP9Y9O2T1zWyWHv7UKAQgeKa+8g31jM2vjtJVXEweFd", - "7GjOsfFafZjNYcwU4zlk+ygfVTzTKj4CBw9pXS0kLSAroKS7RHCA/Uzs530D4I436q7QkNlA3PSmN5Ts", - "4x73DC1wPJUSHgl+Ibk5gkYVaAjE9T4wcgE4doo5OTq6F4bCuZJb5MfDZdutToyIt+FaaLPjjh4QZMfR", - "xwA8gIcw9M1RgZ2zRvfsTvHfoNwEQY44fpIdqKElNOMftYABW7B74xSdlw5773DgJNscZGMH+MjQkR0w", - "TL+mUrOcVajrfAe7O1f9uhMkHeekAE1ZCQWJPlg1sIr7ExtC2h3zZqrgKNtbH/ye8S2xHB+m0wb+Gnao", - "c7+2bxMiU8dd6LKJUc39RDlBQH3EsxHB4yawpbkud0ZQ00vYkQ1IIKqe2RCGvj9FiyqLB0j6Z/bM6Lyz", - "Sd/oXnfxBQ4VLS8Va2Z1gv3wXXYUgxY6nC5QCVGOsJD1kJGEYFTsCKmE2XXmnj/5BzCeklpAOqaNrvlw", - "/d9TLTTjCsh/i5rklKPKVWsIMo2QKCigAGlmMCJYmNMFJzYYghJWYDVJ/PLwYXfhDx+6PWeKzGHj3wya", - "hl10PHyIdpzXQunW4boDe6g5bueJ6wMdV+bic1pIl6ccjnhyI4/ZydedwYO3y5wppRzhmuXfmgF0TuZ2", - "zNpjGhkX7YXjjvLltOODeuvGfb9gq7qk+i68VrCmZSbWICUr4CAndxMzwb9e0/JV6IbvISE3NJpDluMr", - "vpFjwaXpYx/+mXEYZ+YA26D/sQDBue11YTsdUDGbSFW2WkHBqIZyRyoJOdj3bkZyVGGpJ8RGwudLyheo", - "MEhRL1xwqx0HGX6trGlG1rw3RFKo0lueoZE7dQG4MDX/5NGIU0CNSte1kFsFZkPDfO6V65ibOdqDrscg", - "6SSbTgY1XoPUdaPxWuS0322OuAxa8l6En2bika4URJ2Rffr4irfFHCazub+Pyb4ZOgVlf+Io4rf5OBT0", - "a9TtcncHQo8diEioJCi8omIzlbJfxTx+o+1DBXdKw6pvybddfx44fm8G9UXBS8YhWwkOu2RaEsbhe/yY", - "PE54TQ50RoFlqG9XB2nB3wGrPc8YarwtfnG3uye067FS3wh5Vy5RO+Bo8X6EB/Kgu91NeVM/KS3LhGvR", - "veDsMgA1DcG6TBKqlMgZymznhZq6qGDrjXTPPdvofx3epdzB2euO2/GhxckB0EYMZUUoyUuGFmTBlZZ1", - "rq84RRtVtNREEJdXxoetls99k7SZNGHFdENdcYoBfMFylQzYmEPCTPMNgDdeqnqxAKU7us4c4Iq7VoyT", - "mjONc63MccnsealAYiTViW25ojsyNzShBfkNpCCzWrelf3ygrDQrS+fQM9MQMb/iVJMSqNLke8Yvtzic", - "d/r7I8tBb4S8DlhI3+4L4KCYytLBZt/arxjX75a/dDH+GO5uP/ug0yZjwsQss5Uk5X/f/49nb8+y/6HZ", - "b4+yL//t9N37px8ePOz9+OTD3//+f9o/ffbh7w/+419TO+VhTz2fdZCfv3Ca8fkLVH+iUP0u7B/N/r9i", - "PEsSWRzN0aEtch9TRTgCetA2juklXHG95YaQ1rRkheEtNyGH7g3TO4v2dHSoprURHWOYX+uRSsUtuAxJ", - "MJkOa7yxFNWPz0w/VEenpHt7judlXnO7lV76tu8wfXyZmE9DMgKbp+wZwZfqS+qDPN2fTz7/YjJtXpiH", - "75PpxH19l6BkVmxTeQQK2KZ0xfiRxD1FKrpToNPcA2FPhtLZ2I542BWsZiDVklUfn1MozWZpDuefLDmb", - "05afcxvgb84Pujh3znMi5h8fbi0BCqj0MpW/qCWoYatmNwE6YSeVFGvgU8JO4KRr8ymMvuiC+kqgcx+Y", - "KoUYow2Fc2AJzVNFhPV4IaMMKyn66TxvcJe/unN1yA2cgqs7Zyqi9963X1+SU8cw1T2b0sIOHSUhSKjS", - "7vFkKyDJcLP4TdkVv+IvYI7WB8GfXfGCano6o4rl6rRWIL+iJeU5nCwEeebfY76gml7xnqQ1mFgxejRN", - "qnpWspxcxwpJQ542WVZ/hKurt7RciKurd73YjL764KZK8hc7QWYEYVHrzKX6ySRsqEz5vlRI9YIj21xe", - "+2a1QraorYHUpxJy46d5Hq0q1U350F9+VZVm+REZKpfQwGwZUVqE92hGQHFPes3+/iDcxSDpxttVagWK", - "/LKi1VvG9TuSXdWPHn2GL/uaHAi/uCvf0OSugtHWlcGUFF2jCi7cqpUYq55VdJFysV1dvdVAK9x9lJdX", - "aOMoS4LdWq8O/QMDHKpZQHjiPLgBFo6jHwfj4i5sL5/WMb0E/IRb2H6Afav9it7P33i7DrzBp7VeZuZs", - "J1elDIn7nQnZ3hZGyPLRGIotUFt1ifFmQPIl5NcuYxmsKr2btrr7gB8naHrWwZTNZWdfGGI2JXRQzIDU", - 
"VUGdKE75rpvWRtkXFTjoG7iG3aVokjEdk8emnVZFDR1UpNRIujTEGh9bN0Z3811UmX9o6rKT4ONNTxbP", - "Al34PsMH2Yq8d3CIU0TRSvsxhAgqE4iwxD+Aghss1Ix3K9JPLY/xHLhma8igZAs2S6Xh/a++P8zDaqjS", - "ZR50UchhQEXYnBhVfmYvVqfeS8oXYK5nc6UKRUubVTUZtIH60BKo1DOgeq+dn8cJKTx0qFJu8OU1Wvim", - "ZgmwNfvNNFrsOGyMVoGGItvGRS+fDMefWcChuCE8vnujKZwM6roOdYmMg/5WDtgNaq0LzYvpDOGy31eA", - "KUvFxuyLgUK4bJs2qUt0v9SKLmBAd4m9dyPzYbQ8fjjIIYkkKYOIeVfU6EkCSZBt48ysOXmGwXwxhxjV", - "zE5App/JOoidzwiTaDuEzUoUYEPkqt17KlteVJsVeAi0NGsByRtR0IPRxkh8HJdU+eOI+VI9lx0lnf2O", - "aV/2paY7j2IJo6SoIfGcvw27HLSn97sEdT4rnU9FFyv9I9LKGd0Lny+ktkNwFE0LKGFhF24be0JpEiY1", - "G2TgeDWfI2/JUmGJkYE6EgDcHGA0l4eEWN8IGT1CiowjsDHwAQcmP4j4bPLFMUByl/CJ+rHxioj+hvTD", - "Phuob4RRUZnLlQ34G3PPAVwqikay6ERU4zCE8SkxbG5NS8PmnC7eDNLLkIYKRScfmgu9eTCkaOxxTdkr", - "/6g1WSHhJquJpVkPdFrU3gPxTGwz+0I5qYvMtjND78m3C/heOnUwbS66e4rMxBbDufBqsbHyB2AZhsOD", - "EdletkwhvWK/ITnLArNv2v1ybooKFZKMM7QGchkS9MZMPSBbDpHL/Si93I0A6JihmloNzixx0HzQFk/6", - "l3lzq02btKn+WVjq+A8doeQuDeCvbx9rJ4T7R5P4bzi5mD9RHyUTXt+ydJsMhbZzZbMOHpOgsEsOLSD2", - "YPV1Vw5MorUd69XGa4S1FCsxzLfvlOyjTUEJqARnLdE0u05FChhdHvAev/DdImMd7h7luwdRAKGEBVMa", - "GqeRjwv6FOZ4iumThZgPr05Xcm7W90aIcPlbtzl2bC3zo68AI/DnTCqdocctuQTT6BuFRqRvTNO0BNoO", - "UbTFBliR5rg47TXssoKVdZpe3bzfvTDT/hAuGlXP8BZj3AZozbA4RjJwec/UNrZ974Jf2gW/pHe23nGn", - "wTQ1E0tDLu05/iTnosPA9rGDBAGmiKO/a4Mo3cMgowfnfe4YSaNRTMvJPm9D7zAVfuyDUWr+2fvQzW9H", - "Sq4lSgOYfiEoFgsofHoz7w/jURK5UvBFVMWpqvblzDshNnUdZp7bk7TOheHDUBB+JO5njBewTUMfawUI", - "efOyDhPu4SQL4DZdSdoslERNHOKPLSJb3Uf2hXYfACSDoC87zuwmOtnuUthO3IASaOF0EgV+ffuPZX9D", - "HOqmQ+HTrcyn+48QDog0xXRU2KSfhmCAAdOqYsW243iyow4awehR1uUBaQtZixvsAAbaQdBJgmul0nah", - "1s7Afoo676nRymzstQssNvRNc/cAv6glejBakc39vO1BVxu59u9+utBC0gU4L1RmQbrVELicY9AQZUVX", - "RDMbTlKw+Rxi74u6ieegBVzPxl6MIN0EkaVdNDXj+ounKTI6QD0NjIdRlqaYBC0M+eQv+14uL9NHpqRw", - "JURbcwNXVfK5/newy36iZW2UDCZVE57r3E7ty/eIXV+vvoMdjnww6tUAdmBX0PL0BpAGU5b+8ElFCazv", - "qVaKf1QvW1t4xE6dpXfpjrbGFWUYJv7mlmkVLWgv5TYHowmSMLCM2Y2LdGyCOT3QRnyXlA9tAisOyyCR", - "vB9PxZQvYdm/ikIuikO0ewm09MSLy5l8mE5uFwmQus3ciAdw/TpcoEk8Y6Sp9Qy3AnuORDmtKinWtMxc", - "vMTQ5S/F2l3+2NyHV3xkTSZN2Zdfn7187cD/MJ3kJVCZBUvA4KqwXfWnWZUt47D/KrHZvp2h01qKos0P", - "GZnjGIsNZvbuGJt6RVGa+JnoKLqYi3k64P0g73OhPnaJe0J+oAoRP43P0wb8tIN86Jqy0jsbPbQDwem4", - "uHGVdZJcIR7g1sFCUczXrccafNxwdfV27fHYuAlswEzIr56IoFIjDORdJpI+hA0RH2B9uKRXmAEzrdhw", - "lx8TOZ6LMaJ3LqR9I2TrjnEPIJMxSr+f9GZkeYvHgZBwXyazK7OdECvf/bL4xRz6hw/jE/3w4ZT8UroP", - "EYD4+8z9jmrMw4dJJ2XSWmZ4ERrDOF3Bg/CYY3AjPq6ez2EzTg44W6+CACuGyTBQqA028ujeOOxtJHP4", - "LNwvBZRgfjoZYwuIN92iOwZmzAm6GHrwGGJZV7YypyKCd0O38a2tIS28U1zlB+vz7R8hXq/QT5qpkuXp", - "CBI+U4b7cBuzaRoTbDxgFDYj1mwgBJjXLBrLNBuTmrUDZDRHEpkqmR22wd1MuONdc/ZrDYQVRnmaM5B4", - "fXZuVK+D4Kg9uTdtfnMDW3dYM/xtzC173Fre5LTP1rLXTfgiuK78QlO1hY4MNI9n7DHuPUHijj78LYeP", - "5pbtSM9x6tKYCu2e0Tmf4MAcyYrrTGVzKX6D9IWNbqpEvg3vX2VoTf4NeCpAsMtSgu+6KRzfzH5ou8er", - "4EMbf2uV2y86FDe7yWWaPtXHbeRNdGuVzgrtkDyk68WBDO0XCAOsBY9XFHOL1VZ8kBPl9jzZZBOth2zp", - "Uxk/GT214zen0sHce2Zb0s2MpkrRGJXLwBRtbyscSwviO/sNUCGVgp2dRIHioS2zCesqkI2ro5/89obq", - "k512tOLU6ElIUbGGNLXREKUSiWFqvqHcFis3/Sy/cr0VWE+/6bUREtNNqnTkWAE5WyWtvldXb4u8HyVU", - "sAWzdbhrBVGhZzcQsTktkYpcseyQIMSh5nxOHk2javNuNwq2ZorNSsAWj22LGVV4XQave+hilgdcLxU2", - "fzKi+bLmhYRCL5VFrBIkqLgo5IX4xxnoDQAnj7Dd4y/JfYz8VGwNDwwWnRA0efb4S4zbsX88St2yro76", - "PpZdIM/2MeFpOsbQVzuGYZJu1HSQ91wC/AbDt8Oe02S7jjlL2NJdKIfP0opyuoD0M5DVAZhsX9xNjBro", - "4IVbpwMoLcWOMJ2eHzQ1/GngablhfxYMkovViumViw9UYmXoqanibCf1w2G9M1+WysPlP2KYbZVQkz+B", - "GkNXA0/DMBj6B3QFx2idEmpzjJasCYD3ZUHJuU9hjHW6Qnkuixszl1k6ypIYDz8nlWRco5ml1vPsb0Yt", - 
"ljQ37O9kCNxs9sXTRL2rdkkYfhzgHx3vEhTIdRr1coDsvczi+pL7XPBsZThK8aBJ5RCdysF44HTk51D4", - "6f6hx0q+ZpRskNzqFrnRiFPfivD4ngFvSYphPUfR49Er++iUWcs0edDa7NCPb146KWMlZKouQXPcncQh", - "QUsGa3yYl94kM+Yt90KWo3bhNtB/2jArL3JGYpk/y0lFIHKc7nuTb6T4n75vEqyj/9Y+eOzYAIVMWDud", - "3e4jBzUeZ3XruoltXBp+G8DcaLThKH2sDAT52yj+0OdThCV1QbJ73jI4Pv6FSKODoxz/8CEC/fDh1InB", - "vzxpf7bs/eHDdJ7jpMnN/Npg4TYaMfZN7eFXImEA88URQ9ySS8OQMEAOXVLmg2GCMzfUlLQL0X18KeJu", - "npGlg1rTp+Dq6i1+8XjAP7qI+MTMEjeweQwxfNjbhTiTJFOE71E4PSVfie1YwuncQZ54/gAoGkDJSPMc", - "rqRXaDQZFXAwLCWiUTPqDEphlMy49lBsz//z4NksfroH2zUri58aL2vnIpGU58tkMPLMdPzZyuitK9iy", - "ymQ5kyXlHMrkcFa3/dnrwAkt/Z9i7Dwrxke27Ra6tcvtLK4BvA2mB8pPaNDLdGkmiLHazs4Vsj+UC1EQ", - "nKepndEwx37F6FSlzsQzahx2VWsXHotPzl1eozkrMdoz7TfGlpmkeiBPF5ZV92WMzDhY5VxZM4MdHSSh", - "bIUXs6KrqgQ8mWuQdIFdBYdOd8zUhiNHhTGIqswnbIl5MQTRteREzOfRMoBrJqHcTUlFlbKDPDLLgi3O", - "PXn2+NGjpNkLsTNipRaLfpmvmqU8PsUm9our5WQrDhwF7GFYPzQUdczG9gnHla78tQalUzwVP9gHsugl", - "Nbe2LVsZSqyekG8xwZIh4lZGfTRX+lzF7byddVUKWkwxh/Ll12cviZ3V9rGV6m3ZzAVa69rkn3SvjM9j", - "6hNIDSToGT/O/owhZtVKZ6HKZSoFomnR1OFkndAetOPF2DkhL6wJNcSx2EkIZuKWKyiioppWiUfiMP/R", - "muZLtE22JKBhXjm+3qtnZ43nJnrkGIosIcM2cLuSr7bi65QIvQS5YQrw4T+soZ11MaQgdbZxn4WxvTxZ", - "c24p5eQIYTSUVDoW7R44K8n6oIIkZB3EH2mZsmWfjy1/e4G90k8+OrV0O15/n8PPZ/Im3zvnQk654CzH", - "igspSRozxI1zU44oTpH2L6qJO6GJw5Ws4BueHDssDtb09YzQIa7v8o++mk211GH/1LB1ld0WoJXjbFBM", - "fUFt5xBjXIErmmWIKOaTQiaCmpLvLUIAxZFkhMmfBiyc35hvPzj7N+beuGYcLV0ObU4/sy6rUjH0THPC", - "NFkIUG497UdD6q3pc4LJIAvYvjt5KRYsv2ALHMOG0Zll29DU/lBnPlDVBYaats9NW5eiP/zcCgezk55V", - "lZt0uNx6UpDUWz6I4FTckg8kiZAbxo9H20NueyPM8T41hAZrjFqDCu/hHmGEkt3tUb42uqWlKGxB7MPN", - "ZJ5exhNgvGTcu1DTF0SevBJwY/C8DvRTuaTa6g6jeNol0HLgnQU+hLY++NsO1S1QYFCCa/RzDG9jU218", - "gHGEBo3ET/mO+ENhqDsSJp7TMkRoJ2qHo1TlhCgb09qpJp5iHIZxZ/5lZgtdB18Jhu5Y9OPYm2goFeKs", - "LhagM1oUqQxaX+FXgl/9WzTYQl6HWlfhEWI7FXqf2txEueCqXu2Zyze45XRRef4ENYSPUIQdxoQ+sx3+", - "myr0NLwzLjb76Me/PhC7OC7/f/8xc0rqNTSdKbbIxmMC75Tbo6OZ+maE3vS/U0r3r4L/EI9+O1wu3qMU", - "f/vaXBxxfuBefLq9WkL6XowFF/jd51UKiSfbXAmvsl45M4x6wM1LbFkHeN8wCfialgMP7mNfib1frf9g", - "6Nl9PpglgmqXBUxTspcFDWZWsrHCHe9L34U4FB9sw4Pvzmvh1roXocO+u+9anjobI9Ywi0EP3c2caM0G", - "H+tFc3UN+iZNWpYiH33q3TBnptNw1lCxWrl02okYtvVKFDGdx9FQAGmmZcNzEyH/qHsmv6FilPwiN+nR", - "WjaLY02lFo1uCVP7EtCD54GxU8cTRSZSh1nyDSuxHtJ/Xrz6YTK8kdEO9LfUZfFNGpWHNiY8luqSx0K0", - "8FEP204EL1NKxHSiBozcmKsnfRpc0dvkh2+s0W4MSDalzTGtX44dvEcAC5FKUt9POTJpNsKjPaKDZmMt", - "L4npIkUP362HUqz4Oj/4Pa4n5MLzpq6MBKyZqH1YpX/c4G099leXwqtVN2iAsSWfDH1qd+Sg8/TS1b+2", - "y3TGtu9+suEVBLiWuz+AK7W36d2iVAk11tqdmyYklE4dVUq1Je6OqYGVKrfklD5vBLcyQ4uWeuWremT1", - "Yoyc38PHh+nkvDhKEk6V7JrYUVL36Uu2WGqs+PEPoAXI1wcqmjRVTPCIVUKxpoJxaQZzKaSXONzJ2FdE", - "hoBZXJGlP5aPLl9DrrFsdRM1KwGOqc9iJvPe3L8qmwzfBeGxlStosq+KSb9W9QHhvZd4LUoeaOv8noyv", - "2XEW3kbYp50bqpp0T52cC6Nffs/nkGNW9b2J7v5rCTxKojb1BleEZR7lvWPhgSLWBTjendAAtC8P3V54", - "ovpctwZnKA/GNezuKdKihmTh4fA69yaJxxED1rftc9APeYhcOChTgTIQCz7W36Vyb4rrDOaMj9I23nAu", - "T5Lm4mhSOe6Z0suPN5jLdD0qbSzK+kO58Po114cNCy+wxL1yka80JC6PzW/kvF94a+MSn2NawuAU9SnQ", - "QfnffA5SO0vJrl39EcSKdUFvqCx8iztJKmfvJpYGeh5mZs3LrH70UqKUCz5yzEthxIhs6KVo+zFUiCS+", - "p2zId5MADOGag5RQBF9nKRRkWviXXPvg2IcKG9d+IySowfJpFrjB1PlvmtoAWEaSYqp86sLZ4wUSCStq", - "oJNRBv/hOfch+7n97pN4+DKCB03HgV4P17P2b/KY6iExpvo5cbfl4eQgN7EiM85BZt6l3E3nz9sZHTFv", - "b1Hn9oKOD0awtI/OvbWHlSQNsHl/lR0dIcp+cQ27U6sE+ULgfgdjoK3kZEGPEhZ3NvlO7eoqBffiTsD7", - "tHkoKyHKbMCLed6vQdCl+GuWXwPmEA1vV4zsd699Nswk5D46z0KYyma58zn3qwo4FA9OCDnj9rWgj1hp", - "lyftTM7v6X3zb3HWorZlQZy1/OSKp59dYcEOeUtu5ofZz8MUGFZ3y6nsIAcy3G/5UCzdBot7tKsAn4zV", - 
"yvsxJB2pJCIqC0VKJrmwrujneNBThiPMbRIl4cEIBUqcC5uoUqSC9G+Sf8UMlcZUPBkCpIGPSQMSoHCD", - "JxHgwvMOpBR1n33STDEnEprokJtmD3UJOS1rVkMafXfmMEub382FhHhGjD61mYLDizZMw4v/mTEtqdzd", - "JMdnG1Up68kglg/GWYYQy2YhTZhlH4dlKTYZMqss1MlJqbamnWpfxr5oY9PPnOoZRAGbVDlBbUeWtCC5", - "kBLyuEf6IbeFaiUkZKXA+M1UaMlcG7l7ha83OSnFgogqFwXYelNpChqaq+acotgEUbhcEgWWdjANgO0T", - "0fHIKc2dah3EGYpaB8sz+M2/NH1sSoomK5xddGaDFAaeIoByWeAchmzjPrxIODafUdeWmObNc7ZFugGZ", - "OvJzomUNU+JadGvsu4NPJZAVU8qCEmhpw8oSM0KwbRRSESKS0qgdEHvPMV56zTCorp0dxErDlbnzQsqU", - "mAdcxPnMiF5KUS+WUYL6AKdXeWXtFOJ4lB9VjXGP+DTUTPGUrITSTtO0IzVLbmJJ7+eCaynKsm2UsiL6", - "wlnav6fbszzXL4W4ntH8+gHqtVzosNJi6hMndKN+m5lkJzVh+wLOkAbU4VTfth3GwDqiHc0gOyyuZxQ/", - "ZGWOwHx3mIMetrmf9RfWXVebmabVmDNOqBYrlqfP1J8rjHYw+DXFopLJCG1tVps+BpvhYY8vqxA1hSyy", - "j2bgNFlc8ow4RuCiR5DdmP+iBN4dl8zBMZqBi7LPXJwUleWDsl4HAITU5jTQtbQFXWNJLHAVsbA5UDD2", - "pQvoyFsFQwxvB5sZ4c6B0nAroHphzQHA+9b4MLVJI22I9Exs/fcHTVbJGwH/YT+Vt5jHUOzmRUNa0kZv", - "+gxUAxwhnSJ/b6DjJeazmI0NdwzFt0fe8BEAwwGQLRhGhUEeC8acshKKLFW79TzYqKaRpu3eXLZr1+O9", - "bDl5TmtfOtWMXUtwGZGsiC/b/q+KGlISoXnfkswL2IJ9sPUbSGFrok4j/wuUtmRqxxggqqyENbTiQl2a", - "phpFTbYG31eFzqQAqNAb2bWRpQIe47u8Yzhxa8+ikLkx2E1aUixi7U6RA2aSpFFnyzN7TNTYo2QgWrOi", - "pi38qWNFjrYZ0BzlBKp6OkLm9cix0/xoR3jjBzjz/VOijMfEu3F86GgWlEbdPgZ0MAC6VkOnnqfjn+Mc", - "ZMHBgrMVwRFrSbzhG6qiGz5skOyTfKNujdwnJniE2K+3kKNU4/QdKJzGM+CkcOmMkNo5QGG1AtMlYW1f", - "AidcRCVqN1QFVaVJjup/sBNjI8adNn0Dp3ITpnz7nSU4GFGdLImDioQMdHpz8/wnOYl7D+LgeCkaUeDe", - "9e6xf3nqdmoHNhB1WRBu9tPI/ljk1d1ijotPyaz2A5Wl2Nias7Ee+gK8H9RSn3cBObGchWvZh2NPXd7e", - "rqmDRQ9RVnRHhMR/jNb5a01LNt8hn7Hg+25ELakhIed4tREBLrzbTLxfvJp6wLy1Rfip7LrZ2DGj4XZm", - "lAhoc5H74mCCrOg1xNuAwQ6Wf+baME5Vz9ByYa7sznb2seAW73MvrWgRa/qYAXbX4g4+J7jp/f81j1zj", - "qXzixqqkua8w7EqctfkMVhH3xKWXsNr/CrrP1zwJhMrkDdFKnzajuIHJ9EjWlXpaNFS+qQV2r2Jzr3LV", - "rZYx0vLbqdGz5/34qKXc9S6MjbrpAR3XeT0Eflz29uPgP5mceWgZY8D/o+B9oNB1DK+taf0RsNxKrZOA", - "1VqrZ2KbSZirQwEm1lxt1HnZJOXxJlbGcwlU2Yib81dO8WxyDzNuFGEbExp8mmGUAuaMN8yS8arWCT0G", - "UxDzXYSw2OiPaB1woQ1JCUaYXNPy1RqkZMXQxpnTYUvCxiVmvKPD9U2YMMKd2h+AqUaHw4fXjRk9bmYu", - "cFvEzoZrKk15QWURN2ec5CDNvU82dKdu7lEKzoFDPiUaSTPtdCCRdwlJ2wJS7pxT+Jb+ngAgvUPHzwiH", - "DcYFJ5w11rSjxYB/pg/Dn8Jhs6LbrBQLfB48cCBc0mn08FkVUHA0g1v5bNy6/TyK/Qb7p8F6G44RaYGz", - "jpli/7l/hVuJauSPnOm9J9/aKLvvtW3crT2YHql80QT/W2Lpn8fUE3uXVSl+Zu+FTf9UxdMeRJsIA/6h", - "tl18YBcxDMLlZ4iN4OPLJbYjLVIP+a1lIEOLgdoT3g+qCWWnuQvP6pvSeqYGi5SpS4NwpKXN2uf9vTQA", - "HppClDvr7WlDyIwZ55gak/sTH2SVqLJ8TMynLclTODeBg7QN4wB9RE6AgXWH8BgVilS1Epq1qlUdW2Zz", - "sFrWIW9Xle9T+ofMRAMcve2CEHPkZXiErXEMX/IEY8q0+8asbQYLTIJQIiGvJZqJN3R3uGzhQCr4i3+c", - "ff74yc9PPv+CmAakYAtQTTmBTtm/Ji6Q8a7d5+NGAvaWp9Ob4NOKWMR5/6N/VBU2xZ01y21Vkyu4V/Tw", - "GPty4gJIvfTt14G70V7hOE1o/x9ru1KLvPMdS6Hg998zKcoyXc4lyFUJB0pqtyIXitFAKpCKKW0YYdsD", - "ynQTEa2WaB7EpN5rmyZK8By8/dhRAdMDIVephQwF1CI/w6QNzmtEYFuVjldZT8++dTk9zVroUGjEqJgZ", - "kEpUTrRnc5KCCF8QyehlrTN8okU8ipENzNZGy6YI0UWep0kvLri/n9u3i0HrNKc3m5gQL/yhvAFpDvkn", - "hhOS3ISTNKb9Pwz/SGRYuTOuEZb7e/CKpH6w583xWS/uIWQXGQVaP9tGgjwQgIHXtq13ktFDsSjDuLRe", - "AvQneAdyV/z4vnEsH3wWgpD4DgfAi5/PNu3CSwYHzidO1f19QEq0lHdDlNBa/qEXuZ71hosk2iJnNNEa", - "lGVLoi8WRs+t1fPwinlAK+k9dpZCaGI007JMPJK2dhw8UzHhGJVArmn58bnGN0wqfYb4gOLN8NOo+KVs", - "jGSLSnWzBJwv6ai5o1exdzc1f40Ps/8LzB4l7zk3lHPC924zNO7Q0oZXz4M3GjjZ4Jg2yOrxF2TmquhU", - "EnKmus79jRdOwsNQkGzuAlphqw+8RD20zp+EvgUZz30kDvkhcm8Fn72DsDmin5ipDJzcJJWnqK9HFgn8", - "pXhUXNz7wHVxy4orN8vnFGVmPDKfU79s+djl2dQm5tKpFfTXOfq2buE2cVE3axubjGx04Zarq7d6NiaH", - "WLrIiumOSczupNrKUbVWfof0ZRZHbgw3b4pifhpKaG2TNg8k3e/sR83KgwErrRIKH6aThc1ghEUCfnZF", - 
"oT7uXeohGEgj5pZ+m3QxFjGJtbYmj6aKMj6NqIvguiWS2eOrxryWTO+wILg3oLGfk/mYvg25PVxumOBL", - "c3efFtfAfbxHkwmkVv52/VbQEu8j6+Lj5hYS5Qn52qbudwfl7/dm/w6f/e1p8eizx/8++9ujzx/l8PTz", - "Lx89ol8+pY+//OwxPPnb508fweP5F1/OnhRPnj6ZPX3y9IvPv8w/e/p49vSLL//9nuFDBmQLqK/Z8Wzy", - "v7KzciGys9fn2aUBtsEJrdh3YPYGdeU55i1DpOZ4EmFFWTl55n/6//0JO8nFqhne/zpxhdcmS60r9ez0", - "dLPZnMRdThf49D/Tos6Xp34eTHHXklden4cYfRuHgzvaWI9xU0PyL/PtzdcXl+Ts9flJQzCTZ5NHJ49O", - "Hrua9ZxWbPJs8hn+hKdnift+iolzT5WriXHavNVK+u3eYMi6F87lAgpyP7y6+bfguVUP/OOduUtO909l", - "iTGs4rxA4nLFhydYThGDsRCsJ48e+b1wkk504Zzi649n7ycqFLTvChM9pF42ACcha4q59hf9I7/mYsMJ", - "Zvm0B6herajc2RW0sBENjttEFwqN7JKtMW2b6d3FeVW5SiRDKMfyde1T7jsjgYRSFuaE2QoXrp6ISqG8", - "XwXlltjfm/W1N1lid7DRawOzT58TMqU6h5DDGfqMLcLCGbFmhx6ip5OqTqDza3xYo/bhbBpV17DQiLII", - "GO9h9HX9/whGDekuQlZQ89cSaImJtcwfK0Oouf8kgRY793+1oYsFyBO3TvPT+smp10JO37uMKR/2fTuN", - "I8JO37cSyxQHevqIp0NNTt/7Wvj7B2zVQXexplGHkYDua3Y6w/p3Y5tCvLrhpSDNq9P3qIAP/n7qrKjp", - "j2gIsTfsqU/QNNDSpuJIf2yh8L3emoXsH860icbLqc6XdXX6Hv+DZButyKbsPtVbfoqBI6fvW4hwn3uI", - "aP/edI9bYDZaD5yYzxUy7X2fT9/bf6OJYFuBZEYLxWxa7leb9fAU68ju+j/veJ78sb+OVsa3A5c5ZhNU", - "PpqqnSgueX10s8+p2zK7cblsujnv+gJ2X5Lat7IP08nTO+TK7RTgCWC+ogXxiQ1w7scfb+5zbqO6jWhp", - "RWCE4OnHg6C1feQ72JEfhCbfoK3mw3Ty+cfciXNuNEdaeoHuhqLfuOPTvUaN7B2a8YUVVIRNdtE+amdF", - "0SN6q0OC0l8JvF2HMLZSi8p5aRukNSo042YJ03Ficz99pE1y5gUJLgqYxMqtljV8uCVP6MR3UanPEyZl", - "9I3gQw9ntW2BmsyF2I1+sSMn0jAfIOHzF37S5n3EXzzlL54SeMrnjz77eNNfgFyzHMglrCohqWTljvzI", - "w8ObG/O4s6JIJpBtH/2DPG462Wa5KGABPHMMLJuJYudKbU1aE1yDtZb1BJlTb11qaQwD3NPbrVLSShMO", - "Pnn2NhUW4Z43VvWsZDmxlnU0LVVULyPLT8jo2WZ+0z2WiWmiHAQpWFmHbA56I9xr6f6FEllbtCDqV4kX", - "Dx5Epndkw3ghNg9OPLi/1oB83sHrp5kkAIxifPuVzxqHoQGwB9bQfOhpHIOdPZO/pDebu6THTv3u97ZB", - "hYx5/3nx6ofoNaK1NNiAIHwLZ0kXHy5IgQH5G4oRobZC6nNrAyp3+KpWU12rVnHGk7/uob94/+15/7ch", - "hbIty6ix3lqfJUV3wckogTfJ29+3/nR2i4kNx04lPja/E0oWWFK3f0HNduT8RU97td26V8JXO2zauRUS", - "/L4L4lGMf4C97BNpzEIWQoegdLuov4TMv4TMWymuow/PGN01aVmyha5pTx+b+prVrYc/mLwcQ7x6oIyx", - "P33S43snG9+3baVsWTbJOhQk+mCzU3TR/BeL+ItF3I5FfAuJw4in1jGNBNEdZ+sayzAwlVLRCrH0Uodv", - "XpdURg+CD5mwz3DEtCr4u3CNj22wS+LK2uvw3QCzAbOJDbxbG95fLO8vlvfnYXlnhxlNWzC5tdXrGnYr", - "WgVbl1rWuhCbyEOOsNhg976Pzyr+3b9PN5TpbC6kK9lD5xpkv7MGWp66wvudX5tat70vWMA3+jFORpf8", - "9ZS2nZZtx7lhvUMde1711FfnOB5o5HMo+M9NzF4cA4dsP0S/vX1nWLYCufY3QhPS9ez0FJPqLIXSp5MP", - "0/edcK/447tAHu/DPeLI5APShZBswTgtMxcbkTVhW09OHk0+/N8AAAD//0+OWaNUIQEA", + "H4sIAAAAAAAC/+y9a3MbN5Mo/FdQ3K3yZUlKduxs4ree2leJc9HGiV2Wkj27lk8CzjRJPBoCEwAjkfHx", + "fz+FxmUwMxhySNF2UiefbHFwaTQajUZf340ysSoFB67V6Nm7UUklXYEGiX/RPJeg8L85qEyyUjPBR89G", + "Z5zQLBMV16SsZgXLyDVspqPxiJmvJdXL0XjE6QpGz8Ig45GE3ysmIR8907KC8UhlS1hRO63WIE3fN2eT", + "/zmdfPn23dMv3o/GI70pzRhKS8YXo/FoPVmIiftxRhXL1PTMjf9+11dalgXLqFnChOXpRdVNCMuBazZn", + "IPsW1hxv2/pWjLNVtRo9Ow1LYlzDAmTPmsrynOew7ltU9JkqBbp3PebjgJX4MY66BjPo1lU0GmRUZ8tS", + "MK4TKyH4ldjPySVE3bctYi7kiup2+4j8kPYejR+dvv+XQIqPxk8/SxMjLRZCUp5Pwrhfh3HJhW33fo+G", + "/msbAV8LPmeLSoIit0vQS5BEL4FIUKXgCoiY/RMyTZgi/3nx8iciJPkRlKILeEWzawI8EznkU3I+J1xo", + "Ukpxw3LIxySHOa0KrYgW2DPQx+8VyE2NXQdXjEnghhbejP6pBB+NRyu1KGl2PXrbRtP79+NRwVYssaof", + "6dpQFOHVagaSiLlZkAdHgq4k7wPIjhjDs5UkK8b150/adFj/uqLrLniXsuIZ1ZBHAGpJuaKZaYFQ5kyV", + "Bd0gald0/Y/TsQNcEVoUpASeM74ges1V31LM3EdbCId1AtGXSyDmCynpAiI8T8nPCpCS8KsW18ADdZDZ", + "Bj+VEm6YqFTo1LMOnDqxkIgOpKh4ilER/ODQ3MOjbN9jMqjXOOL77d8UW7hPbagv2OJyUwKZs8Lcl+Sf", + "ldKBgCuF274EokrIDO/NiRnGIF+xBae6kvDsij80f5EJudCU51Tm5peV/enHqtDsgi3MT4X96YVYsOyC", + "LXp2IMCaOqcKu63sP2a89FHV6+Rd8kKI66qMF5TFZ8HQyvnzPsqwY/aTRppBngW5AffHjXW5Pn/ex1K3", + 
"99DrsJE9QPbirqSm4TVsJBhoaTbHf9ZzJC06l3+MrHhheutynkKtIX/HrlGgOrPy01ktRLx2n83XTHAN", + "9iqMxIwTZLbP3sWSkxQlSM3soLQsJ4XIaDFRmmoc6V8lzEfPRv9yUgt6J7a7Ookmf2F6XWAncxlLMIxv", + "QstyjzFeGeERRa2eg274kD3qcyHJ7ZJlS6KXTBHG7Sai3GU4TQE3lOvpaK+T/D7mDm8cEPVW2EvSbkWL", + "AfXuBbENZ6CQ9p3Qe081JEXEOEGME8pzsijELPxw/6wsa+Ti97OytKgaEzYnwPA+hzVTWj1AzND6kMXz", + "nD+fku/isW9ZURDBiw2Zgbt3IDdjWr7t+LgTwA1icQ31iPcUwZ0Wcmp2zaPByGXHIEaUKpeiMFfgTjIy", + "jb93bWMKNL8P6vyXp74Y7f10hxK9QypSk/2lfriR+y2i6tIU9jDUdNbuexhFmVG20JI6rxF8bLrCX5iG", + "ldpJJBFEEaG57aFS0o2XoCYoCXUp6GcFlnhKumAcoR0bgZyTFb22+yEQ74YQQAVJ25KZFa9umV7WIldA", + "/bTzvvhrE3Jqz4nZcMqMbEwKprQRhnAzFVlCgQInDYqFmIoOIpoBtLBlEQHmW0lLS+bui5XjGCc0vL8s", + "rHe8yQdeskmYY7VFjXeE6mBmvpPhJiGxCocmDF8VIrv+nqrlEQ7/zI/VPRY4DVkCzUGSJVXLxJlq0XY9", + "2hD6Ng2RZsksmmoalvhCLNQRlliIfbhaWX5Ni8JM3eVmrdXiwIMOclEQ05jAimnzAGYcT8CC3QC3rGdK", + "vqHZ0ggTJKNFMa71EqKcFHADBRGSMM5BjoleUl0ffhzZP5TwHCkwfFADiVbjdBpTcrkECXMh8aEqgawo", + "Xk4r8zwqi2afwFwVXUFLdsLLUlTawBi9XM6f+9XBDXDkSWFoBD+sER/88eBTM7f7hDNzYRdHJaCihfGs", + "qPIaf4FfNIA2reurltdTCJmjoodq8xuTJBPSDmEvfze5+Q9QWXe21Hm/lDBxQ0h6A1LRwqyutagHgXyP", + "dTp3nMycahqdTEeF6Red5RzYD4VCkAntxkv8Dy2I+WwEHENJNfUwlFNQpgn7gXe2QZWdyTQwfEsLsrJ6", + "M1LS7HovKL+uJ0+zmUEn7xurqnNb6BYRduhyzXJ1rG3Cwfr2qnlCrM7Hs6OOmLKV6URzDUHApSiJZR8t", + "ECynwNEsQsT66NfaV2Kdgukrse5caWINR9kJM85gZv+VWD93kAm5G/M49hCkmwVyugKFt1vDDGJmqVXV", + "ZzMhD5MmOqaJWgFPqBk1EqbGLSRh06qcuLOZUI/bBq2BSFAvbRcC2sOnMNbAwoWmHwALyox6DCw0Bzo2", + "FsSqZAUcgfSXSSFuRhV89phcfH/29NHjXx8//dyQZCnFQtIVmW00KHLf6fmI0psCHiQfTihdpEf//Ik3", + "iDTHTY2jRCUzWNGyO5Q1tNiHsW1GTLsu1ppoxlUHAAdxRDBXm0U7eW37vR+PnsOsWlyA1uYR/EqK+dG5", + "YWeGFHTY6FUpjWChmkYpJy2d5KbJCay1pCcltgSeW9ObWQdT5g24mh2FqPo2Pq9nyYnDaA47D8W+21RP", + "s4m3Sm5kdQzNB0gpZPIKLqXQIhPFxMh5TCR0F69cC+Ja+O0q279baMktVcTMjQawiuc9Kgq95sPvLzv0", + "5ZrXuNl6g9n1Jlbn5h2yL03k16+QEuRErzlB6mxoTuZSrAglOXZEWeM70Fb+Yiu40HRVvpzPj6MjFThQ", + "QsXDVqDMTMS2MNKPgkzwXO3U5nhrYAuZbqohOGtjy9uydD9UDk0XG56hGukYZ7lf++VMfURteBapwgyM", + "BeSLBq1+UJVXH6YsFPdUAlKDqRf4GS0Cz6HQ9FshL2tx9zspqvLo7Lw959DlULcYZ3PITV+vUWZ8UUBD", + "Ul8Y2KepNX6SBX0dlA52DQg9EusLtljq6H35SooPcIcmZ0kBih+scqkwfboqpp9EbpiPrtQRRM96sJoj", + "GrqN+SCdiUoTSrjIATe/UmmhtMdrxxzUrJISuI7lXNRnMEVmYKgro5VZbVUSLVL3S91xQjN7QieIGtXj", + "5hBcNWwrO92S3gChhQSab8gMgBMxM4uuvRxwkVSR0sjOTqxzIvFQftsAtpQiA6Ugnzh99k54fTt7/+gt", + "yMPV4CrCLEQJMqfyw6zg+mYn8NewmdzQojLi+Q+/qAd/lkVooWmxYwuwTWoj2uq77lLuANM2Im5DFJOy", + "1Rbak2BEbMN0CtDQh+y7Y693+9tgdojgAyHwBiR61HzQo+Un+QBEGeD/wAfrgyyhKidGDOxVPxjJ1ew3", + "p1x42XDHDGGCgio92XWlmEYNvYlZasTFU7cIDtwjT76gSqMYSBjPUX9rr0Kcx8qWZorRnk5lOGXva8xM", + "+ot/iHWnzcz1zlWlwqtMVWUppIY8tTy0WffO9ROsw1xiHo0dnn5akErBrpH7EBiN7/DoFAH4B9XBQu1s", + "3t3FodeBEV82+2K5AV+No20wXvhWEeJjp9oeGJmq98CSG1MtepsJUQBFlanSoiwNh9KTiod+fRi8sK3P", + "9M912y5JWjOQlVRyAQpNTK69g/zWIl2hrWtJFXFweP8EVHhZF7kuzOZYTxTjGUy2nRd8BJtW8cE56LhX", + "5ULSHCY5FHST8Lawn4n9vCdh+LGRQGr9gdAwmaE1MU0j9Znw/qaHzSpwKpUSvAl+IZk55+YZVZOa6334", + "pDngtCm+6Yj1XpgFwUjSgR8PkWXpKTEi3v03QhuyckSHq3G30h3X0oO9MOsHQSCOO6kVAe3Z/xuUmzsI", + "YEedfwOqb+H11Mdado/6H+/2xoXZuspat03yiujlyzsYYx8P6rFFvKJSs4yV+Fz9ATZHf723J0j6SpAc", + "NGUF5CT6YF/yZdyfWDfk9piHveYHqVu74Hf0rYnleM+sJvDXsEG1ySsb0RBpq46hjkiMai5cygkC6r3m", + "zYsnbgJrmuliYwRbvYQNuQUJRFUz67XSNaFpUU7iAdIxU/0zOoN80hy+1UPgAoeKlpfyPLSvre3wXbae", + "XA10uFdWKUSR0H+2T3wHGUkIBrkLkVKYXWe0KDZEh7AZT0kNIN0Fgd4YQZ65pxpoxhWQ/xYVySjHF26l", + "IQhpQqLkg8KymcGIm2FO56paYwgKWIF9zeOXhw/bC3/40O05U2QOt9blhmPDNjoePkRV3CuhdONwHUHb", + "bY7beeLSQVuluWTdq63NU3Y7ubmRh+zkq9bgwcBpzpRSjnDN8u/MAFoncz1k7TGNDHPww3EHme+aLmGd", + 
"deO+X7BVVVB9DEMl3NBiIm5ASpbDTk7uJmaCf3NDi5eh2/vxCNaQGRrNYJJhlODAseDS9LGBhWYcxpk5", + "wDZwZChAcG57XdhOO17atd8yW60gZ1RDsSGlhAxslJyRUlVY6pTYkIlsSfkCX0BSVAvn6mzHQYZfKasJ", + "kxXvDLGvKKbXfIImDJUMU0OzpY+2NEIYUPOybds/7GPtlgZQ7GU06NKOtqdtD0qaTMej3oe/wfdN/fC3", + "eGuGjB5qTGzIhxHSamgGWs8Qn0ZW6iIx3kZz+AwxfBgrTT10CsruxJFTeP2xzy/8oirLYnMEIckORCSU", + "EhReabEaUNmvYk5+ZJkUZ8VChDtPbZSGVdd4Y7v+2nNcXx/yAha8YBwmK8Eh8aR/iV9/xI+D1Y72Gu4Z", + "EQWivQZsP3waSGgtoDn5EJK+6yYhybTPftvSqb4V8lhWdjvg4DfFAMv1TrcON+Wh9nVaFAmTtFU/dLiI", + "GgencCYJVUpkDAXF81yNnfe5tWJbt/YW+l+F0KgjHOD2uC3baxSGZRX5UJSEkqxgqOYXXGlZZfqKU9T0", + "RUtNOAt65UC/Wvhr3ySth06oid1QV5yio2jQ/yUdg+aQ0EN9C+C1w6paLEDp1gNrDnDFXSvGScWZxrlW", + "5rhM7HkpQaLH3tS2XNENmRua0IL8AVKQWaWbT45VpTRRmhWFMwSbaYiYX3GqSQFUafIj45drHM77kfgj", + "y0HfCnkdsDAdzrgWwEExNUl7On5nv2JQicPJ0gWYYKyF/ew9nuvcECOz9kbSiv99/z+evTmb/A+d/HE6", + "+fLfTt6+e/L+wcPOj4/f/+Mf/6f502fv//HgP/41tX0e9lQwuIP8/Ll7o58/x4dYFCfShv3PYJBZMT5J", + "EmXsUNSiRXIf82U4gnvQ1PvpJVxxveaG8G5owXLDi45GPu1rqnOg7RFrUVlj41pqPI+APZ9Dd2BVJMGp", + "Wvz1g8hz7Qm2OtzEW96KMXCcUR0dQDdwCq72nCm32nvffXNJThwhqHtILG7oKLVA4gXjIhgbXj5ml+LA", + "rit+xZ/DHN+Dgj+74jnV9MSeppNKgfyKFpRnMF0I8swHRT6nml7xzjXUm0AqCmqOMkilOAVdpddydfWG", + "FgtxdfW244fQla3cVDEXdeesqybzU06M3CAqPXFJXCYSbqlM2UJ8ig8XDY29t8JhZRJRWSWWTxLjxp8O", + "hbIsVTvZQxdFZVkYFEWkqly+ArOtRGkRAscMM3ext4YGfhLOqUTSW//krRQo8tuKlm8Y12/J5Ko6Pf0M", + "Q/DqFAe/OR5o6HZTwuCHb28yivZ7Fxdu5XJ0Kp+UdJGymVxdvdFAS6QQFDhW+NIsCoLdGuGBPhIAh6oX", + "EGKR99gSC9necb243Avby6f1Si8KP+GmNmOn77SDUVT8wRu4I7KeVno5MRwhuSpljoHfK59ggC7MleM9", + "CBRb4ANALUVllgwkW0J27TJbwarUm3Gju3d0cXexZzhMoc7IBQfOmcFfRrkZsCpz6gQZyjftFDfKBkPg", + "oK/hGjaXwnafDswOFmWji1KsqL6ji7Qb3bWGfOOD7MZob77zu/Ixoi4dCcZderJ4FujC9+k/2lYAOMKx", + "ThFFI89HHyKoTCDCEn8PCg5YqBnvTqSfWh7jGXDNbmACBVuwWZFg0//VtWt4WA1VSsiA3fio3jCgImxO", + "zOtoZq9j92KSlC/AXOrmIhaKFui0P00a+lE6XAKVegZUb9XX8jjNhIcOBfJbDJpGpcnYLAHWZr+ZRiUI", + "h1vzwMO3t23jHImnB7lT2TVBfiCovnsdJD095BHhEJ7IZ+fv+7An4b3g/NNi6kSQ7feVweFCiluzmwZA", + "4VM3YoKX6J6qFF3A0OuoYSoamBKjYQHCQXZJP0l5R8zbYk1Hxhi4CNt9YvCS5A5gvhj2gGaAloujn9ua", + "EJ1V4SUvNh6pswIF6uAgakmHyoadjS/2AzbNxkDyWlj1gDWxFh/9JVX+6OfjiKMfKC1+mlQy2/LnnUfe", + "d1R3s+P5a7rN2sdWnzMDIrjp4bPo+dR5Pl/eaLxX7rvxyIU4pPZOcJSicyhgYXFiG3s6q/Mz1btp4Hg5", + "nyPTm6Qc+SJlZCSZuDnAPMQeEmI15mTwCKlTEIGNlnUcmPwk4sPOF/sAyV1+KerHxrsr+hvSwYLWG99I", + "yaI0tz7rsVplnqW49Ba1yNNyccZhCONjYjjpDS0MJ3WBp/UgnVxt+PZpZWZzvh0P+t5EAw+aWyNKJ3ut", + "0sozh6wvFrz9MtKvgr3WMBPriY2MTj6tZuuZORPJeAWM004dXps5754iM7FGnyK84ayD+97Q9UPmAYvc", + "QNZMIZVjvz6x0YK3HyDbBfkUNSskPadXC2TXJ8keBkyPON1HdvejFHpHAqmlwKzTgDuNzk49S1Pa6koi", + "9XU7DtlhQ5haitX0Hc7kTvZgtKs8bea6+75Od9ifHM2f1Y+S5K+rlLtLXkbbubS5FvdJy9gmhwYQW7D6", + "qi3EJtHadFxq4jXCWoolGUbfNXZ10aagANQETBpy9eQ6ZZa+unqjAGWGC98t0nPi7lG+eRB5w0lYMKWh", + "Ni54J5ePb/tBdaJ5bIl5/+p0Kedmfa+FCIKGNcdix8YyP/oK0HV9zqTSE7TMJJdgGn2rUJP2rWmaFoSb", + "/nZMWVPP3nIwQnQNm0nOiipNyg6kH54biH4KN5eqZnhRMm69jWaYCj/poLuHbRLhsY7dWxH0wiLoBf0Y", + "+Bl2sExTA5M0lNec/i9yxFq8cBtnSdByipi6G9qL0i28Noql7zLaSIiO3C6m22w+nXOZ+7F3emP5iP4+", + "IcKOlFxLlBExHUAoFgvIfaY3FxRqs165fHqF4Is6l6D5fUv6wCmxWfwwCd+W/H3OPR36nNMb5USwKkYS", + "+vgxg5DX0XWYexAnWQC3mVtG+9cbKZKIix3jsUWkGf24vL3jNp90Hb5suQvXPr12D8Nm4/YUQHP3rFLg", + "17f90Ha3y6Fu3Od03EgRu/2A4YBIcUyrSIDpEE0P56ZlyfJ1y/BnR50eQBIDxb1uJvgWzpAtucF24Kfp", + "WLyjVs89cztie2fsOMFn/ol5ZFp/ZueRa84GzVy2gbySaE1qeAt38+mHh+bAtf/wy4UWki7AWQQnFqQ7", + "DYHL2QcNUUp6RTSzDtI5m88htoSpQ6w4DeA69o58AGH3kGDXXBbellvps0tkO2irXsFuhKbpKUEpfT4X", + "l117pH94RLq1cNlEG3eAUTGZUOAH2Ex+oUVlXkJMqto31RkIm9f6HjRxs/oBNjjyTpdPA9iOXUFV3GtA", + 
"Ck1ZV8InFWUJv6ca1RfwDdzYwj126iy9S0faGldKo/9o1DdUo55Ecykf7tjULjIG0iF7dZH2OjFnC5rb", + "0ib0XVvE8t2yT/QEiadi6L1xyCUXMm3s9C4DWnjCx8WO3o9Hd/P3SN2TbsQdO/EqXM3JXUBvTGv/bzh9", + "7bkhtCyluKHFxPnJ9AkdUtw4oQObe7eaj/y+Sp+Ky2/OXrxy4L8fj7ICqJwEVUfvqrBd+ZdZlS3Bsf0a", + "sunYnW7XqsKizQ8ps2NPmltMvd7SpnVq3dR+U9FBdZ4187Sn+E6+6Vy87BK3uHpBGTy9aou0dfRqOnfR", + "G8oKb/j10A7VstvlDquulOQT8QB3dhKLvP/uPFZvnMDV1Zsbj9nanmIdpUJK/IQvnTrQ07nDa9Jntab1", + "HRwS1/kSM5mm313c5TlFxugczujR5cBvhWxcVC6qMemw9uEERPOYsHhMG+UvnRW+IxZOiRUhf1v8ZnjD", + "w4fxwX/4cEx+K9yHCED8feZ+x3fUw4dJw3BS1WdYFmryOF3BgxAX0bsRH1cNweF2mLhwdrMKMrLoJ8NA", + "odbzzKP71mHvVjKHz9z9kkMB5qfpEFVFvOkW3TEwQ07QRV9UYnB+XtlynooI3o7BxyhZQ1p49bgKHtbO", + "3j1CvFqh3XmiCpalnX74TBmWxK1Lr2lMsPFgG7KZo2I9fuW8YtHoppk6yOTZWkg0axLhKpkJuMbvTDgW", + "UHH2ewVRWV+8iVuXs38K4agdATutX3QDt6sGjw4p+Ht3E6HXqm1TGG01uT4PZkCPiFSdqT3jHeIZO8x/", + "S6yCoyh/fWJg29K5Du+krK3vvO1FoJ0Z2LNPZ3HtfyC5cph2M58P2WmmJnMp/oC07IBGwkTqDm/dZqiA", + "/wN4yke1zciC50BdsLqefReBDNct9JHKnXUJftGhat4hV3iaT+y30XsqDaL97lcbqHR6cbcJfQ/V2PGk", + "GUjTw8zwwEZu4VjLx7u7UW5PqM1r0Yg8S5/zOFD0xI5fn3MHcye4tqC3M5oqdGTeiwamaPsbjnlaEN/Z", + "b5AKqRns7CSKZQhtmU32V4KsrUfdVMkHvv3stINfffUjDykuft6Nra9KoURimIrfUo5+hNjPckDXW4H1", + "wzC9boXEBJ8q7UOYQ8ZWSWX41dWbPOt6fuVswWxJ8UoBoXPt8jy6gWxReUtFrpp3yEXiUHM+J6fj+sz6", + "3cjZDVNsVgC2eGRbzKjCCzr4RIQuZnnA9VJh88cDmi8rnkvI9VJZxCpBwvscRc/gCTsDfQvAySm2e/Ql", + "uY8Ow4rdwIP0BeOEtdGzR1+Ot1XORoxjkfhtTD5HLu8DGdKUjV7VdgzDVt2o6ciEuQT4A/rvky3ny3Yd", + "crqwpbuCdp+uFeXUICQF02oHTLYv7i+6crTwwq11BpSWYkOYTs8PmhqO1RNNbhiiBYNkYrVieuU8RZVY", + "GQqry5DbSf1wWF/Pl0HzcPmP6IJdJt74n+C5RVc9EY7oVf8T2ttjtI4JtRlbC1bHX/gKteTcZ6bGunCh", + "HJzFjZnLLB3lVQzHmJNSMq5Ra1Tp+eQL83yXNDMMcdoH7mT2+ZNEfbVmCSK+H+AfHe8SFMibNOplD9l7", + "Kcf1Jfe54JOV4Sj5gzqlQ3Qqe33F0/69fW7HPUPfWbo24056CbBqECCNuPmdSJFvGfCOxBnWsxeF7r2y", + "j06rlUwTDK3MDv38+oWTRFZCpipd1AzASSUStGRwg/Gl6U0yY95xL2QxaBfuAv2n9W7zYmkkuvnTnXws", + "RFblxDstpFUykv4vP9b58dG4beN2W9pLIRN6Wqdx/MhuqfvpC9s2dOsOiN96MDcYbThKFys94R42niP0", + "+RT+Xm2Q7J43VKWPfiPSvONR1n/4EIF++HDsROXfHjc/W/b+8OFwl9m0vtD8mkDNYXdNO3ul6Zva6q9E", + "Qnvnq3gGvzGXqiShYU3eZeZKnbkxxqRZKvHjyx3HiVfc2w05fYA8avBzGzefmL/iZtYRMP38oVk9Nkk+", + "efgexVBQ8pVYDyWi1rXl6elPgKIelAzUCuJKOtVxk54SO918IrI1o86gEOalGhfAGuy18hfaBYOa8Za9", + "qFiR/1JboVs3k6Q8Wyadymem46/2GRA1iDQY2ZJyDkWyt30t/+pf1Yl3/z9Fz7ArxtOf2oWYLewtSGuw", + "mkD4Kf34BldMF2aCGEXNhFwhxUmxEDnBeerKJTVr7FY0T1WSTcT447CrSjuvZEye4AqKzFmBbrRpezi2", + "nEiqe7gqlv33Ja7MOFiFX1m1hB0dJKFshde2oquyADyENyDpArsKDq3umLENR47KkhBVmk/YEpO/CKIr", + "yYmYz6NlANdMQrEZk5IqZQc5NcuCNc49evbo9PR0mJER8TVg7RavfuEv68U9OsEm9our/GULJuwF/iHQ", + "v6+pbp/N7xKXK7/6ewVKp1gsfrAB2WghNve6Lb0aygRPyXeYn8wQeqNEACpFfYblZk7QqiwEzceYFPry", + "m7MXxM5q+0hA1GHp1wVqAJtHJGnkGZ4j1edf68ldNXyc7alzzKqVnoSirKlMiqZFXUuWtbyfUDcYY2dK", + "nlu1bHDssZMQTC0uV5BHNWCtGgCJw/xHa5otUd85HW1VKfdUAxpewthzwNpcFMW9hoJZyMHNMlwVY1vE", + "eEyEXoK8ZQow7wTcQDNhY8h26hTyPoFjc7Wy4twSznQP6TWUx9p3FzxwVvT1/hVJyFr7cGfbX53JA4uc", + "71vs+QJ7peN2WpWjW34PtmTG2hfdmJIfnbEjo1xwlmGxiZQIjqkYh5lVB9TlSNs71cid5cQxTNarDgHq", + "Dou9Faw9y3SI6zo1RF/NflvCsX9qWLsigAvQyvFAyMe+fLwz0DGuwBVAM/QVc1QhE65fybCY4EJyRJf0", + "8QizqfXoWr81335yunnMGXPNOOrcHFLdS9Aa2ArF0M7OCdNkIUC51TbjwtQb02d6ueYIwtvpC7Fg2QVb", + "4BjWFdEgxXoBd4c68z7BzgfXtP3atHW1C8LPDZc6O6lf99skC1Fh/1M113vRn/L98o40EXLD+PFoW4hx", + "q6s/3suGDOEGPf+gxPu8QzahfH1zlG/Mk9XSG7YgNnI3mTaY8QQYLxj3Bt90HqwseZfgxuBp7umnMkm1", + "fXQM4niXQIuecBgMqrceA3cdql2JwaAE1+jn6N/GuvJ+D1sJDerXBeUb4g+Foe5IKPmaFsEZPlFHH6Uz", + "J4xZZ+FWZf0UWzFsfeJDcxvo2hkIGrpjNZR976m+bKOzKl+AntA8T+Wd+wq/EvzqAwphDVkVioCFONNm", + 
"uvYutbmJMsFVtdoyl29wx+lypqhSsJoVCdfb5+Ej5GGHMRHVbIP/pipg9e+Mc3rfO/rbe7jn+9Uo6Eaz", + "p6RnQ9MTxRaT4ZjAO+Xu6KinPozQ6/5HpXQf+P2niOtucbl4j1L87RtzccRpujs+/vZqCVm00Z9e4Hef", + "Dyxkcm1yJbzKOnXe0CMDNy+xZS3gfcMk4De06Mm4EFtt7P1qLRl9eRey3rQiVLvsdZqSmicMUWH05/+y", + "Htgty1DXvNnnY21drD+k8cThYyvS+y2NPzTsitbrrWYovfbEw0x+NRHsa/NzpRi6+lJaFCIbzBncMGem", + "U3+qXrFaucz3Ca+8m5XI47MQe3MBpBmbdVhOhFbgwzb5DZ9WyS/yNj1aQz8SiGZo1jJEo1vC2AZmevA8", + "MHbqeKJIZeswS75lBRaH+s+Llz+N+jcy2oHulrrU2UkVdt/GhEi1NnksRAMfW3iA4EVa/616VOqYGyp9", + "Glx14uSHb62CcAhINk/SPq1fDB28QwALYatCpepmdLPTjOrt8MiPqKHeXstRYupIUUW72lLi7WOVnnUT", + "EgqRDipM2pCRhhR3StURci8Fr4G1F43LR2eLK3XqMnUY6PMhwmEHH+/Ho/N8L/EpVYtqZEdJMdgXbLHU", + "XxUiu/4eaA7S1hNJPSdtNZEVmGeoWrIS3z+lUKyuB1yYwVwi7yUONx0amnO5BJcVxicJ6IzlHahvINNY", + "H7p2A5UAw/0cyvQSDQTeoIhNPoEriATIodTLrcKSde4u9bIuGwou8owpMgNnurgBPiZsCtN2sFpeJ4Ui", + "BdC5V8JKIfSAurohbAnRGAOdoq9OjebtYmAn51uU0tCW0p0OL8JyFmICbKDlLVV15qhWGoXB4drzOWSY", + "8H5r+r3/WgKP8rGNveoOYZlH2fhYCBfEkg1H1WjXsG5LhLcV1Kgm1YeEtC8hxjVs7inSoKFkReAQYXtI", + "BnhEjrXj+qICfaYN5xjJVKAnRJD3g3cJ+OsaS4cUAYiyUx4Ihqdxcz3VGSsPg8ZLNAeAYbpO71S0v06H", + "h4JpX3a/bnX1/pfycyxmr5xTKQ3p5mN9EjnvlmO+denqMdFisBb6xPWg/G8+QaudpWDXrkINIszaZm+p", + "zH2Lo6TJs/cmSwM9DzOzOjCq6+Wzr1+OjVDMCmEEoElfYGgzUim48N5T1te6TlqGUM9BSsiDTbAQCiZa", + "+DCrPZJ/uvDJLdizXuYH4a3l0b9HyLBdUW8Nhdd1IQksB0mxZgJ1zucxVoiEFTXQy6i4Q1oNumuHvrbf", + "fU4RX95vu3q1D+/hXOyukO1D75jqYD4+XXPihIO9uVcjEckBmlnGOciJN+K2SzvwZppMzKucV5kVVeKz", + "GbTXg9OObeFmSaVm1l1l6wkVZeW4hs2JVfv4quN+x2OgrQxpQY8SSreI4qi6apWCe3EU8D5t+s5SiGLS", + "Yxk879ajaB+Ga5ZdAyZmDZEpRgq+1zw2ZhJyHw1SwWfkdrnx1RbKEjjkD6aEnHEbHejdR5oVSFuT83t6", + "2/xrnDWvbIUZp4GeXvF0mBVWepF35H5+mC08r483KTD88o7z20EOmF2veZ+P3C2WhGnWCZ4OVW90/Tta", + "IlREfhaKlAB1YQ3BXyNLSLyjCGZnidIIoX8AJc6ATFQhUl74h2SQMUOlMRVPhgBp4AOeqzUUbvAkApyT", + "3Y6srO6zzzsq5kRC7ZtxaAJWl9PUMnHVpxppzxxmaXLGuZAQz4h+pjZRc4hswzzH+J8Z05LKzSFpUpuo", + "SqmherG801syOErWC6mdJbs4LApxO0G2NgnVlVLqANNONa9tX6e07meO+gwit0uqnIi4IUuak0xICVnc", + "Ix3ibaFaCQmTQqAXZsqxY67NI2GFcZ2cFGJBRJmJHGwhtDQF9c1VcU5R9oLIlS2JAks7mDLA9onoeOCU", + "5va15tkJyms7C234zb80fWz6ijr9nV30xLoI9MQXgHLp7hyGbOMuvEg4NiNTWymbFpHnbI10AzJ15OdE", + "ywrGxLVoV+F3B59KICumlAUl0NItKwrMHsHWkUND8AdKo7ZHdj5HP+gbhg5vzUwiVqQuze0Y0q/EPOAi", + "zshG9FKKarGM6gMEOP3TXVbuYR+P8rOq0CcRQ0TNFE/ISijtnsV2pHrJtQvo/UxwLUVRNBV5Vs5fOKPv", + "j3R9lmX6hRDXM5pdP8BHOBc6rDQf+5QKbd/deibZysE47KWg13yC5KF2p1m37dCr1dHzYN7Z4n4dw8Mu", + "TX4E5tvdzHW3XeOsu7D2upp8Nv0WOuOEarFiWfq4/bW8X3t9VlPcK5lp0VYhtllosBnygfgeC+5MyD27", + "aAZOk2VUz4jjEc6tAzmR+S+K8e1xyRwcD+q5Q7t8xwlYk6xXDGwBgJDaRAi6krZ0cSykBYYjFjZxCjql", + "tAEdeOGg79/dYDMjHB0oDXcCquONHAC8bzUYY5sR03o2z8Taf39Qp8w8CPj326m8wTz6nCovatKS1q3S", + "J7Lq4QjpAgRbPRAvMQnGbKgfYihFP/DyjwDo90xswDDIP3FfMOaUFZBPUlWKz4MObBw9112MZTS6r+do", + "OXlGK18J2IxdSXCJlaz0L5vmxJIaUhKheVcjznNYg43R+gOksHV8x5E5Cwpb5relURDlpIAbaDhsumxP", + "FUqh7AZ8XxU6kxygRItvW9GW8kSMqwS2tC9u7ZPIl20IdpPqGItYu1Nkh64lqRla84k9JmroUTIQ3bC8", + "og38qX1FjqYu0RzlBKo6z4eJf2IOneZnO8JrP8CZ758SZTwm3g7jQ3uzoDTqtjGgnZ7Jleo79TztmByn", + "MguGIpwtD3ZtS+I131AlveX9Ws0uydcvsYH7xASPEPvNGjKUatxTCHL3GOqxnLgcSEjtHCC3DwbTJaHN", + "XwInXEQ1j2+pCq+YOqur/8FOjI0Ydw/tA2z0tf/w3XeW4GBEtZItpkuUBrK+m47/k5zErQexd7wUjShw", + "obxbVGOeut2zAxuIqsgJN/tpZH+sEexuMcfFx2RW+YGKQtzaIsbxE/U5eHuupT5vYnJiOQvXsveTHruE", + "w20tCIsiRFZ0Q4TEf8yD9PeKFmy+QT5jwffdiFpSQ0LOgGy9KJzftZl4u3g19oB5RYzwU9l1s6FjRsNt", + "zCgR0OYi92XbBFnRa4i3AR1ELP/MtGGcqpqhUsNc2a3t7GLBLd6nZ1rRPFYCYKLZTYM7+ITnpvf/V4et", + "xlP5/I9lQTNfstoVn2vyGaxq74lLL2G1Pcy5y9c8CYRK+TXRSp8mIz9Am7on60rF/PQVx2qA3SkB3qkL", + 
"dqdlDFQKt2ocbQkQH7SUY+/CcWI4O0uKS/3uWlxc+fjj7E4yQ3TfMoaA/yfalYZ7RSeyLV1BPV6PLZb+", + "EXahkYgnAatVg8/EeiJhrnY50lg9+Eysa4BV0N0ynkmgyvodnb90z9Y6ATLj5hltvXaDWTWMksOc8ZrV", + "Ml5WOvEKwjzIfBMhLLYmIFp7bHN9MoYRRW9o8fIGpGR538aZ02NLA8dFerwFxfVNKEDCjdwdgKn6BYjx", + "1LV+Pm5mrn9bYND6zipNeU5lHjdnnGQgjdRAbulGHW6qClaHXcYqGslCzWwhkdkKSdsCUmyctfmOhqQA", + "ID2iRWmAJQidtBNWIKsY0qLH8NOF4S9hCVrR9aQQC4z67TkQLs81mg7tA1JwVKJb6W7Yuv08iv0B26fB", + "UiSOEWmBsw6ZYvu5f4lbiY/QnznTW0++1XC2w7Ctp7M9mB6pfFGHZ1hi6Z7HVOS8S8wUR897UdWnKfG0", + "B9EmJl2iO1r1nl1E/wqXdiFWoQ8vVtl04UjF51u9wgT1DWpLAAaoOq6AZs5DrKuI6ygqLFLGLrvBnno6", + "q93391IPeKhIUe6sN6cNDjpmnH0qfG7PZzApRTnJhvi22mpFuTMyOEibMPbQR2RC6Fl38LtRoX5XIyda", + "o5DXvkVOewuJ7bKVldk2lUGfkqmHozcNGGKOvAyPsFWtYaxVUMWM/ePcG7ubSrTAJAglErJKopL5lm52", + "F37syT5/8f3Z00ePf3389HNiGpCcLUDVNQ1ahRNr10TG21qjj+uM2FmeTm+CzxZiEeetlz7sLWyKO2uW", + "26o6GXGnbOQ+2unEBZAKzu2WyDtor3CcOiziz7VdqUUefcdSKPjweyZFUaRrygS5KmF+Se1WZIAxL5AS", + "pGJKG0bYtJ8yXTtlqyUqFzFr+I3NDSV4Bl777KiA6R5frtRC+nx6kZ9hLgZncyKwLgvHq6ydaNu63DvN", + "6vdQaER3mxmQUpROtGdzkoIIY7ZkBUGv7tSmqE+P3HQDs7UOuylCdM7vadI74+4lLOZkO7dvluLWaU5v", + "NjEhXvhDeQBp9lk3+vOMHMJJasPAn4Z/JBKnHI1rhOV+CF6RfB9siQo/63hNhKQhg0DrJshIkAcC0BMP", + "3QhajYLsotzk0toY0Brhzc9t8ePH2iy9MzIFIfEddoAXxzLX7UIwhQPnEyf2/jEgJVrK2z5KaCx/V3i0", + "Z73hIom2yClNtAZl2ZLoioVRQLz6OsSZ97xKOuHoUghNzMu0KBJh7FaPg2cqJhzzJJA3tPj4XONbJpU+", + "Q3xA/ro/cCsOW46RbFGpjp6Q8wUdBFYUovxRoOKvMLb+v8DsbPJ2dLM4w3/nDkSVEC2st/c8WMCBk1sc", + "0zp2PfqczFy5n1JCxlTboeDWizQh3hYkmzv/WljrduzvncsE/SL0HY7D3PsDkZ8iI1vwHHAw10f9EzOn", + "Hg6QPC0pUu0QSgJ/KV4XF1Xfce3csTTMYamcosSNe6Zy6paLH7o8XAdeXpWC7joH3/oN3CYu/HptQ3OV", + "Da4wc3X1Rs+GJBRLV4Mx3THH2VHKwty9KMxHSXBmUenGcJAkCasWuXdlr2n5S0Z5Gpq7aMT9ngLyS4t+", + "Mxo+CuYVt+OFAqgYK+7ZupiPgxeD4KbbM3LFHxK1pP5t4f58/PTz0XgEvFqZxdffR+OR+/o29VLL18m4", + "0jqRTsdH1FUTuKdISTdDgtl3ps5J4rfOFPTxRRql2Sz9pvve7Bk+XF0AwjlHVo/sxd6gLn/O3wmAthJD", + "67CGE2NJsk4PFLZiV6agX/rS4tvU7z3VPlrct2LFTie5RiGW9+PRwiYpw+okv7padR932z0EPfkC3dLv", + "kgbMIiax1sbk0VRRUrcBBVlct0SFDIy8zirJ9ObC4N+r3dmv16lkUN+F9Ewu51ewwDvZV4tr4N7HrE7m", + "VCkvXX8naIHSp3UM4EbmFMWUfGMrhLhr8R/3Zv8On33xJD/97NG/z744fXqawZOnX56e0i+f0EdffvYI", + "Hn/x9MkpPJp//uXscf74yePZk8dPPn/6ZfbZk0ezJ59/+e/3DKUbkC2gvvLPs9H/mpwVCzE5e3U+uTTA", + "1jihJfsBzN6ghm2OCQoRqRlesbCirBg98z/9//6inGZiVQ/vfx25epCjpdalenZycnt7O427nCwwB8pE", + "iypbnvh5MJdl473y6jzEBVnfP9zR2uaEmxry+5lvr7+5uCRnr86nNcGMno1Op6fTR5hPsQROSzZ6NvoM", + "f8LTs8R9P8Es2ifKFeM5qUNHk9b+1xgm45/0cgE5uR+CAP8t+HuoBz6WcO6yUP5TWWIMqzjPkbhc3fQR", + "1n1FB1AE6/Hpqd8L966JxMsTjDh79m5k+UcqHW4HqZc1wEnI6qrT3UX/zK+5uOUEU/7aA1StVlRu7Aoa", + "2IgGx22iC4WmOcluMDOj6d3GeVm6Ekh9KMeqms1T7jsjgYT6OOaE2bI5rpCRSqG8W37pjtjfmgK6M1li", + "d7DRKwOzT3MW0ia7m9DhDD1NLMLCGbHKyg6ix6OySqDzGwzmU9twNo5K9lhoRJEHjHcw+qr6fwSjhnQX", + "If2v+WsJtEDRyPyxMoSa+U8SaL5x/1e3dLEAOXXrND/dPD7xOoeTdy6f1Ptt305iL9STd42kXPmOnt6P", + "cleTk3cuT9WOAWOzyInzb486DAR0W7OTGdbbHNoU4tX1LwVpXp28Q91c7+8nTk5Pf0T1qb1hT/zjo6el", + "zSGU/thA4Tu9NgvZPpxpE42XUZ0tq/LkHf4HyTZakc3ff6LX/ATdzU7eNRDhPncQ0fy97h63wLTTHjgx", + "nytk2ts+n7yz/0YTwboEyVbAbclx96vNZnuC5a033Z83PEv+2F1HI2nnjsscs8Qq74PZzPWZvD7aCUTV", + "XZndsCRc7bSlXQG7K0ltW9n78ejJEblysx5AApivaE58nhWc+9HHm/uc20gSI1paERghePLxIGhsH/kB", + "NuQnocm3Xof/9GPuxDk3L0daeIHuQNFv2PFpX6NG9g7N+MIKKsLm3mketbM87xC9fUOC0l8JvF37MLZS", + "i9L5dtRIq5/QjJsljIeJzd0MwDYFpBckuMhhFD9utazg/R15QssrlEp9nrA2oUUVg8uc1aYBajIxbdtn", + "zo6cyLS+g4TPn/tJ65isv3nK3zwl8JSnp599vOkvQN6wDMglrEohqWTFhvzMQ7DfwTzuLM+TOcCbR38n", + "jxuP1pNM5LAAPnEMbDIT+cbV3Rs1JrgGqy3rCDInXrvUeDH0cE+vt0pJK3UQyejZm5QzlQupLqtZwTKz", + 
"4KnXLZVULyPVT0iH3OR+45iTBUXlm7PJ/5xOvnz77ukX75Mx1N1wqjoOcevXRC0ZkrOiCllp9K1wWR+6", + "l1SkwdGCqN8lXmZ4uJnekFvGc3H7IGDg9wrw7nAo8NOMxqmbZkvRi26xxdqZwYDcAbQPAvSC2LoFg6xg", + "/Q4GW751698etoaCfqolvP3QmreQ4PQ/L17+FMV9W/2KdZ7EqGN7YDHISwoMXrql6D1vy09/bTVfxQbz", + "F2iqK9UobDv9+/b9+8a7+433XUjVb+vWaiw52WWa0Q04HSTmJ2+0d40/nbZmZENXUqnyze+EkgVWJ+9e", + "y7MNOX/eebPbbu2L8KsNNm3dhYlLrg3iVj7VZgc97GWbIGcWshA6BPDYRf0tWv8tWt/puT748Ax5sSf1", + "ad/hwLTzCh378v+NIEksd4EuAh1QhmjdPunxPcrGdzV6KQ2eLcsBOYk+2DxAbTT/zSL+ZhF3YxHfQeIw", + "4ql1TCNBdPtp+IYyDExalzfc0b3U4ZtXBZVR8oRdivszHDH9AP4gXONjqymTuLJaSoyxYja4ILGBx9Vc", + "/s3y/mZ5fx2Wd7ab0TQFkzvr+q5hs6Jl0PCpZaVzcRv5BSAsNjCoa9m0D//23ye3lOnJXEhXNY7ONchu", + "Zw20QGQzTOcc/1qX++58wRrm0Y9x2s/krye0aaptugsY1tvXseNLkPrqzOU9jXy+Gf+59lSMPf+Q7Qef", + "vzdvDctWIG/8jVA7sj07OcH0ZUuh9AlqvJpObvHHt4E83oV7xJHJe6QLIdmCcVpMnEfIpHZWezw9Hb3/", + "vwEAAP//CeiNZR0iAQA=", } // GetSwagger returns the content of the embedded swagger specification file @@ -488,16 +485,16 @@ var swaggerSpec = []string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -515,7 +512,7 @@ func decodeSpecCached() func() ([]byte, error) { // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -529,12 +526,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/participating/private/private_routes.yml b/daemon/algod/api/server/v2/generated/participating/private/routes.yml similarity index 82% rename from daemon/algod/api/server/v2/generated/participating/private/private_routes.yml rename to daemon/algod/api/server/v2/generated/participating/private/routes.yml index 708b8cba16..21950917f6 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/private_routes.yml +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.yml @@ -11,9 +11,9 @@ output-options: - nonparticipating - data - common - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." 
package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index 4869378ac3..59328e33b2 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -1,6 +1,6 @@ // Package public provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package public import ( @@ -14,16 +14,17 @@ import ( "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" + "github.com/algorand/go-algorand/data/basics" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" + "github.com/oapi-codegen/runtime" ) // ServerInterface represents all server handlers. type ServerInterface interface { // Get a list of unconfirmed transactions currently in the transaction pool by address. // (GET /v2/accounts/{address}/transactions/pending) - GetPendingTransactionsByAddress(ctx echo.Context, address string, params GetPendingTransactionsByAddressParams) error + GetPendingTransactionsByAddress(ctx echo.Context, address basics.Address, params GetPendingTransactionsByAddressParams) error // Broadcasts a raw transaction or transaction group to the network. // (POST /v2/transactions) RawTransaction(ctx echo.Context) error @@ -44,14 +45,14 @@ type ServerInterfaceWrapper struct { func (w *ServerInterfaceWrapper) GetPendingTransactionsByAddress(ctx echo.Context) error { var err error // ------------- Path parameter "address" ------------- - var address string + var address basics.Address - err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address) + err = runtime.BindStyledParameterWithOptions("simple", "address", ctx.Param("address"), &address, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetPendingTransactionsByAddressParams @@ -69,7 +70,7 @@ func (w *ServerInterfaceWrapper) GetPendingTransactionsByAddress(ctx echo.Contex return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetPendingTransactionsByAddress(ctx, address, params) return err } @@ -78,9 +79,9 @@ func (w *ServerInterfaceWrapper) GetPendingTransactionsByAddress(ctx echo.Contex func (w *ServerInterfaceWrapper) RawTransaction(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.RawTransaction(ctx) return err } @@ -89,7 +90,7 @@ func (w *ServerInterfaceWrapper) RawTransaction(ctx echo.Context) error { func (w *ServerInterfaceWrapper) 
GetPendingTransactions(ctx echo.Context) error { var err error - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetPendingTransactionsParams @@ -107,7 +108,7 @@ func (w *ServerInterfaceWrapper) GetPendingTransactions(ctx echo.Context) error return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetPendingTransactions(ctx, params) return err } @@ -118,12 +119,12 @@ func (w *ServerInterfaceWrapper) PendingTransactionInformation(ctx echo.Context) // ------------- Path parameter "txid" ------------- var txid string - err = runtime.BindStyledParameterWithLocation("simple", false, "txid", runtime.ParamLocationPath, ctx.Param("txid"), &txid) + err = runtime.BindStyledParameterWithOptions("simple", "txid", ctx.Param("txid"), &txid, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err)) } - ctx.Set(Api_keyScopes, []string{""}) + ctx.Set(Api_keyScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params PendingTransactionInformationParams @@ -134,7 +135,7 @@ func (w *ServerInterfaceWrapper) PendingTransactionInformation(ctx echo.Context) return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) } - // Invoke the callback with all the unmarshalled arguments + // Invoke the callback with all the unmarshaled arguments err = w.Handler.PendingTransactionInformation(ctx, txid, params) return err } @@ -177,242 +178,237 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3fctpLgX8H2zDm2NU1JfiRz4z05s4od52pixz6Wkpk7ljdBk9XduCIBXgDsR7z+", - "73tQAEiQBLvZkmInM/lkq4lHoVAo1AtVHyapKErBgWs1efphUlJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4", - "5Kn/RpSWjC8m0wkzv5ZULyfTCacFNG1M/+lEwj8qJiGbPNWygulEpUsoqBlYb0vTuh5pkyxE4oY4s0Oc", - "P5983PGBZpkEpfpQvub5ljCe5lUGREvKFU3NJ0XWTC+JXjJFXGfCOBEciJgTvWw1JnMGeaaO/SL/UYHc", - "Bqt0kw8v6WMDYiJFDn04n4lixjh4qKAGqt4QogXJYI6NllQTM4OB1TfUgiigMl2SuZB7QLVAhPACr4rJ", - "03cTBTwDibuVAlvhf+cS4FdINJUL0JP309ji5hpkolkRWdq5w74EVeVaEWyLa1ywFXBieh2TV5XSZAaE", - "cvL2xTPy+PHjr8xCCqo1ZI7IBlfVzB6uyXafPJ1kVIP/3Kc1mi+EpDxL6vZvXzzD+S/cAse2okpB/LCc", - "mS/k/PnQAnzHCAkxrmGB+9CiftMjciian2cwFxJG7oltfKebEs7/WXclpTpdloJxHdkXgl+J/RzlYUH3", - "XTysBqDVvjSYkmbQd6fJV+8/PJw+PP34T+/Okv9yf37x+OPI5T+rx92DgWjDtJISeLpNFhIonpYl5X18", - "vHX0oJaiyjOypCvcfFogq3d9ielrWeeK5pWhE5ZKcZYvhCLUkVEGc1rlmviJScVzw6bMaI7aCVOklGLF", - "MsimhvuulyxdkpQqOwS2I2uW54YGKwXZEK3FV7fjMH0MUWLguhE+cEG/X2Q069qDCdggN0jSXChItNhz", - "Pfkbh/KMhBdKc1epwy4rcrkEgpObD/ayRdxxQ9N5viUa9zUjVBFK/NU0JWxOtqIia9ycnF1jf7cag7WC", - "GKTh5rTuUXN4h9DXQ0YEeTMhcqAckefPXR9lfM4WlQRF1kvQS3fnSVCl4AqImP0dUm22/d8vXv9AhCSv", - "QCm6gDc0vSbAU5FBdkzO54QLHZCGoyXEoek5tA4HV+yS/7sShiYKtShpeh2/0XNWsMiqXtENK6qC8KqY", - "gTRb6q8QLYgEXUk+BJAdcQ8pFnTTn/RSVjzF/W+mbclyhtqYKnO6RYQVdPP16dSBowjNc1ICzxhfEL3h", - "g3KcmXs/eIkUFc9GiDna7GlwsaoSUjZnkJF6lB2QuGn2wcP4YfA0wlcAjh9kEJx6lj3gcNhEaMacbvOF", - "lHQBAckckx8dc8OvWlwDrwmdzLb4qZSwYqJSdacBGHHq3RI4FxqSUsKcRWjswqHDMBjbxnHgwslAqeCa", - 
"Mg6ZYc4ItNBgmdUgTMGEu/Wd/i0+owq+fDJ0xzdfR+7+XHR3feeOj9ptbJTYIxm5Os1Xd2DjklWr/wj9", - "MJxbsUVif+5tJFtcmttmznK8if5u9s+joVLIBFqI8HeTYgtOdSXh6RU/Mn+RhFxoyjMqM/NLYX96VeWa", - "XbCF+Sm3P70UC5ZesMUAMmtYowoXdivsP2a8ODvWm6he8VKI66oMF5S2FNfZlpw/H9pkO+ahhHlWa7uh", - "4nG58crIoT30pt7IASAHcVdS0/AathIMtDSd4z+bOdITnctfzT9lmZveupzHUGvo2F3JaD5wZoWzssxZ", - "Sg0S37rP5qthAmAVCdq0OMEL9emHAMRSihKkZnZQWpZJLlKaJ0pTjSP9s4T55Onkn04a+8uJ7a5Ogslf", - "ml4X2MmIrFYMSmhZHjDGGyP6qB3MwjBo/IRswrI9FJoYt5toSIkZFpzDinJ93KgsLX5QH+B3bqYG31ba", - "sfjuqGCDCCe24QyUlYBtw3uKBKgniFaCaEWBdJGLWf3D/bOybDCI38/K0uIDpUdgKJjBhimtHuDyaXOS", - "wnnOnx+T78KxURQXPN+ay8GKGuZumLtby91itW3JraEZ8Z4iuJ1CHput8WgwYv5dUByqFUuRG6lnL62Y", - "xn91bUMyM7+P6vzHILEQt8PEhYqWw5zVcfCXQLm536GcPuE4c88xOev2vRnZmFF2EIw6b7B418SDvzAN", - "hdpLCQFEATW57aFS0u3ECYkJCnt9MvlRgaWQki4YR2inRn3ipKDXdj8E4t0QAqhaL7K0ZCXI2oTqZE6H", - "+uOeneUPQK2xjfWSqJFUc6Y06tXYmCwhR8GZck/QIanciDJGbPiORdQwryUtLS27L1bsYhz1edvIwnrL", - "i3fknRiFOWD3wUYjVDdmy3tZZxQS5BodGL7JRXr9V6qWd3DCZ36sPu3jNGQJNANJllQtIwenQ9vNaGPo", - "2zREmiWzYKrjeokvxULdwRJzcQjrKstnNM/N1H2W1VktDjzqIOc5MY0JFAwN5k5xtBZ2q3+Rb2m6NGIB", - "SWmeTxtTkSiTHFaQG6WdcQ5ySvSS6ubw48her8FzpMAwOw0kWI0zM6GJTda2CAmkoHgDFUabKfN2n5qD", - "KlpARwrCG1FUaEUIFI3z5351sAKOPKkeGsGv14jWmnDwYzO3+4Qzc2EXZy2A2rvvavzV/KIFtGnd3Ke8", - "mULIzNqstfmNSZIKaYewN7yb3PwHqGw6W+q8X0pI3BCSrkAqmpvVdRb1oCbfuzqde05mRjUNTqajwrgC", - "ZjkH9kPxDmTESvMa/0NzYj4bKcZQUkM9DIUREbhTM3sxG1TZmUwDtLcKUlhTJilpen0QlM+ayeNsZtTJ", - "+9ZaT90WukXUO3S5YZm6q23CwYb2qn1CrO3Ks6OeLLKT6QRzjUHApSiJZR8dECynwNEsQsTmzq+1b8Qm", - "BtM3YtO70sQG7mQnzDijmf03YvPcQSbkfszj2GOQbhbIaQEKbzceMk4zS+OXO5sJeTNponPBcNJ4Gwk1", - "owbC1LSDJGxalYk7mxGPhW3QGagJ8NgtBHSHj2GshYULTX8DLCgz6l1goT3QXWNBFCXL4Q5IfxkV4mZU", - "weNH5OKvZ188fPTzoy++NCRZSrGQtCCzrQZF7juzHFF6m8ODqHaE0kV89C+feB9Ve9zYOEpUMoWClv2h", - "rO/Lar+2GTHt+lhroxlXXQM4iiOCudos2ol16xrQnsOsWlyA1kbTfSPF/M65YW+GGHTY6E0pjWCh2n5C", - "Jy2dZKbJCWy0pCcltgSe2TgDsw6mjA5YzO6EqIY2PmtmyYjDaAZ7D8Wh29RMsw23Sm5ldRfmDZBSyOgV", - "XEqhRSryxMh5TEQMFG9cC+Ja+O0qu79baMmaKmLmRu9lxbMBO4Te8PH3lx36csMb3Oy8wex6I6tz847Z", - "lzbyGy2kBJnoDSdInS3zyFyKglCSYUeUNb4DbeUvVsCFpkX5ej6/G2unwIEidhxWgDIzEdvCSD8KUsFt", - "MN8ek40bdQx6uojxXiY9DIDDyMWWp+gqu4tjO2zNKhhHv73a8jQwbRkYc8gWLbK8vQlrCB12qnsqAo5B", - "x0v8jLb655Br+kLIy0Z8/U6Kqrxz9tydc+xyqFuM8wZkpq83AzO+yNsBpAsD+3FsjZ9lQc9qI4JdA0KP", - "FPmSLZY60BffSPEb3InRWWKA4gdrLMpNn77J6AeRGWaiK3UHomQzWMPhDN2GfI3ORKUJJVxkgJtfqbiQ", - "ORByiLFOGKKlQ7kV7RNMkRkY6kppZVZblQQDkHr3RdMxoak9oQmiRg2EX9RxM7aVnc6Gs+USaLYlMwBO", - "xMzFOLjoC1wkxegp7cU0J+JG+EULrlKKFJSCLHGm6L2g+Xb26tA78ISAI8D1LEQJMqfy1sBer/bCeQ3b", - "BGP9FLn//U/qwWeAVwtN8z2IxTYx9HbtaX2ox02/i+C6k4dkZy11lmqNeGsYRA4ahlB4EE4G968LUW8X", - "b4+WFUgMKflNKd5PcjsCqkH9jen9ttBW5UAEu1PTjYRnNoxTLrxgFRssp0on+9iyadSyJZgVBJwwxolx", - "4AHB6yVV2oZBMZ6hTdNeJziPFcLMFMMAD6ohZuSfvAbSHzs19yBXlarVEVWVpZAastga0CM7ONcPsKnn", - "EvNg7Frn0YJUCvaNPISlYHyHLKcB4x9U1/5X59HtLw596uae30ZR2QKiQcQuQC58qwC7YRTvACBMNYi2", - "hMNUh3Lq0OHpRGlRloZb6KTidb8hNF3Y1mf6x6Ztn7isk8Pe25kAhQ4U195BvraYtfHbS6qIg8O72NGc", - "Y+O1+jCbw5goxlNIdlE+qnimVXgE9h7SqlxImkGSQU63keAA+5nYz7sGwB1v1F2hIbGBuPFNbyjZxz3u", - "GFrgeComPBL8QlJzBI0q0BCI671n5Axw7BhzcnR0rx4K54pukR8Pl223OjIi3oYroc2OO3pAkB1HHwPw", - "AB7qoW+OCuycNLpnd4q/gXIT1HLE4ZNsQQ0toRn/oAUM2ILdG6fgvHTYe4cDR9nmIBvbw0eGjuyAYfoN", - "lZqlrERd53vY3rnq150g6jgnGWjKcshI8MGqgWXYn9gQ0u6YN1MFR9ne+uD3jG+R5fgwnTbw17BFnfuN", - "fZsQmDruQpeNjGruJ8oJAuojno0IHjaBDU11vjWCml7ClqxBAlHVzIYw9P0pWpRJOEDUP7NjRuedjfpG", - "d7qLL3CoYHmxWDOrE+yG77KjGLTQ4XSBUoh8hIWsh4woBKNiR0gpzK4z9/zJP4DxlNQC0jFtdM3X1/89", - 
"1UIzroD8TVQkpRxVrkpDLdMIiYICCpBmBiOC1XO64MQGQ5BDAVaTxC9HR92FHx25PWeKzGHt3wyahl10", - "HB2hHeeNULp1uO7AHmqO23nk+kDHlbn4nBbS5Sn7I57cyGN28k1n8NrbZc6UUo5wzfJvzQA6J3MzZu0h", - "jYyL9sJxR/ly2vFBvXXjvl+wosqpvguvFaxonogVSMky2MvJ3cRM8G9XNH9dd8P3kJAaGk0hSfEV38ix", - "4NL0sQ//zDiMM3OAbdD/WIDg3Pa6sJ32qJhNpCorCsgY1ZBvSSkhBfvezUiOql7qMbGR8OmS8gUqDFJU", - "CxfcasdBhl8pa5qRFe8NERWq9IYnaOSOXQAuTM0/eTTiFFCj0nUt5FaBWdN6PvfKdczNHOxB12MQdZJN", - "J4Mar0HqqtF4LXLa7zZHXAYteS/ATzPxSFcKos7IPn18hdtiDpPZ3N/GZN8MHYOyP3EQ8dt8HAr6Nep2", - "vr0DoccORCSUEhReUaGZStmvYh6+0fahgluloehb8m3XnweO39tBfVHwnHFICsFhG01Lwji8wo/R44TX", - "5EBnFFiG+nZ1kBb8HbDa84yhxtviF3e7e0K7Hiv1Qsi7conaAUeL9yM8kHvd7W7Km/pJaZ5HXIvuBWeX", - "AahpHazLJKFKiZShzHaeqamLCrbeSPfcs43+N/W7lDs4e91xOz60MDkA2oghLwklac7Qgiy40rJK9RWn", - "aKMKlhoJ4vLK+LDV8plvEjeTRqyYbqgrTjGAr7ZcRQM25hAx07wA8MZLVS0WoHRH15kDXHHXinFScaZx", - "rsIcl8SelxIkRlId25YF3ZK5oQktyK8gBZlVui394wNlpVmeO4eemYaI+RWnmuRAlSavGL/c4HDe6e+P", - "LAe9FvK6xkL8dl8AB8VUEg82+85+xbh+t/yli/HHcHf72QedNhkTJmaZrSQp//f+vz19d5b8F01+PU2+", - "+peT9x+efHxw1Pvx0cevv/5/7Z8ef/z6wb/9c2ynPOyx57MO8vPnTjM+f47qTxCq34X9k9n/C8aTKJGF", - "0Rwd2iL3MVWEI6AHbeOYXsIV1xtuCGlFc5YZ3nITcujeML2zaE9Hh2paG9Exhvm1HqhU3ILLkAiT6bDG", - "G0tR/fjM+EN1dEq6t+d4XuYVt1vppW/7DtPHl4n5tE5GYPOUPSX4Un1JfZCn+/PRF19Ops0L8/r7ZDpx", - "X99HKJllm1gegQw2MV0xfCRxT5GSbhXoOPdA2KOhdDa2Ixy2gGIGUi1Z+ek5hdJsFudw/smSszlt+Dm3", - "Af7m/KCLc+s8J2L+6eHWEiCDUi9j+Ytaghq2anYToBN2UkqxAj4l7BiOuzafzOiLLqgvBzr3galSiDHa", - "UH0OLKF5qgiwHi5klGElRj+d5w3u8ld3rg65gWNwdeeMRfTe++7bS3LiGKa6Z1Na2KGDJAQRVdo9nmwF", - "JBluFr4pu+JX/DnM0fog+NMrnlFNT2ZUsVSdVArkNzSnPIXjhSBP/XvM51TTK96TtAYTKwaPpklZzXKW", - "kutQIWnI0ybL6o9wdfWO5gtxdfW+F5vRVx/cVFH+YidIjCAsKp24VD+JhDWVMd+XqlO94Mg2l9euWa2Q", - "LSprIPWphNz4cZ5Hy1J1Uz70l1+WuVl+QIbKJTQwW0aUFvV7NCOguCe9Zn9/EO5ikHTt7SqVAkV+KWj5", - "jnH9niRX1enpY3zZ1+RA+MVd+YYmtyWMtq4MpqToGlVw4VatxFj1pKSLmIvt6uqdBlri7qO8XKCNI88J", - "dmu9OvQPDHCoZgH1E+fBDbBwHPw4GBd3YXv5tI7xJeAn3ML2A+xb7Vfwfv7G27XnDT6t9DIxZzu6KmVI", - "3O9Mne1tYYQsH42h2AK1VZcYbwYkXUJ67TKWQVHq7bTV3Qf8OEHTsw6mbC47+8IQsymhg2IGpCoz6kRx", - "yrfdtDbKvqjAQd/CNWwvRZOM6ZA8Nu20KmrooCKlBtKlIdbw2Loxupvvosr8Q1OXnQQfb3qyeFrThe8z", - "fJCtyHsHhzhGFK20H0OIoDKCCEv8Ayi4wULNeLci/djyGE+Ba7aCBHK2YLNYGt7/6PvDPKyGKl3mQReF", - "XA+oCJsTo8rP7MXq1HtJ+QLM9WyuVKFobrOqRoM2UB9aApV6BlTvtPPzMCGFhw5VyjW+vEYL39QsATZm", - "v5lGix2HtdEq0FBk27jo5ePh+DMLOGQ3hMd3bzSF40Fd16EuknHQ38o1dmu11oXmhXSGcNnvBWDKUrE2", - "+2KgEC7bpk3qEtwvlaILGNBdQu/dyHwYLY8fDrJPIonKIGLeFTV6kkAUZNs4MWuOnmEwX8whRjWzE5Dp", - "Z7IOYuczwiTaDmGzHAXYOnLV7j2VLS+qzQo8BFqctYDkjSjowWhjJDyOS6r8ccR8qZ7LjpLOfsO0L7tS", - "050HsYRBUtQ68Zy/DbsctKf3uwR1PiudT0UXKv0j0soZ3QufL8S2Q3AUTTPIYWEXbht7QmkSJjUbZOB4", - "PZ8jb0liYYmBgToQANwcYDSXI0Ksb4SMHiFGxgHYGPiAA5MfRHg2+eIQILlL+ET92HhFBH9D/GGfDdQ3", - "wqgozeXKBvyNqecALhVFI1l0IqpxGML4lBg2t6K5YXNOF28G6WVIQ4Wikw/Nhd48GFI0drim7JV/0Jqs", - "kHCT1YTSrAc6LmrvgHgmNol9oRzVRWabmaH36NsFfC8dO5g2F909RWZig+FceLXYWPk9sAzD4cEIbC8b", - "ppBesd+QnGWB2TXtbjk3RoUKScYZWmtyGRL0xkw9IFsOkcv9IL3cjQDomKGaWg3OLLHXfNAWT/qXeXOr", - "TZu0qf5ZWOz4Dx2h6C4N4K9vH2snhPtrk/hvOLmYP1GfJBNe37J0mwyFtnNpsw4ekqCwSw4tIHZg9U1X", - "DoyitR3r1cZrgLUYKzHMt++U7KNNQQ6oBCct0TS5jkUKGF0e8B6/8N0CYx3uHuXbB0EAoYQFUxoap5GP", - "C/oc5niK6ZOFmA+vTpdybtb3Voj68rduc+zYWuYnXwFG4M+ZVDpBj1t0CabRC4VGpBemaVwCbYco2mID", - "LItzXJz2GrZJxvIqTq9u3u+fm2l/qC8aVc3wFmPcBmjNsDhGNHB5x9Q2tn3ngl/aBb+kd7becafBNDUT", - "S0Mu7Tn+IOeiw8B2sYMIAcaIo79rgyjdwSCDB+d97hhIo0FMy/Eub0PvMGV+7L1Rav7Z+9DNb0eKriVI", - "Axh/ISgWC8h8ejPvD+NBErlc8EVQxaksd+XMOyY2dR1mntuRtM6F4cNQEH4g7ieMZ7CJQx9qBQh587IO", - 
"E+7hJAvgNl1J3CwURU0Y4o8tAlvdJ/aFdh8ARIOgLzvO7CY62e5SvZ24ATnQzOkkCvz6dh/L/oY41E2H", - "wqdbmU93HyEcEGmK6aCwST8NwQADpmXJsk3H8WRHHTSC0YOsywPSFrIWN9geDLSDoKME10ql7UKtnYH9", - "BHXeE6OV2dhrF1hs6Jum7gF+Vkn0YLQim/t522tdbeTav//pQgtJF+C8UIkF6VZD4HIOQUOQFV0RzWw4", - "Scbmcwi9L+omnoMWcD0bezaCdCNEFnfRVIzrL5/EyGgP9TQw7kdZnGIitDDkk7/se7m8TB+YkuorIdia", - "G7iqos/1v4dt8hPNK6NkMKma8Fzndmpfvgfs+qr4HrY48t6oVwPYnl1By9NbQBqMWfrrTypIYH1PtVL8", - "o3rZ2sIDduosvkt3tDWuKMMw8Te3TKtoQXsptzkYTZCEgWXMblzEYxPM6YE24rukvG8TWLZfBgnk/XAq", - "pnwJy/5VVOei2Ee7l0BzT7y4nMnH6eR2kQCx28yNuAfXb+oLNIpnjDS1nuFWYM+BKKdlKcWK5omLlxi6", - "/KVYucsfm/vwik+sycQp+/Lbs5dvHPgfp5M0ByqT2hIwuCpsV/5hVmXLOOy+Smy2b2fotJaiYPPrjMxh", - "jMUaM3t3jE29oihN/ExwFF3MxTwe8L6X97lQH7vEHSE/UNYRP43P0wb8tIN86Iqy3DsbPbQDwem4uHGV", - "daJcIRzg1sFCQczXrccafNxwdfVu5fHYuAlswEydXz0SQaVGGMi7TCR+CBsi3sP6cEmvMQNmXLHhLj8m", - "cjwXY0TvXEh7IWTrjnEPIKMxSr+d9GZkeYvHgZBwXyazK7MdEyvf/bL4xRz6o6PwRB8dTckvufsQAIi/", - "z9zvqMYcHUWdlFFrmeFFaAzjtIAH9WOOwY34tHo+h/U4OeBsVdQCrBgmw5pCbbCRR/faYW8tmcNn5n7J", - "IAfz0/EYW0C46RbdITBjTtDF0IPHOpa1sJU5FRG8G7qNb20NaeGd4io/WJ9v/wjxqkA/aaJylsYjSPhM", - "Ge7DbcymaUyw8YBR2IxYsYEQYF6xYCzTbExq1g6QwRxRZKpodtgGdzPhjnfF2T8qICwzytOcgcTrs3Oj", - "eh0ER+3JvXHzmxvYusOa4W9jbtnh1vImp122lp1uwue168ovNFZb6MBA83DGHuPeESTu6MPfcvhobtmO", - "9BynLo2p0O4ZnfMJDswRrbjOVDKX4leIX9jopork2/D+VYbW5F+BxwIEuyyl9l03heOb2fdt93gVfGjj", - "b61y+0XXxc1ucpnGT/VhG3kT3VrFs0I7JA/pemEgQ/sFwgBrweMVxNxitRUf5ES5PU822UTrIVv8VIZP", - "Rk/s+M2pdDD3ntnmdD2jsVI0RuUyMAXb2wrH0oL4zn4DVJ1Kwc5OgkDxui2zCetKkI2ro5/89obqk512", - "tOLU6ElIUaGGNLXRELkSkWEqvqbcFis3/Sy/cr0VWE+/6bUWEtNNqnjkWAYpK6JW36urd1najxLK2ILZ", - "OtyVgqDQsxuI2JyWSEWuWHadIMSh5nxOTqdBtXm3GxlbMcVmOWCLh7bFjCq8Lmuve93FLA+4Xips/mhE", - "82XFMwmZXiqLWCVIreKikFfHP85ArwE4OcV2D78i9zHyU7EVPDBYdELQ5OnDrzBux/5xGrtlXR31XSw7", - "Q57tY8LjdIyhr3YMwyTdqPEg77kE+BWGb4cdp8l2HXOWsKW7UPafpYJyuoD4M5BiD0y2L+4mRg108MKt", - "0wGUlmJLmI7PD5oa/jTwtNywPwsGSUVRMF24+EAlCkNPTRVnO6kfDuud+bJUHi7/EcNsy4ia/BnUGFoM", - "PA3DYOgf0BUconVKqM0xmrMmAN6XBSXnPoUx1umqy3NZ3Ji5zNJRlsR4+DkpJeMazSyVnid/MWqxpKlh", - "f8dD4CazL59E6l21S8LwwwD/5HiXoECu4qiXA2TvZRbXl9zngieF4SjZgyaVQ3AqB+OB45GfQ+Gnu4ce", - "K/maUZJBcqta5EYDTn0rwuM7BrwlKdbrOYgeD17ZJ6fMSsbJg1Zmh358+9JJGYWQsboEzXF3EocELRms", - "8GFefJPMmLfcC5mP2oXbQP95w6y8yBmIZf4sRxWBwHG6602+keJ/etUkWEf/rX3w2LEBChmxdjq73ScO", - "ajzM6tZ1E9u4NPw2gLnRaMNR+lgZCPK3Ufx1n88RltQFye55y+D48BcijQ6OcvzREQJ9dDR1YvAvj9qf", - "LXs/OornOY6a3MyvDRZuoxFj39gefiMiBjBfHLGOW3JpGCIGyKFLynwwTHDmhpqSdiG6Ty9F3M0zsnhQ", - "a/wUXF29wy8eD/hHFxGfmVniBjaPIYYPe7sQZ5Rksvp7EE5PyTdiM5ZwOneQJ57fAYoGUDLSPIcr6RUa", - "jUYF7A1LCWjUjDqDXBglM6w9FNrz/zh4Nouf7sB2xfLsp8bL2rlIJOXpMhqMPDMdf7YyeusKtqwyWs5k", - "STmHPDqc1W1/9jpwREv/uxg7T8H4yLbdQrd2uZ3FNYC3wfRA+QkNepnOzQQhVtvZuersD/lCZATnaWpn", - "NMyxXzE6Vqkz8owahy0q7cJj8cm5y2s0ZzlGe8b9xtgykVQP5OnCsuq+jJEZB6ucK2tmsKODJJQVeDEr", - "WpQ54MlcgaQL7Co4dLpjpjYcOSiMQVRpPmFLzIshiK4kJ2I+D5YBXDMJ+XZKSqqUHeTULAs2OPfk6cPT", - "06jZC7EzYqUWi36Zr5ulPDzBJvaLq+VkKw4cBOx+WD82FHXIxvYJx5Wu/EcFSsd4Kn6wD2TRS2pubVu2", - "si6xeky+wwRLhohbGfXRXOlzFbfzdlZlLmg2xRzKl9+evSR2VtvHVqq3ZTMXaK1rk3/UvTI+j6lPIDWQ", - "oGf8OLszhphVK53UVS5jKRBNi6YOJ+uE9qAdL8TOMXluTah1HIudhGAmbllAFhTVtEo8Eof5j9Y0XaJt", - "siUBDfPK8fVePTtrPDfBI8e6yBIybAO3K/lqK75OidBLkGumAB/+wwraWRfrFKTONu6zMLaXJyvOLaUc", - "HyCM1iWVDkW7B85Ksj6oIApZB/EHWqZs2edDy99eYK/4k49OLd2O19/n8POZvMkr51xIKRecpVhxISZJ", - "Y4a4cW7KEcUp4v5FNXEnNHK4ohV86yfHDouDNX09I3SI67v8g69mUy112D81bFxltwVo5TgbZFNfUNs5", - "xBhX4IpmGSIK+aSQkaCm6HuLOoDiQDLC5E8DFs4X5tsPzv6NuTeuGUdLl0Ob08+syypXDD3TnDBNFgKU", - 
"W0/70ZB6Z/ocYzLIDDbvj1+KBUsv2ALHsGF0Ztk2NLU/1JkPVHWBoabtM9PWpeivf26Fg9lJz8rSTTpc", - "bj0qSOoNH0RwLG7JB5IEyK3HD0fbQW47I8zxPjWEBiuMWoMS7+EeYdQlu9ujfGt0S0tR2ILYh5vRPL2M", - "R8B4ybh3ocYviDR6JeDG4Hkd6KdSSbXVHUbxtEug+cA7C3wIbX3wtx2qW6DAoATX6OcY3sam2vgA46gb", - "NBI/5VviD4Wh7kCYeEbzOkI7UjscpSonRNmY1k418RjjMIw78S8zW+ja+0qw7o5FPw69iYZSIc6qbAE6", - "oVkWy6D1DX4l+NW/RYMNpFVd66p+hNhOhd6nNjdRKriqih1z+Qa3nC4ozx+hhvojZPUOY0Kf2Rb/jRV6", - "Gt4ZF5t98ONfH4idHZb/v/+YOSb1GppOFFsk4zGBd8rt0dFMfTNCb/rfKaX7V8G/i0e/HS4X7lGMv31r", - "Lo4wP3AvPt1eLXX6XowFF/jd51WqE0+2uRJeZb1yZhj1gJsX2bIO8L5hFPAVzQce3Ie+Enu/Wv/B0LP7", - "dDBLBNUuC5imZCcLGsysZGOFO96XvgtxKD7YhgffndfCrXUnQod9d9+3PHU2RqxhFoMeups50ZoNPtSL", - "5uoa9E2aNM9FOvrUu2HOTKfhrKGiKFw67UgM26oQWUjnYTQUQJxp2fDcSMg/6p7Rb6gYRb/IdXy0ls3i", - "UFOpRaNbwtS+BPTgeWDs1OFEgYnUYZa8YDnWQ/r3i9c/TIY3MtiB/pa6LL5Ro/LQxtSPpbrksRAtfFTD", - "thPB85gSMZ2oASM35uqJnwZX9Db64YU12o0Byaa0OaT1y7GD9whgIWJJ6vspRybNRni0B3TQbKzlJSFd", - "xOjh+9VQihVf5we/h/WEXHje1JWRgBUTlQ+r9I8bvK3H/upSeLXqBg0wtuiToc/tjhx0nl66+td2mc7Y", - "9v1PNryCANdy+ztwpfY2vVuUKqLGWrtz04TUpVNHlVJtibtjamDFyi05pc8bwa3M0KKlXvmqHlk9HyPn", - "9/DxcTo5zw6ShGMluyZ2lNh9+pItlhorfvwVaAbyzZ6KJk0VEzxipVCsqWCcm8FcCuklDnc89hWRIWAW", - "VmTpj+Wjy1eQaixb3UTNSoBD6rOYybw398/KJsN3Qf3YyhU02VXFpF+reo/w3ku8FiQPtHV+j8fX7Dir", - "30bYp51rqpp0T52cC6Nffs/nkGJW9Z2J7v5jCTxIojb1BleEZR7kvWP1A0WsC3C4O6EBaFceup3wBPW5", - "bg3OUB6Ma9jeU6RFDdHCw/Xr3JskHkcMWN+2z0E/5CFy4aBM1ZSBWPCx/i6Ve1NcZzBnfJC28YZzeZI0", - "F0eTynHHlF5+vMFcputBaWNR1h/KhdevuT5sWHiOJe6Vi3yldeLy0PxGzvuFt9Yu8TmmJaydoj4FOij/", - "m89BamfJ2bWrP4JYsS7oNZWZb3EnSeXs3cTiQM/rmVnzMqsfvRQp5YKPHNNcGDEiGXop2n4MVUcS31M2", - "5LtJAIZwzUFKyGpfZy4UJFr4l1y74NiFChvXfiMkqMHyaRa4wdT5b5vaAFhGkmKqfOrC2cMFEgkFNdDJ", - "IIP/8Jy7kP3MfvdJPHwZwb2m45pe99ez9m/ymOohMaT6OXG35f7kIDexIjPOQSbepdxN58/bGR0xb29W", - "pfaCDg9GbWkfnXtrByuJGmDT/io7OkKQ/eIatidWCfKFwP0OhkBbycmCHiQs7mzyndrVVQzuxZ2A93nz", - "UJZC5MmAF/O8X4OgS/HXLL0GzCFav10xst+99tkwk5D76Dyrw1TWy63PuV+WwCF7cEzIGbevBX3ESrs8", - "aWdyfk/vmn+Ds2aVLQvirOXHVzz+7AoLdshbcjM/zG4epsCwultOZQfZk+F+w4di6dZY3KNdBfh4rFbe", - "jyHpSCUBUVkoYjLJhXVFP8ODHjMcYW6TIAkPRihQ4lzYROUiFqR/k/wrZqg4psLJECANfEwakBoKN3gU", - "AS48b09KUffZJ80UcyKhiQ65afZQl5DTsmY1pNF3Z65nafO7uZAQzojRpzZTcP2iDdPw4n9mTEsqtzfJ", - "8dlGVcx6MojlvXGWdYhls5AmzLKPwzwX6wSZVVLXyYmptqadal/Gvmhj08+c6hkEAZtUOUFtS5Y0I6mQ", - "EtKwR/wht4WqEBKSXGD8Ziy0ZK6N3F3g601OcrEgokxFBrbeVJyChuaqOKcoNkEQLhdFgaUdTANg+wR0", - "PHJKc6daB3GCotbe8gx+8y9NH5uSoskKZxed2CCFgacIoFwWOIch27gPLxKOzWfUtSXGefOcbZBuQMaO", - "/JxoWcGUuBbdGvvu4FMJpGBKWVBqWlqzPMeMEGwThFTUEUlx1A6IvecYL71iGFTXzg5ipeHS3Hl1ypSQ", - "B1yE+cyIXkpRLZZBgvoaTq/yysopxOEoP6oK4x7xaaiZ4gkphNJO07QjNUtuYknvp4JrKfK8bZSyIvrC", - "Wdpf0c1ZmuqXQlzPaHr9APVaLnS90mzqEyd0o36bmWQnNWH7Ak6QBtT+VN+2HcbAOqIdzSA7LK5nFN9n", - "ZQ7AfL+fg+63uZ/1F9ZdV5uZxtWYM06oFgVL42fqjxVGOxj8GmNR0WSEtjarTR+DzfCwh5dVHTWFLLKP", - "ZuA0WlzyjDhG4KJHkN2Y/6IE3h2XzMExmoGLss9cnBSVpIOyXgcAhNTmNNCVtAVdQ0ms5ipiYXOgYOxL", - "F9CRtwqGGN4ONjPCnQOl4VZA9cKaawDvW+PD1CaNtCHSM7Hx3x80WSVvBPzH3VTeYh5DsZsXDWlJG73p", - "M1ANcIR4ivydgY6XmM9iNjbcsS6+PfKGDwAYDoBswTAqDPJQMOaU5ZAlsdqt57WNahpo2u7NZbt2Pd7L", - "lpOntPKlU83YlQSXEcmK+LLt/yqpISVRN+9bknkGG7APtn4FKWxN1Gngf4HclkztGANEmeSwglZcqEvT", - "VKGoyVbg+6q6M8kASvRGdm1ksYDH8C7vGE7c2pMgZG4MdqOWFItYu1Nkj5kkatTZ8MQeEzX2KBmIViyr", - "aAt/6lCRo20GNEc5gqqejpB4PXLsND/aEd76Ac58/5go4zHxfhwfOpgFxVG3iwHtDYCu1NCp5/H45zAH", - "We1gwdmy2hFrSbzhG6qkaz5skOyTfKNujdwnJniA2G83kKJU4/QdyJzGM+CkcOmMkNo5QGa1AtMlYm1f", - "AidcBCVq11TVqkqTHNX/YCfGRow7bfoGTuUmTPn2O0twMKI6WRIHFQlZ0+nNzfOf5STuPIiD48VoRIF7", - 
"17vD/uWp26kd2EBUeUa42U8j+2ORV3eLOS4+JbPKD5TnYm1rzoZ66HPwflBLfd4F5MRyVl/LPhx76vL2", - "dk0dLHiIUtAtERL/MVrnPyqas/kW+YwF33cjakkNCTnHq40IcOHdZuLd4tXUA+atLcJPZdfNxo4ZDLc1", - "owRAm4vcFwcTpKDXEG4DBjtY/plqwzhVNUPLhbmyO9vZx4JbvM+9VNAs1PQxA+y2xR18TnDT+383j1zD", - "qXzixjKnqa8w7EqctfkMVhH3xKWXUOx+Bd3na54E6srkDdFKnzYju4HJ9EDWFXtaNFS+qQV2r2Jzr3LV", - "rZYx0vLbqdGz4/34qKXc9S6MjbrpAR3Wed0Hflj29tPgP5qceWgZY8D/veB9oNB1CK+taf0JsNxKrROB", - "1VqrZ2KTSJirfQEm1lxt1HnZJOXxJlbGUwlU2Yib89dO8WxyDzNuFGEbE1r7NOtRMpgz3jBLxstKR/QY", - "TEHMtwHCQqM/onXAhTYkJRhhckXz1yuQkmVDG2dOhy0JG5aY8Y4O1zdiwqjv1P4ATDU6HD68bszoYTNz", - "gdsidjZcU2nKMyqzsDnjJAVp7n2yplt1c49S7RzY51OigTTTTgcSeJeQtC0g+dY5hW/p76kBpHfo+Bnh", - "sMG44Iizxpp2tBjwz/Rh+EM4bAq6SXKxwOfBAwfCJZ1GD59VAQVHM7iVz8at28+j2K+wexqst+EYkRY4", - "65gpdp/717iVqEb+yJneefKtjbL7XtvG3dqD6ZHKF03wvyWW/nmMPbF3WZXCZ/Ze2PRPVTztQbCJMOAf", - "atvFB3YRwyBcfobQCD6+XGI70iL2kN9aBhK0GKgd4f2gmlB2mrrwrL4prWdqsEiZujQIB1rarH3e30sD", - "4KEpRLmz3p62Dpkx4xxSY3J34oOkFGWSjon5tCV5MucmcJC2YRygj8AJMLDuOjxG1UWqWgnNWtWqDi2z", - "OVgta5+3q0x3Kf1DZqIBjt52QYg58jI8wtY4hi95amPKtPvGrG0Gq5kEoURCWkk0E6/pdn/ZwoFU8Bd/", - "Pfvi4aOfH33xJTENSMYWoJpyAp2yf01cIONdu8+njQTsLU/HN8GnFbGI8/5H/6iq3hR31iy3VU2u4F7R", - "w0Psy5ELIPbSt18H7kZ7heM0of2/r+2KLfLOdyyGgt9+z6TI83g5l1quijhQYrsVuFCMBlKCVExpwwjb", - "HlCmm4hotUTzICb1Xtk0UYKn4O3HjgqYHgi5ii1kKKAW+RkmbXBeIwKbMne8ynp6dq3L6WnWQodCI0bF", - "zICUonSiPZuTGET4gkgGL2ud4RMt4kGMbM1sbbRsjBBd5Hmc9MKC+7u5fbsYtI5zerOJEfHCH8obkOaQ", - "f2I4IclNOElj2v/d8I9IhpU74xr1cn8LXhHVD3a8OT7rxT3U2UVGgdbPthEhDwRg4LVt651k8FAsyDAu", - "rZcA/QnegdwVP141juW9z0IQEt9hD3jh89mmXf2SwYHzmVN1v6qREizl/RAltJa/70WuZ731RRJskTOa", - "aA3KsiXRFwuD59bqWf2KeUAr6T12lkJoYjTTPI88krZ2HDxTIeEYlUCuaP7pucYLJpU+Q3xA9nb4aVT4", - "UjZEskWlulkCzpd01NzBq9i7m5q/wYfZ/wFmj6L3nBvKOeF7txkad2huw6vntTcaOFnjmDbI6uGXZOaq", - "6JQSUqa6zv21F07qh6Eg2dwFtMJG73mJum+dPwl9CzKe+0gc8kPg3qp99g7C5oh+ZqYycHKjVB6jvh5Z", - "RPAX41Fhce8918UtK67cLJ9TkJnxwHxO/bLlY5dnU5uYS6dS0F/n6Nu6hdvIRd2sbWwystGFW66u3unZ", - "mBxi8SIrpjsmMbuTaisH1Vr5DdKXWRy5Mdy8MYr5aSihtU3aPJB0v7MfFcv3Bqy0Sih8nE4WNoMRFgn4", - "2RWF+rR3qYdgII2YW/pt0sVYxETW2po8mCrI+DSiLoLrFklmj68a00oyvcWC4N6Axn6O5mP6rs7t4XLD", - "1L40d/dpcQ3cx3s0mUAq5W/X7wTN8T6yLj5ubiGRH5Nvbep+d1C+vjf7V3j8lyfZ6eOH/zr7y+kXpyk8", - "+eKr01P61RP68KvHD+HRX754cgoP519+NXuUPXryaPbk0ZMvv/gqffzk4ezJl1/96z3DhwzIFlBfs+Pp", - "5D+Ts3whkrM358mlAbbBCS3Z92D2BnXlOeYtQ6SmeBKhoCyfPPU//R9/wo5TUTTD+18nrvDaZKl1qZ6e", - "nKzX6+Owy8kCn/4nWlTp8sTPgynuWvLKm/M6Rt/G4eCONtZj3NQ6+Zf59vbbi0ty9ub8uCGYydPJ6fHp", - "8UNXs57Tkk2eTh7jT3h6lrjvJ5g490S5mhgn9Vutj9Pet7K0FTPMp0WdHdD8tQSaY4Id80cBWrLUf5JA", - "s637v1rTxQLkMb7esD+tHp14aeTkg8uc8HHXt5MwMuTkQyvBRLanp4982Nfk5IOvib17wFY9ZBdzZpAa", - "dXl+B9qlW7K2h0iuDvQ0uNGnRGFBDPNTKZkw53VqLt8MMC4Aw9skVgbQsuKpdRbbKYDjf1+d/Sc6zF+d", - "/Sf5mpxO3YMDhQpNbHr74romtPPMgt2PU1TfbM/qbCaNc33y9F3MyOSCRctqlrOUWDkFD6qhwuAc1SM2", - "fBItihN7T6Cjr+b6hpOfJl+9//DFXz7GpMmebFwjKUjw0fL6Cl/SGJFW0M3XQyjbuAh0M+4/KpDbZhEF", - "3UxCgPse1EjWM/9AyFd2D2MTg6jFf794/QMRkjjt+Q1Nr+vHUf41XPMCMHwMZ3oOQewu1hBo4FVh7ij3", - "yqpQi7Kd2btG83ssg4qAIjt5dHrqeajTUIIDeuLOfTBTx6zVJzQM0wkMlf2n8IrAhqY63xKqgjgJjFr0", - "JYs7T9hEmbQC6XeaRvszui2JvkI49DV+pPSE0DTfA99lp7xrCx0u5Kc0l+z+5+89ZEQhiKapDLfW08if", - "u/vfY3f7UgkphTnTDOOymyvHX2ctIJ0smm89uAOJRo7J30SFsqPRCioNNQsUEtlZfWFan4ib0+VFCgLp", - "mqdD+OXoqLvwo6Mm7G8Oa2SylGPDLjqOjo7NTj05kJXttFO38oOPOjuHDNfbrFd0U0dNU8IFTzgsqGYr", - "IIHC+eT04R92hefcxqkbYdkK9R+nky/+wFt2zo1gQ3OCLe1qHv9hV3MBcsVSIJdQlEJSyfIt+ZHXDwGs", - "0oPySZ/9/civuVhzjwijr1ZFQeXWCdG05jkVDwp67eQ/vQxHjaCNXJQuFMbCoIhqZVqfBZEvJu8/eh1g", - 
"pO6xq9nJDEvbjm0KocIyrJ2gZ0KdfEDb+uDvJ85BGv+IPg6rPJ/43IsDLW2WrfjHllb0QW/MQnYPZ9oE", - "46VUp8uqPPmA/0E9OFiRrcZxojf8BGNCTz60EOE+9xDR/r3pHrbARPMeODGfK9Tjdn0++WD/DSaCTQmS", - "mesIE2W6X21C4xMsEb/t/7zlafTH/jpayVwHfj7xZpiYSt1u+aH1Z5um1LLSmVgHs6ADw3rf+pCZj5Xq", - "/n2ypkwbIcnlEKVzDbLfWQPNT1wlsM6vTfGN3hesKBL82BGrSmGTCLU12rd0fdl6CyptsoxvBBoqhhju", - "Jpkxjlwo5JKNWdJ+7KtIPd54uQQbf+s9uxEZVAsyk4JmKVXa/NEUBmjrxh9vqX91c3ucR/x2CCaaG/rp", - "KA0/Od7rzMFxxwiZwb6Q8+d+wuYB2m8umPUg+oZmxGedSsgrmpsNh4ycOfG/hY3fWqj6/FLQZxZbPpmc", - "8Y0/fIpQTMHXUhBlPGlOUNxyjFBhtEjDABbAE8eCkpnItq7+4ETStd7YHB1d5nZC2zdG2xBJJS3U0Mc7", - "sFL+vk2T+yySfxoC/zQE/mkq+tMQ+Ofu/mkIHGkI/NNM9qeZ7H+kmewQ21hMzHTmn2Fpk62A24j1jt5H", - "m/oUNYtvZw9jupbJWs9IsRQG08eEXGLqF2puCViBpDlJqbLSlUtTVGB0J+Ygg+zpFU9akNgYSjPx/ea/", - "Nnj1qjo9fQzk9EG3j9Isz0Pe3O+L8i5+su9LviZXk6tJbyQJhVhBZh/DhvnRba+9w/6vetzXvcIK+Aoe", - "c+v4VGVEVfM5S5lFeS74gtCFaAKvMSErF/gFpAHOlqciTE/dQxXmXkfbXemkcW9L7n0J4LzZwr0hBR1y", - "iUcTGMI7MJTgX8bEEfyPltJvms3qtox059g9rvonV/kUXOWz85U/upM2MC3+txQzn5w++cMuKDRE/yA0", - "eYGPCm4njrlEoWm0StdNBS2fKMab+5rA5DDQF2/ROsT33XtzESiQK3/BNnGrT09OMHPYUih9MjHXXzum", - "Nfz4vob5g7+dSslWWN8drZtCsgXjNE9c4GfSxKY+Oj6dfPz/AQAA//82PehVOSYBAA==", + "H4sIAAAAAAAC/+y9e3Mbt5Io/lXw426VYy1JyY6TPfGvUnuVOA9t7NgVKdmH5ZuAM00SR0NgDoChyPj6", + "u99C4zGYGQw5lGg72Zu/bHHwaDQajUY/344ysSoFB67V6OnbUUklXYEGiX/RPJeg8L85qEyyUjPBR09H", + "55zQLBMV16SsZgXLyA1sp6PxiJmvJdXL0XjE6QpGT8Mg45GEf1RMQj56qmUF45HKlrCidlqtQZq+r88n", + "/302+eLN28/+9m40HultacZQWjK+GI1Hm8lCTNyPM6pYpqbnbvx3+77SsixYRs0SJixPL6puQlgOXLM5", + "A9m3sOZ4u9a3YpytqtXo6VlYEuMaFiB71lSWFzyHTd+ios9UKdC96zEfB6zEj3HUNZhBd66i0SCjOluW", + "gnGdWAnBr8R+Ti4h6r5rEXMhV1S320fkh7T3aPzo7N0/BVJ8NP7s0zQx0mIhJOX5JIz7dRiXXNp27w5o", + "6L+2EfC14HO2qCQocrsEvQRJ9BKIBFUKroCI2d8h04Qp8u+XL38kQpIXoBRdwCua3RDgmcghn5KLOeFC", + "k1KKNcshH5Mc5rQqtCJaYM9AH/+oQG5r7Dq4YkwCN7TwevR3JfhoPFqpRUmzm9GbNprevRuPCrZiiVW9", + "oBtDUYRXqxlIIuZmQR4cCbqSvA8gO2IMz06SrBjXnz9p02H964puuuBdyYpnVEMeAagl5YpmpgVCmTNV", + "FnSLqF3RzZdnYwe4IrQoSAk8Z3xB9IarvqWYuY+2EA6bBKKvlkDMF1LSBUR4npKfFSAl4VctboAH6iCz", + "LX4qJayZqFTo1LMOnDqxkIgOpKh4ilER/ODQ3MOjbN9jMqifcMR3u78ptnCf2lBfssXVtgQyZ4W5L8nf", + "K6UDAVcKt30JRJWQGd6bEzOMQb5iC051JeHpNT8xf5EJudSU51Tm5peV/elFVWh2yRbmp8L+9FwsWHbJ", + "Fj07EGBNnVOF3Vb2HzNe+qjqTfIueS7ETVXGC8ris2Bo5eJZH2XYMftJI80gz4PcgPvjxrraXDzrY6m7", + "e+hN2MgeIHtxV1LT8Aa2Egy0NJvjP5s5khady99HVrwwvXU5T6HWkL9j1yhQnVv56bwWIn5yn83XTHAN", + "9iqMxIxTZLZP38aSkxQlSM3soLQsJ4XIaDFRmmoc6Z8lzEdPR/90Wgt6p7a7Oo0mf256XWIncxlLMIxv", + "QsvygDFeGeERRa2eg274kD3qcyHJ7ZJlS6KXTBHG7Sai3GU4TQFryvV0dNBJfhdzh9cOiHor7CVpt6LF", + "gHr3gtiGM1BI+07ofaAakiJinCDGCeU5WRRiFn745Lwsa+Ti9/OytKgaEzYnwPA+hw1TWj1EzND6kMXz", + "XDybku/isW9ZURDBiy2Zgbt3IDdjWr7t+LgTwA1icQ31iA8UwZ0Wcmp2zaPByGXHIEaUKpeiMFfgXjIy", + "jb93bWMKNL8P6vynp74Y7f10hxK9QypSk/2lfriRT1pE1aUp7GGo6bzd924UZUbZQUvqokbwsekKf2Ea", + "VmovkUQQRYTmtodKSbdegpqgJNSloJ8VWOIp6YJxhHZsBHJOVvTG7odAvBtCABUkbUtmVry6ZXpZi1wB", + "9dPO++LPTcipPSdmwykzsjEpmNJGGMLNVGQJBQqcNCgWYiq6E9EMoIUdiwgw30paWjJ3X6wcxzih4f1l", + "Yb3nTT7wkk3CHKstarwjVHdm5nsZbhISq3BowvBVIbKb76laHuHwz/xY3WOB05Al0BwkWVK1TJypFm3X", + "ow2hb9MQaZbMoqmmYYnPxUIdYYmFOISrleXXtCjM1F1u1lotDjzoIBcFMY0JrJg2D2DG8QQs2Bq4ZT1T", + "8g3NlkaYIBktinGtlxDlpIA1FERIwjgHOSZ6SXV9+HFk/1DCc6TA8EENJFqN02lMydUSJMyFxIeqBLKi", + "eDmtzPOoLJp9AnNVdAUt2QkvS1FpA2P0crl45lcHa+DIk8LQCH5YIz7448GnZm73CWfmwi6OSkBFC+NZ", + "UeU1/gK/aABtWtdXLa+nEDJHRQ/V5jcmSSakHcJe/m5y8x+gsu5sqfOTUsLEDSHpGqSihVlda1EPA/ke", + "63TuOZk51TQ6mY4K0y86yzmwHwqFIBPajZf4H1oQ89kIOIaSauphKKegTBP2A+9sgyo7k2lg+JYWZGX1", + 
"ZqSk2c1BUH5dT55mM4NO3jdWVee20C0i7NDVhuXqWNuEg/XtVfOEWJ2PZ0cdMWUn04nmGoKAK1ESyz5a", + "IFhOgaNZhIjN0a+1r8QmBdNXYtO50sQGjrITZpzBzP4rsXnmIBNyP+Zx7CFINwvkdAUKb7eGGcTMUquq", + "z2dC3k2a6JgmagU8oWbUSJgat5CETaty4s5mQj1uG7QGIkG9tFsIaA+fwlgDC5eavgcsKDPqMbDQHOjY", + "WBCrkhVwBNJfJoW4GVXw6WNy+f35Z48e//r4s88NSZZSLCRdkdlWgyKfOD0fUXpbwMPkwwmli/Tonz/x", + "BpHmuKlxlKhkBitadoeyhhb7MLbNiGnXxVoTzbjqAOAgjgjmarNoJz/Zfu/Go2cwqxaXoLV5BL+SYn50", + "btiZIQUdNnpVSiNYqKZRyklLp7lpcgobLelpiS2B59b0ZtbBlHkDrmZHIaq+jc/rWXLiMJrD3kNx6DbV", + "02zjrZJbWR1D8wFSCpm8gksptMhEMTFyHhMJ3cUr14K4Fn67yvbvFlpySxUxc6MBrOJ5j4pCb/jw+8sO", + "fbXhNW523mB2vYnVuXmH7EsT+fUrpAQ50RtOkDobmpO5FCtCSY4dUdb4DrSVv9gKLjVdlS/n8+PoSAUO", + "lFDxsBUoMxOxLYz0oyATPFd7tTneGthCpptqCM7a2PK2LN0PlUPT5ZZnqEY6xlnu1345Ux9RW55FqjAD", + "YwH5okGr71Xl1YcpC8UDlYDUYOo5fkaLwDMoNP1WyKta3P1Oiqo8Ojtvzzl0OdQtxtkcctPXa5QZXxTQ", + "kNQXBvZpao0fZUFfB6WDXQNCj8T6nC2WOnpfvpLiPdyhyVlSgOIHq1wqTJ+uiulHkRvmoyt1BNGzHqzm", + "iIZuYz5IZ6LShBIucsDNr1RaKO3x2jEHNaukBK5jORf1GUyRGRjqymhlVluVRIvU/VJ3nNDMntAJokb1", + "uDkEVw3byk63pGsgtJBA8y2ZAXAiZmbRtZcDLpIqUhrZ2Yl1TiQeym8bwJZSZKAU5BOnz94Lr29n7x+9", + "A3m4GlxFmIUoQeZUvp8V3Kz3An8D28maFpURz3/4RT38oyxCC02LPVuAbVIb0VbfdZdyD5h2EXEbopiU", + "rbbQngQjYhumU4CGPmTfH3u9298Gs0ME7wmBa5DoUfNej5af5D0QZYD/PR+s97KEqpwYMbBX/WAkV7Pf", + "nHLhZcM9M4QJCqr0ZN+VYho19CZmqREXT90iOHCPPPmcKo1iIGE8R/2tvQpxHitbmilGBzqV4ZS9rzEz", + "6S/+IdadNjPXO1eVCq8yVZWlkBry1PLQZt0714+wCXOJeTR2ePppQSoF+0buQ2A0vsOjUwTgH1QHC7Wz", + "eXcXh14HRnzZHorlBnw1jnbBeOlbRYiPnWp7YGSq3gNLbky16G0mRAEUVaZKi7I0HEpPKh769WHw0rY+", + "1z/Xbbskac1AVlLJBSg0Mbn2DvJbi3SFtq4lVcTB4f0TUOFlXeS6MJtjPVGMZzDZdV7wEWxaxQfnTse9", + "KheS5jDJoaDbhLeF/Uzs5wMJw4+NBFLrD4SGyQytiWkaqc+E9ze926wCp1IpwZvgF5KZc26eUTWpud53", + "nzQHnDbFNx2xPgizIBhJOvDjIbIsPSVGxLt/LbQhK0d0uBp3K91zLT3YC7O+FwTiuJNaEdCe/b9AubmD", + "AHbU+beg+hZeT32sZfeo//Fub1yYrausddskr4hevryHMfbxoB5bxCsqNctYic/VH2B79Nd7e4KkrwTJ", + "QVNWQE6iD/YlX8b9iXVDbo95t9f8IHVrF/yOvjWxHO+Z1QT+BraoNnllIxoibdUx1BGJUc2FSzlBQL3X", + "vHnxxE1gQzNdbI1gq5ewJbcggahqZr1WuiY0LcpJPEA6Zqp/RmeQT5rDd3oIXOJQ0fJSnof2tbUbvqvW", + "k6uBDvfKKoUoEvrP9onvICMJwSB3IVIKs+uMFsWW6BA24ympAaS7INAbI8gzD1QDzbgC8l+iIhnl+MKt", + "NAQhTUiUfFBYNjMYcTPM6VxVawxBASuwr3n8cnLSXvjJidtzpsgcbq3LDceGbXScnKAq7pVQunG4jqDt", + "NsftInHpoK3SXLLu1dbmKfud3NzIQ3byVWvwYOA0Z0opR7hm+fdmAK2TuRmy9phGhjn44biDzHdNl7DO", + "unHfL9mqKqg+hqES1rSYiDVIyXLYy8ndxEzwb9a0eBm6vRuPYAOZodEMJhlGCQ4cC65MHxtYaMZhnJkD", + "bANHhgIEF7bXpe2056Vd+y2z1QpyRjUUW1JKyMBGyRkpVYWlTokNmciWlC/wBSRFtXCuznYcZPiVspow", + "WfHOEIeKYnrDJ2jCUMkwNTRb+mhLI4QBNS/btv3DPtZuaQDFXkaDLu1oe9r2oKTJdDzqffgbfK/rh7/F", + "WzNk9K7GxIZ8GCGthmag9QzxaWSlLhLjbTSHzxDD+7HS1EOnoOxOHDmF1x/7/MIvq7IstkcQkuxAREIp", + "QeGVFqsBlf0q5uQFy6Q4LxYi3HlqqzSsusYb2/XXnuP6011ewIIXjMNkJTgknvQv8esL/DhY7Wiv4Z4R", + "USA6aMD2w6eBhNYCmpMPIen7bhKSTPvsty2d6lshj2VltwMOflMMsFzvdetwU97Vvk6LImGStuqHDhdR", + "4+AUziShSomMoaB4kaux8z63Vmzr1t5C/6sQGnWEA9wet2V7jcKwrCIfipJQkhUM1fyCKy2rTF9zipq+", + "aKkJZ0GvHOhXC3/tm6T10Ak1sRvqmlN0FA36v6Rj0BwSeqhvAbx2WFWLBSjdemDNAa65a8U4qTjTONfK", + "HJeJPS8lSPTYm9qWK7olc0MTWpDfQQoyq3TzybGqlCZKs6JwhmAzDRHza041KYAqTV4wfrXB4bwfiT+y", + "HPStkDcBC9PhjGsBHBRTk7Sn43f2KwaVOJwsXYAJxlrYz97juc4NMTJrbySt+N+f/NvT1+eT/6aT388m", + "X/zL6Zu3T949POn8+Pjdl1/+n+ZPn7778uG//XNq+zzsqWBwB/nFM/dGv3iGD7EoTqQN+x/BILNifJIk", + "ytihqEWL5BPMl+EI7mFT76eXcM31hhvCW9OC5YYXHY182tdU50DbI9aissbGtdR4HgEHPofuwapIglO1", + "+Ot7kefaE+x0uIm3vBVj4DijOjqAbuAUXO05U261D7775oqcOkJQD5BY3NBRaoHEC8ZFMDa8fMwuxYFd", + "1/yaP4M5vgcFf3rNc6rpqT1Np5UC+RUtKM9guhDkqQ+KfEY1veada6g3gVQU1BxlkEpxCrpKr+X6+jUt", + 
"FuL6+k3HD6ErW7mpYi7qzllXTeannBi5QVR64pK4TCTcUpmyhfgUHy4aGnvvhMPKJKKySiyfJMaNPx0K", + "ZVmqdrKHLorKsjAoikhVuXwFZluJ0iIEjhlm7mJvDQ38KJxTiaS3/slbKVDktxUtXzOu35DJdXV29imG", + "4NUpDn5zPNDQ7baEwQ/f3mQU7fcuLtzK5ehUPinpImUzub5+rYGWSCEocKzwpVkUBLs1wgN9JAAOVS8g", + "xCIfsCUWsoPjenG5l7aXT+uVXhR+wk1txk7fawejqPg7b+CeyHpa6eXEcITkqpQ5Bn6vfIIBujBXjvcg", + "UGyBDwC1FJVZMpBsCdmNy2wFq1Jvx43u3tHF3cWe4TCFOiMXHDhnBn8Z5WbAqsypE2Qo37ZT3CgbDIGD", + "/gQ3sL0Stvt0YHawKBtdlGJF9R1dpN3orjXkGx9kN0Z7853flY8RdelIMO7Sk8XTQBe+T//RtgLAEY51", + "iigaeT76EEFlAhGW+HtQcIeFmvHuRfqp5TGeAddsDRMo2ILNigSb/o+uXcPDaqhSQgZs7aN6w4CKsDkx", + "r6OZvY7di0lSvgBzqZuLWChaoNP+NGnoR+lwCVTqGVC9U1/L4zQTHjoUyG8xaBqVJmOzBNiY/WYalSAc", + "bs0DD9/eto1zJJ7eyZ3KrgnyO4Lqu9dB0tO7PCIcwhP57Px9H/YkvBecf1pMnQiy/b4yOFxIcWt20wAo", + "fOpGTPAS3VOVogsYeh01TEUDU2I0LEA4yD7pJynviHlbrOnIGAMXYbtPDF6S3AHMF8Me0AzQcnH0c1sT", + "orMqvOTF1iN1VqBAHRxELelQ2bCz8cVhwKbZGEheC6sesCbW4qO/pMof/XwccfQ7SosfJ5XMrvx5F5H3", + "HdXd7Hj+mm6z9rHV58yACG56+Cx6PnWez5c3Gh+U+248ciEOqb0THKXoHApYWJzYxp7O6vxM9W4aOF7O", + "58j0JilHvkgZGUkmbg4wD7ETQqzGnAweIXUKIrDRso4Dkx9FfNj54hAgucsvRf3YeHdFf0M6WNB64xsp", + "WZTm1mc9VqvMsxSX3qIWeVouzjgMYXxMDCdd08JwUhd4Wg/SydWGb59WZjbn2/Gw70008KC5NaJ0ctAq", + "rTxzl/XFgrdfRvpVcNAaZmIzsZHRyafVbDMzZyIZr4Bx2qnDazPnPVBkJjboU4Q3nHVwPxi6fsg8YJEb", + "yIYppHLs1yc2WvAOA2S3IJ+iZoWk5/Rqgez6JNm7AdMjTveR3SdRCr0jgdRSYNZpwJ1GZ6+epSltdSWR", + "+rodh+ywIUwtxWr6DmdyJ3sw2lWeNnPdfV+nO+xPjubP6gdJ8tdVyt0nL6PtXNpci4ekZWyTQwOIHVh9", + "1RZik2htOi418RphLcWSDKPvGru6aFNQAGoCJg25enKTMktfX79WgDLDpe8W6Tlx9yjfPoy84SQsmNJQ", + "Gxe8k8uHt/2gOtE8tsS8f3W6lHOzvp+ECIKGNcdix8YyP/gK0HV9zqTSE7TMJJdgGn2rUJP2rWmaFoSb", + "/nZMWVPPwXIwQnQD20nOiipNyg6kH54ZiH4MN5eqZnhRMm69jWaYCj/poHuAbRLhsY7dOxH03CLoOf0Q", + "+Bl2sExTA5M0lNec/k9yxFq8cBdnSdByipi6G9qL0h28Noql7zLaSIiO3C6mu2w+nXOZ+7H3emP5iP4+", + "IcKOlFxLlBExHUAoFgvIfaY3FxRqs165fHqF4Is6l6D5fUf6wCmxWfwwCd+O/H3OPR36nNMb5USwKkYS", + "+vgxg5DX0XWYexAnWQC3mVtGh9cbKZKIix3jsUWkGf2wvL3jNp90Hb5quQvXPr12D8Nm4/YUQHP3rFLg", + "17f70Ha3y6Fu3Od03EgRu/uA4YBIcUyrSIDpEE0P56ZlyfJNy/BnR53egSQGinvdTPAtnCFbcoPtwU/T", + "sXhPrZ4H5nbE9s7YcYrP/FPzyLT+zM4j15wNmrlsA3kl0ZrU8Bbu5tMPD82Ba//hl0stJF2AswhOLEj3", + "GgKXcwgaopT0imhmHaRzNp9DbAlTd7HiNIDr2DvyAYTdQ4Jdc1l4W+6kzy6R7aGtegX7EZqmpwSl9Plc", + "XHXtkf7hEenWwmUTbdwdjIrJhAI/wHbyCy0q8xJiUtW+qc5A2LzWD6CJ9eoH2OLIe10+DWB7dgVVcT8B", + "UmjKuhI+qShL+APVqL6Ab+DGFh6wU+fpXTrS1rhSGv1Ho76hGvUkmkt5f8emdpExkA7Zq8u014k5W9Dc", + "ljah79silu+XfaInSDwVQ++Nu1xyIdPGXu8yoIUnfFzs6N14dD9/j9Q96UbcsxOvwtWc3AX0xrT2/4bT", + "14EbQstSijUtJs5Ppk/okGLthA5s7t1qPvD7Kn0qrr45f/7Kgf9uPMoKoHISVB29q8J25Z9mVbYEx+5r", + "yKZjd7pdqwqLNj+kzI49aW4x9XpLm9apdVP7TUUH1XnWzNOe4nv5pnPxskvc4eoFZfD0qi3S1tGr6dxF", + "15QV3vDroR2qZbfLHVZdKckn4gHu7SQWef/de6zeOIHr69drj9nanmIdpUJK/IQvnbqjp3OH16TPak3r", + "ezgkrvMlZjJNv7u4y3OKjNE5nNGjy4HfCtm4qFxUY9Jh7f0JiOYxYfGYNspfOSt8RyycEitC/rb4zfCG", + "k5P44J+cjMlvhfsQAYi/z9zv+I46OUkahpOqPsOyUJPH6QoehriI3o34sGoIDrfDxIXz9SrIyKKfDAOF", + "Ws8zj+5bh71byRw+c/dLDgWYn6ZDVBXxplt0x8AMOUGXfVGJwfl5Zct5KiJ4OwYfo2QNaeHV4yp4WDt7", + "9wjxaoV254kqWJZ2+uEzZVgSty69pjHBxoNtyGaOivX4lfOKRaObZupOJs/WQqJZkwhXyUzANX5nwrGA", + "irN/VBCV9cWbuHU5+6cQjtoRsNP6RTdwu2rw6C4Ff+9vIvRatV0Ko50m12fBDOgRkaozdWC8Qzxjh/nv", + "iFVwFOWvTwxsWzrX4b2UtfOdt7sItDMDe/bpLK79DyRXDtNu5rMhO83UZC7F75CWHdBImEjd4a3bDBXw", + "vwNP+ai2GVnwHKgLVtez7yOQ4bqFPlK5ty7BLzpUzbvLFZ7mE4dt9IFKg2i/+9UGKp1e3G1C30M1djxp", + "BtL0MDM8sJFbONby8e5ulNsTavNaNCLP0uc8DhQ9tePX59zB3AmuLejtjKYKHZn3ooEp2v6GY54WxHf2", + "G6RCagY7O4liGUJbZpP9lSBr61E3VfId33522sGvvvqRhxQXP+/G1lelUCIxTMVvKUc/QuxnOaDrrcD6", + 
"YZhet0Jigk+V9iHMIWOrpDL8+vp1nnU9v3K2YLakeKWA0Ll2eR7dQLaovKUiV8075CJxqLmYk7NxfWb9", + "buRszRSbFYAtHtkWM6rwgg4+EaGLWR5wvVTY/PGA5suK5xJyvVQWsUqQ8D5H0TN4ws5A3wJwcobtHn1B", + "PkGHYcXW8DB9wThhbfT00RfjXZWzEeNYJH4Xk8+Ry/tAhjRlo1e1HcOwVTdqOjJhLgF+h/77ZMf5sl2H", + "nC5s6a6g/adrRTk1CEnBtNoDk+2L+4uuHC28cGudAaWl2BKm0/ODpoZj9USTG4ZowSCZWK2YXjlPUSVW", + "hsLqMuR2Uj8c1tfzZdA8XP4jumCXiTf+R3hu0VVPhCN61f+I9vYYrWNCbcbWgtXxF75CLbnwmamxLlwo", + "B2dxY+YyS0d5FcMx5qSUjGvUGlV6Pvmbeb5LmhmGOO0DdzL7/EmivlqzBBE/DPAPjncJCuQ6jXrZQ/Ze", + "ynF9ySdc8MnKcJT8YZ3SITqVvb7iaf/ePrfjnqHvLV2bcSe9BFg1CJBG3PxepMh3DHhP4gzrOYhCD17Z", + "B6fVSqYJhlZmh37+6bmTRFZCpipd1AzASSUStGSwxvjS9CaZMe+5F7IYtAv3gf7jerd5sTQS3fzpTj4W", + "Iqty4p0W0ioZSf+XF3V+fDRu27jdlvZSyISe1mkcP7Bb6mH6wrYN3boD4rcezA1GG47SxUpPuIeN5wh9", + "Poa/Vxsku+cNVemj34g073iU9U9OEOiTk7ETlX973Pxs2fvJyXCX2bS+0PyaQM3d7pp29krTN7XVX4mE", + "9s5X8Qx+Yy5VSULDmrzLzJU6c2OMSbNU4oeXO44Tr3iwG3L6AHnU4Oc2bj4yf8XNrCNg+vlDs3psknzy", + "8D2KoaDkK7EZSkSta8vT0x8ART0oGagVxJV0quMmPSX2uvlEZGtGnUEhzEs1LoA12GvlT7QLBjXjHXtR", + "sSL/pbZCt24mSXm2TDqVz0zHX+0zIGoQaTCyJeUcimRv+1r+1b+qE+/+v4ueYVeMpz+1CzFb2FuQ1mA1", + "gfBT+vENrpguzAQxipoJuUKKk2IhcoLz1JVLatbYrWieqiSbiPHHYVeVdl7JmDzBFRSZswLdaNP2cGw5", + "kVT3cFUs++9LXJlxsAq/smoJOzpIQtkKr21FV2UBeAjXIOkCuwoOre6YsQ1HjsqSEFWaT9gSk78IoivJ", + "iZjPo2UA10xCsR2TkiplBzkzy4INzj16+ujs7GyYkRHxNWDtFq9+4S/rxT06xSb2i6v8ZQsmHAT+XaB/", + "V1PdIZvfJS5XfvUfFSidYrH4wQZko4XY3Ou29GooEzwl32F+MkPojRIBqBT1GZabOUGrshA0H2NS6Ktv", + "zp8TO6vtIwFRh6VfF6gBbB6RpJFneI5Un3+tJ3fV8HF2p84xq1Z6EoqypjIpmhZ1LVnW8n5C3WCMnSl5", + "ZtWywbHHTkIwtbhcQR7VgLVqACQO8x+tabZEfed0tFOl3FMNaHgJY88Ba3NRFPcaCmYhBzfLcFWMbRHj", + "MRF6CfKWKcC8E7CGZsLGkO3UKeR9AsfmamXFuSWc6QHSayiPdegueOCs6Ov9K5KQtfbh3ra/OpMHFjk/", + "tNjzJfZKx+20Kke3/B5syYyNL7oxJS+csSOjXHCWYbGJlAiOqRiHmVUH1OVI2zvVyJ3lxDFM1qsOAeoO", + "i70VrD3LdIjrOjVEX81+W8Kxf2rYuCKAC9DK8UDIx758vDPQMa7AFUAz9BVzVCETrl/JsJjgQnJEl/Tx", + "CLOp9ehavzXffnS6ecwZc8M46twcUt1L0BrYCsXQzs4J02QhQLnVNuPC1GvTZ3q14QjCm+lzsWDZJVvg", + "GNYV0SDFegF3hzr3PsHOB9e0/dq0dbULws8Nlzo7qV/3myQLUWH/UzXXe9Gf8v3yjjQRcsP48Wg7iHGn", + "qz/ey4YMYY2ef1Difd4hm1C+vjnKN+bJaukNWxAbuZtMG8x4AoznjHuDbzoPVpa8S3Bj8DT39FOZpNo+", + "OgZxvCugRU84DAbVW4+B+w7VrsRgUIJr9HP0b2Ndeb+HrYQG9euC8i3xh8JQdySUfE2L4AyfqKOP0pkT", + "xqyzcKuyfoqtGLY+8aG5DXTtDQQN3bEayqH3VF+20VmVL0BPaJ6n8s59hV8JfvUBhbCBrApFwEKcaTNd", + "e5fa3ESZ4Kpa7ZjLN7jndDlTVClYzYqE6+2z8BHysMOYiGq2xX9TFbD6d8Y5vR8c/e093PPDahR0o9lT", + "0rOh6Ylii8lwTOCdcn901FPfjdDr/keldB/4/YeI625xuXiPUvztG3NxxGm6Oz7+9moJWbTRn17gd58P", + "LGRybXIlvMo6dd7QIwM3L7FlLeB9wyTga1r0ZFyIrTb2frWWjL68C1lvWhGqXfY6TUnNE4aoMPrzf1kP", + "7JZlqGve7POxti7W79N44vCxE+n9lsYfGnZF6/VWM5Ree+LdTH41ERxq83OlGLr6UloUIhvMGdww56ZT", + "f6pesVq5zPcJr7z1SuTxWYi9uQDSjM06LCdCK/Bhm/yGT6vkF3mbHq2hHwlEMzRrGaLRLWFsAzM9eB4Y", + "O3U8UaSydZgl37ICi0P9++XLH0f9GxntQHdLXerspAq7b2NCpFqbPBaigY8dPEDwIq3/Vj0qdcwNlT4N", + "rjpx8sO3VkE4BCSbJ+mQ1s+HDt4hgIWwVaFSdTO62WlG9XZ45EfUUG+v5SgxdaSool1tKfH2sUrPugkJ", + "hUgHFSZtyEhDijul6gi5l4LXwNqLxuWjs8WVOnWZOgz02RDhsIOPd+PRRX6Q+JSqRTWyo6QY7HO2WOqv", + "CpHdfA80B2nriaSek7aayArMM1QtWYnvn1IoVtcDLsxgLpH3EoebDg3NuVqCywrjkwR0xvIO1GvINNaH", + "rt1AJcBwP4cyvUQDgTcoYpOP4AoiAXIo9XKnsGSdu0u9rMuGgos8Y4rMwJku1sDHhE1h2g5Wy+ukUKQA", + "OvdKWCmEHlBXN4QtIRpjoFP01anRvFsM7OR8i1Ia2lK60+FFWM5DTIANtLylqs4c1UqjMDhcez6HDBPe", + "70y/9x9L4FE+trFX3SEs8ygbHwvhgliy4aga7RrWXYnwdoIa1aR6n5D2JcS4ge0DRRo0lKwIHCJs75IB", + "HpFj7bi+qECfacM5RjIV6AkR5P3gXQL+usbSXYoARNkp7wiGp3FzPdUZK+8GjZdo7gCG6Tq9V9H+Oh0e", + "CqZ92f261dX7X8rPsJi9ck6lNKSbj/VJ5KJbjvnWpavHRIvBWugT14Pyv/kErXaWgt24CjWIMGubvaUy", + 
"9y2OkibP3pssDfQ8zMzqwKiul8+hfjk2QjErhBGAJn2Boc1IpeDC+0BZX+s6aRlCPQcpIQ82wUIomGjh", + "w6wOSP7pwid3YM96md8Jby2P/gNChu2Kemso/FQXksBykBRrJlDnfB5jhUhYUQO9jIo7pNWg+3boa/vd", + "5xTx5f12q1f78B7Oxf4K2T70jqkO5uPTNSdOODiYezUSkdxBM8s4BznxRtx2aQfeTJOJeZXzKrOiSnw2", + "g/Z6cNqxHdwsqdTMuqtsPaGirBw3sD21ah9fddzveAy0lSEt6FFC6RZRHFVXrVJwL44C3sdN31kKUUx6", + "LIMX3XoU7cNww7IbwMSsITLFSMEPmsfGTEI+QYNU8Bm5XW59tYWyBA75wykh59xGB3r3kWYF0tbk/IHe", + "Nf8GZ80rW2HGaaCn1zwdZoWVXuQ9uZ8fZgfP6+NNCgy/vOf8dpA7zK43vM9H7hZLwjTrBE+Hqje6/h0t", + "ESoiPwtFSoC6tIbgr5ElJN5RBLOzRGmE0D+AEmdAJqoQKS/8u2SQMUOlMRVPhgBp4AOeqzUUbvAkApyT", + "3Z6srO6zzzsq5kRC7Ztx1wSsLqepZeKqTzXSnjnM0uSMcyEhnhH9TG2i5hDZhnmO8T8zpiWV27ukSW2i", + "KqWG6sXyXm/J4ChZL6R2luzisCjE7QTZ2iRUV0qpA0w71by2fZ3Sup856jOI3C6pciLilixpTjIhJWRx", + "j3SIt4VqJSRMCoFemCnHjrk2j4QVxnVyUogFEWUmcrCF0NIU1DdXxTlF2QsiV7YkCiztYMoA2yei44FT", + "mtvXmmcnKK/tLbThN//K9LHpK+r0d3bRE+si0BNfAMqlu3MYso278CLh2IxMbaVsWkSesw3SDcjUkZ8T", + "LSsYE9eiXYXfHXwqgayYUhaUQEu3rCgwewTbRA4NwR8ojdoe2fkC/aDXDB3emplErEhdmtsxpF+JecBl", + "nJGN6KUU1WIZ1QcIcPqnu6zcwz4e5WdVoU8ihoiaKZ6QlVDaPYvtSPWSaxfQTzLBtRRF0VTkWTl/4Yy+", + "L+jmPMv0cyFuZjS7eYiPcC50WGk+9ikV2r679UyylYNx2EtBb/gEyUPtT7Nu26FXq6Pnwbyzxf06hod9", + "mvwIzDf7met+u8Z5d2HtdTX5bPotdM4J1WLFsvRx+3N5v/b6rKa4VzLToq1CbLPQYDPkA/E9FtyZkHt2", + "0QycJsuonhPHI5xbB3Ii818U49vjkjk4HtRzh3b5jhOwJlmvGNgCACG1iRB0JW3p4lhICwxHLGziFHRK", + "aQM68MJB37/7wWZGODpQGu4FVMcbOQD4idVgjG1GTOvZPBMb//1hnTLzTsC/203lDebR51R5WZOWtG6V", + "PpFVD0dIFyDY6YF4hUkwZkP9EEMp+oGXfwRAv2diA4ZB/omHgjGnrIB8kqpSfBF0YOPoue5iLKPRfT1H", + "y8kzWvlKwGbsSoJLrGSlf9k0J5bUkJIIzbsacZ7DBmyM1u8gha3jO47MWVDYMr8tjYIoJwWsoeGw6bI9", + "VSiFsjX4vip0JjlAiRbftqIt5YkYVwlsaV/c2ieRL9sQ7CbVMRaxdqfIHl1LUjO04RN7TNTQo2QgWrO8", + "og38qUNFjqYu0RzlBKo6z4eJf2IOneZnO8JPfoBz3z8lynhMvBnGhw5mQWnU7WJAez2TK9V36nnaMTlO", + "ZRYMRThbHuzalsRrvqFKesv7tZpdkq9fYgP3iQkeIfabDWQo1binEOTuMdRjOXE5kJDaOUBuHwymS0Kb", + "vwROuIhqHt9SFV4xdVZX/4OdGBsx7h7ad7DR1/7D999ZgoMR1Uq2mC5RGsj6fjr+j3ISdx7E3vFSNKLA", + "hfLuUI156nbPDmwgqiIn3Oynkf2xRrC7xRwXH5NZ5QcqCnFrixjHT9Rn4O25lvq8icmJ5Sxcy95PeuwS", + "Dre1ICyKEFnRLRES/zEP0n9UtGDzLfIZC77vRtSSGhJyBmTrReH8rs3Eu8WrsQfMK2KEn8qumw0dMxpu", + "a0aJgDYXuS/bJsiK3kC8DeggYvlnpg3jVNUMlRrmym5tZxcLbvE+PdOK5rESABPNbhvcwSc8N73//zps", + "NZ7K538sC5r5ktWu+FyTz2BVe09cegmr3WHOXb7mSSBUyq+JVvo0GfkdtKkHsq5UzE9fcawG2J0S4J26", + "YPdaxkClcKvG0Y4A8UFLOfYuHCeGs7OkuNTvvsXFlY8/zO4kM0T3LWMI+H+gXWm4V3Qi29IV1OP12GLp", + "H2AXGol4ErBaNfhMbCYS5mqfI43Vg8/EpgZYBd0t45kEqqzf0cVL92ytEyAzbp7R1ms3mFXDKDnMGa9Z", + "LeNlpROvIMyDzLcRwmJrAqK1xzbXJ2MYUXRNi5drkJLlfRtnTo8tDRwX6fEWFNc3oQAJN3J3AKbqFyDG", + "U9f6+biZuf5tgUHrO6s05TmVedyccZKBNFIDuaVbdXdTVbA67DNW0UgWamYLicxWSNoWkGLrrM33NCQF", + "AOkRLUoDLEHopJ2wAlnFkBY9hp8uDH8KS9CKbiaFWGDUb8+BcHmu0XRoH5CCoxLdSnfD1u3nUex32D0N", + "liJxjEgLnHXIFLvP/UvcSnyE/syZ3nnyrYazHYZtPZ3twfRI5Ys6PMMSS/c8piLnXWKmOHrei6o+TYmn", + "PYg2MekS3dGq9+wi+le4tAuxCn14scqmC0cqPt/qFSaob1A7AjBA1XEFNHMeYl1FXEdRYZEydtkNDtTT", + "We2+v5d6wENFinJnvTltcNAx4xxS4XN3PoNJKcpJNsS31VYryp2RwUHahLGHPiITQs+6g9+NCvW7GjnR", + "GoW8Di1y2ltIbJ+trMx2qQz6lEw9HL1pwBBz5GV4hK1qDWOtgipm7B/n3tjdVKIFJkEokZBVEpXMt3S7", + "v/BjT/b5y+/PP3v0+NfHn31OTAOSswWouqZBq3Bi7ZrIeFtr9GGdETvL0+lN8NlCLOK89dKHvYVNcWfN", + "cltVJyPulI08RDuduABSwbndEnl32iscpw6L+GNtV2qRR9+xFAre/55JURTpmjJBrkqYX1K7FRlgzAuk", + "BKmY0oYRNu2nTNdO2WqJykXMGr62uaEEz8Brnx0VMN3jy5VaSJ9PL/IzzMXgbE4ENmXheJW1E+1al3un", + "Wf0eCo3objMDUorSifZsTlIQYcyWrCDo1Z3aFPXpkZtuYLbWYTdFiM75PU1659y9hMWc7Ob2zVLcOs3p", + "zSYmxAt/KO9Amn3Wjf48I3fhJLVh4A/DPxKJU47GNcJy3wevSL4PdkSFn3e8JkLSkEGgdRNkJMgDAeiJ", + 
"h24ErUZBdlFucmltDGiN8ObntvjxojZL741MQUh8hz3gxbHMdbsQTOHA+ciJvV8EpERLedNHCY3l7wuP", + "9qw3XCTRFjmlidagLFsSXbEwCohXX4c4855XSSccXQqhiXmZFkUijN3qcfBMxYRjngRyTYsPzzW+ZVLp", + "c8QH5D/1B27FYcsxki0q1dETcj6ng8CKQpQ/CFT8FcbW/weYnU3ejm4WZ/jv3IGoEqKF9faeBws4cHKL", + "Y1rHrkefk5kr91NKyJhqOxTcepEmxNuCZHPnXwsb3Y79vXeZoF+EvsdxmHt/IPJjZGQLngMO5vqof2Tm", + "1MMBkqclRaodQkngL8Xr4qLqe66de5aGuVsqpyhx44GpnLrl4ocuD9eBl1eloLvOwbd+A7eJC79e29Bc", + "ZYMrzFxfv9azIQnF0tVgTHfMcXaUsjD3LwrzQRKcWVS6MRwkScKqRe592Wta/pJRnobmLhpxv6eA/NKi", + "34yGj4J5xe14oQAqxop7ti7m4+DFILjp9pRc8xOiltS/Ldyfjz/7fDQeAa9WZvH199F45L6+Sb3U8k0y", + "rrROpNPxEXXVBB4oUtLtkGD2valzkvitMwV9eJFGaTZLv+m+N3uGD1cXgHDBkdUje7E3qMuf81cCoJ3E", + "0Dqs4cRYkqzTA4Wt2Jcp6Je+tPg29XtPtY8W961YsddJrlGI5d14tLBJyrA6ya+uVt2H3XYPQU++QLf0", + "+6QBs4hJrLUxeTRVlNRtQEEW1y1RIQMjr7NKMr29NPj3anf2600qGdR3IT2Ty/kVLPBO9tXiBrj3MauT", + "OVXKS9ffCVqg9GkdA7iROUUxJd/YCiHuWvzywexf4dO/PcnPPn30r7O/nX12lsGTz744O6NfPKGPvvj0", + "ETz+22dPzuDR/PMvZo/zx08ez548fvL5Z19knz55NHvy+Rf/+sBQugHZAuor/zwd/efkvFiIyfmri8mV", + "AbbGCS3ZD2D2BjVsc0xQiEjN8IqFFWXF6Kn/6X/5i3KaiVU9vP915OpBjpZal+rp6ent7e007nK6wBwo", + "Ey2qbHnq58Fclo33yquLEBdkff9wR2ubE25qyO9nvv30zeUVOX91Ma0JZvR0dDY9mz7CfIolcFqy0dPR", + "p/gTnp4l7vspZtE+Va4Yz2kIHX037nwrS1uqx3xahDSg5q8l0AJZpPljBVqyzH+SQPOt+7+6pYsFyClG", + "jNmf1o9P/dvj9K3LK/Nu17fT2Bvt9G0jOU++p6f3p9rX5PStL/e/e8BGKXfn52qQmnSU+A60S6NnNZaJ", + "PEdon3Sjj4nCujvmp1IyYc7r2IgLOaA3EbrUSqwvomXFM+tiYqcAjv99cf6f6Gbz4vw/yZfkbOyCnBSq", + "QVLT21QRgdAucgt21zdafbU9D5mgapec0dPXKdW0c1Avq1nBMvOKmPqTasgwOkhhyJpRoiFiZC8K9A8I", + "bN+w8rPJF2/efva3d0mP9K5zWu3VufNrR8IM+I0yJzXcTIQv5I74XtHNl33Y3riAGTPuPyqQ23r5K2pu", + "6XqpAyXA5K+JlJk+8vHW1UyPna4jd+x/v3z5IxGSOMXeK5rdhKhPHwFcRz3HAcCmZ9/a3O0dL8/L1S58", + "dKUWZbOWQNADvMGi0Ago8qzHZ2eeUTulR8QFTh1ziWZqicZdakYPwsiG0k3/oQhsaKaLLaEqcuFCd2xf", + "wL0VmyvKSSNCaKfVpjuj25JkeNWhGUgSpXCEpsUe+K5axa4b6HDvltLc5PtTfnSQkYQgme423lpPI3/t", + "7v+M3e2KPqQU5kwzDDip7zV/ZzaAdAJvsfXg9qRhmpL/EhUKqObpUWkILFBIZGfhVrbmWjeny0IXvdLq", + "mEj8cnLSXvjJSe2RPIdbZLKUY8M2Ok5OpmannhzIynaa0BoVCQadnUOG62zWC7oJihRKuOATDguq2RpI", + "5E315OzRn3aFF9wG4BiJ3L4c3o1Hn/2Jt+yCG+GJFgRb2tV8+qddzSXINcuAXMGqFJJKVmzJzzxEONmX", + "FconXfb3M7/h4pZ7RJhHcbVaUbl1kjoNPKfiUSnCnfynk/+tluaRi9KFQjc9lIOt4Ozz3vLF6M07/9AY", + "+MDZ1ex0hvW6hzaF+FXU/wRCLaU6fYu2vd7fT52eL/0Rza/2hX7qlZc9LW0OwvTHxtPrrd6YhewezrSJ", + "xsuozpZVefoW/4OP7WhFtv7Pqd7wU3RXP33bQIT73EFE8/e6e9wCy1Z44MR8rvCxuOvz6Vv7bzQRbEqQ", + "zFxHmBrZ/Wqz4Z+qqiyLbffnLc+SP3bX0Uj63fPzqdf1pN7tzZZvG382aUotK52L22gWtJJax4AuZOZj", + "pdp/n95Spo2Q5LJG07kG2e2sgRanrjJh69e63E/nC9Ywin5siVWlsInTms/mn+jtVSPIXdosQF8J1Ib0", + "MdzNZMY4cqGYS9a6T/ux+0Tq8EasH7/VtdNJQgbVgsykoHlGlTZ/1GVGmu/vd/d8f7WTFl0k/AYQTNRp", + "dG0khp9M99qHcdwhQma0L+TimZ+wjqx974JZB6KvaE58pr0JeUELs+GQk3Mn/jew8b6Fqo8vBX1kseWD", + "yRlf+cOnCMW0o40HokxnA4vq8A4RKswr0jCABfCJY0GTmci3ruLpSNJbvbHJh9rM7ZQ2b4ymtpNKulJ9", + "H4+gCv1j6z/3qT3/Uhn+pTL8S6n0l8rwr939S2V4dJXhXwq1vxRq/08q1A7RoqUEUqco6pdL2Rq4Dbtp", + "vRBpXWoosPhmAkWmg/TWiIXHqkZMTwm5wuxX1NwSsAZJC5JRZaUrl6lthc7mmIYR8qfXfNKAxDpwm4k/", + "qf9r3eyvq7OzT4GcPWz3UZoVRcybu31RMsZPNkjuS3I9uh51RpKwEmvIrZdaXGfC9to77P8Xxn3ZKWiD", + "qTwwvZjP1khUNZ+zjFmUF4IvCF2IOnoE01VzgV9AGuCsbyNheuyi7ZhL8WB3pVUOoynjdyWAi3oL93o4", + "tMgl7dtgCO9Ax4Z/Sak8/pLSBzK7XQn97stId47d4ap/cZUPwVU+Ol/5s5tzIyXk/0gx88nZkz/tgmKV", + "9Y9Ck2997Ns9xDGXKzlLFly8q6Dls115xWDtJx37HeMtGjyOX78xF4ECufYXbO1G+/T0FJMnLoXSp6ik", + "arrYxh/fBJjf+tuplGxtoHmHelAh2YJxWkycH+qkdpV9PD0bvfu/AQAA///38T8KmyYBAA==", } // GetSwagger returns the content of the embedded swagger specification file @@ -420,16 +416,16 @@ var swaggerSpec = 
[]string{ func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { - return nil, fmt.Errorf("error base64 decoding spec: %s", err) + return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { - return nil, fmt.Errorf("error decompressing spec: %s", err) + return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil @@ -447,7 +443,7 @@ func decodeSpecCached() func() ([]byte, error) { // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { - var res = make(map[string]func() ([]byte, error)) + res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } @@ -461,12 +457,12 @@ func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. func GetSwagger() (swagger *openapi3.T, err error) { - var resolvePath = PathToRawSpec("") + resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { - var pathToFile = url.String() + pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { diff --git a/daemon/algod/api/server/v2/generated/participating/public/public_routes.yml b/daemon/algod/api/server/v2/generated/participating/public/routes.yml similarity index 83% rename from daemon/algod/api/server/v2/generated/participating/public/public_routes.yml rename to daemon/algod/api/server/v2/generated/participating/public/routes.yml index a568d05443..3e11173a5a 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/public_routes.yml +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.yml @@ -12,9 +12,9 @@ output-options: - nonparticipating - data - experimental - type-mappings: - integer: uint64 skip-prune: true + user-templates: + echo/echo-register.tmpl: ./templates/echo/echo-register.tmpl additional-imports: - alias: "." 
package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 126481912c..e0f7acbf69 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -41,6 +41,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/catchup" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklearray" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" @@ -142,28 +143,20 @@ type NodeInterface interface { GetParticipationKey(account.ParticipationID) (account.ParticipationRecord, error) RemoveParticipationKey(account.ParticipationID) error AppendParticipationKeys(id account.ParticipationID, keys account.StateProofKeys) error - SetSyncRound(rnd uint64) error - GetSyncRound() uint64 + SetSyncRound(rnd basics.Round) error + GetSyncRound() basics.Round UnsetSyncRound() GetBlockTimeStampOffset() (*int64, error) SetBlockTimeStampOffset(int64) error } -func roundToPtrOrNil(value basics.Round) *uint64 { - if value == 0 { - return nil - } - result := uint64(value) - return &result -} - func convertParticipationRecord(record account.ParticipationRecord) model.ParticipationKey { participationKey := model.ParticipationKey{ Id: record.ParticipationID.String(), Address: record.Account.String(), Key: model.AccountParticipation{ - VoteFirstValid: uint64(record.FirstValid), - VoteLastValid: uint64(record.LastValid), + VoteFirstValid: record.FirstValid, + VoteLastValid: record.LastValid, VoteKeyDilution: record.KeyDilution, }, } @@ -184,16 +177,16 @@ func convertParticipationRecord(record account.ParticipationRecord) model.Partic // Optional fields. 
if record.EffectiveLast != 0 && record.EffectiveFirst == 0 { // Special case for first valid on round 0 - zero := uint64(0) + zero := basics.Round(0) participationKey.EffectiveFirstValid = &zero } else { - participationKey.EffectiveFirstValid = roundToPtrOrNil(record.EffectiveFirst) + participationKey.EffectiveFirstValid = omitEmpty(record.EffectiveFirst) } - participationKey.EffectiveLastValid = roundToPtrOrNil(record.EffectiveLast) - participationKey.LastVote = roundToPtrOrNil(record.LastVote) - participationKey.LastBlockProposal = roundToPtrOrNil(record.LastBlockProposal) - participationKey.LastVote = roundToPtrOrNil(record.LastVote) - participationKey.LastStateProof = roundToPtrOrNil(record.LastStateProof) + participationKey.EffectiveLastValid = omitEmpty(record.EffectiveLast) + participationKey.LastVote = omitEmpty(record.LastVote) + participationKey.LastBlockProposal = omitEmpty(record.LastBlockProposal) + participationKey.LastVote = omitEmpty(record.LastVote) + participationKey.LastStateProof = omitEmpty(record.LastStateProof) return participationKey } @@ -237,8 +230,8 @@ func GetStateProofTransactionForRound(ctx context.Context, txnFetcher LedgerForA continue } - if txn.StateProofTxnFields.Message.FirstAttestedRound <= uint64(round) && - uint64(round) <= txn.StateProofTxnFields.Message.LastAttestedRound { + if txn.StateProofTxnFields.Message.FirstAttestedRound <= round && + round <= txn.StateProofTxnFields.Message.LastAttestedRound { return txn, nil } } @@ -264,7 +257,7 @@ func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error { return ctx.JSON(http.StatusOK, response) } -func (v2 *Handlers) generateKeyHandler(address string, params model.GenerateParticipationKeysParams) error { +func (v2 *Handlers) generateKeyHandler(address basics.Address, params model.GenerateParticipationKeysParams) error { installFunc := func(path string) error { bytes, err := os.ReadFile(path) if err != nil { @@ -280,13 +273,13 @@ func (v2 *Handlers) generateKeyHandler(address string, params model.GeneratePart v2.Log.Infof("Installed participation key %s", partID) return err } - _, _, err := participation.GenParticipationKeysTo(address, params.First, params.Last, nilToZero(params.Dilution), "", installFunc) + _, _, err := participation.GenParticipationKeysTo(address.String(), params.First, params.Last, nilToZero(params.Dilution), "", installFunc) return err } // GenerateParticipationKeys generates and installs participation keys to the node. // (POST /v2/participation/generate/{address}) -func (v2 *Handlers) GenerateParticipationKeys(ctx echo.Context, address string, params model.GenerateParticipationKeysParams) error { +func (v2 *Handlers) GenerateParticipationKeys(ctx echo.Context, address basics.Address, params model.GenerateParticipationKeysParams) error { if !v2.KeygenLimiter.TryAcquire(1) { err := fmt.Errorf("participation key generation already in progress") return badRequest(ctx, err, err.Error(), v2.Log) @@ -415,22 +408,17 @@ func (v2 *Handlers) ShutdownNode(ctx echo.Context, params model.ShutdownNodePara // AccountInformation gets account information for a given account. 
// (GET /v2/accounts/{address}) -func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params model.AccountInformationParams) error { +func (v2 *Handlers) AccountInformation(ctx echo.Context, address basics.Address, params model.AccountInformationParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } - addr, err := basics.UnmarshalChecksumAddress(address) - if err != nil { - return badRequest(ctx, err, errFailedToParseAddress, v2.Log) - } - // should we skip fetching apps and assets? if params.Exclude != nil { switch *params.Exclude { case "all": - return v2.basicAccountInformation(ctx, addr, handle, contentType) + return v2.basicAccountInformation(ctx, address, handle, contentType) case "none", "": default: return badRequest(ctx, err, errFailedToParseExclude, v2.Log) @@ -441,14 +429,14 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params // count total # of resources, if max limit is set if maxResults := v2.Node.Config().MaxAPIResourcesPerAccount; maxResults != 0 { - record, _, _, lookupErr := myLedger.LookupAccount(myLedger.Latest(), addr) + record, _, _, lookupErr := myLedger.LookupAccount(myLedger.Latest(), address) if lookupErr != nil { return internalError(ctx, lookupErr, errFailedLookingUpLedger, v2.Log) } totalResults := record.TotalAssets + record.TotalAssetParams + record.TotalAppLocalStates + record.TotalAppParams if totalResults > maxResults { v2.Log.Infof("MaxAccountAPIResults limit %d exceeded, total results %d", maxResults, totalResults) - extraData := map[string]interface{}{ + extraData := map[string]any{ "max-results": maxResults, "total-assets-opted-in": record.TotalAssets, "total-created-assets": record.TotalAssetParams, @@ -462,16 +450,16 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params } } - record, lastRound, amountWithoutPendingRewards, err := myLedger.LookupLatest(addr) + record, lastRound, amountWithoutPendingRewards, err := myLedger.LookupLatest(address) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } // check against configured total limit on assets/apps if handle == protocol.CodecHandle { - data, err := encode(handle, record) - if err != nil { - return internalError(ctx, err, errFailedToEncodeResponse, v2.Log) + data, err1 := encode(handle, record) + if err1 != nil { + return internalError(ctx, err1, errFailedToEncodeResponse, v2.Log) } return ctx.Blob(http.StatusOK, contentType, data) } @@ -481,7 +469,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params return internalError(ctx, err, fmt.Sprintf("could not retrieve consensus information for last round (%d)", lastRound), v2.Log) } - account, err := AccountDataToAccount(address, &record, lastRound, &consensus, amountWithoutPendingRewards) + account, err := AccountDataToAccount(address.String(), &record, lastRound, &consensus, amountWithoutPendingRewards) if err != nil { return internalError(ctx, err, errInternalFailure, v2.Log) } @@ -516,9 +504,9 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres apiParticipation = &model.AccountParticipation{ VoteParticipationKey: record.VoteID[:], SelectionParticipationKey: record.SelectionID[:], - VoteFirstValid: uint64(record.VoteFirstValid), - VoteLastValid: uint64(record.VoteLastValid), - VoteKeyDilution: uint64(record.VoteKeyDilution), + VoteFirstValid: record.VoteFirstValid, + 
VoteLastValid: record.VoteLastValid, + VoteKeyDilution: record.VoteKeyDilution, } if !record.StateProofID.IsEmpty() { tmp := record.StateProofID[:] @@ -533,7 +521,7 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres account := model.Account{ SigType: nil, - Round: uint64(lastRound), + Round: lastRound, Address: addr.String(), Amount: record.MicroAlgos.Raw, PendingRewards: pendingRewards.Raw, @@ -556,8 +544,8 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres TotalBoxes: omitEmpty(record.TotalBoxes), TotalBoxBytes: omitEmpty(record.TotalBoxBytes), MinBalance: record.MinBalance(&consensus).Raw, - LastProposed: omitEmpty(uint64(record.LastProposed)), - LastHeartbeat: omitEmpty(uint64(record.LastHeartbeat)), + LastProposed: omitEmpty(record.LastProposed), + LastHeartbeat: omitEmpty(record.LastHeartbeat), } response := model.AccountResponse(account) return ctx.JSON(http.StatusOK, response) @@ -565,21 +553,16 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres // AccountAssetInformation gets account information about a given asset. // (GET /v2/accounts/{address}/assets/{asset-id}) -func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, assetID uint64, params model.AccountAssetInformationParams) error { +func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address basics.Address, assetID basics.AssetIndex, params model.AccountAssetInformationParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } - addr, err := basics.UnmarshalChecksumAddress(address) - if err != nil { - return badRequest(ctx, err, errFailedToParseAddress, v2.Log) - } - ledger := v2.Node.LedgerForAPI() lastRound := ledger.Latest() - record, err := ledger.LookupAsset(lastRound, addr, basics.AssetIndex(assetID)) + record, err := ledger.LookupAsset(lastRound, address, assetID) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -598,17 +581,17 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as } // prepare JSON response - response := model.AccountAssetResponse{Round: uint64(lastRound)} + response := model.AccountAssetResponse{Round: lastRound} if record.AssetParams != nil { - asset := AssetParamsToAsset(addr.String(), basics.AssetIndex(assetID), record.AssetParams) + asset := AssetParamsToAsset(address.String(), assetID, record.AssetParams) response.CreatedAsset = &asset.Params } if record.AssetHolding != nil { response.AssetHolding = &model.AssetHolding{ Amount: record.AssetHolding.Amount, - AssetID: uint64(assetID), + AssetID: assetID, IsFrozen: record.AssetHolding.Frozen, } } @@ -618,21 +601,16 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as // AccountApplicationInformation gets account information about a given app. 
// (GET /v2/accounts/{address}/applications/{application-id}) -func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address string, applicationID uint64, params model.AccountApplicationInformationParams) error { +func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address basics.Address, applicationID basics.AppIndex, params model.AccountApplicationInformationParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } - addr, err := basics.UnmarshalChecksumAddress(address) - if err != nil { - return badRequest(ctx, err, errFailedToParseAddress, v2.Log) - } - ledger := v2.Node.LedgerForAPI() lastRound := ledger.Latest() - record, err := ledger.LookupApplication(lastRound, addr, basics.AppIndex(applicationID)) + record, err := ledger.LookupApplication(lastRound, address, applicationID) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -651,17 +629,17 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri } // prepare JSON response - response := model.AccountApplicationResponse{Round: uint64(lastRound)} + response := model.AccountApplicationResponse{Round: lastRound} if record.AppParams != nil { - app := AppParamsToApplication(addr.String(), basics.AppIndex(applicationID), record.AppParams) + app := AppParamsToApplication(address.String(), applicationID, record.AppParams) response.CreatedApp = &app.Params } if record.AppLocalState != nil { localState := convertTKVToGenerated(&record.AppLocalState.KeyValue) response.AppLocalState = &model.ApplicationLocalState{ - Id: uint64(applicationID), + Id: applicationID, KeyValue: localState, Schema: model.ApplicationStateSchema{ NumByteSlice: record.AppLocalState.Schema.NumByteSlice, @@ -680,7 +658,7 @@ type BlockResponseJSON struct { // GetBlock gets the block for the given round. // (GET /v2/blocks/{round}) -func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlockParams) error { +func (v2 *Handlers) GetBlock(ctx echo.Context, round basics.Round, params model.GetBlockParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) @@ -697,7 +675,7 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlo // msgpack format uses 'RawBlockBytes' and attaches a custom header. 
if handle == protocol.CodecHandle { - blockbytes, blockErr := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), basics.Round(round)) + blockbytes, blockErr := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), round) if blockErr != nil { switch blockErr.(type) { case ledgercore.ErrNoEntry: @@ -712,7 +690,7 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlo } ledger := v2.Node.LedgerForAPI() - block, err := ledger.Block(basics.Round(round)) + block, err := ledger.Block(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: @@ -735,9 +713,9 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlo return ctx.Blob(http.StatusOK, contentType, data) } -func (v2 *Handlers) getBlockHeader(ctx echo.Context, round uint64, handle codec.Handle, contentType string) error { +func (v2 *Handlers) getBlockHeader(ctx echo.Context, round basics.Round, handle codec.Handle, contentType string) error { ledger := v2.Node.LedgerForAPI() - block, err := ledger.BlockHdr(basics.Round(round)) + block, err := ledger.BlockHdr(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: @@ -763,9 +741,9 @@ func (v2 *Handlers) getBlockHeader(ctx echo.Context, round uint64, handle codec. // GetBlockTxids gets all top level TxIDs in a block for the given round. // (GET /v2/blocks/{round}/txids) -func (v2 *Handlers) GetBlockTxids(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetBlockTxids(ctx echo.Context, round basics.Round) error { ledger := v2.Node.LedgerForAPI() - block, err := ledger.Block(basics.Round(round)) + block, err := ledger.Block(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: @@ -791,7 +769,7 @@ func (v2 *Handlers) GetBlockTxids(ctx echo.Context, round uint64) error { } // NewAppCallLogs generates a new model.AppCallLogs struct. -func NewAppCallLogs(txid string, logs []string, appIndex uint64) model.AppCallLogs { +func NewAppCallLogs(txid string, logs []string, appIndex basics.AppIndex) model.AppCallLogs { return model.AppCallLogs{ TxId: txid, Logs: convertSlice(logs, func(s string) []byte { return []byte(s) }), @@ -799,10 +777,10 @@ func NewAppCallLogs(txid string, logs []string, appIndex uint64) model.AppCallLo } } -func getAppIndexFromTxn(txn transactions.SignedTxnWithAD) uint64 { - appIndex := uint64(txn.SignedTxn.Txn.ApplicationID) +func getAppIndexFromTxn(txn transactions.SignedTxnWithAD) basics.AppIndex { + appIndex := txn.SignedTxn.Txn.ApplicationID if appIndex == 0 { - appIndex = uint64(txn.ApplyData.ApplicationID) + appIndex = txn.ApplyData.ApplicationID } return appIndex @@ -826,9 +804,9 @@ func appendLogsFromTxns(blockLogs []model.AppCallLogs, txns []transactions.Signe // GetBlockLogs gets all of the logs (inner and outer app calls) for a given block // (GET /v2/blocks/{round}/logs) -func (v2 *Handlers) GetBlockLogs(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetBlockLogs(ctx echo.Context, round basics.Round) error { ledger := v2.Node.LedgerForAPI() - block, err := ledger.Block(basics.Round(round)) + block, err := ledger.Block(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: @@ -856,9 +834,9 @@ func (v2 *Handlers) GetBlockLogs(ctx echo.Context, round uint64) error { // GetBlockHash gets the block hash for the given round. 
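getAppIndexFromTxn above now returns basics.AppIndex directly instead of converting through uint64. A simplified, self-contained sketch of the same fallback rule; AppIndex here is a stand-in for basics.AppIndex, not the real type:

package main

import "fmt"

// AppIndex stands in for basics.AppIndex (underlying uint64), purely for illustration.
type AppIndex uint64

// pickAppIndex mirrors the rule in getAppIndexFromTxn: a call to an existing
// app carries the ID in the transaction header, while an app creation carries
// zero there and the newly allocated ID in ApplyData.
func pickAppIndex(headerID, applyDataID AppIndex) AppIndex {
	if headerID != 0 {
		return headerID
	}
	return applyDataID
}

func main() {
	fmt.Println(pickAppIndex(0, 1234)) // creation: 1234 comes from ApplyData
	fmt.Println(pickAppIndex(77, 0))   // call to existing app 77
}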
// (GET /v2/blocks/{round}/hash) -func (v2 *Handlers) GetBlockHash(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetBlockHash(ctx echo.Context, round basics.Round) error { ledger := v2.Node.LedgerForAPI() - block, err := ledger.Block(basics.Round(round)) + block, err := ledger.Block(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: @@ -875,7 +853,7 @@ func (v2 *Handlers) GetBlockHash(ctx echo.Context, round uint64) error { // GetTransactionProof generates a Merkle proof for a transaction in a block. // (GET /v2/blocks/{round}/transactions/{txid}/proof) -func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params model.GetTransactionProofParams) error { +func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round basics.Round, txid string, params model.GetTransactionProofParams) error { var txID transactions.Txid err := txID.FromString(txid) if err != nil { @@ -887,7 +865,7 @@ func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid str } ledger := v2.Node.LedgerForAPI() - block, err := ledger.Block(basics.Round(round)) + block, err := ledger.Block(round) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -945,7 +923,7 @@ func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid str Stibhash: stibhash[:], Idx: uint64(idx), Treedepth: uint64(proof.TreeDepth), - Hashtype: model.TransactionProofResponseHashtype(hashtype), + Hashtype: model.TransactionProofHashtype(hashtype), } return ctx.JSON(http.StatusOK, response) @@ -965,7 +943,7 @@ func (v2 *Handlers) GetSupply(ctx echo.Context) error { } supply := model.SupplyResponse{ - CurrentRound: uint64(latest), + CurrentRound: latest, TotalMoney: totals.Participating().Raw, OnlineMoney: totals.Online.Money.Raw, } @@ -982,13 +960,13 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { } response := model.NodeStatusResponse{ - LastRound: uint64(stat.LastRound), + LastRound: stat.LastRound, LastVersion: string(stat.LastVersion), NextVersion: string(stat.NextVersion), - NextVersionRound: uint64(stat.NextVersionRound), + NextVersionRound: stat.NextVersionRound, NextVersionSupported: stat.NextVersionSupported, - TimeSinceLastRound: uint64(stat.TimeSinceLastRound().Nanoseconds()), - CatchupTime: uint64(stat.CatchupTime.Nanoseconds()), + TimeSinceLastRound: stat.TimeSinceLastRound().Nanoseconds(), + CatchupTime: stat.CatchupTime.Nanoseconds(), StoppedAtUnsupportedRound: stat.StoppedAtUnsupportedRound, LastCatchpoint: &stat.LastCatchpoint, Catchpoint: &stat.Catchpoint, @@ -1004,17 +982,17 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { // Make sure a vote is happening if stat.NextProtocolVoteBefore > 0 { - votesToGo := uint64(0) + votesToGo := basics.Round(0) // Check if the vote window is still open. 
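In GetStatus above, the uint64 conversions around TimeSinceLastRound and CatchupTime are dropped, presumably because the generated model fields now hold the signed nanosecond count that time.Duration.Nanoseconds() already returns. A tiny illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Nanoseconds() returns int64, so no unsigned conversion is needed once
	// the response fields accept a signed count.
	catchupTime := 1500 * time.Millisecond
	nanos := catchupTime.Nanoseconds()
	fmt.Println(nanos) // 1500000000
}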
if stat.NextProtocolVoteBefore > stat.LastRound { // subtract 1 because the variables are referring to "Last" round and "VoteBefore" - votesToGo = uint64(stat.NextProtocolVoteBefore - stat.LastRound - 1) + votesToGo = stat.NextProtocolVoteBefore - stat.LastRound - 1 } consensus := config.Consensus[protocol.ConsensusCurrentVersion] - upgradeVoteRounds := consensus.UpgradeVoteRounds - upgradeThreshold := consensus.UpgradeThreshold - votes := consensus.UpgradeVoteRounds - votesToGo + upgradeVoteRounds := basics.Round(consensus.UpgradeVoteRounds) + upgradeThreshold := basics.Round(consensus.UpgradeThreshold) + votes := basics.Round(consensus.UpgradeVoteRounds) - votesToGo votesYes := stat.NextProtocolApprovals votesNo := votes - votesYes upgradeDelay := stat.UpgradeDelay @@ -1024,7 +1002,7 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { response.UpgradeVotes = &votes response.UpgradeYesVotes = &votesYes response.UpgradeNoVotes = &votesNo - response.UpgradeNextProtocolVoteBefore = omitEmpty(uint64(stat.NextProtocolVoteBefore)) + response.UpgradeNextProtocolVoteBefore = omitEmpty(stat.NextProtocolVoteBefore) response.UpgradeVoteRounds = &upgradeVoteRounds } @@ -1033,7 +1011,7 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { // WaitForBlock returns the node status after waiting for the given round. // (GET /v2/status/wait-for-block-after/{round}/) -func (v2 *Handlers) WaitForBlock(ctx echo.Context, round uint64) error { +func (v2 *Handlers) WaitForBlock(ctx echo.Context, round basics.Round) error { ledger := v2.Node.LedgerForAPI() stat, err := v2.Node.Status() @@ -1055,7 +1033,7 @@ func (v2 *Handlers) WaitForBlock(ctx echo.Context, round uint64) error { if latestBlkHdr.NextProtocol != "" { if _, nextProtocolSupported := config.Consensus[latestBlkHdr.NextProtocol]; !nextProtocolSupported { // see if the desired protocol switch is expect to happen before or after the above point. - if latestBlkHdr.NextProtocolSwitchOn <= basics.Round(round+1) { + if latestBlkHdr.NextProtocolSwitchOn <= round+1 { // we would never reach to this round, since this round would happen after the (unsupported) protocol upgrade. return badRequest(ctx, err, errRequestedRoundInUnsupportedRound, v2.Log) } @@ -1063,7 +1041,7 @@ func (v2 *Handlers) WaitForBlock(ctx echo.Context, round uint64) error { } // Wait - ledgerWaitCh, cancelLedgerWait := ledger.WaitWithCancel(basics.Round(round + 1)) + ledgerWaitCh, cancelLedgerWait := ledger.WaitWithCancel(round + 1) defer cancelLedgerWait() select { case <-v2.Shutdown: @@ -1143,7 +1121,7 @@ func (v2 *Handlers) RawTransactionAsync(ctx echo.Context) error { if !v2.Node.Config().EnableDeveloperAPI { return ctx.String(http.StatusNotFound, "/transactions/async was not enabled in the configuration file by setting the EnableDeveloperAPI to true") } - txgroup, err := decodeTxGroup(ctx.Request().Body, config.MaxTxGroupSize) + txgroup, err := decodeTxGroup(ctx.Request().Body, bounds.MaxTxGroupSize) if err != nil { return badRequest(ctx, err, err.Error(), v2.Log) } @@ -1156,16 +1134,11 @@ func (v2 *Handlers) RawTransactionAsync(ctx echo.Context) error { // AccountAssetsInformation looks up an account's asset holdings. 
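The upgrade-vote bookkeeping above is now done entirely in basics.Round. A worked example of the same arithmetic with hypothetical numbers (Round is a stand-in for basics.Round):

package main

import "fmt"

// Round stands in for basics.Round (underlying uint64), purely for illustration.
type Round uint64

func main() {
	// Hypothetical: a 10,000-round vote window closing at round 250,000,
	// observed when the last committed round is 245,000.
	const upgradeVoteRounds Round = 10_000
	lastRound := Round(245_000)
	voteBefore := Round(250_000)

	votesToGo := voteBefore - lastRound - 1 // 4,999 rounds left to vote in
	votes := upgradeVoteRounds - votesToGo  // 5,001 votes already cast
	votesYes := Round(4_200)                // hypothetical approvals so far
	votesNo := votes - votesYes             // 801 implicit "no" votes

	fmt.Println(votesToGo, votes, votesYes, votesNo) // 4999 5001 4200 801
}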
// (GET /v2/accounts/{address}/assets) -func (v2 *Handlers) AccountAssetsInformation(ctx echo.Context, address string, params model.AccountAssetsInformationParams) error { +func (v2 *Handlers) AccountAssetsInformation(ctx echo.Context, address basics.Address, params model.AccountAssetsInformationParams) error { if !v2.Node.Config().EnableExperimentalAPI { return ctx.String(http.StatusNotFound, "/v2/accounts/{address}/assets was not enabled in the configuration file by setting the EnableExperimentalAPI to true") } - addr, err := basics.UnmarshalChecksumAddress(address) - if err != nil { - return badRequest(ctx, err, errFailedToParseAddress, v2.Log) - } - var assetGreaterThan uint64 = 0 if params.Next != nil { agt, err0 := strconv.ParseUint(*params.Next, 10, 64) @@ -1198,14 +1171,14 @@ func (v2 *Handlers) AccountAssetsInformation(ctx echo.Context, address string, p // 3. Prepare JSON response // We intentionally request one more than the limit to determine if there are more assets. - records, lookupRound, err := ledger.LookupAssets(addr, basics.AssetIndex(assetGreaterThan), *params.Limit+1) + records, lookupRound, err := ledger.LookupAssets(address, basics.AssetIndex(assetGreaterThan), *params.Limit+1) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } // prepare JSON response - response := model.AccountAssetsInformationResponse{Round: uint64(lookupRound)} + response := model.AccountAssetsInformationResponse{Round: lookupRound} // If the total count is greater than the limit, we set the next token to the last asset ID being returned if uint64(len(records)) > *params.Limit { @@ -1226,7 +1199,7 @@ func (v2 *Handlers) AccountAssetsInformation(ctx echo.Context, address string, p aah := model.AccountAssetHolding{ AssetHolding: model.AssetHolding{ Amount: record.AssetHolding.Amount, - AssetID: uint64(record.AssetID), + AssetID: record.AssetID, IsFrozen: record.AssetHolding.Frozen, }, } @@ -1247,8 +1220,8 @@ func (v2 *Handlers) AccountAssetsInformation(ctx echo.Context, address string, p // PreEncodedSimulateTxnResult mirrors model.SimulateTransactionResult type PreEncodedSimulateTxnResult struct { Txn PreEncodedTxInfo `codec:"txn-result"` - AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"` - LogicSigBudgetConsumed *uint64 `codec:"logic-sig-budget-consumed,omitempty"` + AppBudgetConsumed *int `codec:"app-budget-consumed,omitempty"` + LogicSigBudgetConsumed *int `codec:"logic-sig-budget-consumed,omitempty"` TransactionTrace *model.SimulationTransactionExecTrace `codec:"exec-trace,omitempty"` UnnamedResourcesAccessed *model.SimulateUnnamedResourcesAccessed `codec:"unnamed-resources-accessed,omitempty"` FixedSigner *string `codec:"fixed-signer,omitempty"` @@ -1256,9 +1229,9 @@ type PreEncodedSimulateTxnResult struct { // PreEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult type PreEncodedSimulateTxnGroupResult struct { - AppBudgetAdded *uint64 `codec:"app-budget-added,omitempty"` - AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"` - FailedAt *[]uint64 `codec:"failed-at,omitempty"` + AppBudgetAdded *int `codec:"app-budget-added,omitempty"` + AppBudgetConsumed *int `codec:"app-budget-consumed,omitempty"` + FailedAt *[]int `codec:"failed-at,omitempty"` FailureMessage *string `codec:"failure-message,omitempty"` UnnamedResourcesAccessed *model.SimulateUnnamedResourcesAccessed `codec:"unnamed-resources-accessed,omitempty"` Txns []PreEncodedSimulateTxnResult `codec:"txn-results"` @@ -1267,7 +1240,7 @@ type 
PreEncodedSimulateTxnGroupResult struct { // PreEncodedSimulateResponse mirrors model.SimulateResponse type PreEncodedSimulateResponse struct { Version uint64 `codec:"version"` - LastRound uint64 `codec:"last-round"` + LastRound basics.Round `codec:"last-round"` TxnGroups []PreEncodedSimulateTxnGroupResult `codec:"txn-groups"` EvalOverrides *model.SimulationEvalOverrides `codec:"eval-overrides,omitempty"` ExecTraceConfig simulation.ExecTraceConfig `codec:"exec-trace-config,omitempty"` @@ -1286,7 +1259,7 @@ type PreEncodedSimulateRequest struct { AllowEmptySignatures bool `codec:"allow-empty-signatures,omitempty"` AllowMoreLogging bool `codec:"allow-more-logging,omitempty"` AllowUnnamedResources bool `codec:"allow-unnamed-resources,omitempty"` - ExtraOpcodeBudget uint64 `codec:"extra-opcode-budget,omitempty"` + ExtraOpcodeBudget int `codec:"extra-opcode-budget,omitempty"` ExecTraceConfig simulation.ExecTraceConfig `codec:"exec-trace-config,omitempty"` FixSigners bool `codec:"fix-signers,omitempty"` } @@ -1414,7 +1387,7 @@ func (v2 *Handlers) TealDryrun(ctx echo.Context) error { dr.ProtocolVersion = string(protocolVersion) if dr.Round == 0 { - dr.Round = uint64(hdr.Round + 1) + dr.Round = hdr.Round + 1 } if dr.LatestTimestamp == 0 { @@ -1435,7 +1408,7 @@ func (v2 *Handlers) UnsetSyncRound(ctx echo.Context) error { // SetSyncRound sets the sync round on the ledger. // (POST /v2/ledger/sync/{round}) -func (v2 *Handlers) SetSyncRound(ctx echo.Context, round uint64) error { +func (v2 *Handlers) SetSyncRound(ctx echo.Context, round basics.Round) error { err := v2.Node.SetSyncRound(round) if err != nil { switch err { @@ -1461,12 +1434,12 @@ func (v2 *Handlers) GetSyncRound(ctx echo.Context) error { // GetLedgerStateDelta returns the deltas for a given round. // This should be a representation of the ledgercore.StateDelta object. // (GET /v2/deltas/{round}) -func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64, params model.GetLedgerStateDeltaParams) error { +func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round basics.Round, params model.GetLedgerStateDeltaParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } - sDelta, err := v2.Node.LedgerForAPI().GetStateDeltaForRound(basics.Round(round)) + sDelta, err := v2.Node.LedgerForAPI().GetStateDeltaForRound(round) if err != nil { return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log) } @@ -1502,7 +1475,7 @@ func (v2 *Handlers) TransactionParams(ctx echo.Context) error { Fee: v2.Node.SuggestedFee().Raw, GenesisHash: gh[:], GenesisId: v2.Node.GenesisID(), - LastRound: uint64(stat.LastRound), + LastRound: stat.LastRound, MinFee: proto.MinTxnFee, } @@ -1512,12 +1485,12 @@ func (v2 *Handlers) TransactionParams(ctx echo.Context) error { // PreEncodedTxInfo represents the PendingTransaction response before it is // encoded to a format. 
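Because ConfirmedRound in PreEncodedTxInfo below becomes *basics.Round, PendingTransactionInformation can point straight at txn.ConfirmedRound instead of copying through a uint64 local. A self-contained illustration; Round and both structs here are stand-ins, not the real go-algorand types:

package main

import "fmt"

// Round stands in for basics.Round, purely for illustration.
type Round uint64

type applyData struct {
	ConfirmedRound Round
}

type txInfo struct {
	ConfirmedRound *Round
}

func main() {
	txn := applyData{ConfirmedRound: 41_000_000}

	var resp txInfo
	if txn.ConfirmedRound != 0 {
		// With matching types the response points straight at the field;
		// previously a copy through a uint64 local was required.
		resp.ConfirmedRound = &txn.ConfirmedRound
	}
	fmt.Println(*resp.ConfirmedRound) // 41000000
}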
type PreEncodedTxInfo struct { - AssetIndex *uint64 `codec:"asset-index,omitempty"` + AssetIndex *basics.AssetIndex `codec:"asset-index,omitempty"` AssetClosingAmount *uint64 `codec:"asset-closing-amount,omitempty"` - ApplicationIndex *uint64 `codec:"application-index,omitempty"` + ApplicationIndex *basics.AppIndex `codec:"application-index,omitempty"` CloseRewards *uint64 `codec:"close-rewards,omitempty"` ClosingAmount *uint64 `codec:"closing-amount,omitempty"` - ConfirmedRound *uint64 `codec:"confirmed-round,omitempty"` + ConfirmedRound *basics.Round `codec:"confirmed-round,omitempty"` GlobalStateDelta *model.StateDelta `codec:"global-state-delta,omitempty"` LocalStateDelta *[]model.AccountStateDelta `codec:"local-state-delta,omitempty"` PoolError string `codec:"pool-error"` @@ -1552,8 +1525,8 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, // We didn't find it, return a failure if !ok { - err := errors.New(errTransactionNotFound) - return notFound(ctx, err, err.Error(), v2.Log) + err1 := errors.New(errTransactionNotFound) + return notFound(ctx, err1, err1.Error(), v2.Log) } // Encoding wasn't working well without embedding "real" objects. @@ -1563,8 +1536,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, } if txn.ConfirmedRound != 0 { - r := uint64(txn.ConfirmedRound) - response.ConfirmedRound = &r + response.ConfirmedRound = &txn.ConfirmedRound response.ClosingAmount = &txn.ApplyData.ClosingAmount.Raw response.AssetClosingAmount = &txn.ApplyData.AssetClosingAmount @@ -1592,7 +1564,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, } // getPendingTransactions returns to the provided context a list of uncomfirmed transactions currently in the transaction pool with optional Max/Address filters. -func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format *string, addrFilter *string) error { +func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format *string, addrFilter *basics.Address) error { stat, err := v2.Node.Status() if err != nil { @@ -1603,16 +1575,6 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format return serviceUnavailable(ctx, fmt.Errorf("PendingTransactionInformation failed as the node was catchpoint catchuping"), errOperationNotAvailableDuringCatchup, v2.Log) } - var addrPtr *basics.Address - - if addrFilter != nil { - addr, err := basics.UnmarshalChecksumAddress(*addrFilter) - if err != nil { - return badRequest(ctx, err, errFailedToParseAddress, v2.Log) - } - addrPtr = &addr - } - handle, contentType, err := getCodecHandle(format) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) @@ -1637,7 +1599,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format } // continue if we have an address filter and the address doesn't match the transaction. 
- if addrPtr != nil && !txn.Txn.MatchAddress(*addrPtr) { + if addrFilter != nil && !txn.Txn.MatchAddress(*addrFilter) { continue } @@ -1662,7 +1624,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format } // startCatchup Given a catchpoint, it starts catching up to this catchpoint -func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string, minRounds uint64) error { +func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string, minRounds basics.Round) error { catchpointRound, _, err := ledgercore.ParseCatchpointLabel(catchpoint) if err != nil { return badRequest(ctx, err, errFailedToParseCatchpoint, v2.Log) @@ -1724,10 +1686,9 @@ func (v2 *Handlers) GetPendingTransactions(ctx echo.Context, params model.GetPen // GetApplicationByID returns application information by app idx. // (GET /v2/applications/{application-id}) -func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID uint64) error { - appIdx := basics.AppIndex(applicationID) +func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID basics.AppIndex) error { ledger := v2.Node.LedgerForAPI() - creator, ok, err := ledger.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable) + creator, ok, err := ledger.GetCreator(basics.CreatableIndex(applicationID), basics.AppCreatable) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -1737,7 +1698,7 @@ func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID uint64) e lastRound := ledger.Latest() - record, err := ledger.LookupApplication(lastRound, creator, basics.AppIndex(applicationID)) + record, err := ledger.LookupApplication(lastRound, creator, applicationID) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -1746,7 +1707,7 @@ func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID uint64) e return notFound(ctx, errors.New(errAppDoesNotExist), errAppDoesNotExist, v2.Log) } appParams := *record.AppParams - app := AppParamsToApplication(creator.String(), appIdx, &appParams) + app := AppParamsToApplication(creator.String(), applicationID, &appParams) response := model.ApplicationResponse(app) return ctx.JSON(http.StatusOK, response) } @@ -1766,26 +1727,25 @@ func applicationBoxesMaxKeys(requestedMax uint64, algodMax uint64) uint64 { return algodMax + 1 // API limit dominates. Increments by 1 to test if more than max supported results exist. 
} -// GetApplicationBoxes returns the box names of an application +// GetApplicationBoxes returns the boxes of an application // (GET /v2/applications/{application-id}/boxes) -func (v2 *Handlers) GetApplicationBoxes(ctx echo.Context, applicationID uint64, params model.GetApplicationBoxesParams) error { - appIdx := basics.AppIndex(applicationID) +func (v2 *Handlers) GetApplicationBoxes(ctx echo.Context, applicationID basics.AppIndex, params model.GetApplicationBoxesParams) error { ledger := v2.Node.LedgerForAPI() lastRound := ledger.Latest() - keyPrefix := apps.MakeBoxKey(uint64(appIdx), "") + keyPrefix := apps.MakeBoxKey(uint64(applicationID), "") requestedMax, algodMax := nilToZero(params.Max), v2.Node.Config().MaxAPIBoxPerApplication max := applicationBoxesMaxKeys(requestedMax, algodMax) if max != math.MaxUint64 { - record, _, _, err := ledger.LookupAccount(ledger.Latest(), appIdx.Address()) + record, _, _, err := ledger.LookupAccount(ledger.Latest(), applicationID.Address()) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } if record.TotalBoxes > max { return ctx.JSON(http.StatusBadRequest, model.ErrorResponse{ Message: "Result limit exceeded", - Data: &map[string]interface{}{ + Data: &map[string]any{ "max-api-box-per-application": algodMax, "max": requestedMax, "total-boxes": record.TotalBoxes, @@ -1812,8 +1772,7 @@ func (v2 *Handlers) GetApplicationBoxes(ctx echo.Context, applicationID uint64, // GetApplicationBoxByName returns the value of an application's box // (GET /v2/applications/{application-id}/box) -func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint64, params model.GetApplicationBoxByNameParams) error { - appIdx := basics.AppIndex(applicationID) +func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID basics.AppIndex, params model.GetApplicationBoxByNameParams) error { ledger := v2.Node.LedgerForAPI() lastRound := ledger.Latest() @@ -1827,7 +1786,7 @@ func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint return badRequest(ctx, err, err.Error(), v2.Log) } - value, err := ledger.LookupKv(lastRound, apps.MakeBoxKey(uint64(appIdx), string(boxName))) + value, err := ledger.LookupKv(lastRound, apps.MakeBoxKey(uint64(applicationID), string(boxName))) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -1836,7 +1795,7 @@ func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint } response := model.BoxResponse{ - Round: uint64(lastRound), + Round: lastRound, Name: boxName, Value: value, } @@ -1845,10 +1804,9 @@ func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint // GetAssetByID returns application information by app idx. 
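GetApplicationBoxes above also changes the error payload from map[string]interface{} to map[string]any; since Go 1.18 any is an alias for interface{}, so the two types are identical and the change is purely cosmetic:

package main

import "fmt"

func main() {
	// any and interface{} are the same type, so the assignments below are
	// legal without any conversion.
	var a map[string]any = map[string]interface{}{"max": uint64(1000)}
	var b map[string]interface{} = a
	fmt.Println(b["max"]) // 1000
}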
// (GET /v2/assets/{asset-id}) -func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error { - assetIdx := basics.AssetIndex(assetID) +func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID basics.AssetIndex) error { ledger := v2.Node.LedgerForAPI() - creator, ok, err := ledger.GetCreator(basics.CreatableIndex(assetIdx), basics.AssetCreatable) + creator, ok, err := ledger.GetCreator(basics.CreatableIndex(assetID), basics.AssetCreatable) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -1857,7 +1815,7 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error { } lastRound := ledger.Latest() - record, err := ledger.LookupAsset(lastRound, creator, basics.AssetIndex(assetID)) + record, err := ledger.LookupAsset(lastRound, creator, assetID) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -1866,15 +1824,15 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error { return notFound(ctx, errors.New(errAssetDoesNotExist), errAssetDoesNotExist, v2.Log) } assetParams := *record.AssetParams - asset := AssetParamsToAsset(creator.String(), assetIdx, &assetParams) + asset := AssetParamsToAsset(creator.String(), assetID, &assetParams) response := model.AssetResponse(asset) return ctx.JSON(http.StatusOK, response) } // GetPendingTransactionsByAddress takes an Algorand address and returns its associated list of unconfirmed transactions currently in the transaction pool. // (GET /v2/accounts/{address}/transactions/pending) -func (v2 *Handlers) GetPendingTransactionsByAddress(ctx echo.Context, addr string, params model.GetPendingTransactionsByAddressParams) error { - return v2.getPendingTransactions(ctx, params.Max, (*string)(params.Format), &addr) +func (v2 *Handlers) GetPendingTransactionsByAddress(ctx echo.Context, address basics.Address, params model.GetPendingTransactionsByAddressParams) error { + return v2.getPendingTransactions(ctx, params.Max, (*string)(params.Format), &address) } // StartCatchup Given a catchpoint, it starts catching up to this catchpoint @@ -1945,16 +1903,16 @@ func (v2 *Handlers) TealCompile(ctx echo.Context, params model.TealCompileParams // GetStateProof returns the state proof for a given round. 
// (GET /v2/stateproofs/{round}) -func (v2 *Handlers) GetStateProof(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetStateProof(ctx echo.Context, round basics.Round) error { ctxWithTimeout, cancel := context.WithTimeout(ctx.Request().Context(), time.Minute) defer cancel() ledger := v2.Node.LedgerForAPI() - if ledger.Latest() < basics.Round(round) { + if ledger.Latest() < round { return internalError(ctx, errors.New(errRoundGreaterThanTheLatest), errRoundGreaterThanTheLatest, v2.Log) } - tx, err := GetStateProofTransactionForRound(ctxWithTimeout, ledger, basics.Round(round), ledger.Latest(), v2.Shutdown) + tx, err := GetStateProofTransactionForRound(ctxWithTimeout, ledger, round, ledger.Latest(), v2.Shutdown) if err != nil { return v2.wrapStateproofError(ctx, err) } @@ -1984,24 +1942,24 @@ func (v2 *Handlers) wrapStateproofError(ctx echo.Context, err error) error { // GetLightBlockHeaderProof Gets a proof of a light block header for a given round // (GET /v2/blocks/{round}/lightheader/proof) -func (v2 *Handlers) GetLightBlockHeaderProof(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetLightBlockHeaderProof(ctx echo.Context, round basics.Round) error { ctxWithTimeout, cancel := context.WithTimeout(ctx.Request().Context(), time.Minute) defer cancel() ledger := v2.Node.LedgerForAPI() - if ledger.Latest() < basics.Round(round) { + if ledger.Latest() < round { return internalError(ctx, errors.New(errRoundGreaterThanTheLatest), errRoundGreaterThanTheLatest, v2.Log) } - stateProof, err := GetStateProofTransactionForRound(ctxWithTimeout, ledger, basics.Round(round), ledger.Latest(), v2.Shutdown) + stateProof, err := GetStateProofTransactionForRound(ctxWithTimeout, ledger, round, ledger.Latest(), v2.Shutdown) if err != nil { return v2.wrapStateproofError(ctx, err) } lastAttestedRound := stateProof.Message.LastAttestedRound firstAttestedRound := stateProof.Message.FirstAttestedRound - stateProofInterval := lastAttestedRound - firstAttestedRound + 1 + stateProofInterval := uint64(lastAttestedRound - firstAttestedRound + 1) - lightHeaders, err := stateproof.FetchLightHeaders(ledger, stateProofInterval, basics.Round(lastAttestedRound)) + lightHeaders, err := stateproof.FetchLightHeaders(ledger, stateProofInterval, lastAttestedRound) if err != nil { return notFound(ctx, err, err.Error(), v2.Log) } @@ -2013,9 +1971,9 @@ func (v2 *Handlers) GetLightBlockHeaderProof(ctx echo.Context, round uint64) err } response := model.LightBlockHeaderProofResponse{ - Index: blockIndex, + Index: uint64(blockIndex), Proof: leafproof.GetConcatenatedProof(), - Treedepth: uint64(leafproof.TreeDepth), + Treedepth: int(leafproof.TreeDepth), } return ctx.JSON(http.StatusOK, response) } @@ -2077,7 +2035,7 @@ func (v2 *Handlers) GetLedgerStateDeltaForTransactionGroup(ctx echo.Context, id // GetTransactionGroupLedgerStateDeltasForRound retrieves the deltas for transaction groups in a given round. 
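GetLightBlockHeaderProof above now recovers the state proof interval from the attested range with an explicit uint64 conversion, since the rounds themselves stay typed. A worked example using the 256-round interval that the tests later in this diff rely on (the concrete rounds are hypothetical):

package main

import "fmt"

// Round stands in for basics.Round, purely for illustration.
type Round uint64

func main() {
	firstAttested := Round(2*256 + 1) // 513
	lastAttested := Round(3 * 256)    // 768

	// Same computation as the handler: the interval is the size of the
	// attested range, converted back to a plain count.
	interval := uint64(lastAttested - firstAttested + 1)
	fmt.Println(interval) // 256
}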
// (GET /v2/deltas/{round}/txn/group) -func (v2 *Handlers) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round uint64, params model.GetTransactionGroupLedgerStateDeltasForRoundParams) error { +func (v2 *Handlers) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round basics.Round, params model.GetTransactionGroupLedgerStateDeltasForRoundParams) error { handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) @@ -2086,7 +2044,7 @@ func (v2 *Handlers) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Contex if !ok { return notImplemented(ctx, err, errFailedRetrievingTracer, v2.Log) } - deltas, err := tracer.GetDeltasForRound(basics.Round(round)) + deltas, err := tracer.GetDeltasForRound(round) if err != nil { return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log) } diff --git a/daemon/algod/api/server/v2/test/genesis_types_test.go b/daemon/algod/api/server/v2/test/genesis_types_test.go index bab000e3fb..0860235f37 100644 --- a/daemon/algod/api/server/v2/test/genesis_types_test.go +++ b/daemon/algod/api/server/v2/test/genesis_types_test.go @@ -23,6 +23,7 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) @@ -45,6 +46,7 @@ func getJSONTag(field reflect.StructField) string { // TestGenesisTypeCompatibility verifies that model.Genesis matches the field structure // of bookkeeping.Genesis, using the codec tags from bookkeeping as the source of truth. func TestGenesisTypeCompatibility(t *testing.T) { + partitiontest.PartitionTest(t) // Test Genesis struct compatibility verifyStructCompatibility(t, reflect.TypeOf(bookkeeping.Genesis{}), reflect.TypeOf(model.Genesis{})) @@ -166,6 +168,16 @@ func verifyTypeCompatibility(t *testing.T, bkType, modelType reflect.Type, tag s return } + case reflect.Int: + // Special case: Simple integer is fine for basics.Status which is a + // byte. You might think that we should also allow bkType to be an int + // here, and that makes some sense, but we don't use simple ints in + // go-algorand, so it seems more likely to indicate a bug somewhere. 
+ switch { + case bkType.String() == "basics.Status": + return + } + case reflect.Ptr: switch modelType.Elem().Kind() { case reflect.String: diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go index 5fa1f70e51..40d56f8981 100644 --- a/daemon/algod/api/server/v2/test/handlers_resources_test.go +++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go @@ -100,10 +100,10 @@ func (l *mockLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx bas if !ok { return ledgercore.AssetResource{}, nil } - if ap, ok := ad.AssetParams[basics.AssetIndex(aidx)]; ok { + if ap, ok := ad.AssetParams[aidx]; ok { ar.AssetParams = &ap } - if ah, ok := ad.Assets[basics.AssetIndex(aidx)]; ok { + if ah, ok := ad.Assets[aidx]; ok { ar.AssetHolding = &ah } return ar, nil @@ -140,10 +140,10 @@ func (l *mockLedger) LookupApplication(rnd basics.Round, addr basics.Address, ai if !ok { return ledgercore.AppResource{}, nil } - if ap, ok := ad.AppParams[basics.AppIndex(aidx)]; ok { + if ap, ok := ad.AppParams[aidx]; ok { ar.AppParams = &ap } - if ls, ok := ad.AppLocalStates[basics.AppIndex(aidx)]; ok { + if ls, ok := ad.AppLocalStates[aidx]; ok { ar.AppLocalState = &ls } return ar, nil @@ -302,7 +302,7 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b params.Exclude = (*model.AccountInformationParamsExclude)(&exclude) } ctx, rec := newReq(t) - err := handlers.AccountInformation(ctx, addr.String(), params) + err := handlers.AccountInformation(ctx, addr, params) require.NoError(t, err) require.Equal(t, expectedCode, rec.Code) @@ -356,7 +356,7 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b for i := 0; i < ret.TotalAssets; i++ { ctx, rec = newReq(t) aidx := basics.AssetIndex(i * 4) - err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), model.AccountAssetInformationParams{}) + err = handlers.AccountAssetInformation(ctx, addr, aidx, model.AccountAssetInformationParams{}) require.NoError(t, err) require.Equal(t, 200, rec.Code) var ret model.AccountAssetResponse @@ -365,14 +365,14 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b assert.Nil(t, ret.CreatedAsset) assert.Equal(t, ret.AssetHolding, &model.AssetHolding{ Amount: acctData.Assets[aidx].Amount, - AssetID: uint64(aidx), + AssetID: aidx, IsFrozen: acctData.Assets[aidx].Frozen, }) } for i := 0; i < ret.TotalCreatedAssets; i++ { ctx, rec = newReq(t) aidx := basics.AssetIndex(i*4 + 1) - err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), model.AccountAssetInformationParams{}) + err = handlers.AccountAssetInformation(ctx, addr, aidx, model.AccountAssetInformationParams{}) require.NoError(t, err) require.Equal(t, 200, rec.Code) var ret model.AccountAssetResponse @@ -386,7 +386,7 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b for i := 0; i < ret.TotalApps; i++ { ctx, rec = newReq(t) aidx := basics.AppIndex(i*4 + 2) - err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), model.AccountApplicationInformationParams{}) + err = handlers.AccountApplicationInformation(ctx, addr, aidx, model.AccountApplicationInformationParams{}) require.NoError(t, err) require.Equal(t, 200, rec.Code) var ret model.AccountApplicationResponse @@ -394,7 +394,7 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b require.NoError(t, err) assert.Nil(t, ret.CreatedApp) 
require.NotNil(t, ret.AppLocalState) - assert.Equal(t, uint64(aidx), ret.AppLocalState.Id) + assert.Equal(t, aidx, ret.AppLocalState.Id) ls := acctData.AppLocalStates[aidx] assert.Equal(t, ls.Schema.NumByteSlice, ret.AppLocalState.Schema.NumByteSlice) assert.Equal(t, ls.Schema.NumUint, ret.AppLocalState.Schema.NumUint) @@ -402,7 +402,7 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b for i := 0; i < ret.TotalCreatedApps; i++ { ctx, rec = newReq(t) aidx := basics.AppIndex(i*4 + 3) - err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), model.AccountApplicationInformationParams{}) + err = handlers.AccountApplicationInformation(ctx, addr, aidx, model.AccountApplicationInformationParams{}) require.NoError(t, err) require.Equal(t, 200, rec.Code) var ret model.AccountApplicationResponse @@ -421,7 +421,7 @@ func accountAssetInformationResourceLimitsTest(t *testing.T, handlers v2.Handler acctData basics.AccountData, params model.AccountAssetsInformationParams, inputNextToken int, maxResults int, expectToken bool) { ctx, rec := newReq(t) - err := handlers.AccountAssetsInformation(ctx, addr.String(), params) + err := handlers.AccountAssetsInformation(ctx, addr, params) require.NoError(t, err) require.Equal(t, 200, rec.Code) var ret model.AccountAssetsInformationResponse @@ -432,7 +432,7 @@ func accountAssetInformationResourceLimitsTest(t *testing.T, handlers v2.Handler nextRaw, err0 := strconv.ParseUint(*ret.NextToken, 10, 64) require.NoError(t, err0) // The next token decoded is actually the last asset id returned - assert.Equal(t, (*ret.AssetHoldings)[maxResults-1].AssetHolding.AssetID, nextRaw) + assert.EqualValues(t, (*ret.AssetHoldings)[maxResults-1].AssetHolding.AssetID, nextRaw) } assert.Equal(t, maxResults, len(*ret.AssetHoldings)) @@ -442,11 +442,11 @@ func accountAssetInformationResourceLimitsTest(t *testing.T, handlers v2.Handler minForResults = inputNextToken } for i := minForResults; i < minForResults+maxResults; i++ { - expectedIndex := i + 1 + expectedIndex := basics.AssetIndex(i + 1) - assert.Equal(t, acctData.Assets[basics.AssetIndex(expectedIndex)].Amount, (*ret.AssetHoldings)[i-minForResults].AssetHolding.Amount) - assert.Equal(t, acctData.Assets[basics.AssetIndex(expectedIndex)].Frozen, (*ret.AssetHoldings)[i-minForResults].AssetHolding.IsFrozen) - assert.Equal(t, uint64(expectedIndex), (*ret.AssetHoldings)[i-minForResults].AssetHolding.AssetID) + assert.Equal(t, acctData.Assets[expectedIndex].Amount, (*ret.AssetHoldings)[i-minForResults].AssetHolding.Amount) + assert.Equal(t, acctData.Assets[expectedIndex].Frozen, (*ret.AssetHoldings)[i-minForResults].AssetHolding.IsFrozen) + assert.Equal(t, expectedIndex, (*ret.AssetHoldings)[i-minForResults].AssetHolding.AssetID) } } @@ -493,12 +493,7 @@ func TestAccountAssetsInformation(t *testing.T) { accountAssetInformationResourceLimitsTest(t, handlers, addr, acctData, model.AccountAssetsInformationParams{Limit: &limit, Next: &nextTk}, rawNext, totalAssetHoldings-rawNext, false) - // 5. Malformed address - ctx, rec := newReq(t) - err := handlers.AccountAssetsInformation(ctx, "", model.AccountAssetsInformationParams{}) - require.NoError(t, err) - require.Equal(t, 400, rec.Code) - require.Equal(t, "{\"message\":\"failed to parse the address\"}\n", rec.Body.String()) + // 5. Malformed address is now handled by the echo framework // 6. 
Unknown address (200 returned, just no asset data) unknownAddress := basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} @@ -506,8 +501,8 @@ func TestAccountAssetsInformation(t *testing.T) { 0, 0, false) // 7a. Invalid limits - larger than configured max - ctx, rec = newReq(t) - err = handlers.AccountAssetsInformation(ctx, addr.String(), model.AccountAssetsInformationParams{ + ctx, rec := newReq(t) + err := handlers.AccountAssetsInformation(ctx, addr, model.AccountAssetsInformationParams{ Limit: func() *uint64 { l := uint64(v2.MaxAssetResults + 1) return &l @@ -519,7 +514,7 @@ func TestAccountAssetsInformation(t *testing.T) { // 7b. Invalid limits - zero ctx, rec = newReq(t) - err = handlers.AccountAssetsInformation(ctx, addr.String(), model.AccountAssetsInformationParams{ + err = handlers.AccountAssetsInformation(ctx, addr, model.AccountAssetsInformationParams{ Limit: func() *uint64 { l := uint64(0) return &l diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index 84e084760b..1e989332f2 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -72,7 +72,7 @@ import ( "github.com/algorand/go-algorand/util/execpool" ) -const stateProofInterval = uint64(256) +const stateProofInterval = 256 func setupMockNodeForMethodGet(t *testing.T, status node.StatusReport, devmode bool) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, []account.Root, []transactions.SignedTxn, func()) { return setupMockNodeForMethodGetWithShutdown(t, status, devmode, make(chan struct{})) @@ -100,11 +100,13 @@ func setupTestForMethodGet(t *testing.T, status node.StatusReport) (v2.Handlers, return setupMockNodeForMethodGet(t, status, false) } -func numOrNil(n uint64) *uint64 { - if n == 0 { +// omitEmpty defines a handy impl for all comparable types to convert from default value to nil ptr +func omitEmpty[T comparable](val T) *T { + var defaultVal T + if val == defaultVal { return nil } - return &n + return &val } func TestSimpleMockBuilding(t *testing.T) { @@ -116,13 +118,13 @@ func TestSimpleMockBuilding(t *testing.T) { require.Equal(t, t.Name(), handler.Node.GenesisID()) } -func accountInformationTest(t *testing.T, address string, expectedCode int) { +func accountInformationTest(t *testing.T, address basics.Address, expectedCode int) { handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) defer releasefunc() err := handler.AccountInformation(c, address, model.AccountInformationParams{}) require.NoError(t, err) require.Equal(t, expectedCode, rec.Code) - if address == poolAddr.String() { + if address == poolAddr { expectedResponse := poolAddrResponseGolden actualResponse := model.AccountResponse{} err = protocol.DecodeJSON(rec.Body.Bytes(), &actualResponse) @@ -135,11 +137,10 @@ func TestAccountInformation(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - accountInformationTest(t, poolAddr.String(), 200) - accountInformationTest(t, "bad account", 400) + accountInformationTest(t, poolAddr, 200) } -func getBlockTest(t *testing.T, blockNum uint64, format string, expectedCode int) { +func getBlockTest(t *testing.T, blockNum basics.Round, format string, expectedCode int) { handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) defer releasefunc() err := 
handler.GetBlock(c, blockNum, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)}) @@ -164,7 +165,7 @@ type blockResponseTest struct { Cert *map[string]interface{} `codec:"cert,omitempty"` } -func getBlockHeaderTest(t *testing.T, blockNum uint64, format string, expectedCode int, headerOnly *bool) { +func getBlockHeaderTest(t *testing.T, blockNum basics.Round, format string, expectedCode int, headerOnly *bool) { handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) defer releasefunc() @@ -233,7 +234,7 @@ func TestGetBlockHeader(t *testing.T) { }) } -func testGetLedgerStateDelta(t *testing.T, round uint64, format string, expectedCode int) { +func testGetLedgerStateDelta(t *testing.T, round basics.Round, format string, expectedCode int) { handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) defer releasefunc() insertRounds(require.New(t), handler, 3) @@ -560,13 +561,13 @@ func TestGetStatus(t *testing.T) { require.NoError(t, err) stat := cannedStatusReportGolden expectedResult := model.NodeStatusResponse{ - LastRound: uint64(stat.LastRound), + LastRound: stat.LastRound, LastVersion: string(stat.LastVersion), NextVersion: string(stat.NextVersion), - NextVersionRound: uint64(stat.NextVersionRound), + NextVersionRound: stat.NextVersionRound, NextVersionSupported: stat.NextVersionSupported, - TimeSinceLastRound: uint64(stat.TimeSinceLastRound().Nanoseconds()), - CatchupTime: uint64(stat.CatchupTime.Nanoseconds()), + TimeSinceLastRound: stat.TimeSinceLastRound().Nanoseconds(), + CatchupTime: stat.CatchupTime.Nanoseconds(), StoppedAtUnsupportedRound: stat.StoppedAtUnsupportedRound, LastCatchpoint: &stat.LastCatchpoint, Catchpoint: &stat.Catchpoint, @@ -598,7 +599,7 @@ func TestGetStatusConsensusUpgradeUnderflow(t *testing.T) { NextVersion: protocol.ConsensusCurrentVersion, UpgradePropose: "upgradePropose", NextProtocolVoteBefore: currentRound, - NextProtocolApprovals: proto.UpgradeVoteRounds, + NextProtocolApprovals: basics.Round(proto.UpgradeVoteRounds), } handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, stat) @@ -610,10 +611,10 @@ func TestGetStatusConsensusUpgradeUnderflow(t *testing.T) { require.NoError(t, err) // Make sure the votes are all yes, and 0 no. 
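The generic omitEmpty helper added to handlers_test.go above replaces numOrNil and works for any comparable type, mirroring the helper the handlers themselves call: a zero value becomes a nil pointer so the field is omitted from the encoded response. A minimal standalone sketch of the same idea:

package main

import "fmt"

// omitEmpty returns nil for a zero value and a pointer to the value otherwise.
func omitEmpty[T comparable](val T) *T {
	var zero T
	if val == zero {
		return nil
	}
	return &val
}

func main() {
	fmt.Println(omitEmpty(uint64(0)) == nil) // true: zero values are omitted
	fmt.Println(*omitEmpty("json"))          // json: non-zero values round-trip
}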
- require.Equal(t, uint64(0), *actualResult.UpgradeNoVotes) - require.Equal(t, proto.UpgradeVoteRounds, *actualResult.UpgradeYesVotes) - require.Equal(t, proto.UpgradeVoteRounds, *actualResult.UpgradeVotes) - require.Equal(t, proto.UpgradeThreshold, *actualResult.UpgradeVotesRequired) + require.Zero(t, *actualResult.UpgradeNoVotes) + require.EqualValues(t, proto.UpgradeVoteRounds, *actualResult.UpgradeYesVotes) + require.EqualValues(t, proto.UpgradeVoteRounds, *actualResult.UpgradeVotes) + require.EqualValues(t, proto.UpgradeThreshold, *actualResult.UpgradeVotesRequired) } func TestGetStatusConsensusUpgrade(t *testing.T) { @@ -650,19 +651,22 @@ func TestGetStatusConsensusUpgrade(t *testing.T) { require.NoError(t, err) stat := cannedStatusReportConsensusUpgradeGolden consensus := config.Consensus[protocol.ConsensusCurrentVersion] - votesToGo := uint64(stat.NextProtocolVoteBefore) - uint64(stat.LastRound) - 1 - nextProtocolVoteBefore := uint64(stat.NextProtocolVoteBefore) - votes := uint64(consensus.UpgradeVoteRounds) - votesToGo + votesToGo := stat.NextProtocolVoteBefore - stat.LastRound - 1 + nextProtocolVoteBefore := stat.NextProtocolVoteBefore + votes := basics.Round(consensus.UpgradeVoteRounds) - votesToGo votesNo := votes - stat.NextProtocolApprovals + upgradeThreshold := basics.Round(consensus.UpgradeThreshold) + upgradeVoteRounds := basics.Round(consensus.UpgradeVoteRounds) + expectedResult := model.NodeStatusResponse{ - LastRound: uint64(stat.LastRound), + LastRound: stat.LastRound, LastVersion: string(stat.LastVersion), NextVersion: string(stat.NextVersion), - NextVersionRound: uint64(stat.NextVersionRound), + NextVersionRound: stat.NextVersionRound, NextVersionSupported: stat.NextVersionSupported, - TimeSinceLastRound: uint64(stat.TimeSinceLastRound().Nanoseconds()), - CatchupTime: uint64(stat.CatchupTime.Nanoseconds()), + TimeSinceLastRound: stat.TimeSinceLastRound().Nanoseconds(), + CatchupTime: stat.CatchupTime.Nanoseconds(), StoppedAtUnsupportedRound: stat.StoppedAtUnsupportedRound, LastCatchpoint: &stat.LastCatchpoint, Catchpoint: &stat.Catchpoint, @@ -674,12 +678,12 @@ func TestGetStatusConsensusUpgrade(t *testing.T) { CatchpointTotalKvs: &stat.CatchpointCatchupTotalKVs, CatchpointProcessedKvs: &stat.CatchpointCatchupProcessedKVs, CatchpointVerifiedKvs: &stat.CatchpointCatchupVerifiedKVs, - UpgradeVotesRequired: &consensus.UpgradeThreshold, + UpgradeVotesRequired: &upgradeThreshold, UpgradeNodeVote: &stat.UpgradeApprove, UpgradeDelay: &stat.UpgradeDelay, UpgradeNoVotes: &votesNo, UpgradeYesVotes: &stat.NextProtocolApprovals, - UpgradeVoteRounds: &consensus.UpgradeVoteRounds, + UpgradeVoteRounds: &upgradeVoteRounds, UpgradeNextProtocolVoteBefore: &nextProtocolVoteBefore, UpgradeVotes: &votes, } @@ -763,7 +767,7 @@ func TestGetStatusAfterBlockTimeout(t *testing.T) { var resp model.NodeStatusResponse err = dec.Decode(&resp) require.NoError(t, err) - require.Equal(t, uint64(1), resp.LastRound) + require.EqualValues(t, 1, resp.LastRound) } func TestGetTransactionParams(t *testing.T) { @@ -822,8 +826,8 @@ func getPendingTransactionsTest(t *testing.T, format string, max uint64, expecte require.Equal(t, uint64(len(response.TopTransactions)), max) } - require.Equal(t, response.TotalTransactions, uint64(len(txnPoolGolden))) - require.GreaterOrEqual(t, response.TotalTransactions, uint64(len(response.TopTransactions))) + require.Equal(t, response.TotalTransactions, len(txnPoolGolden)) + require.GreaterOrEqual(t, response.TotalTransactions, len(response.TopTransactions)) } } @@ -882,10 
+886,8 @@ func TestPendingTransactions(t *testing.T) { func pendingTransactionsByAddressTest(t *testing.T, rootkeyToUse int, format string, expectedCode int) { handler, c, rec, rootkeys, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) defer releasefunc() - address := "bad address" - if rootkeyToUse >= 0 { - address = rootkeys[rootkeyToUse].Address().String() - } + + address := rootkeys[rootkeyToUse].Address() params := model.GetPendingTransactionsByAddressParams{Format: (*model.GetPendingTransactionsByAddressParamsFormat)(&format)} err := handler.GetPendingTransactionsByAddress(c, address, params) require.NoError(t, err) @@ -899,7 +901,6 @@ func TestPendingTransactionsByAddress(t *testing.T) { pendingTransactionsByAddressTest(t, 0, "json", 200) pendingTransactionsByAddressTest(t, 0, "msgpack", 200) pendingTransactionsByAddressTest(t, 0, "bad format", 400) - pendingTransactionsByAddressTest(t, -1, "json", 400) } func prepareTransactionTest(t *testing.T, txnToUse int, txnPrep func(transactions.SignedTxn) []byte, cfg config.Local) (handler v2.Handlers, c echo.Context, rec *httptest.ResponseRecorder, releasefunc func()) { @@ -1238,18 +1239,18 @@ int 1`, err = decoder.Decode(&actualBody) require.NoError(t, err) - var expectedFailedAt *[]uint64 + var expectedFailedAt *[]int if len(scenario.FailedAt) != 0 { clone := slices.Clone(scenario.FailedAt) clone[0]++ expectedFailedAt = &clone } - var txnAppBudgetUsed []*uint64 - appBudgetAdded := numOrNil(scenario.AppBudgetAdded) - appBudgetConsumed := numOrNil(scenario.AppBudgetConsumed) + var txnAppBudgetUsed []*int + appBudgetAdded := omitEmpty(scenario.AppBudgetAdded) + appBudgetConsumed := omitEmpty(scenario.AppBudgetConsumed) for i := range scenario.TxnAppBudgetConsumed { - txnAppBudgetUsed = append(txnAppBudgetUsed, numOrNil(scenario.TxnAppBudgetConsumed[i])) + txnAppBudgetUsed = append(txnAppBudgetUsed, omitEmpty(scenario.TxnAppBudgetConsumed[i])) } expectedBody := v2.PreEncodedSimulateResponse{ Version: 2, @@ -1284,7 +1285,7 @@ int 1`, } } -func TestSimulateTransactionVerificationFailure(t *testing.T) { +func TestSimulateTransactionVerificationErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -1405,7 +1406,7 @@ func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expected startCatchupTestFull(t, catchpoint, nodeError, expectedCode, 0, "") } -func startCatchupTestFull(t *testing.T, catchpoint string, nodeError error, expectedCode int, minRounds uint64, response string) { +func startCatchupTestFull(t *testing.T, catchpoint string, nodeError error, expectedCode int, minRounds basics.Round, response string) { numAccounts := 1 numTransactions := 1 offlineAccounts := true @@ -1435,7 +1436,7 @@ func TestStartCatchupInit(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - minRoundsToInitialize := uint64(1_000_000) + const minRoundsToInitialize = 1_000_000 tooSmallCatchpoint := fmt.Sprintf("%d#DVFRZUYHEFKRLK5N6DNJRR4IABEVN2D6H76F3ZSEPIE6MKXMQWQA", minRoundsToInitialize-1) startCatchupTestFull(t, tooSmallCatchpoint, nil, 200, minRoundsToInitialize, "the node has already been initialized") @@ -1923,7 +1924,7 @@ func TestGetProofDefault(t *testing.T) { var resp model.TransactionProofResponse err = json.Unmarshal(rec.Body.Bytes(), &resp) a.NoError(err) - a.Equal(model.TransactionProofResponseHashtypeSha512256, resp.Hashtype) + a.Equal(model.TransactionProofHashtypeSha512256, resp.Hashtype) l := handler.Node.LedgerForAPI() blkHdr, err := l.BlockHdr(1) @@ -1981,8 +1982,7 @@ func newEmptyBlock(a 
*require.Assertions, lastBlock bookkeeping.Block, genBlk bo } func addStateProof(blk bookkeeping.Block) bookkeeping.Block { - round := uint64(blk.Round()) - stateProofRound := (round/stateProofInterval - 1) * stateProofInterval + stateProofRound := (blk.Round()/stateProofInterval - 1) * stateProofInterval tx := transactions.SignedTxn{ Txn: transactions.Transaction{ Type: protocol.StateProofTx, @@ -2126,7 +2126,7 @@ func TestGetBlockProof200(t *testing.T) { proofResp := model.LightBlockHeaderProofResponse{} a.NoError(json.Unmarshal(responseRecorder.Body.Bytes(), &proofResp)) a.Equal(proofResp.Proof, leafproof.GetConcatenatedProof()) - a.Equal(proofResp.Treedepth, uint64(leafproof.TreeDepth)) + a.EqualValues(proofResp.Treedepth, leafproof.TreeDepth) } func TestStateproofTransactionForRound(t *testing.T) { @@ -2150,14 +2150,14 @@ func TestStateproofTransactionForRound(t *testing.T) { defer cncl() txn, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofInterval*2+1), 1000, nil) a.NoError(err) - a.Equal(2*stateProofInterval+1, txn.Message.FirstAttestedRound) - a.Equal(3*stateProofInterval, txn.Message.LastAttestedRound) + a.EqualValues(2*stateProofInterval+1, txn.Message.FirstAttestedRound) + a.EqualValues(3*stateProofInterval, txn.Message.LastAttestedRound) a.Equal([]byte{0x0, 0x1, 0x2}, txn.Message.BlockHeadersCommitment) txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(2*stateProofInterval), 1000, nil) a.NoError(err) - a.Equal(stateProofInterval+1, txn.Message.FirstAttestedRound) - a.Equal(2*stateProofInterval, txn.Message.LastAttestedRound) + a.EqualValues(stateProofInterval+1, txn.Message.FirstAttestedRound) + a.EqualValues(2*stateProofInterval, txn.Message.LastAttestedRound) txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, 999, 1000, nil) a.ErrorIs(err, v2.ErrNoStateProofForRound) @@ -2347,7 +2347,7 @@ func TestDeltasForTxnGroup(t *testing.T) { jsonFormatForRound := model.GetTransactionGroupLedgerStateDeltasForRoundParamsFormatJson err = handlers.GetTransactionGroupLedgerStateDeltasForRound( c, - uint64(1), + 1, model.GetTransactionGroupLedgerStateDeltasForRoundParams{Format: &jsonFormatForRound}, ) require.NoError(t, err) @@ -2365,7 +2365,7 @@ func TestDeltasForTxnGroup(t *testing.T) { c, rec = newReq(t) err = handlers.GetTransactionGroupLedgerStateDeltasForRound( c, - uint64(4), + 4, model.GetTransactionGroupLedgerStateDeltasForRoundParams{Format: &jsonFormatForRound}, ) require.NoError(t, err) @@ -2501,7 +2501,7 @@ func TestGeneratePartkeys(t *testing.T) { rec := httptest.NewRecorder() c := e.NewContext(req, rec) - err := handler.GenerateParticipationKeys(c, addr.String(), model.GenerateParticipationKeysParams{ + err := handler.GenerateParticipationKeys(c, addr, model.GenerateParticipationKeysParams{ First: 1000, Last: 2000, }) @@ -2523,7 +2523,7 @@ func TestGeneratePartkeys(t *testing.T) { // Simulate a blocked keygen process (and block until the previous keygen is complete) err := handler.KeygenLimiter.Acquire(context.Background(), 1) require.NoError(t, err) - err = handler.GenerateParticipationKeys(c, addr.String(), model.GenerateParticipationKeysParams{ + err = handler.GenerateParticipationKeys(c, addr, model.GenerateParticipationKeysParams{ First: 1000, Last: 2000, }) diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 676836a03a..bb602ad726 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -116,7 +116,7 
@@ func (m *mockNode) RemoveParticipationKey(id account.ParticipationID) error { panic("implement me") } -func (m *mockNode) SetSyncRound(rnd uint64) error { +func (m *mockNode) SetSyncRound(rnd basics.Round) error { args := m.Called(rnd) return args.Error(0) } @@ -124,9 +124,9 @@ func (m *mockNode) SetSyncRound(rnd uint64) error { func (m *mockNode) UnsetSyncRound() { } -func (m *mockNode) GetSyncRound() uint64 { +func (m *mockNode) GetSyncRound() basics.Round { args := m.Called() - return uint64(args.Int(0)) + return basics.Round(args.Int(0)) } func (m *mockNode) AppendParticipationKeys(id account.ParticipationID, keys account.StateProofKeys) error { diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index a560872a2e..837b88e0a4 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -95,10 +95,6 @@ func convertMap[X comparable, Y, Z any](input map[X]Y, fn func(X, Y) Z) []Z { return output } -func uint64Slice[T ~uint64](s []T) []uint64 { - return convertSlice(s, func(t T) uint64 { return uint64(t) }) -} - func stringSlice[T fmt.Stringer](s []T) []string { return convertSlice(s, func(t T) string { return t.String() }) } @@ -163,7 +159,7 @@ func computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, pay // computeAssetIndexFromTxn returns the created asset index given a confirmed // transaction whose confirmation block is available in the ledger. Note that // 0 is an invalid asset index (they start at 1). -func computeAssetIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { +func computeAssetIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *basics.AssetIndex { // Must have ledger if l == nil { return nil @@ -181,7 +177,7 @@ func computeAssetIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { return nil } - aid := uint64(tx.ApplyData.ConfigAsset) + aid := tx.ApplyData.ConfigAsset if aid > 0 { return &aid } @@ -201,13 +197,13 @@ func computeAssetIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { return nil } - return computeCreatableIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset) + return (*basics.AssetIndex)(computeCreatableIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset)) } // computeAppIndexFromTxn returns the created app index given a confirmed // transaction whose confirmation block is available in the ledger. Note that // 0 is an invalid asset index (they start at 1). -func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { +func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *basics.AppIndex { // Must have ledger if l == nil { return nil @@ -225,7 +221,7 @@ func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { return nil } - aid := uint64(tx.ApplyData.ApplicationID) + aid := tx.ApplyData.ApplicationID if aid > 0 { return &aid } @@ -245,7 +241,7 @@ func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { return nil } - return computeCreatableIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset) + return (*basics.AppIndex)(computeCreatableIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset)) } // getCodecHandle converts a format string into the encoder + content type @@ -378,8 +374,8 @@ func ConvertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo { // Since this is an inner txn, we know these indexes will be populated. 
No // need to search payset for IDs - response.AssetIndex = omitEmpty(uint64(txn.ApplyData.ConfigAsset)) - response.ApplicationIndex = omitEmpty(uint64(txn.ApplyData.ApplicationID)) + response.AssetIndex = omitEmpty(txn.ApplyData.ConfigAsset) + response.ApplicationIndex = omitEmpty(txn.ApplyData.ApplicationID) response.LocalStateDelta = sliceOrNil(localDeltasToLocalDeltas(txn.ApplyData.EvalDelta, &txn.Txn)) response.GlobalStateDelta = sliceOrNil(globalDeltaToStateDelta(txn.ApplyData.EvalDelta.GlobalDelta)) @@ -444,7 +440,7 @@ func convertApplicationStateChange(stateChange simulation.StateOperation) model. func convertOpcodeTraceUnit(opcodeTraceUnit simulation.OpcodeTraceUnit) model.SimulationOpcodeTraceUnit { return model.SimulationOpcodeTraceUnit{ Pc: opcodeTraceUnit.PC, - SpawnedInners: sliceOrNil(convertSlice(opcodeTraceUnit.SpawnedInners, func(v int) uint64 { return uint64(v) })), + SpawnedInners: sliceOrNil(opcodeTraceUnit.SpawnedInners), StackAdditions: sliceOrNil(convertSlice(opcodeTraceUnit.StackAdded, convertToAVMValue)), StackPopCount: omitEmpty(opcodeTraceUnit.StackPopCount), ScratchChanges: sliceOrNil(convertSlice(opcodeTraceUnit.ScratchSlotChanges, convertScratchChange)), @@ -496,25 +492,25 @@ func convertUnnamedResourcesAccessed(resources *simulation.ResourceTracker) *mod } return &model.SimulateUnnamedResourcesAccessed{ Accounts: sliceOrNil(stringSlice(slices.Collect(maps.Keys(resources.Accounts)))), - Assets: sliceOrNil(uint64Slice(slices.Collect(maps.Keys(resources.Assets)))), - Apps: sliceOrNil(uint64Slice(slices.Collect(maps.Keys(resources.Apps)))), + Assets: sliceOrNil(slices.Collect(maps.Keys(resources.Assets))), + Apps: sliceOrNil(slices.Collect(maps.Keys(resources.Apps))), Boxes: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.Boxes)), func(box logic.BoxRef) model.BoxReference { return model.BoxReference{ - App: uint64(box.App), + App: box.App, Name: []byte(box.Name), } })), - ExtraBoxRefs: omitEmpty(uint64(resources.NumEmptyBoxRefs)), + ExtraBoxRefs: omitEmpty(resources.NumEmptyBoxRefs), AssetHoldings: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.AssetHoldings)), func(holding ledgercore.AccountAsset) model.AssetHoldingReference { return model.AssetHoldingReference{ Account: holding.Address.String(), - Asset: uint64(holding.Asset), + Asset: holding.Asset, } })), AppLocals: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.AppLocals)), func(local ledgercore.AccountApp) model.ApplicationLocalReference { return model.ApplicationLocalReference{ Account: local.Address.String(), - App: uint64(local.App), + App: local.App, } })), } @@ -549,7 +545,7 @@ func convertAppKVStoreInstance(address basics.Address, appKVPairs simulation.App func convertApplicationInitialStates(appID basics.AppIndex, states simulation.SingleAppInitialStates) model.ApplicationInitialStates { return model.ApplicationInitialStates{ - Id: uint64(appID), + Id: appID, AppBoxes: convertAppKVStorePtr(basics.Address{}, states.AppBoxes), AppGlobals: convertAppKVStorePtr(basics.Address{}, states.AppGlobals), AppLocals: sliceOrNil(convertMap(states.AppLocals, convertAppKVStoreInstance)), @@ -580,7 +576,7 @@ func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) PreEncodedS } if len(txnGroupResult.FailedAt) > 0 { - failedAt := slices.Clone[[]uint64, uint64](txnGroupResult.FailedAt) + failedAt := slices.Clone[[]int, int](txnGroupResult.FailedAt) encoded.FailedAt = &failedAt } @@ -602,7 +598,7 @@ func convertSimulationResult(result simulation.Result) 
PreEncodedSimulateRespons return PreEncodedSimulateResponse{ Version: result.Version, - LastRound: uint64(result.LastRound), + LastRound: result.LastRound, TxnGroups: convertSlice(result.TxnGroups, convertTxnGroupResult), EvalOverrides: evalOverrides, ExecTraceConfig: result.TraceConfig, diff --git a/daemon/algod/api/templates/echo/echo-register.tmpl b/daemon/algod/api/templates/echo/echo-register.tmpl new file mode 100644 index 0000000000..e9d3456514 --- /dev/null +++ b/daemon/algod/api/templates/echo/echo-register.tmpl @@ -0,0 +1,33 @@ + + +// This is a simple interface which specifies echo.Route addition functions which +// are present on both echo.Echo and echo.Group, since we want to allow using +// either of them for path registration +type EchoRouter interface { + CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route +} + +// RegisterHandlers adds each server route to the EchoRouter. +func RegisterHandlers(router EchoRouter, si ServerInterface, m ...echo.MiddlewareFunc) { + RegisterHandlersWithBaseURL(router, si, "", m...) +} + +// Registers handlers, and prepends BaseURL to the paths, so that the paths +// can be served under a prefix. +func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string, m ...echo.MiddlewareFunc) { +{{if .}} + wrapper := ServerInterfaceWrapper{ + Handler: si, + } +{{end}} +{{range .}}router.{{.Method}}(baseURL + "{{.Path | swaggerUriToEchoUri}}", wrapper.{{.OperationId}}, m...) 
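The EchoRouter interface in the new echo-register.tmpl exists so the generated registration code accepts either the root *echo.Echo or an *echo.Group. A minimal usage sketch, assuming some concrete ServerInterface implementation si and the labstack echo/v4 import (neither is part of this template):

import "github.com/labstack/echo/v4"

func newRouter(si ServerInterface) *echo.Echo {
	e := echo.New()
	// *echo.Echo itself satisfies EchoRouter.
	RegisterHandlers(e, si)
	// *echo.Group satisfies it too, so the same handlers could instead be
	// mounted under a prefix, e.g. RegisterHandlers(e.Group("/v2"), si).
	return e
}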
+{{end}} +} diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 8a8bbde437..8fb650078e 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -78,7 +78,7 @@ type Server struct { } // Initialize creates a Node instance with applicable network services -func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genesisText string) error { +func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genesisText string, migrationResults []config.MigrationResult) error { // set up node s.log = logging.Base() @@ -234,6 +234,11 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes } s.log.Infoln("++++++++++++++++++++++++++++++++++++++++") + for _, m := range migrationResults { + s.log.Infof("Upgraded default config value for %s from %v (version %d) to %v (version %d)", + m.FieldName, m.OldValue, m.OldVersion, m.NewValue, m.NewVersion) + } + metricLabels := map[string]string{} if s.log.GetTelemetryEnabled() { metricLabels["telemetry_session"] = s.log.GetTelemetrySession() @@ -294,7 +299,7 @@ func makeListener(addr string) (net.Listener, error) { preferredAddr := strings.Replace(addr, ":0", ":8080", -1) listener, err = net.Listen("tcp", preferredAddr) if err == nil { - return listener, err + return listener, nil } } // err was not nil or :0 was not provided, fall back to originally passed addr @@ -339,9 +344,9 @@ func (s *Server) Start() { } if cfg.EnableMetricReporting { - if err := s.metricCollector.Start(context.Background()); err != nil { + if err1 := s.metricCollector.Start(context.Background()); err1 != nil { // log this error - s.log.Infof("Unable to start metric collection service : %v", err) + s.log.Infof("Unable to start metric collection service : %v", err1) } s.metricServiceStarted = true } diff --git a/daemon/kmd/config/config.go b/daemon/kmd/config/config.go index 235935950a..95bce23129 100644 --- a/daemon/kmd/config/config.go +++ b/daemon/kmd/config/config.go @@ -116,7 +116,7 @@ func LoadKMDConfig(dataDir string) (cfg KMDConfig, err error) { // SaveObjectToFile may return an unhandled error because // there is nothing to do if an error occurs codecs.SaveObjectToFile(exampleFilename, cfg, true) - return cfg, nil + return cfg, nil //nolint:nilerr // intentional } // Fill in the non-default values err = json.Unmarshal(dat, &cfg) diff --git a/daemon/kmd/server/server.go b/daemon/kmd/server/server.go index ec5e695b5b..b94530eaf9 100644 --- a/daemon/kmd/server/server.go +++ b/daemon/kmd/server/server.go @@ -223,9 +223,9 @@ func (ws *WalletServer) start(kill chan os.Signal) (died chan error, sock string ws.mux.Unlock() // Shut down the server - err := srv.Shutdown(context.Background()) - if err != nil { - ws.Log.Warnf("non-nil error stopping kmd wallet HTTP server: %s", err) + err1 := srv.Shutdown(context.Background()) + if err1 != nil { + ws.Log.Warnf("non-nil error stopping kmd wallet HTTP server: %s", err1) } }() diff --git a/daemon/kmd/wallet/driver/ledger.go b/daemon/kmd/wallet/driver/ledger.go index 754b7bdd9b..8bca370920 100644 --- a/daemon/kmd/wallet/driver/ledger.go +++ b/daemon/kmd/wallet/driver/ledger.go @@ -136,9 +136,9 @@ func (lwd *LedgerWalletDriver) scanWalletsLocked() error { continue } - dev, err := info.Open() - if err != nil { - lwd.log.Warnf("enumerated but failed to open ledger %s %x: %v", info.Path, info.ProductID, err) + dev, err1 := info.Open() + if err1 != nil { + lwd.log.Warnf("enumerated but failed to open ledger %s %x: %v", info.Path, info.ProductID, err1) continue } diff 
--git a/data/account/account.go b/data/account/account.go index d5840e041d..03ac4471fb 100644 --- a/data/account/account.go +++ b/data/account/account.go @@ -57,19 +57,19 @@ func ImportRoot(store db.Accessor, seed [32]byte) (acc Root, err error) { raw := protocol.Encode(s) err = store.Atomic(func(ctx context.Context, tx *sql.Tx) error { - err := rootInstallDatabase(tx) - if err != nil { - return fmt.Errorf("ImportRoot: failed to install database: %v", err) + err1 := rootInstallDatabase(tx) + if err1 != nil { + return fmt.Errorf("ImportRoot: failed to install database: %v", err1) } - stmt, err := tx.Prepare("insert into RootAccount values (?)") - if err != nil { - return fmt.Errorf("ImportRoot: failed to prepare statement: %v", err) + stmt, err1 := tx.Prepare("insert into RootAccount values (?)") + if err1 != nil { + return fmt.Errorf("ImportRoot: failed to prepare statement: %v", err1) } - _, err = stmt.Exec(raw) - if err != nil { - return fmt.Errorf("ImportRoot: failed to insert account: %v", err) + _, err1 = stmt.Exec(raw) + if err1 != nil { + return fmt.Errorf("ImportRoot: failed to insert account: %v", err1) } return nil @@ -91,18 +91,18 @@ func RestoreRoot(store db.Accessor) (acc Root, err error) { err = store.Atomic(func(ctx context.Context, tx *sql.Tx) error { var nrows int row := tx.QueryRow("select count(*) from RootAccount") - err := row.Scan(&nrows) - if err != nil { - return fmt.Errorf("RestoreRoot: could not query storage: %v", err) + err1 := row.Scan(&nrows) + if err1 != nil { + return fmt.Errorf("RestoreRoot: could not query storage: %v", err1) } if nrows != 1 { logging.Base().Infof("RestoreRoot: state not found (n = %v)", nrows) } row = tx.QueryRow("select data from RootAccount") - err = row.Scan(&raw) - if err != nil { - return fmt.Errorf("RestoreRoot: could not read account raw data: %v", err) + err1 = row.Scan(&raw) + if err1 != nil { + return fmt.Errorf("RestoreRoot: could not read account raw data: %v", err1) } return nil @@ -146,9 +146,9 @@ func RestoreParticipation(store db.Accessor) (acc PersistedParticipation, err er err = store.Atomic(func(ctx context.Context, tx *sql.Tx) error { var nrows int row := tx.QueryRow("select count(*) from ParticipationAccount") - err := row.Scan(&nrows) - if err != nil { - return fmt.Errorf("RestoreParticipation: could not query storage: %v", err) + err1 := row.Scan(&nrows) + if err1 != nil { + return fmt.Errorf("RestoreParticipation: could not query storage: %v", err1) } if nrows != 1 { logging.Base().Infof("RestoreParticipation: state not found (n = %v)", nrows) @@ -156,9 +156,9 @@ func RestoreParticipation(store db.Accessor) (acc PersistedParticipation, err er row = tx.QueryRow("select parent, vrf, voting, firstValid, lastValid, keyDilution, stateProof from ParticipationAccount") - err = row.Scan(&rawParent, &rawVRF, &rawVoting, &acc.FirstValid, &acc.LastValid, &acc.KeyDilution, &rawStateProof) - if err != nil { - return fmt.Errorf("RestoreParticipation: could not read account raw data: %v", err) + err1 = row.Scan(&rawParent, &rawVRF, &rawVoting, &acc.FirstValid, &acc.LastValid, &acc.KeyDilution, &rawStateProof) + if err1 != nil { + return fmt.Errorf("RestoreParticipation: could not read account raw data: %v", err1) } copy(acc.Parent[:32], rawParent) diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go index eaab8ce998..e2ed12ba82 100644 --- a/data/account/participationRegistry_test.go +++ b/data/account/participationRegistry_test.go @@ -1341,13 +1341,13 @@ func 
BenchmarkDeleteExpired(b *testing.B) { }() // make participation key - lastValid := 3000000 + lastValid := basics.Round(3000000) keyDilution := 10000 if kd, err := strconv.Atoi(os.Getenv("DILUTION")); err == nil { // allow setting key dilution via env var keyDilution = kd } if lv, err := strconv.Atoi(os.Getenv("LASTVALID")); err == nil { // allow setting last valid via env var - lastValid = lv + lastValid = basics.Round(lv) } var part Participation @@ -1359,7 +1359,7 @@ func BenchmarkDeleteExpired(b *testing.B) { if os.Getenv("SLOWKEYS") == "" { // makeTestParticipation makes small state proof secrets to save time b.Log("making fast part key", i, "for firstValid 0 lastValid", lastValid, "dilution", keyDilution) - part = makeTestParticipation(a, i+1, 0, basics.Round(lastValid), uint64(keyDilution)) + part = makeTestParticipation(a, i+1, 0, lastValid, uint64(keyDilution)) a.NotNil(part) } else { // generate key the same way as BenchmarkOldKeysDeletion @@ -1374,7 +1374,7 @@ func BenchmarkDeleteExpired(b *testing.B) { }() b.Log("making part key", i, "for firstValid 0 lastValid", lastValid, "dilution", keyDilution) - ppart, err := FillDBWithParticipationKeys(ppartDB, rootAddr, 0, basics.Round(lastValid), uint64(keyDilution)) + ppart, err := FillDBWithParticipationKeys(ppartDB, rootAddr, 0, lastValid, uint64(keyDilution)) ppartDB.Close() a.NoError(err) part = ppart.Participation diff --git a/data/account/participation_test.go b/data/account/participation_test.go index 8f246c0f2f..d2d967d8c8 100644 --- a/data/account/participation_test.go +++ b/data/account/participation_test.go @@ -163,24 +163,24 @@ func BenchmarkOldKeysDeletion(b *testing.B) { }() // make participation key - lastValid := 3000000 + lastValid := basics.Round(3000000) keyDilution := 10000 if kd, err := strconv.Atoi(os.Getenv("DILUTION")); err == nil { // allow setting key dilution via env var keyDilution = kd } if lv, err := strconv.Atoi(os.Getenv("LASTVALID")); err == nil { // allow setting last valid via env var - lastValid = lv + lastValid = basics.Round(lv) } b.Log("making part keys for firstValid 0 lastValid", lastValid, "dilution", keyDilution) - part, err := FillDBWithParticipationKeys(partDB, rootAddr, 0, basics.Round(lastValid), uint64(keyDilution)) + part, err := FillDBWithParticipationKeys(partDB, rootAddr, 0, lastValid, uint64(keyDilution)) a.NoError(err) a.NotNil(part) proto := config.Consensus[protocol.ConsensusCurrentVersion] b.Log("starting DeleteOldKeys benchmark up to round", b.N) b.ResetTimer() - for i := 0; i < b.N; i++ { - errCh := part.DeleteOldKeys(basics.Round(i), proto) + for i := range basics.Round(b.N) { + errCh := part.DeleteOldKeys(i, proto) err := <-errCh a.NoError(err) } @@ -500,7 +500,7 @@ func TestKeyregValidityOverLimit(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) - maxValidPeriod := config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod + maxValidPeriod := basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod) dilution := config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution var address basics.Address @@ -509,7 +509,7 @@ func TestKeyregValidityOverLimit(t *testing.T) { store := createMerkleSignatureSchemeTestDB(a) defer store.Close() firstValid := basics.Round(0) - lastValid := basics.Round(maxValidPeriod + 1) + lastValid := maxValidPeriod + 1 _, err := FillDBWithParticipationKeys(*store, address, firstValid, lastValid, dilution) a.Error(err) } @@ -546,7 +546,7 @@ func TestKeyregValidityPeriod(t *testing.T) { 
//nolint:paralleltest // Not paral config.Consensus[protocol.ConsensusCurrentVersion] = version }() - maxValidPeriod := config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod + maxValidPeriod := basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod) dilution := config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution var address basics.Address @@ -554,7 +554,7 @@ func TestKeyregValidityPeriod(t *testing.T) { //nolint:paralleltest // Not paral store := createMerkleSignatureSchemeTestDB(a) defer store.Close() firstValid := basics.Round(0) - lastValid := basics.Round(maxValidPeriod) + lastValid := maxValidPeriod crypto.RandBytes(address[:]) _, err := FillDBWithParticipationKeys(*store, address, firstValid, lastValid, dilution) a.NoError(err) @@ -562,7 +562,7 @@ func TestKeyregValidityPeriod(t *testing.T) { //nolint:paralleltest // Not paral store = createMerkleSignatureSchemeTestDB(a) defer store.Close() firstValid = basics.Round(0) - lastValid = basics.Round(maxValidPeriod + 1) + lastValid = maxValidPeriod + 1 _, err = FillDBWithParticipationKeys(*store, address, firstValid, lastValid, dilution) a.Error(err) } diff --git a/data/appRateLimiter.go b/data/appRateLimiter.go index 42d6812916..2f9e0e8f9a 100644 --- a/data/appRateLimiter.go +++ b/data/appRateLimiter.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" @@ -214,10 +214,10 @@ func (r *appRateLimiter) len() int { var appKeyPool = sync.Pool{ New: func() interface{} { return &appKeyBuf{ - // max config.MaxTxGroupSize apps per txgroup, each app has up to MaxAppTxnForeignApps extra foreign apps - // at moment of writing config.MaxTxGroupSize = 16, config.MaxAppTxnForeignApps = 8 - keys: make([]keyType, 0, config.MaxTxGroupSize*(1+config.MaxAppTxnForeignApps)), - buckets: make([]int, 0, config.MaxTxGroupSize*(1+config.MaxAppTxnForeignApps)), + // max bounds.MaxTxGroupSize apps per txgroup, each app has up to MaxAppTxnForeignApps extra foreign apps + // at moment of writing bounds.MaxTxGroupSize = 16, bounds.MaxAppTxnForeignApps = 8 + keys: make([]keyType, 0, bounds.MaxTxGroupSize*(1+bounds.MaxAppTxnForeignApps)), + buckets: make([]int, 0, bounds.MaxTxGroupSize*(1+bounds.MaxAppTxnForeignApps)), } }, } @@ -265,7 +265,7 @@ func txgroupToKeys(txgroup []transactions.SignedTxn, origin []byte, seed uint64, txnToBucket := func(appIdx basics.AppIndex) int { return int(memhash64(uint64(appIdx), seed) % uint64(numBuckets)) } - seen := make(map[basics.AppIndex]struct{}, len(txgroup)*(1+config.MaxAppTxnForeignApps)) + seen := make(map[basics.AppIndex]struct{}, len(txgroup)*(1+bounds.MaxAppTxnForeignApps)) valid := func(appIdx basics.AppIndex) bool { if appIdx != 0 { _, ok := seen[appIdx] diff --git a/data/appRateLimiter_test.go b/data/appRateLimiter_test.go index a1971f5144..6a9efbd30a 100644 --- a/data/appRateLimiter_test.go +++ b/data/appRateLimiter_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" @@ -492,8 +493,8 @@ func BenchmarkAppRateLimiter_TxgroupToKeys(b *testing.B) { txgroups := make([][]transactions.SignedTxn, 0, b.N) for i := 0; i < b.N; 
i++ { - txgroup := make([]transactions.SignedTxn, 0, config.MaxTxGroupSize) - for j := 0; j < config.MaxTxGroupSize; j++ { + txgroup := make([]transactions.SignedTxn, 0, bounds.MaxTxGroupSize) + for j := 0; j < bounds.MaxTxGroupSize; j++ { apptxn := transactions.Transaction{ Type: protocol.ApplicationCallTx, ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go index 585a100765..71c0c87ec0 100644 --- a/data/basics/msgp_gen.go +++ b/data/basics/msgp_gen.go @@ -7,7 +7,7 @@ import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" ) @@ -711,8 +711,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "AssetParams") return } - if zb0016 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxAssetsPerAccount)) + if zb0016 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0016), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "struct-from-array", "AssetParams") return } @@ -747,8 +747,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "Assets") return } - if zb0018 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0018), uint64(encodedMaxAssetsPerAccount)) + if zb0018 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0018), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "struct-from-array", "Assets") return } @@ -863,8 +863,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "AppLocalStates") return } - if zb0022 > EncodedMaxAppLocalStates { - err = msgp.ErrOverflow(uint64(zb0022), uint64(EncodedMaxAppLocalStates)) + if zb0022 > bounds.EncodedMaxAppLocalStates { + err = msgp.ErrOverflow(uint64(zb0022), uint64(bounds.EncodedMaxAppLocalStates)) err = msgp.WrapError(err, "struct-from-array", "AppLocalStates") return } @@ -899,8 +899,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "AppParams") return } - if zb0024 > EncodedMaxAppParams { - err = msgp.ErrOverflow(uint64(zb0024), uint64(EncodedMaxAppParams)) + if zb0024 > bounds.EncodedMaxAppParams { + err = msgp.ErrOverflow(uint64(zb0024), uint64(bounds.EncodedMaxAppParams)) err = msgp.WrapError(err, "struct-from-array", "AppParams") return } @@ -1145,8 +1145,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "AssetParams") return } - if zb0033 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxAssetsPerAccount)) + if zb0033 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0033), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "AssetParams") return } @@ -1179,8 +1179,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Assets") return } - if zb0035 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0035), uint64(encodedMaxAssetsPerAccount)) + if zb0035 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0035), 
uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "Assets") return } @@ -1289,8 +1289,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "AppLocalStates") return } - if zb0039 > EncodedMaxAppLocalStates { - err = msgp.ErrOverflow(uint64(zb0039), uint64(EncodedMaxAppLocalStates)) + if zb0039 > bounds.EncodedMaxAppLocalStates { + err = msgp.ErrOverflow(uint64(zb0039), uint64(bounds.EncodedMaxAppLocalStates)) err = msgp.WrapError(err, "AppLocalStates") return } @@ -1323,8 +1323,8 @@ func (z *AccountData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "AppParams") return } - if zb0041 > EncodedMaxAppParams { - err = msgp.ErrOverflow(uint64(zb0041), uint64(EncodedMaxAppParams)) + if zb0041 > bounds.EncodedMaxAppParams { + err = msgp.ErrOverflow(uint64(zb0041), uint64(bounds.EncodedMaxAppParams)) err = msgp.WrapError(err, "AppParams") return } @@ -1506,28 +1506,28 @@ func AccountDataMaxSize() (s int) { s = 3 + 4 + msgp.ByteSize + 5 + MicroAlgosMaxSize() + 6 + msgp.Uint64Size + 4 + MicroAlgosMaxSize() + 5 + crypto.OneTimeSignatureVerifierMaxSize() + 4 + crypto.VRFVerifierMaxSize() + 6 + merklesignature.CommitmentMaxSize() + 8 + msgp.Uint64Size + 8 + msgp.Uint64Size + 7 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AssetParams - s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetIndexMaxSize()) // Adding size of map values for z.AssetParams - s += encodedMaxAssetsPerAccount * (AssetParamsMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetParamsMaxSize()) s += 6 s += msgp.MapHeaderSize // Adding size of map keys for z.Assets - s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetIndexMaxSize()) // Adding size of map values for z.Assets - s += encodedMaxAssetsPerAccount * (1) + s += bounds.EncodedMaxAssetsPerAccount * (1) s += 2 + msgp.Uint64Size + 2 + msgp.BoolSize s += 6 + AddressMaxSize() + 3 + msgp.BoolSize + 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AppLocalStates - s += EncodedMaxAppLocalStates * (AppIndexMaxSize()) + s += bounds.EncodedMaxAppLocalStates * (AppIndexMaxSize()) // Adding size of map values for z.AppLocalStates - s += EncodedMaxAppLocalStates * (AppLocalStateMaxSize()) + s += bounds.EncodedMaxAppLocalStates * (AppLocalStateMaxSize()) s += 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AppParams - s += EncodedMaxAppParams * (AppIndexMaxSize()) + s += bounds.EncodedMaxAppParams * (AppIndexMaxSize()) // Adding size of map values for z.AppParams - s += EncodedMaxAppParams * (AppParamsMaxSize()) + s += bounds.EncodedMaxAppParams * (AppParamsMaxSize()) s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size return } @@ -1800,8 +1800,8 @@ func (z *AppLocalState) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "KeyValue") return } - if zb0007 > EncodedMaxKeyValueEntries { - err = msgp.ErrOverflow(uint64(zb0007), uint64(EncodedMaxKeyValueEntries)) + if zb0007 > bounds.EncodedMaxKeyValueEntries { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.EncodedMaxKeyValueEntries)) err = msgp.WrapError(err, "struct-from-array", "KeyValue") return } @@ -1928,8 +1928,8 @@ func (z *AppLocalState) UnmarshalMsgWithState(bts []byte, st 
msgp.UnmarshalState err = msgp.WrapError(err, "KeyValue") return } - if zb0011 > EncodedMaxKeyValueEntries { - err = msgp.ErrOverflow(uint64(zb0011), uint64(EncodedMaxKeyValueEntries)) + if zb0011 > bounds.EncodedMaxKeyValueEntries { + err = msgp.ErrOverflow(uint64(zb0011), uint64(bounds.EncodedMaxKeyValueEntries)) err = msgp.WrapError(err, "KeyValue") return } @@ -1998,9 +1998,9 @@ func AppLocalStateMaxSize() (s int) { s = 1 + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.KeyValue - s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen) + s += bounds.EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + bounds.MaxAppBytesKeyLen) // Adding size of map values for z.KeyValue - s += EncodedMaxKeyValueEntries * (TealValueMaxSize()) + s += bounds.EncodedMaxKeyValueEntries * (TealValueMaxSize()) return } @@ -2170,8 +2170,8 @@ func (z *AppParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram") return } - if zb0005 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxAvailableAppProgramLen)) + if zb0005 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -2188,8 +2188,8 @@ func (z *AppParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram") return } - if zb0006 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxAvailableAppProgramLen)) + if zb0006 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0006), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -2207,8 +2207,8 @@ func (z *AppParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "GlobalState") return } - if zb0007 > EncodedMaxKeyValueEntries { - err = msgp.ErrOverflow(uint64(zb0007), uint64(EncodedMaxKeyValueEntries)) + if zb0007 > bounds.EncodedMaxKeyValueEntries { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.EncodedMaxKeyValueEntries)) err = msgp.WrapError(err, "struct-from-array", "GlobalState") return } @@ -2424,8 +2424,8 @@ func (z *AppParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "ApprovalProgram") return } - if zb0013 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxAvailableAppProgramLen)) + if zb0013 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0013), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -2440,8 +2440,8 @@ func (z *AppParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "ClearStateProgram") return } - if zb0014 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxAvailableAppProgramLen)) + if zb0014 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0014), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -2457,8 +2457,8 @@ func (z *AppParams) 
UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "GlobalState") return } - if zb0015 > EncodedMaxKeyValueEntries { - err = msgp.ErrOverflow(uint64(zb0015), uint64(EncodedMaxKeyValueEntries)) + if zb0015 > bounds.EncodedMaxKeyValueEntries { + err = msgp.ErrOverflow(uint64(zb0015), uint64(bounds.EncodedMaxKeyValueEntries)) err = msgp.WrapError(err, "GlobalState") return } @@ -2677,12 +2677,12 @@ func (z *AppParams) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func AppParamsMaxSize() (s int) { - s = 1 + 7 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 7 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 3 + s = 1 + 7 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 7 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 3 s += msgp.MapHeaderSize // Adding size of map keys for z.GlobalState - s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen) + s += bounds.EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + bounds.MaxAppBytesKeyLen) // Adding size of map values for z.GlobalState - s += EncodedMaxKeyValueEntries * (TealValueMaxSize()) + s += bounds.EncodedMaxKeyValueEntries * (TealValueMaxSize()) s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint32Size + 2 + msgp.Uint64Size return } @@ -3057,8 +3057,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "UnitName") return } - if zb0004 > config.MaxAssetUnitNameBytes { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxAssetUnitNameBytes)) + if zb0004 > bounds.MaxAssetUnitNameBytes { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxAssetUnitNameBytes)) return } (*z).UnitName, bts, err = msgp.ReadStringBytes(bts) @@ -3075,8 +3075,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "AssetName") return } - if zb0005 > config.MaxAssetNameBytes { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxAssetNameBytes)) + if zb0005 > bounds.MaxAssetNameBytes { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxAssetNameBytes)) return } (*z).AssetName, bts, err = msgp.ReadStringBytes(bts) @@ -3093,8 +3093,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "URL") return } - if zb0006 > config.MaxAssetURLBytes { - err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxAssetURLBytes)) + if zb0006 > bounds.MaxAssetURLBytes { + err = msgp.ErrOverflow(uint64(zb0006), uint64(bounds.MaxAssetURLBytes)) return } (*z).URL, bts, err = msgp.ReadStringBytes(bts) @@ -3191,8 +3191,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "UnitName") return } - if zb0007 > config.MaxAssetUnitNameBytes { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxAssetUnitNameBytes)) + if zb0007 > bounds.MaxAssetUnitNameBytes { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxAssetUnitNameBytes)) return } (*z).UnitName, bts, err = msgp.ReadStringBytes(bts) @@ -3207,8 +3207,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "AssetName") return } - if zb0008 > config.MaxAssetNameBytes { - err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxAssetNameBytes)) + if zb0008 > 
bounds.MaxAssetNameBytes { + err = msgp.ErrOverflow(uint64(zb0008), uint64(bounds.MaxAssetNameBytes)) return } (*z).AssetName, bts, err = msgp.ReadStringBytes(bts) @@ -3223,8 +3223,8 @@ func (z *AssetParams) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "URL") return } - if zb0009 > config.MaxAssetURLBytes { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxAssetURLBytes)) + if zb0009 > bounds.MaxAssetURLBytes { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxAssetURLBytes)) return } (*z).URL, bts, err = msgp.ReadStringBytes(bts) @@ -3296,7 +3296,7 @@ func (z *AssetParams) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func AssetParamsMaxSize() (s int) { - s = 1 + 2 + msgp.Uint64Size + 3 + msgp.Uint32Size + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + config.MaxAssetUnitNameBytes + 3 + msgp.StringPrefixSize + config.MaxAssetNameBytes + 3 + msgp.StringPrefixSize + config.MaxAssetURLBytes + 3 + s = 1 + 2 + msgp.Uint64Size + 3 + msgp.Uint32Size + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + bounds.MaxAssetUnitNameBytes + 3 + msgp.StringPrefixSize + bounds.MaxAssetNameBytes + 3 + msgp.StringPrefixSize + bounds.MaxAssetURLBytes + 3 // Calculating size of array: z.MetadataHash s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) s += 2 + AddressMaxSize() + 2 + AddressMaxSize() + 2 + AddressMaxSize() + 2 + AddressMaxSize() @@ -3785,8 +3785,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "AssetParams") return } - if zb0016 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxAssetsPerAccount)) + if zb0016 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0016), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "struct-from-array", "AssetParams") return } @@ -3821,8 +3821,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "Assets") return } - if zb0018 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0018), uint64(encodedMaxAssetsPerAccount)) + if zb0018 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0018), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "struct-from-array", "Assets") return } @@ -3937,8 +3937,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "AppLocalStates") return } - if zb0022 > EncodedMaxAppLocalStates { - err = msgp.ErrOverflow(uint64(zb0022), uint64(EncodedMaxAppLocalStates)) + if zb0022 > bounds.EncodedMaxAppLocalStates { + err = msgp.ErrOverflow(uint64(zb0022), uint64(bounds.EncodedMaxAppLocalStates)) err = msgp.WrapError(err, "struct-from-array", "AppLocalStates") return } @@ -3973,8 +3973,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "AppParams") return } - if zb0024 > EncodedMaxAppParams { - err = msgp.ErrOverflow(uint64(zb0024), uint64(EncodedMaxAppParams)) + if zb0024 > bounds.EncodedMaxAppParams { + err = msgp.ErrOverflow(uint64(zb0024), uint64(bounds.EncodedMaxAppParams)) err = msgp.WrapError(err, "struct-from-array", "AppParams") return } @@ -4225,8 +4225,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "AssetParams") return } - if 
zb0033 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxAssetsPerAccount)) + if zb0033 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0033), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "AssetParams") return } @@ -4259,8 +4259,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "Assets") return } - if zb0035 > encodedMaxAssetsPerAccount { - err = msgp.ErrOverflow(uint64(zb0035), uint64(encodedMaxAssetsPerAccount)) + if zb0035 > bounds.EncodedMaxAssetsPerAccount { + err = msgp.ErrOverflow(uint64(zb0035), uint64(bounds.EncodedMaxAssetsPerAccount)) err = msgp.WrapError(err, "Assets") return } @@ -4369,8 +4369,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "AppLocalStates") return } - if zb0039 > EncodedMaxAppLocalStates { - err = msgp.ErrOverflow(uint64(zb0039), uint64(EncodedMaxAppLocalStates)) + if zb0039 > bounds.EncodedMaxAppLocalStates { + err = msgp.ErrOverflow(uint64(zb0039), uint64(bounds.EncodedMaxAppLocalStates)) err = msgp.WrapError(err, "AppLocalStates") return } @@ -4403,8 +4403,8 @@ func (z *BalanceRecord) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "AppParams") return } - if zb0041 > EncodedMaxAppParams { - err = msgp.ErrOverflow(uint64(zb0041), uint64(EncodedMaxAppParams)) + if zb0041 > bounds.EncodedMaxAppParams { + err = msgp.ErrOverflow(uint64(zb0041), uint64(bounds.EncodedMaxAppParams)) err = msgp.WrapError(err, "AppParams") return } @@ -4586,28 +4586,28 @@ func BalanceRecordMaxSize() (s int) { s = 3 + 5 + AddressMaxSize() + 4 + msgp.ByteSize + 5 + MicroAlgosMaxSize() + 6 + msgp.Uint64Size + 4 + MicroAlgosMaxSize() + 5 + crypto.OneTimeSignatureVerifierMaxSize() + 4 + crypto.VRFVerifierMaxSize() + 6 + merklesignature.CommitmentMaxSize() + 8 + msgp.Uint64Size + 8 + msgp.Uint64Size + 7 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AccountData.AssetParams - s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetIndexMaxSize()) // Adding size of map values for z.AccountData.AssetParams - s += encodedMaxAssetsPerAccount * (AssetParamsMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetParamsMaxSize()) s += 6 s += msgp.MapHeaderSize // Adding size of map keys for z.AccountData.Assets - s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize()) + s += bounds.EncodedMaxAssetsPerAccount * (AssetIndexMaxSize()) // Adding size of map values for z.AccountData.Assets - s += encodedMaxAssetsPerAccount * (1) + s += bounds.EncodedMaxAssetsPerAccount * (1) s += 2 + msgp.Uint64Size + 2 + msgp.BoolSize s += 6 + AddressMaxSize() + 3 + msgp.BoolSize + 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AccountData.AppLocalStates - s += EncodedMaxAppLocalStates * (AppIndexMaxSize()) + s += bounds.EncodedMaxAppLocalStates * (AppIndexMaxSize()) // Adding size of map values for z.AccountData.AppLocalStates - s += EncodedMaxAppLocalStates * (AppLocalStateMaxSize()) + s += bounds.EncodedMaxAppLocalStates * (AppLocalStateMaxSize()) s += 5 s += msgp.MapHeaderSize // Adding size of map keys for z.AccountData.AppParams - s += EncodedMaxAppParams * (AppIndexMaxSize()) + s += bounds.EncodedMaxAppParams * (AppIndexMaxSize()) // Adding size of map values for z.AccountData.AppParams - s += EncodedMaxAppParams * 
(AppParamsMaxSize()) + s += bounds.EncodedMaxAppParams * (AppParamsMaxSize()) s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size return } @@ -5099,8 +5099,8 @@ func (z *StateDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) ( err = msgp.WrapError(err) return } - if zb0003 > config.MaxStateDeltaKeys { - err = msgp.ErrOverflow(uint64(zb0003), uint64(config.MaxStateDeltaKeys)) + if zb0003 > bounds.MaxStateDeltaKeys { + err = msgp.ErrOverflow(uint64(zb0003), uint64(bounds.MaxStateDeltaKeys)) err = msgp.WrapError(err) return } @@ -5159,9 +5159,9 @@ func (z StateDelta) MsgIsZero() bool { func StateDeltaMaxSize() (s int) { s += msgp.MapHeaderSize // Adding size of map keys for z - s += config.MaxStateDeltaKeys * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen) + s += bounds.MaxStateDeltaKeys * (msgp.StringPrefixSize + bounds.MaxAppBytesKeyLen) // Adding size of map values for z - s += config.MaxStateDeltaKeys * (ValueDeltaMaxSize()) + s += bounds.MaxStateDeltaKeys * (ValueDeltaMaxSize()) return } @@ -5855,8 +5855,8 @@ func (z *TealKeyValue) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err) return } - if zb0003 > EncodedMaxKeyValueEntries { - err = msgp.ErrOverflow(uint64(zb0003), uint64(EncodedMaxKeyValueEntries)) + if zb0003 > bounds.EncodedMaxKeyValueEntries { + err = msgp.ErrOverflow(uint64(zb0003), uint64(bounds.EncodedMaxKeyValueEntries)) err = msgp.WrapError(err) return } @@ -5915,9 +5915,9 @@ func (z TealKeyValue) MsgIsZero() bool { func TealKeyValueMaxSize() (s int) { s += msgp.MapHeaderSize // Adding size of map keys for z - s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen) + s += bounds.EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + bounds.MaxAppBytesKeyLen) // Adding size of map values for z - s += EncodedMaxKeyValueEntries * (TealValueMaxSize()) + s += bounds.EncodedMaxKeyValueEntries * (TealValueMaxSize()) return } @@ -6240,8 +6240,8 @@ func (z *ValueDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) ( err = msgp.WrapError(err, "struct-from-array", "Bytes") return } - if zb0004 > config.MaxAppBytesValueLen { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxAppBytesValueLen)) + if zb0004 > bounds.MaxAppBytesValueLen { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxAppBytesValueLen)) return } (*z).Bytes, bts, err = msgp.ReadStringBytes(bts) @@ -6298,8 +6298,8 @@ func (z *ValueDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) ( err = msgp.WrapError(err, "Bytes") return } - if zb0006 > config.MaxAppBytesValueLen { - err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxAppBytesValueLen)) + if zb0006 > bounds.MaxAppBytesValueLen { + err = msgp.ErrOverflow(uint64(zb0006), uint64(bounds.MaxAppBytesValueLen)) return } (*z).Bytes, bts, err = msgp.ReadStringBytes(bts) @@ -6347,6 +6347,6 @@ func (z *ValueDelta) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func ValueDeltaMaxSize() (s int) { - s = 1 + 3 + msgp.Uint64Size + 3 + msgp.StringPrefixSize + config.MaxAppBytesValueLen + 3 + msgp.Uint64Size + s = 1 + 3 + msgp.Uint64Size + 3 + msgp.StringPrefixSize + bounds.MaxAppBytesValueLen + 3 + msgp.Uint64Size return } diff --git a/data/basics/teal.go b/data/basics/teal.go index 7fafe536b4..a2b340c325 100644 --- a/data/basics/teal.go +++ b/data/basics/teal.go @@ -20,8 +20,6 @@ import ( "encoding/hex" "fmt" "maps" - - 
"github.com/algorand/go-algorand/config" ) // DeltaAction is an enum of actions that may be performed when applying a @@ -44,7 +42,7 @@ type ValueDelta struct { _struct struct{} `codec:",omitempty,omitemptyarray"` Action DeltaAction `codec:"at"` - Bytes string `codec:"bs,allocbound=config.MaxAppBytesValueLen"` + Bytes string `codec:"bs,allocbound=bounds.MaxAppBytesValueLen"` Uint uint64 `codec:"ui"` } @@ -71,7 +69,7 @@ func (vd *ValueDelta) ToTealValue() (value TealValue, ok bool) { // StateDelta is a map from key/value store keys to ValueDeltas, indicating // what should happen for that key // -//msgp:allocbound StateDelta config.MaxStateDeltaKeys,config.MaxAppBytesKeyLen +//msgp:allocbound StateDelta bounds.MaxStateDeltaKeys,bounds.MaxAppBytesKeyLen type StateDelta map[string]ValueDelta // Equal checks whether two StateDeltas are equal. We don't check for nilness @@ -111,25 +109,22 @@ func (sm StateSchema) NumEntries() (tot uint64) { } // MinBalance computes the MinBalance requirements for a StateSchema based on -// the consensus parameters -func (sm StateSchema) MinBalance(proto *config.ConsensusParams) (res MicroAlgos) { +// the requirements for the state values in the schema. +func (sm StateSchema) MinBalance(reqs BalanceRequirements) MicroAlgos { // Flat cost for each key/value pair - flatCost := MulSaturate(proto.SchemaMinBalancePerEntry, sm.NumEntries()) + flatCost := MulSaturate(reqs.SchemaMinBalancePerEntry, sm.NumEntries()) // Cost for uints - uintCost := MulSaturate(proto.SchemaUintMinBalance, sm.NumUint) + uintCost := MulSaturate(reqs.SchemaUintMinBalance, sm.NumUint) // Cost for byte slices - bytesCost := MulSaturate(proto.SchemaBytesMinBalance, sm.NumByteSlice) + bytesCost := MulSaturate(reqs.SchemaBytesMinBalance, sm.NumByteSlice) // Sum the separate costs - var min uint64 - min = AddSaturate(min, flatCost) - min = AddSaturate(min, uintCost) + min := AddSaturate(flatCost, uintCost) min = AddSaturate(min, bytesCost) - res.Raw = min - return res + return MicroAlgos{Raw: min} } // TealType is an enum of the types in a TEAL program: Bytes and Uint @@ -185,7 +180,7 @@ func (tv *TealValue) String() string { // TealKeyValue represents a key/value store for use in an application's // LocalState or GlobalState // -//msgp:allocbound TealKeyValue EncodedMaxKeyValueEntries,config.MaxAppBytesKeyLen +//msgp:allocbound TealKeyValue bounds.EncodedMaxKeyValueEntries,bounds.MaxAppBytesKeyLen type TealKeyValue map[string]TealValue // Clone returns a copy of a TealKeyValue that may be modified without diff --git a/data/basics/units.go b/data/basics/units.go index 84904ee8d0..2934936feb 100644 --- a/data/basics/units.go +++ b/data/basics/units.go @@ -22,7 +22,6 @@ import ( "github.com/algorand/go-codec/codec" "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" ) @@ -56,8 +55,8 @@ func (a MicroAlgos) ToUint64() uint64 { } // RewardUnits returns the number of reward units in some number of algos -func (a MicroAlgos) RewardUnits(proto config.ConsensusParams) uint64 { - return a.Raw / proto.RewardUnit +func (a MicroAlgos) RewardUnits(unitSize uint64) uint64 { + return a.Raw / unitSize } // We generate our own encoders and decoders for MicroAlgos diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index 468507a102..84c762a7ac 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -21,7 +21,6 @@ import ( "fmt" "slices" - "github.com/algorand/go-algorand/config" 
"github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/logging" @@ -41,29 +40,6 @@ const ( // Two special accounts that are defined as NotParticipating are the incentive pool (also know as rewards pool) and the fee sink. // These two accounts also have additional Algo transfer restrictions. NotParticipating - - // encodedMaxAssetsPerAccount is the decoder limit of number of assets stored per account. - // it's being verified by the unit test TestEncodedAccountAllocationBounds to align - // with config.Consensus[protocol.ConsensusCurrentVersion].MaxAssetsPerAccount; note that the decoded - // parameter is used only for protecting the decoder against malicious encoded account data stream. - // protocol-specific contains would be tested once the decoding is complete. - encodedMaxAssetsPerAccount = 1024 - - // EncodedMaxAppLocalStates is the decoder limit for number of opted-in apps in a single account. - // It is verified in TestEncodedAccountAllocationBounds to align with - // config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn - EncodedMaxAppLocalStates = 64 - - // EncodedMaxAppParams is the decoder limit for number of created apps in a single account. - // It is verified in TestEncodedAccountAllocationBounds to align with - // config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsCreated - EncodedMaxAppParams = 64 - - // EncodedMaxKeyValueEntries is the decoder limit for the length of a key/value store. - // It is verified in TestEncodedAccountAllocationBounds to align with - // config.Consensus[protocol.ConsensusCurrentVersion].MaxLocalSchemaEntries and - // config.Consensus[protocol.ConsensusCurrentVersion].MaxGlobalSchemaEntries - EncodedMaxKeyValueEntries = 1024 ) func (s Status) String() string { @@ -159,8 +135,7 @@ type AccountData struct { // If the account is Status=Offline or Status=Online, its // effective balance (if a transaction were to be issued // against this account) may be higher, as computed by - // AccountData.Money(). That function calls - // AccountData.WithUpdatedRewards() to apply the deferred + // WithUpdatedRewards() which applies the deferred // rewards to AccountData.MicroAlgos. RewardsBase uint64 `codec:"ebase"` @@ -199,7 +174,7 @@ type AccountData struct { // NOTE: do not modify this value in-place in existing AccountData // structs; allocate a copy and modify that instead. AccountData // is expected to have copy-by-value semantics. - AssetParams map[AssetIndex]AssetParams `codec:"apar,allocbound=encodedMaxAssetsPerAccount"` + AssetParams map[AssetIndex]AssetParams `codec:"apar,allocbound=bounds.EncodedMaxAssetsPerAccount"` // Assets is the set of assets that can be held by this // account. Assets (i.e., slots in this map) are explicitly @@ -216,7 +191,7 @@ type AccountData struct { // NOTE: do not modify this value in-place in existing AccountData // structs; allocate a copy and modify that instead. AccountData // is expected to have copy-by-value semantics. - Assets map[AssetIndex]AssetHolding `codec:"asset,allocbound=encodedMaxAssetsPerAccount"` + Assets map[AssetIndex]AssetHolding `codec:"asset,allocbound=bounds.EncodedMaxAssetsPerAccount"` // AuthAddr is the address against which signatures/multisigs/logicsigs should be checked. // If empty, the address of the account whose AccountData this is is used. @@ -231,11 +206,11 @@ type AccountData struct { // AppLocalStates stores the local states associated with any applications // that this account has opted in to. 
- AppLocalStates map[AppIndex]AppLocalState `codec:"appl,allocbound=EncodedMaxAppLocalStates"` + AppLocalStates map[AppIndex]AppLocalState `codec:"appl,allocbound=bounds.EncodedMaxAppLocalStates"` // AppParams stores the global parameters and state associated with any // applications that this account has created. - AppParams map[AppIndex]AppParams `codec:"appp,allocbound=EncodedMaxAppParams"` + AppParams map[AppIndex]AppParams `codec:"appp,allocbound=bounds.EncodedMaxAppParams"` // TotalAppSchema stores the sum of all of the LocalStateSchemas // and GlobalStateSchemas in this account (global for applications @@ -269,8 +244,8 @@ type AppLocalState struct { type AppParams struct { _struct struct{} `codec:",omitempty,omitemptyarray"` - ApprovalProgram []byte `codec:"approv,allocbound=config.MaxAvailableAppProgramLen"` - ClearStateProgram []byte `codec:"clearp,allocbound=config.MaxAvailableAppProgramLen"` + ApprovalProgram []byte `codec:"approv,allocbound=bounds.MaxAvailableAppProgramLen"` + ClearStateProgram []byte `codec:"clearp,allocbound=bounds.MaxAvailableAppProgramLen"` GlobalState TealKeyValue `codec:"gs"` StateSchemas ExtraProgramPages uint32 `codec:"epp"` @@ -393,14 +368,14 @@ type AssetParams struct { // UnitName specifies a hint for the name of a unit of // this asset. - UnitName string `codec:"un,allocbound=config.MaxAssetUnitNameBytes"` + UnitName string `codec:"un,allocbound=bounds.MaxAssetUnitNameBytes"` // AssetName specifies a hint for the name of the asset. - AssetName string `codec:"an,allocbound=config.MaxAssetNameBytes"` + AssetName string `codec:"an,allocbound=bounds.MaxAssetNameBytes"` // URL specifies a URL where more information about the asset can be // retrieved - URL string `codec:"au,allocbound=config.MaxAssetURLBytes"` + URL string `codec:"au,allocbound=bounds.MaxAssetURLBytes"` // MetadataHash specifies a commitment to some unspecified asset // metadata. The format of this metadata is up to the application. @@ -435,16 +410,10 @@ func (app AppIndex) Address() Address { return Address(crypto.HashObj(app)) } -// Money returns the amount of MicroAlgos associated with the user's account -func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money MicroAlgos, rewards MicroAlgos) { - e := u.WithUpdatedRewards(proto, rewardsLevel) - return e.MicroAlgos, e.RewardedMicroAlgos -} - // PendingRewards computes the amount of rewards (in microalgos) that // have yet to be added to the account balance. -func PendingRewards(ot *OverflowTracker, proto config.ConsensusParams, microAlgos MicroAlgos, rewardsBase uint64, rewardsLevel uint64) MicroAlgos { - rewardsUnits := microAlgos.RewardUnits(proto) +func PendingRewards(ot *OverflowTracker, unitSize uint64, microAlgos MicroAlgos, rewardsBase uint64, rewardsLevel uint64) MicroAlgos { + rewardsUnits := microAlgos.RewardUnits(unitSize) rewardsDelta := ot.Sub(rewardsLevel, rewardsBase) return MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)} } @@ -452,14 +421,14 @@ func PendingRewards(ot *OverflowTracker, proto config.ConsensusParams, microAlgo // WithUpdatedRewards returns an updated number of algos, total rewards and new rewards base // to reflect rewards up to some rewards level. 
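The PendingRewards and WithUpdatedRewards signature changes in this hunk drop the config.ConsensusParams dependency: callers now pass the reward unit size (normally proto.RewardUnit) directly. A quick sketch of the arithmetic with made-up numbers, assuming the data/basics import:

func pendingRewardsExample() uint64 {
	unitSize := uint64(1_000_000)               // microalgos per reward unit (illustrative)
	bal := basics.MicroAlgos{Raw: 8_000_000}
	rewardsBase, rewardsLevel := uint64(10), uint64(13)

	units := bal.RewardUnits(unitSize)          // 8_000_000 / 1_000_000 = 8 reward units
	return units * (rewardsLevel - rewardsBase) // 8 * 3 = 24 microalgos of not-yet-applied rewards
}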
func WithUpdatedRewards( - proto config.ConsensusParams, status Status, microAlgosIn MicroAlgos, rewardedMicroAlgosIn MicroAlgos, rewardsBaseIn uint64, rewardsLevelIn uint64, + rewardUnits uint64, status Status, microAlgosIn MicroAlgos, rewardedMicroAlgosIn MicroAlgos, rewardsBaseIn uint64, rewardsLevelIn uint64, ) (MicroAlgos, MicroAlgos, uint64) { if status == NotParticipating { return microAlgosIn, rewardedMicroAlgosIn, rewardsBaseIn } var ot OverflowTracker - rewardsUnits := microAlgosIn.RewardUnits(proto) + rewardsUnits := microAlgosIn.RewardUnits(rewardUnits) rewardsDelta := ot.Sub(rewardsLevelIn, rewardsBaseIn) rewards := MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)} microAlgosOut := ot.AddA(microAlgosIn, rewards) @@ -475,20 +444,36 @@ func WithUpdatedRewards( // WithUpdatedRewards returns an updated number of algos in an AccountData // to reflect rewards up to some rewards level. -func (u AccountData) WithUpdatedRewards(proto config.ConsensusParams, rewardsLevel uint64) AccountData { +func (u AccountData) WithUpdatedRewards(rewardUnit uint64, rewardsLevel uint64) AccountData { u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase = WithUpdatedRewards( - proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, + rewardUnit, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, ) return u } +// BalanceRequirements defines the amounts an account must hold, based on +// various resources the account has. The names are taken directly from +// config.ConsensusParams, as this struct only exists so that `basics` does not +// need to `config` directly. +type BalanceRequirements struct { + MinBalance uint64 + AppFlatParamsMinBalance uint64 + AppFlatOptInMinBalance uint64 + BoxFlatMinBalance uint64 + BoxByteMinBalance uint64 + + SchemaMinBalancePerEntry uint64 + SchemaUintMinBalance uint64 + SchemaBytesMinBalance uint64 +} + // MinBalance computes the minimum balance requirements for an account based on // some consensus parameters. MinBalance should correspond roughly to how much // storage the account is allowed to store on disk. -func (u AccountData) MinBalance(proto *config.ConsensusParams) MicroAlgos { +func (u AccountData) MinBalance(reqs BalanceRequirements) MicroAlgos { return MinBalance( - proto, + reqs, uint64(len(u.Assets)), u.TotalAppSchema, uint64(len(u.AppParams)), uint64(len(u.AppLocalStates)), @@ -501,7 +486,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) MicroAlgos { // some consensus parameters. MinBalance should correspond roughly to how much // storage the account is allowed to store on disk. 
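BalanceRequirements, introduced above, is meant to be populated from config.ConsensusParams by the caller so that basics itself no longer imports config. A minimal sketch of how a caller might build it; the field values are illustrative, not the actual consensus values:

func minBalanceExample() basics.MicroAlgos {
	reqs := basics.BalanceRequirements{
		MinBalance:               100_000,
		AppFlatParamsMinBalance:  100_000,
		AppFlatOptInMinBalance:   100_000,
		SchemaMinBalancePerEntry: 25_000,
		SchemaUintMinBalance:     3_500,
		SchemaBytesMinBalance:    25_000,
		BoxFlatMinBalance:        2_500,
		BoxByteMinBalance:        400,
	}
	var ad basics.AccountData // an empty account, so the result is just the base MinBalance
	return ad.MinBalance(reqs)
}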
func MinBalance( - proto *config.ConsensusParams, + reqs BalanceRequirements, totalAssets uint64, totalAppSchema StateSchema, totalAppParams uint64, totalAppLocalStates uint64, @@ -511,35 +496,35 @@ func MinBalance( var min uint64 // First, base MinBalance - min = proto.MinBalance + min = reqs.MinBalance // MinBalance for each Asset - assetCost := MulSaturate(proto.MinBalance, totalAssets) + assetCost := MulSaturate(reqs.MinBalance, totalAssets) min = AddSaturate(min, assetCost) // Base MinBalance for each created application - appCreationCost := MulSaturate(proto.AppFlatParamsMinBalance, totalAppParams) + appCreationCost := MulSaturate(reqs.AppFlatParamsMinBalance, totalAppParams) min = AddSaturate(min, appCreationCost) // Base MinBalance for each opted in application - appOptInCost := MulSaturate(proto.AppFlatOptInMinBalance, totalAppLocalStates) + appOptInCost := MulSaturate(reqs.AppFlatOptInMinBalance, totalAppLocalStates) min = AddSaturate(min, appOptInCost) // MinBalance for state usage measured by LocalStateSchemas and // GlobalStateSchemas - schemaCost := totalAppSchema.MinBalance(proto) + schemaCost := totalAppSchema.MinBalance(reqs) min = AddSaturate(min, schemaCost.Raw) // MinBalance for each extra app program page - extraAppProgramLenCost := MulSaturate(proto.AppFlatParamsMinBalance, totalExtraAppPages) + extraAppProgramLenCost := MulSaturate(reqs.AppFlatParamsMinBalance, totalExtraAppPages) min = AddSaturate(min, extraAppProgramLenCost) // Base MinBalance for each created box - boxBaseCost := MulSaturate(proto.BoxFlatMinBalance, totalBoxes) + boxBaseCost := MulSaturate(reqs.BoxFlatMinBalance, totalBoxes) min = AddSaturate(min, boxBaseCost) // Per byte MinBalance for boxes - boxByteCost := MulSaturate(proto.BoxByteMinBalance, totalBoxBytes) + boxByteCost := MulSaturate(reqs.BoxByteMinBalance, totalBoxBytes) min = AddSaturate(min, boxByteCost) return MicroAlgos{min} @@ -552,16 +537,6 @@ func (u OnlineAccountData) VotingStake() MicroAlgos { return u.MicroAlgosWithRewards } -// KeyDilution returns the key dilution for this account, -// returning the default key dilution if not explicitly specified. -func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 { - if u.VoteKeyDilution != 0 { - return u.VoteKeyDilution - } - - return proto.DefaultKeyDilution -} - // NormalizedOnlineBalance returns a “normalized” balance for this account. // // The normalization compensates for rewards that have not yet been applied, @@ -580,8 +555,8 @@ func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 { // on how recently the account has been touched (our rewards do not implement // compounding). However, online accounts have to periodically renew // participation keys, so the scale of the inconsistency is small. -func (u AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 { - return NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, proto) +func (u AccountData) NormalizedOnlineBalance(rewardUnit uint64) uint64 { + return NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, rewardUnit) } // NormalizedOnlineAccountBalance returns a “normalized” balance for an account @@ -603,26 +578,26 @@ func (u AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint6 // on how recently the account has been touched (our rewards do not implement // compounding). However, online accounts have to periodically renew // participation keys, so the scale of the inconsistency is small. 
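A worked example of the normalization with the new rewardUnit parameter (numbers invented; the 1,000,000 microAlgo unit size mirrors the constant used in the updated tests):

// Sketch only: norm = microAlgos * rewardUnit / (rewardsBase + rewardUnit).
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
)

func main() {
	const rewardUnit = 1_000_000 // microAlgos per reward unit

	// An online account holding 10 Algos whose RewardsBase is 250_000:
	// 10_000_000 * 1_000_000 / (250_000 + 1_000_000) = 8_000_000.
	norm := basics.NormalizedOnlineAccountBalance(
		basics.Online, 250_000, basics.MicroAlgos{Raw: 10_000_000}, rewardUnit)
	fmt.Println(norm) // 8000000

	// Accounts that are not Online normalize to zero regardless of balance.
	fmt.Println(basics.NormalizedOnlineAccountBalance(
		basics.Offline, 0, basics.MicroAlgos{Raw: 10_000_000}, rewardUnit)) // 0
}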
-func NormalizedOnlineAccountBalance(status Status, rewardsBase uint64, microAlgos MicroAlgos, genesisProto config.ConsensusParams) uint64 { +func NormalizedOnlineAccountBalance(status Status, rewardsBase uint64, microAlgos MicroAlgos, rewardUnit uint64) uint64 { if status != Online { return 0 } // If this account had one RewardUnit of microAlgos in round 0, it would // have perRewardUnit microAlgos at the account's current rewards level. - perRewardUnit := rewardsBase + genesisProto.RewardUnit + perRewardUnit := rewardsBase + rewardUnit // To normalize, we compute, mathematically, // u.MicroAlgos / perRewardUnit * proto.RewardUnit, as // (u.MicroAlgos * proto.RewardUnit) / perRewardUnit. - norm, overflowed := Muldiv(microAlgos.ToUint64(), genesisProto.RewardUnit, perRewardUnit) + norm, overflowed := Muldiv(microAlgos.ToUint64(), rewardUnit, perRewardUnit) // Mathematically should be impossible to overflow // because perRewardUnit >= proto.RewardUnit, as long // as u.RewardBase isn't huge enough to cause overflow.. if overflowed { logging.Base().Panicf("overflow computing normalized balance %d * %d / (%d + %d)", - microAlgos.ToUint64(), genesisProto.RewardUnit, rewardsBase, genesisProto.RewardUnit) + microAlgos.ToUint64(), rewardUnit, rewardsBase, rewardUnit) } return norm diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go index 52b9c54c94..4cb93340e7 100644 --- a/data/basics/userBalance_test.go +++ b/data/basics/userBalance_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -38,7 +37,7 @@ func TestEmptyEncoding(t *testing.T) { func TestRewards(t *testing.T) { partitiontest.PartitionTest(t) - proto := config.Consensus[protocol.ConsensusCurrentVersion] + const unitSize = 1_000_000 accountAlgos := []MicroAlgos{{Raw: 0}, {Raw: 8000}, {Raw: 13000}, {Raw: 83000}} for _, accountAlgo := range accountAlgos { ad := AccountData{ @@ -50,9 +49,13 @@ func TestRewards(t *testing.T) { levels := []uint64{uint64(0), uint64(1), uint64(30), uint64(3000)} for _, level := range levels { - money, rewards := ad.Money(proto, ad.RewardsBase+level) - require.Equal(t, money.Raw, ad.MicroAlgos.Raw+level*ad.MicroAlgos.RewardUnits(proto)) - require.Equal(t, rewards.Raw, ad.RewardedMicroAlgos.Raw+level*ad.MicroAlgos.RewardUnits(proto)) + money := func(u AccountData, rewardsLevel uint64) (balance MicroAlgos, rewards MicroAlgos) { + u = u.WithUpdatedRewards(unitSize, rewardsLevel) + return u.MicroAlgos, u.RewardedMicroAlgos + } + balance, rewards := money(ad, ad.RewardsBase+level) + require.Equal(t, balance.Raw, ad.MicroAlgos.Raw+level*ad.MicroAlgos.RewardUnits(unitSize)) + require.Equal(t, rewards.Raw, ad.RewardedMicroAlgos.Raw+level*ad.MicroAlgos.RewardUnits(unitSize)) } } } @@ -60,7 +63,7 @@ func TestRewards(t *testing.T) { func TestWithUpdatedRewardsPanics(t *testing.T) { partitiontest.PartitionTest(t) - proto := config.Consensus[protocol.ConsensusCurrentVersion] + const unitSize = 1_000_000 t.Run("AlgoPanic", func(t *testing.T) { paniced := false func() { @@ -79,7 +82,7 @@ func TestWithUpdatedRewardsPanics(t *testing.T) { RewardedMicroAlgos: MicroAlgos{Raw: 0}, RewardsBase: 0, } - a.WithUpdatedRewards(proto, 100) + a.WithUpdatedRewards(unitSize, 100) }() require.Equal(t, true, paniced) }) @@ -91,36 +94,11 @@ func TestWithUpdatedRewardsPanics(t *testing.T) { RewardedMicroAlgos: MicroAlgos{Raw: ^uint64(0)}, RewardsBase: 0, } - b 
:= a.WithUpdatedRewards(proto, 100) - require.Equal(t, 100*a.MicroAlgos.RewardUnits(proto)-1, b.RewardedMicroAlgos.Raw) + b := a.WithUpdatedRewards(unitSize, 100) + require.Equal(t, 100*a.MicroAlgos.RewardUnits(unitSize)-1, b.RewardedMicroAlgos.Raw) }) } -func TestEncodedAccountAllocationBounds(t *testing.T) { - partitiontest.PartitionTest(t) - - // ensure that all the supported protocols have value limits less or - // equal to their corresponding codec allocbounds - for protoVer, proto := range config.Consensus { - if proto.MaxAssetsPerAccount > 0 && proto.MaxAssetsPerAccount > encodedMaxAssetsPerAccount { - require.Failf(t, "proto.MaxAssetsPerAccount > encodedMaxAssetsPerAccount", "protocol version = %s", protoVer) - } - if proto.MaxAppsCreated > 0 && proto.MaxAppsCreated > EncodedMaxAppParams { - require.Failf(t, "proto.MaxAppsCreated > encodedMaxAppParams", "protocol version = %s", protoVer) - } - if proto.MaxAppsOptedIn > 0 && proto.MaxAppsOptedIn > EncodedMaxAppLocalStates { - require.Failf(t, "proto.MaxAppsOptedIn > encodedMaxAppLocalStates", "protocol version = %s", protoVer) - } - if proto.MaxLocalSchemaEntries > EncodedMaxKeyValueEntries { - require.Failf(t, "proto.MaxLocalSchemaEntries > encodedMaxKeyValueEntries", "protocol version = %s", protoVer) - } - if proto.MaxGlobalSchemaEntries > EncodedMaxKeyValueEntries { - require.Failf(t, "proto.MaxGlobalSchemaEntries > encodedMaxKeyValueEntries", "protocol version = %s", protoVer) - } - // There is no protocol limit to the number of Boxes per account, so that allocbound is not checked. - } -} - func TestAppIndexHashing(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go index afce45e899..c305ae6ffc 100644 --- a/data/bookkeeping/block.go +++ b/data/bookkeeping/block.go @@ -29,238 +29,244 @@ import ( "github.com/algorand/go-algorand/protocol" ) -type ( - // BlockHash represents the hash of a block - BlockHash crypto.Digest - - // A BlockHeader represents the metadata and commitments to the state of a Block. - // The Algorand Ledger may be defined minimally as a cryptographically authenticated series of BlockHeader objects. - BlockHeader struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - Round basics.Round `codec:"rnd"` - - // The hash of the previous block - Branch BlockHash `codec:"prev"` - - // Sortition seed - Seed committee.Seed `codec:"seed"` - - // TxnCommitments authenticates the set of transactions appearing in the block. - TxnCommitments - - // TimeStamp in seconds since epoch - TimeStamp int64 `codec:"ts"` - - // Genesis ID to which this block belongs. - GenesisID string `codec:"gen,allocbound=config.MaxGenesisIDLen"` - - // Genesis hash to which this block belongs. - GenesisHash crypto.Digest `codec:"gh"` - - // Proposer is the proposer of this block. Like the Seed, agreement adds - // this after the block is assembled by the transaction pool, so that the same block can be prepared - // for multiple participating accounts in the same node. Therefore, it can not be used - // to influence block evaluation. Populated if proto.Payouts.Enabled - Proposer basics.Address `codec:"prp"` - - // FeesCollected is the sum of all fees paid by transactions in this - // block. Populated if proto.Payouts.Enabled - FeesCollected basics.MicroAlgos `codec:"fc"` - - // Bonus is the bonus incentive to be paid for proposing this block. It - // begins as a consensus parameter value, and decays periodically. 
- Bonus basics.MicroAlgos `codec:"bi"` - - // ProposerPayout is the amount that is moved from the FeeSink to - // the Proposer in this block. It is basically the - // bonus + the payouts percent of FeesCollected, but may be zero'd by - // proposer ineligibility. - ProposerPayout basics.MicroAlgos `codec:"pp"` - - // Rewards. - // - // When a block is applied, some amount of rewards are accrued to - // every account with AccountData.Status=/=NotParticipating. The - // amount is (thisBlock.RewardsLevel-prevBlock.RewardsLevel) of - // MicroAlgos for every whole config.Protocol.RewardUnit of MicroAlgos in - // that account's AccountData.MicroAlgos. - // - // Rewards are not compounded (i.e., not added to AccountData.MicroAlgos) - // until some other transaction is executed on that account. - // - // Not compounding rewards allows us to precisely know how many algos - // of rewards will be distributed without having to examine every - // account to determine if it should get one more algo of rewards - // because compounding formed another whole config.Protocol.RewardUnit - // of algos. - RewardsState - - // Consensus protocol versioning. - // - // Each block is associated with a version of the consensus protocol, - // stored under UpgradeState.CurrentProtocol. The protocol version - // for a block can be determined without having to first decode the - // block and its CurrentProtocol field, and this field is present for - // convenience and explicitness. Block.Valid() checks that this field - // correctly matches the expected protocol version. - // - // Each block is associated with at most one active upgrade proposal - // (a new version of the protocol). An upgrade proposal can be made - // by a block proposer, as long as no other upgrade proposal is active. - // The upgrade proposal lasts for many rounds (UpgradeVoteRounds), and - // in each round, that round's block proposer votes to support (or not) - // the proposed upgrade. - // - // If enough votes are collected, the proposal is approved, and will - // definitely take effect. The proposal lingers for some number of - // rounds to give clients a chance to notify users about an approved - // upgrade, if the client doesn't support it, so the user has a chance - // to download updated client software. - // - // Block proposers influence this upgrade machinery through two fields - // in UpgradeVote: UpgradePropose, which proposes an upgrade to a new - // protocol, and UpgradeApprove, which signals approval of the current - // proposal. - // - // Once a block proposer determines its UpgradeVote, then UpdateState - // is updated deterministically based on the previous UpdateState and - // the new block's UpgradeVote. - UpgradeState - UpgradeVote - - // TxnCounter is the number of the next transaction that will be - // committed after this block. Genesis blocks can start at either - // 0 or 1000, depending on a consensus parameter (AppForbidLowResources). - TxnCounter uint64 `codec:"tc"` - - // StateProofTracking tracks the status of the state proofs, potentially - // for multiple types of ASPs (Algorand's State Proofs). 
- //msgp:sort protocol.StateProofType protocol.SortStateProofType - StateProofTracking map[protocol.StateProofType]StateProofTrackingData `codec:"spt,allocbound=protocol.NumStateProofTypes"` - - // ParticipationUpdates contains the information needed to mark - // certain accounts offline because their participation keys expired - ParticipationUpdates - } - - // TxnCommitments represents the commitments computed from the transactions in the block. - // It contains multiple commitments based on different algorithms and hash functions, to support different use cases. - TxnCommitments struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - // Root of transaction merkle tree using SHA512_256 hash function. - // This commitment is computed based on the PaysetCommit type specified in the block's consensus protocol. - NativeSha512_256Commitment crypto.Digest `codec:"txn"` - - // Root of transaction vector commitment merkle tree using SHA256 hash function - Sha256Commitment crypto.Digest `codec:"txn256"` - } - - // ParticipationUpdates represents participation account data that - // needs to be checked/acted on by the network - ParticipationUpdates struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - // ExpiredParticipationAccounts contains a list of online accounts - // that needs to be converted to offline since their - // participation key expired. - ExpiredParticipationAccounts []basics.Address `codec:"partupdrmv,allocbound=config.MaxProposedExpiredOnlineAccounts"` - - // AbsentParticipationAccounts contains a list of online accounts that - // needs to be converted to offline since they are not proposing. - AbsentParticipationAccounts []basics.Address `codec:"partupdabs,allocbound=config.MaxMarkAbsent"` - } - - // RewardsState represents the global parameters controlling the rate - // at which accounts accrue rewards. - RewardsState struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - // The FeeSink accepts transaction fees. It can only spend to - // the incentive pool. - FeeSink basics.Address `codec:"fees"` - - // The RewardsPool accepts periodic injections from the - // FeeSink and continually redistributes them to addresses as - // rewards. - RewardsPool basics.Address `codec:"rwd"` - - // RewardsLevel specifies how many rewards, in MicroAlgos, - // have been distributed to each config.Protocol.RewardUnit - // of MicroAlgos since genesis. - RewardsLevel uint64 `codec:"earn"` - - // The number of new MicroAlgos added to the participation stake from rewards at the next round. - RewardsRate uint64 `codec:"rate"` - - // The number of leftover MicroAlgos after the distribution of RewardsRate/rewardUnits - // MicroAlgos for every reward unit in the next round. - RewardsResidue uint64 `codec:"frac"` - - // The round at which the RewardsRate will be recalculated. - RewardsRecalculationRound basics.Round `codec:"rwcalr"` - } - - // UpgradeVote represents the vote of the block proposer with - // respect to protocol upgrades. - UpgradeVote struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - // UpgradePropose indicates a proposed upgrade - UpgradePropose protocol.ConsensusVersion `codec:"upgradeprop"` - - // UpgradeDelay indicates the time between acceptance and execution - UpgradeDelay basics.Round `codec:"upgradedelay"` - - // UpgradeApprove indicates a yes vote for the current proposal - UpgradeApprove bool `codec:"upgradeyes"` - } - - // UpgradeState tracks the protocol upgrade state machine. 
It is, - // strictly speaking, computable from the history of all UpgradeVotes - // but we keep it in the block for explicitness and convenience - // (instead of materializing it separately, like balances). - //msgp:ignore UpgradeState - UpgradeState struct { - CurrentProtocol protocol.ConsensusVersion `codec:"proto"` - NextProtocol protocol.ConsensusVersion `codec:"nextproto"` - NextProtocolApprovals uint64 `codec:"nextyes"` - // NextProtocolVoteBefore specify the last voting round for the next protocol proposal. If there is no voting for - // an upgrade taking place, this would be zero. - NextProtocolVoteBefore basics.Round `codec:"nextbefore"` - // NextProtocolSwitchOn specify the round number at which the next protocol would be adopted. If there is no upgrade taking place, - // nor a wait for the next protocol, this would be zero. - NextProtocolSwitchOn basics.Round `codec:"nextswitch"` - } - - // StateProofTrackingData tracks the status of state proofs. - StateProofTrackingData struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - // StateProofVotersCommitment is the root of a vector commitment containing the - // online accounts that will help sign a state proof. The - // VC root, and the state proof, happen on blocks that - // are a multiple of ConsensusParams.StateProofRounds. For blocks - // that are not a multiple of ConsensusParams.StateProofRounds, - // this value is zero. - StateProofVotersCommitment crypto.GenericDigest `codec:"v"` - - // StateProofOnlineTotalWeight is the total number of microalgos held by the online accounts - // during the StateProof round (or zero, if the merkle root is zero - no commitment for StateProof voters). - // This is intended for computing the threshold of votes to expect from StateProofVotersCommitment. - StateProofOnlineTotalWeight basics.MicroAlgos `codec:"t"` - - // StateProofNextRound is the next round for which we will accept - // a StateProof transaction. - StateProofNextRound basics.Round `codec:"n"` - } +// BlockHash represents the hash of a block +type BlockHash crypto.Digest + +// A BlockHeader represents the metadata and commitments to the state of a Block. +// The Algorand Ledger may be defined minimally as a cryptographically authenticated series of BlockHeader objects. +type BlockHeader struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Round basics.Round `codec:"rnd"` + + // The hash of the previous block + Branch BlockHash `codec:"prev"` + + // The hash of the previous block, using SHA-512 + Branch512 crypto.Sha512Digest `codec:"prev512"` + + // Sortition seed + Seed committee.Seed `codec:"seed"` + + // TxnCommitments authenticates the set of transactions appearing in the block. + TxnCommitments + + // TimeStamp in seconds since epoch + TimeStamp int64 `codec:"ts"` + + // Genesis ID to which this block belongs. + GenesisID string `codec:"gen,allocbound=bounds.MaxGenesisIDLen"` + + // Genesis hash to which this block belongs. + GenesisHash crypto.Digest `codec:"gh"` + + // Proposer is the proposer of this block. Like the Seed, agreement adds + // this after the block is assembled by the transaction pool, so that the same block can be prepared + // for multiple participating accounts in the same node. Therefore, it can not be used + // to influence block evaluation. Populated if proto.Payouts.Enabled + Proposer basics.Address `codec:"prp"` + + // FeesCollected is the sum of all fees paid by transactions in this + // block. 
Populated if proto.Payouts.Enabled + FeesCollected basics.MicroAlgos `codec:"fc"` + + // Bonus is the bonus incentive to be paid for proposing this block. It + // begins as a consensus parameter value, and decays periodically. + Bonus basics.MicroAlgos `codec:"bi"` + + // ProposerPayout is the amount that is moved from the FeeSink to + // the Proposer in this block. It is basically the + // bonus + the payouts percent of FeesCollected, but may be zero'd by + // proposer ineligibility. + ProposerPayout basics.MicroAlgos `codec:"pp"` + + // Rewards. + // + // When a block is applied, some amount of rewards are accrued to + // every account with AccountData.Status=/=NotParticipating. The + // amount is (thisBlock.RewardsLevel-prevBlock.RewardsLevel) of + // MicroAlgos for every whole config.Protocol.RewardUnit of MicroAlgos in + // that account's AccountData.MicroAlgos. + // + // Rewards are not compounded (i.e., not added to AccountData.MicroAlgos) + // until some other transaction is executed on that account. + // + // Not compounding rewards allows us to precisely know how many algos + // of rewards will be distributed without having to examine every + // account to determine if it should get one more algo of rewards + // because compounding formed another whole config.Protocol.RewardUnit + // of algos. + RewardsState + + // Consensus protocol versioning. + // + // Each block is associated with a version of the consensus protocol, + // stored under UpgradeState.CurrentProtocol. The protocol version + // for a block can be determined without having to first decode the + // block and its CurrentProtocol field, and this field is present for + // convenience and explicitness. Block.Valid() checks that this field + // correctly matches the expected protocol version. + // + // Each block is associated with at most one active upgrade proposal + // (a new version of the protocol). An upgrade proposal can be made + // by a block proposer, as long as no other upgrade proposal is active. + // The upgrade proposal lasts for many rounds (UpgradeVoteRounds), and + // in each round, that round's block proposer votes to support (or not) + // the proposed upgrade. + // + // If enough votes are collected, the proposal is approved, and will + // definitely take effect. The proposal lingers for some number of + // rounds to give clients a chance to notify users about an approved + // upgrade, if the client doesn't support it, so the user has a chance + // to download updated client software. + // + // Block proposers influence this upgrade machinery through two fields + // in UpgradeVote: UpgradePropose, which proposes an upgrade to a new + // protocol, and UpgradeApprove, which signals approval of the current + // proposal. + // + // Once a block proposer determines its UpgradeVote, then UpdateState + // is updated deterministically based on the previous UpdateState and + // the new block's UpgradeVote. + UpgradeState + UpgradeVote + + // TxnCounter is the number of the next transaction that will be + // committed after this block. Genesis blocks can start at either + // 0 or 1000, depending on a consensus parameter (AppForbidLowResources). + TxnCounter uint64 `codec:"tc"` + + // StateProofTracking tracks the status of the state proofs, potentially + // for multiple types of ASPs (Algorand's State Proofs). 
+ //msgp:sort protocol.StateProofType protocol.SortStateProofType + StateProofTracking map[protocol.StateProofType]StateProofTrackingData `codec:"spt,allocbound=protocol.NumStateProofTypes"` + + // ParticipationUpdates contains the information needed to mark + // certain accounts offline because their participation keys expired + ParticipationUpdates +} + +// TxnCommitments represents the commitments computed from the transactions in the block. +// It contains multiple commitments based on different algorithms and hash functions, to support different use cases. +type TxnCommitments struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + // Root of transaction Merkle tree using the SHA-512/256 hash function. + // This commitment is computed based on the PaysetCommit type specified in the block's consensus protocol. + NativeSha512_256Commitment crypto.Digest `codec:"txn"` + + // Root of transaction vector commitment Merkle tree using the SHA-256 hash function. + Sha256Commitment crypto.Digest `codec:"txn256"` + + // Root of transaction vector commitment Merkle tree using the SHA-512 hash function. + Sha512Commitment crypto.Sha512Digest `codec:"txn512"` +} + +// ParticipationUpdates represents participation account data that +// needs to be checked/acted on by the network +type ParticipationUpdates struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // ExpiredParticipationAccounts contains a list of online accounts + // that needs to be converted to offline since their + // participation key expired. + ExpiredParticipationAccounts []basics.Address `codec:"partupdrmv,allocbound=bounds.MaxProposedExpiredOnlineAccounts"` + + // AbsentParticipationAccounts contains a list of online accounts that + // needs to be converted to offline since they are not proposing. + AbsentParticipationAccounts []basics.Address `codec:"partupdabs,allocbound=bounds.MaxMarkAbsent"` +} + +// RewardsState represents the global parameters controlling the rate +// at which accounts accrue rewards. +type RewardsState struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // The FeeSink accepts transaction fees. It can only spend to + // the incentive pool. + FeeSink basics.Address `codec:"fees"` + + // The RewardsPool accepts periodic injections from the + // FeeSink and continually redistributes them to addresses as + // rewards. + RewardsPool basics.Address `codec:"rwd"` + + // RewardsLevel specifies how many rewards, in MicroAlgos, + // have been distributed to each config.Protocol.RewardUnit + // of MicroAlgos since genesis. + RewardsLevel uint64 `codec:"earn"` + + // The number of new MicroAlgos added to the participation stake from rewards at the next round. + RewardsRate uint64 `codec:"rate"` + + // The number of leftover MicroAlgos after the distribution of RewardsRate/rewardUnits + // MicroAlgos for every reward unit in the next round. + RewardsResidue uint64 `codec:"frac"` + + // The round at which the RewardsRate will be recalculated. + RewardsRecalculationRound basics.Round `codec:"rwcalr"` +} + +// UpgradeVote represents the vote of the block proposer with +// respect to protocol upgrades. 
+type UpgradeVote struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // UpgradePropose indicates a proposed upgrade + UpgradePropose protocol.ConsensusVersion `codec:"upgradeprop"` + + // UpgradeDelay indicates the time between acceptance and execution + UpgradeDelay basics.Round `codec:"upgradedelay"` + + // UpgradeApprove indicates a yes vote for the current proposal + UpgradeApprove bool `codec:"upgradeyes"` +} - // A Block contains the Payset and metadata corresponding to a given Round. - Block struct { - BlockHeader - Payset transactions.Payset `codec:"txns,maxtotalbytes=config.MaxTxnBytesPerBlock"` - } -) +// UpgradeState tracks the protocol upgrade state machine. It is, +// strictly speaking, computable from the history of all UpgradeVotes +// but we keep it in the block for explicitness and convenience +// (instead of materializing it separately, like balances). +// +//msgp:ignore UpgradeState +type UpgradeState struct { + CurrentProtocol protocol.ConsensusVersion `codec:"proto"` + NextProtocol protocol.ConsensusVersion `codec:"nextproto"` + // NextProtocolApprovals is the number of approvals for the next protocol proposal. It is expressed in basics.Round because it is a count of rounds. + NextProtocolApprovals basics.Round `codec:"nextyes"` + // NextProtocolVoteBefore specify the last voting round for the next protocol proposal. If there is no voting for + // an upgrade taking place, this would be zero. + NextProtocolVoteBefore basics.Round `codec:"nextbefore"` + // NextProtocolSwitchOn specify the round number at which the next protocol would be adopted. If there is no upgrade taking place, + // nor a wait for the next protocol, this would be zero. + NextProtocolSwitchOn basics.Round `codec:"nextswitch"` +} + +// StateProofTrackingData tracks the status of state proofs. +type StateProofTrackingData struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // StateProofVotersCommitment is the root of a vector commitment containing the + // online accounts that will help sign a state proof. The + // VC root, and the state proof, happen on blocks that + // are a multiple of ConsensusParams.StateProofRounds. For blocks + // that are not a multiple of ConsensusParams.StateProofRounds, + // this value is zero. + StateProofVotersCommitment crypto.GenericDigest `codec:"v"` + + // StateProofOnlineTotalWeight is the total number of microalgos held by the online accounts + // during the StateProof round (or zero, if the merkle root is zero - no commitment for StateProof voters). + // This is intended for computing the threshold of votes to expect from StateProofVotersCommitment. + StateProofOnlineTotalWeight basics.MicroAlgos `codec:"t"` + + // StateProofNextRound is the next round for which we will accept + // a StateProof transaction. + StateProofNextRound basics.Round `codec:"n"` +} + +// A Block contains the Payset and metadata corresponding to a given Round. +type Block struct { + BlockHeader + Payset transactions.Payset `codec:"txns,maxtotalbytes=bounds.MaxTxnBytesPerBlock"` +} // TxnDeadError defines an error type which indicates a transaction is outside of the // round validity window. @@ -321,6 +327,11 @@ func (bh BlockHeader) Hash() BlockHash { return BlockHash(crypto.HashObj(bh)) } +// Hash512 returns the hash of a block header using SHA-512. 
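To see how the two parent links relate, a small sketch (not from the patch) that fills both Branch and the new Branch512 field the way MakeBlock does when EnableSha512BlockHash is set; it relies on the Hash512 method added just below and sets Branch512 unconditionally only for illustration:

// Sketch only: link a child header to its parent with both hash flavors.
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/bookkeeping"
)

func main() {
	var parent bookkeeping.BlockHeader
	parent.Round = 41

	var child bookkeeping.BlockHeader
	child.Round = parent.Round + 1
	child.Branch = parent.Hash()       // existing SHA-512/256 link
	child.Branch512 = parent.Hash512() // new SHA-512 link

	fmt.Printf("prev    %x...\nprev512 %x...\n", child.Branch[:8], child.Branch512[:8])
}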
+func (bh BlockHeader) Hash512() crypto.Sha512Digest { + return crypto.Sha512Digest(crypto.GenericHashObj(crypto.HashFactory{HashType: crypto.Sha512}.NewHash(), bh)) +} + // ToBeHashed implements the crypto.Hashable interface func (bh BlockHeader) ToBeHashed() (protocol.HashID, []byte) { return protocol.BlockHeader, protocol.Encode(&bh) @@ -506,7 +517,7 @@ func (s UpgradeState) applyUpgradeVote(r basics.Round, vote UpgradeVote) (res Up } // Clear out failed proposal - if r == s.NextProtocolVoteBefore && s.NextProtocolApprovals < params.UpgradeThreshold { + if r == s.NextProtocolVoteBefore && s.NextProtocolApprovals < basics.Round(params.UpgradeThreshold) { s.NextProtocol = "" s.NextProtocolApprovals = 0 s.NextProtocolVoteBefore = basics.Round(0) @@ -628,6 +639,9 @@ func MakeBlock(prev BlockHeader) Block { Bonus: bonus, }, } + if params.EnableSha512BlockHash { + blk.Branch512 = prev.Hash512() + } blk.TxnCommitments, err = blk.PaysetCommit() if err != nil { logging.Base().Warnf("MakeBlock: computing empty TxnCommitments: %v", err) @@ -660,9 +674,18 @@ func (block Block) PaysetCommit() (TxnCommitments, error) { } } + var digestSHA512 crypto.Sha512Digest + if params.EnableSha512BlockHash { + digestSHA512, err = block.paysetCommitSHA512() + if err != nil { + return TxnCommitments{}, err + } + } + return TxnCommitments{ Sha256Commitment: digestSHA256, NativeSha512_256Commitment: digestSHA512_256, + Sha512Commitment: digestSHA512, }, nil } @@ -700,6 +723,18 @@ func (block Block) paysetCommitSHA256() (crypto.Digest, error) { return rootAsByteArray, nil } +func (block Block) paysetCommitSHA512() (crypto.Sha512Digest, error) { + tree, err := block.TxnMerkleTreeSHA512() + if err != nil { + return crypto.Sha512Digest{}, err + } + + rootSlice := tree.Root() + var rootAsByteArray crypto.Sha512Digest + copy(rootAsByteArray[:], rootSlice) + return rootAsByteArray, nil +} + // PreCheck checks if the block header bh is a valid successor to // the previous block's header, prev. 
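The PreCheck change further down enforces a simple rule for Branch512. A logic-only restatement, with checkBranch512 as a hypothetical stand-in (the real method also validates the round, genesis fields, seed, timestamps, and upgrade state):

// Sketch only: when SHA-512 block hashing is enabled, Branch512 must equal the
// previous header's SHA-512 hash; when disabled, it must be empty.
package main

import (
	"errors"
	"fmt"
)

func checkBranch512(enableSha512 bool, branch512, prevHash512 [64]byte) error {
	if enableSha512 && branch512 != prevHash512 {
		return fmt.Errorf("block branch512 incorrect %x != %x", branch512[:4], prevHash512[:4])
	}
	if !enableSha512 && branch512 != ([64]byte{}) {
		return errors.New("block branch512 not allowed")
	}
	return nil
}

func main() {
	var prev [64]byte
	prev[0] = 0xab // stand-in for prevHeader.Hash512()

	fmt.Println(checkBranch512(true, prev, prev))       // <nil>
	fmt.Println(checkBranch512(true, [64]byte{}, prev)) // block branch512 incorrect ...
	fmt.Println(checkBranch512(false, prev, prev))      // block branch512 not allowed
}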
func (bh BlockHeader) PreCheck(prev BlockHeader) error { @@ -720,6 +755,12 @@ func (bh BlockHeader) PreCheck(prev BlockHeader) error { return fmt.Errorf("block branch incorrect %v != %v", bh.Branch, prev.Hash()) } + if params.EnableSha512BlockHash && bh.Branch512 != prev.Hash512() { + return fmt.Errorf("block branch512 incorrect %v != %v", bh.Branch512, prev.Hash512()) + } else if !params.EnableSha512BlockHash && bh.Branch512 != (crypto.Sha512Digest{}) { + return fmt.Errorf("block branch512 not allowed: %v", bh.Branch512) + } + // check upgrade state nextUpgradeState, err := prev.UpgradeState.applyUpgradeVote(round, bh.UpgradeVote) if err != nil { diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go index 89bf96a5be..367e27b157 100644 --- a/data/bookkeeping/block_test.go +++ b/data/bookkeeping/block_test.go @@ -100,9 +100,9 @@ func TestUpgradeVote(t *testing.T) { s = UpgradeState{ CurrentProtocol: proto1, NextProtocol: proto2, - NextProtocolApprovals: config.Consensus[protocol.ConsensusCurrentVersion].UpgradeThreshold - 1, - NextProtocolVoteBefore: basics.Round(20), - NextProtocolSwitchOn: basics.Round(30), + NextProtocolApprovals: basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].UpgradeThreshold) - 1, + NextProtocolVoteBefore: 20, + NextProtocolSwitchOn: 30, } // Check that applyUpgradeVote rejects concurrent proposal @@ -122,9 +122,9 @@ func TestUpgradeVote(t *testing.T) { s1, err = s.applyUpgradeVote(basics.Round(20), UpgradeVote{}) require.NoError(t, err) require.Equal(t, s1.NextProtocol, protocol.ConsensusVersion("")) - require.Equal(t, s1.NextProtocolApprovals, uint64(0)) - require.Equal(t, s1.NextProtocolVoteBefore, basics.Round(0)) - require.Equal(t, s1.NextProtocolSwitchOn, basics.Round(0)) + require.Zero(t, s1.NextProtocolApprovals) + require.Zero(t, s1.NextProtocolVoteBefore) + require.Zero(t, s1.NextProtocolSwitchOn) // Check that proposal gets approved with sufficient votes s.NextProtocolApprovals++ @@ -137,9 +137,9 @@ func TestUpgradeVote(t *testing.T) { require.NoError(t, err) require.Equal(t, s1.CurrentProtocol, proto2) require.Equal(t, s1.NextProtocol, protocol.ConsensusVersion("")) - require.Equal(t, s1.NextProtocolApprovals, uint64(0)) - require.Equal(t, s1.NextProtocolVoteBefore, basics.Round(0)) - require.Equal(t, s1.NextProtocolSwitchOn, basics.Round(0)) + require.Zero(t, s1.NextProtocolApprovals) + require.Zero(t, s1.NextProtocolVoteBefore) + require.Zero(t, s1.NextProtocolSwitchOn) } func TestUpgradeVariableDelay(t *testing.T) { @@ -562,7 +562,7 @@ func TestInitialRewardsRateCalculation(t *testing.T) { return true } - // test expected failuire + // test expected failure consensusParams.InitialRewardsRateCalculation = false require.False(t, runTest()) @@ -658,7 +658,7 @@ func TestNextRewardsRateWithFix(t *testing.T) { } } -func TestNextRewardsRateFailsWithoutFix(t *testing.T) { +func TestNextRewardsRateErrsWithoutFix(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -842,11 +842,26 @@ func TestNextRewardsRateWithFixNextRewardLevelOverflow(t *testing.T) { func TestBlock_ContentsMatchHeader(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - a := require.New(t) + for _, cv := range []struct { + name string + ver protocol.ConsensusVersion + }{ + {"v32", protocol.ConsensusV32}, + {"v34", protocol.ConsensusV34}, + {"current", protocol.ConsensusCurrentVersion}, + {"future", protocol.ConsensusFuture}, + } { + t.Run(cv.name, func(t *testing.T) { + testBlockContentsMatchHeader(t, cv.ver) + }) + } +} +func 
testBlockContentsMatchHeader(t *testing.T, cv protocol.ConsensusVersion) { + a := require.New(t) // Create a block without SHA256 TxnCommitments var block Block - block.CurrentProtocol = protocol.ConsensusV32 + block.CurrentProtocol = cv crypto.RandBytes(block.BlockHeader.GenesisHash[:]) for i := 0; i < 1024; i++ { @@ -880,51 +895,93 @@ func TestBlock_ContentsMatchHeader(t *testing.T) { a.NoError(err) rootSliceSHA256 := tree.Root() + tree, err = block.TxnMerkleTreeSHA512() + a.NoError(err) + rootSliceSHA512 := tree.Root() + badDigestSlice := []byte("(>^-^)>") - /* Test V32 */ + // Get consensus parameters for this version + params, ok := config.Consensus[cv] + a.True(ok) + + // Initially all roots empty, should fail + block.BlockHeader.TxnCommitments = TxnCommitments{} a.False(block.ContentsMatchHeader()) + // Copy the appropriate txn roots based on consensus version copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) - block.BlockHeader.TxnCommitments.Sha256Commitment = crypto.Digest{} + if params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + } + if params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], rootSliceSHA512) + } a.True(block.ContentsMatchHeader()) - copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) - a.False(block.ContentsMatchHeader()) + // Test with SHA256 set when it shouldn't be + if !params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + a.False(block.ContentsMatchHeader()) + block.BlockHeader.TxnCommitments.Sha256Commitment = crypto.Digest{} + } - copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], badDigestSlice) - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) - a.False(block.ContentsMatchHeader()) + // Test with SHA512 set when it shouldn't be + if !params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], rootSliceSHA512) + a.False(block.ContentsMatchHeader()) + block.BlockHeader.TxnCommitments.Sha512Commitment = crypto.Sha512Digest{} + } - block.BlockHeader.TxnCommitments.NativeSha512_256Commitment = crypto.Digest{} - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + // Test with bad NativeSha512_256Commitment (should fail for all protocols) + copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], badDigestSlice) a.False(block.ContentsMatchHeader()) - /* Test Consensus Current */ - // Create a block with SHA256 TxnCommitments - block.CurrentProtocol = protocol.ConsensusCurrentVersion - + // Test with missing NativeSha512_256Commitment block.BlockHeader.TxnCommitments.NativeSha512_256Commitment = crypto.Digest{} - block.BlockHeader.TxnCommitments.Sha256Commitment = crypto.Digest{} + if params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + } + if params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], rootSliceSHA512) + } a.False(block.ContentsMatchHeader()) - // Now update the SHA256 header to its correct value - copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) - a.True(block.ContentsMatchHeader()) + // For 
protocols with SHA256 enabled, test with bad/missing SHA256 commitment + if params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], badDigestSlice) + if params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], rootSliceSHA512) + } + a.False(block.ContentsMatchHeader()) - copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], badDigestSlice) - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) - a.False(block.ContentsMatchHeader()) + // Test with missing SHA256 commitment + copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) + block.BlockHeader.TxnCommitments.Sha256Commitment = crypto.Digest{} + if params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], rootSliceSHA512) + } + a.False(block.ContentsMatchHeader()) + } - copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], badDigestSlice) - a.False(block.ContentsMatchHeader()) + // For protocols with SHA512 enabled, test with bad/missing SHA512 commitment + if params.EnableSha512BlockHash { + copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) + if params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + } + copy(block.BlockHeader.TxnCommitments.Sha512Commitment[:], badDigestSlice) + a.False(block.ContentsMatchHeader()) - block.BlockHeader.TxnCommitments.NativeSha512_256Commitment = crypto.Digest{} - copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) - a.False(block.ContentsMatchHeader()) + // Test with missing SHA512 commitment + copy(block.BlockHeader.TxnCommitments.NativeSha512_256Commitment[:], rootSliceSHA512_256) + if params.EnableSHA256TxnCommitmentHeader { + copy(block.BlockHeader.TxnCommitments.Sha256Commitment[:], rootSliceSHA256) + } + block.BlockHeader.TxnCommitments.Sha512Commitment = crypto.Sha512Digest{} + a.False(block.ContentsMatchHeader()) + } } func TestBlockHeader_Serialization(t *testing.T) { @@ -943,6 +1000,54 @@ func TestBlockHeader_Serialization(t *testing.T) { a.Equal(crypto.Digest{}, blkHdr.TxnCommitments.Sha256Commitment) a.NotEqual(crypto.Digest{}, blkHdr.TxnCommitments.NativeSha512_256Commitment) + a.Equal(crypto.Sha512Digest{}, blkHdr.TxnCommitments.Sha512Commitment) + a.Equal(crypto.Sha512Digest{}, blkHdr.Branch512) +} + +// TestBlockHeader_PreCheck_Branch512 tests the Branch512 validation in PreCheck +func TestBlockHeader_PreCheck_Branch512(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) + + // Test consensus v40 (no EnableSha512BlockHash) + cv := protocol.ConsensusV40 + prevHeader := BlockHeader{Round: 1, GenesisID: "test"} + prevHeader.CurrentProtocol = cv + crypto.RandBytes(prevHeader.GenesisHash[:]) + // Make round 2 block that references block 1 as prev + currentHeader := BlockHeader{ + Round: prevHeader.Round + 1, GenesisID: prevHeader.GenesisID, GenesisHash: prevHeader.GenesisHash, + Branch: prevHeader.Hash(), + } + currentHeader.CurrentProtocol = cv + // empty Branch512 passes + a.NoError(currentHeader.PreCheck(prevHeader)) + // correct Branch512 fails + currentHeader.Branch512 = prevHeader.Hash512() + a.ErrorContains(currentHeader.PreCheck(prevHeader), "block 
branch512 not allowed") + // non-empty Branch512 fails + crypto.RandBytes(currentHeader.Branch512[:]) + a.ErrorContains(currentHeader.PreCheck(prevHeader), "block branch512 not allowed") + + // Test consensus future (EnableSha512BlockHash set) + cv = protocol.ConsensusFuture + prevHeader = BlockHeader{Round: 1, GenesisID: "test"} + prevHeader.CurrentProtocol = cv + crypto.RandBytes(prevHeader.GenesisHash[:]) + currentHeader = BlockHeader{ + Round: prevHeader.Round + 1, GenesisID: prevHeader.GenesisID, GenesisHash: prevHeader.GenesisHash, + Branch: prevHeader.Hash(), + } + currentHeader.CurrentProtocol = cv + // empty Branch512 fails + a.ErrorContains(currentHeader.PreCheck(prevHeader), "block branch512 incorrect") + // correct Branch512 passes + currentHeader.Branch512 = prevHeader.Hash512() + a.NoError(currentHeader.PreCheck(prevHeader)) + // incorrect Branch512 fails + crypto.RandBytes(currentHeader.Branch512[:]) + a.ErrorContains(currentHeader.PreCheck(prevHeader), "block branch512 incorrect") } func TestBonusUpgrades(t *testing.T) { diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go index f98f03f73d..5951354ad8 100644 --- a/data/bookkeeping/genesis.go +++ b/data/bookkeeping/genesis.go @@ -241,7 +241,7 @@ func MakeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalance Round: 0, Branch: BlockHash{}, Seed: committee.Seed(genesisHash), - TxnCommitments: TxnCommitments{NativeSha512_256Commitment: transactions.Payset{}.CommitGenesis(), Sha256Commitment: crypto.Digest{}}, + TxnCommitments: TxnCommitments{NativeSha512_256Commitment: transactions.Payset{}.CommitGenesis(), Sha256Commitment: crypto.Digest{}, Sha512Commitment: crypto.Sha512Digest{}}, TimeStamp: genesisBal.Timestamp, GenesisID: genesisID, RewardsState: genesisRewardsState, diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go index fef26fd1b9..09c66bf58d 100644 --- a/data/bookkeeping/msgp_gen.go +++ b/data/bookkeeping/msgp_gen.go @@ -7,7 +7,7 @@ import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" @@ -143,8 +143,8 @@ import ( func (z *Block) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0005Len := uint32(31) - var zb0005Mask uint64 /* 36 bits */ + zb0005Len := uint32(33) + var zb0005Mask uint64 /* 38 bits */ if (*z).BlockHeader.Bonus.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x20 @@ -185,7 +185,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x4000 } - if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 { + if (*z).BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x8000 } @@ -205,70 +205,78 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x80000 } - if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).BlockHeader.Branch512.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x100000 } - if (*z).BlockHeader.Proposer.MsgIsZero() { + if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x200000 } - if (*z).BlockHeader.RewardsState.RewardsRate == 0 { + if (*z).BlockHeader.Proposer.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x400000 } - if (*z).BlockHeader.Round.MsgIsZero() { + if (*z).BlockHeader.RewardsState.RewardsRate == 0 { zb0005Len-- zb0005Mask |= 
0x800000 } - if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { + if (*z).BlockHeader.Round.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x1000000 } - if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() { + if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x2000000 } - if (*z).BlockHeader.Seed.MsgIsZero() { + if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x4000000 } - if len((*z).BlockHeader.StateProofTracking) == 0 { + if (*z).BlockHeader.Seed.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x8000000 } - if (*z).BlockHeader.TxnCounter == 0 { + if len((*z).BlockHeader.StateProofTracking) == 0 { zb0005Len-- zb0005Mask |= 0x10000000 } - if (*z).BlockHeader.TimeStamp == 0 { + if (*z).BlockHeader.TxnCounter == 0 { zb0005Len-- zb0005Mask |= 0x20000000 } - if (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { + if (*z).BlockHeader.TimeStamp == 0 { zb0005Len-- zb0005Mask |= 0x40000000 } - if (*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { + if (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x80000000 } - if (*z).Payset.MsgIsZero() { + if (*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x100000000 } - if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { + if (*z).BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x200000000 } - if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { + if (*z).Payset.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x400000000 } - if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false { + if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x800000000 } + if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x1000000000 + } + if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false { + zb0005Len-- + zb0005Mask |= 0x2000000000 + } // variable map header, size zb0005Len o = msgp.AppendMapHeader(o, zb0005Len) if zb0005Len != 0 { @@ -325,7 +333,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) { if (zb0005Mask & 0x8000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) - o = msgp.AppendUint64(o, (*z).BlockHeader.UpgradeState.NextProtocolApprovals) + o = (*z).BlockHeader.UpgradeState.NextProtocolApprovals.MarshalMsg(o) } if (zb0005Mask & 0x10000) == 0 { // if not empty // string "partupdabs" @@ -362,41 +370,46 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) { o = (*z).BlockHeader.Branch.MarshalMsg(o) } if (zb0005Mask & 0x100000) == 0 { // if not empty + // string "prev512" + o = append(o, 0xa7, 0x70, 0x72, 0x65, 0x76, 0x35, 0x31, 0x32) + o = (*z).BlockHeader.Branch512.MarshalMsg(o) + } + if (zb0005Mask & 0x200000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o = (*z).BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o) } - if (zb0005Mask & 0x200000) == 0 { // if not empty + if (zb0005Mask & 0x400000) == 0 { // if not empty // string "prp" o = append(o, 0xa3, 0x70, 0x72, 0x70) o = (*z).BlockHeader.Proposer.MarshalMsg(o) } - if (zb0005Mask & 0x400000) == 0 { // if not empty + if (zb0005Mask & 0x800000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsRate) } - if (zb0005Mask & 0x800000) == 0 { // if not empty + if (zb0005Mask & 
0x1000000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).BlockHeader.Round.MarshalMsg(o) } - if (zb0005Mask & 0x1000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0005Mask & 0x2000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0005Mask & 0x4000000) == 0 { // if not empty + if (zb0005Mask & 0x8000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).BlockHeader.Seed.MarshalMsg(o) } - if (zb0005Mask & 0x8000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).BlockHeader.StateProofTracking == nil { @@ -416,42 +429,47 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0005Mask & 0x10000000) == 0 { // if not empty + if (zb0005Mask & 0x20000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).BlockHeader.TxnCounter) } - if (zb0005Mask & 0x20000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).BlockHeader.TimeStamp) } - if (zb0005Mask & 0x40000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x80000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x100000000) == 0 { // if not empty + if (zb0005Mask & 0x200000000) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).BlockHeader.TxnCommitments.Sha512Commitment.MarshalMsg(o) + } + if (zb0005Mask & 0x400000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o = (*z).Payset.MarshalMsg(o) } - if (zb0005Mask & 0x200000000) == 0 { // if not empty + if (zb0005Mask & 0x800000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0005Mask & 0x400000000) == 0 { // if not empty + if (zb0005Mask & 0x1000000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0005Mask & 0x800000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).BlockHeader.UpgradeVote.UpgradeApprove) @@ -499,6 +517,14 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = 
msgp.WrapError(err, "struct-from-array", "Branch512") + return + } + } if zb0005 > 0 { zb0005-- bts, err = (*z).BlockHeader.Seed.UnmarshalMsgWithState(bts, st) @@ -523,6 +549,14 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0005 > 0 { zb0005-- (*z).BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) @@ -539,8 +573,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -655,7 +689,7 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b } if zb0005 > 0 { zb0005-- - (*z).BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals") return @@ -754,8 +788,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } - if zb0010 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0010 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -783,8 +817,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } - if zb0012 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxMarkAbsent)) + if zb0012 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -846,6 +880,12 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "Branch") return } + case "prev512": + bts, err = (*z).BlockHeader.Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Branch512") + return + } case "seed": bts, err = (*z).BlockHeader.Seed.UnmarshalMsgWithState(bts, st) if err != nil { @@ -864,6 +904,12 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).BlockHeader.TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } case "ts": (*z).BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { @@ -877,8 +923,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "GenesisID") return } - if zb0014 > config.MaxGenesisIDLen { - 
err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxGenesisIDLen)) + if zb0014 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0014), uint64(bounds.MaxGenesisIDLen)) return } (*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -965,7 +1011,7 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b return } case "nextyes": - (*z).BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).BlockHeader.UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "NextProtocolApprovals") return @@ -1048,8 +1094,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0017 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0017 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0017), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -1075,8 +1121,8 @@ func (z *Block) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []b err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0019 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxMarkAbsent)) + if zb0019 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0019), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -1123,7 +1169,7 @@ func (_ *Block) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Block) Msgsize() (s int) { - s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 4 + (*z).BlockHeader.Proposer.Msgsize() + 3 + (*z).BlockHeader.FeesCollected.Msgsize() + 3 + (*z).BlockHeader.Bonus.Msgsize() + 3 + (*z).BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize + s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 8 + (*z).BlockHeader.Branch512.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 7 + (*z).BlockHeader.TxnCommitments.Sha512Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + 
len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 4 + (*z).BlockHeader.Proposer.Msgsize() + 3 + (*z).BlockHeader.FeesCollected.Msgsize() + 3 + (*z).BlockHeader.Bonus.Msgsize() + 3 + (*z).BlockHeader.ProposerPayout.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + (*z).BlockHeader.UpgradeState.NextProtocolApprovals.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize if (*z).BlockHeader.StateProofTracking != nil { for zb0001, zb0002 := range (*z).BlockHeader.StateProofTracking { _ = zb0001 @@ -1145,12 +1191,12 @@ func (z *Block) Msgsize() (s int) { // MsgIsZero returns whether this is a zero value func (z *Block) MsgIsZero() bool { - return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.Proposer.MsgIsZero()) && ((*z).BlockHeader.FeesCollected.MsgIsZero()) && ((*z).BlockHeader.Bonus.MsgIsZero()) && ((*z).BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.StateProofTracking) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero()) + return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Branch512.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha512Commitment.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.Proposer.MsgIsZero()) && 
((*z).BlockHeader.FeesCollected.MsgIsZero()) && ((*z).BlockHeader.Bonus.MsgIsZero()) && ((*z).BlockHeader.ProposerPayout.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.StateProofTracking) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).BlockHeader.ParticipationUpdates.AbsentParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func BlockMaxSize() (s int) { - s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 8 + crypto.Sha512DigestMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.BlockHeader.StateProofTracking s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize()) @@ -1158,13 +1204,13 @@ func BlockMaxSize() (s int) { s += protocol.NumStateProofTypes * (StateProofTrackingDataMaxSize()) s += 11 // Calculating size of slice: z.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * 
(basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.BlockHeader.ParticipationUpdates.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) s += 5 // Using maxtotalbytes for: z.Payset - s += config.MaxTxnBytesPerBlock + s += bounds.MaxTxnBytesPerBlock return } @@ -1208,8 +1254,8 @@ func BlockHashMaxSize() int { func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0005Len := uint32(30) - var zb0005Mask uint64 /* 35 bits */ + zb0005Len := uint32(32) + var zb0005Mask uint64 /* 37 bits */ if (*z).Bonus.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x20 @@ -1250,7 +1296,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x4000 } - if (*z).UpgradeState.NextProtocolApprovals == 0 { + if (*z).UpgradeState.NextProtocolApprovals.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x8000 } @@ -1270,66 +1316,74 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { zb0005Len-- zb0005Mask |= 0x80000 } - if (*z).UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).Branch512.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x100000 } - if (*z).Proposer.MsgIsZero() { + if (*z).UpgradeState.CurrentProtocol.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x200000 } - if (*z).RewardsState.RewardsRate == 0 { + if (*z).Proposer.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x400000 } - if (*z).Round.MsgIsZero() { + if (*z).RewardsState.RewardsRate == 0 { zb0005Len-- zb0005Mask |= 0x800000 } - if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() { + if (*z).Round.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x1000000 } - if (*z).RewardsState.RewardsPool.MsgIsZero() { + if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x2000000 } - if (*z).Seed.MsgIsZero() { + if (*z).RewardsState.RewardsPool.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x4000000 } - if len((*z).StateProofTracking) == 0 { + if (*z).Seed.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x8000000 } - if (*z).TxnCounter == 0 { + if len((*z).StateProofTracking) == 0 { zb0005Len-- zb0005Mask |= 0x10000000 } - if (*z).TimeStamp == 0 { + if (*z).TxnCounter == 0 { zb0005Len-- zb0005Mask |= 0x20000000 } - if (*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { + if (*z).TimeStamp == 0 { zb0005Len-- zb0005Mask |= 0x40000000 } - if (*z).TxnCommitments.Sha256Commitment.MsgIsZero() { + if (*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x80000000 } - if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() { + if (*z).TxnCommitments.Sha256Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x100000000 } - if (*z).UpgradeVote.UpgradePropose.MsgIsZero() { + if (*z).TxnCommitments.Sha512Commitment.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x200000000 } - if (*z).UpgradeVote.UpgradeApprove == false { + if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() { zb0005Len-- zb0005Mask |= 0x400000000 } + if (*z).UpgradeVote.UpgradePropose.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x800000000 + } + if (*z).UpgradeVote.UpgradeApprove == false { + zb0005Len-- + zb0005Mask |= 0x1000000000 + } // variable map header, size zb0005Len o = msgp.AppendMapHeader(o, zb0005Len) if zb0005Len != 0 { @@ -1386,7 +1440,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { if (zb0005Mask & 0x8000) == 0 { 
// if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) - o = msgp.AppendUint64(o, (*z).UpgradeState.NextProtocolApprovals) + o = (*z).UpgradeState.NextProtocolApprovals.MarshalMsg(o) } if (zb0005Mask & 0x10000) == 0 { // if not empty // string "partupdabs" @@ -1423,41 +1477,46 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { o = (*z).Branch.MarshalMsg(o) } if (zb0005Mask & 0x100000) == 0 { // if not empty + // string "prev512" + o = append(o, 0xa7, 0x70, 0x72, 0x65, 0x76, 0x35, 0x31, 0x32) + o = (*z).Branch512.MarshalMsg(o) + } + if (zb0005Mask & 0x200000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o = (*z).UpgradeState.CurrentProtocol.MarshalMsg(o) } - if (zb0005Mask & 0x200000) == 0 { // if not empty + if (zb0005Mask & 0x400000) == 0 { // if not empty // string "prp" o = append(o, 0xa3, 0x70, 0x72, 0x70) o = (*z).Proposer.MarshalMsg(o) } - if (zb0005Mask & 0x400000) == 0 { // if not empty + if (zb0005Mask & 0x800000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).RewardsState.RewardsRate) } - if (zb0005Mask & 0x800000) == 0 { // if not empty + if (zb0005Mask & 0x1000000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).Round.MarshalMsg(o) } - if (zb0005Mask & 0x1000000) == 0 { // if not empty + if (zb0005Mask & 0x2000000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = (*z).RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0005Mask & 0x2000000) == 0 { // if not empty + if (zb0005Mask & 0x4000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0005Mask & 0x4000000) == 0 { // if not empty + if (zb0005Mask & 0x8000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).Seed.MarshalMsg(o) } - if (zb0005Mask & 0x8000000) == 0 { // if not empty + if (zb0005Mask & 0x10000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).StateProofTracking == nil { @@ -1477,37 +1536,42 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0005Mask & 0x10000000) == 0 { // if not empty + if (zb0005Mask & 0x20000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).TxnCounter) } - if (zb0005Mask & 0x20000000) == 0 { // if not empty + if (zb0005Mask & 0x40000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).TimeStamp) } - if (zb0005Mask & 0x40000000) == 0 { // if not empty + if (zb0005Mask & 0x80000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = (*z).TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x80000000) == 0 { // if not empty + if (zb0005Mask & 0x100000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0005Mask & 0x100000000) == 0 { // if not empty + if (zb0005Mask & 0x200000000) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).TxnCommitments.Sha512Commitment.MarshalMsg(o) + } + if (zb0005Mask & 0x400000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 
0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0005Mask & 0x200000000) == 0 { // if not empty + if (zb0005Mask & 0x800000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0005Mask & 0x400000000) == 0 { // if not empty + if (zb0005Mask & 0x1000000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).UpgradeVote.UpgradeApprove) @@ -1555,6 +1619,14 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Branch512") + return + } + } if zb0005 > 0 { zb0005-- bts, err = (*z).Seed.UnmarshalMsgWithState(bts, st) @@ -1579,6 +1651,14 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) return } } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0005 > 0 { zb0005-- (*z).TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) @@ -1595,8 +1675,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -1711,7 +1791,7 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) } if zb0005 > 0 { zb0005-- - (*z).UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals") return @@ -1810,8 +1890,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } - if zb0010 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0010 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -1839,8 +1919,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } - if zb0012 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxMarkAbsent)) + if zb0012 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -1894,6 +1974,12 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Branch") return } + case "prev512": + bts, err = 
(*z).Branch512.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Branch512") + return + } case "seed": bts, err = (*z).Seed.UnmarshalMsgWithState(bts, st) if err != nil { @@ -1912,6 +1998,12 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).TxnCommitments.Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } case "ts": (*z).TimeStamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { @@ -1925,8 +2017,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "GenesisID") return } - if zb0014 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxGenesisIDLen)) + if zb0014 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0014), uint64(bounds.MaxGenesisIDLen)) return } (*z).GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -2013,7 +2105,7 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) return } case "nextyes": - (*z).UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).UpgradeState.NextProtocolApprovals.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "NextProtocolApprovals") return @@ -2096,8 +2188,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0017 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0017 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0017), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -2123,8 +2215,8 @@ func (z *BlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0019 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxMarkAbsent)) + if zb0019 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0019), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -2165,7 +2257,7 @@ func (_ *BlockHeader) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BlockHeader) Msgsize() (s int) { - s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 4 + (*z).Proposer.Msgsize() + 3 + (*z).FeesCollected.Msgsize() + 3 + (*z).Bonus.Msgsize() + 3 + (*z).ProposerPayout.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 
13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize + s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 8 + (*z).Branch512.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).TxnCommitments.Sha256Commitment.Msgsize() + 7 + (*z).TxnCommitments.Sha512Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 4 + (*z).Proposer.Msgsize() + 3 + (*z).FeesCollected.Msgsize() + 3 + (*z).Bonus.Msgsize() + 3 + (*z).ProposerPayout.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + (*z).UpgradeState.NextProtocolApprovals.Msgsize() + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize if (*z).StateProofTracking != nil { for zb0001, zb0002 := range (*z).StateProofTracking { _ = zb0001 @@ -2186,12 +2278,12 @@ func (z *BlockHeader) Msgsize() (s int) { // MsgIsZero returns whether this is a zero value func (z *BlockHeader) MsgIsZero() bool { - return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).Proposer.MsgIsZero()) && ((*z).FeesCollected.MsgIsZero()) && ((*z).Bonus.MsgIsZero()) && ((*z).ProposerPayout.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).StateProofTracking) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).ParticipationUpdates.AbsentParticipationAccounts) == 0) + return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Branch512.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha512Commitment.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).Proposer.MsgIsZero()) && ((*z).FeesCollected.MsgIsZero()) && ((*z).Bonus.MsgIsZero()) && ((*z).ProposerPayout.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && 
((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals.MsgIsZero()) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).StateProofTracking) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0) && (len((*z).ParticipationUpdates.AbsentParticipationAccounts) == 0) } // MaxSize returns a maximum valid message size for this message type func BlockHeaderMaxSize() (s int) { - s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 8 + crypto.Sha512DigestMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 s += msgp.MapHeaderSize // Adding size of map keys for z.StateProofTracking s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize()) @@ -2199,10 +2291,10 @@ func BlockHeaderMaxSize() (s int) { s += protocol.NumStateProofTypes * (StateProofTrackingDataMaxSize()) s += 11 // Calculating size of slice: z.ParticipationUpdates.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.ParticipationUpdates.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) return } @@ -3290,8 +3382,8 @@ func (z *ParticipationUpdates) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return 
} - if zb0005 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0005 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts") return } @@ -3319,8 +3411,8 @@ func (z *ParticipationUpdates) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } - if zb0007 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxMarkAbsent)) + if zb0007 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "struct-from-array", "AbsentParticipationAccounts") return } @@ -3370,8 +3462,8 @@ func (z *ParticipationUpdates) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } - if zb0009 > config.MaxProposedExpiredOnlineAccounts { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts)) + if zb0009 > bounds.MaxProposedExpiredOnlineAccounts { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxProposedExpiredOnlineAccounts)) err = msgp.WrapError(err, "ExpiredParticipationAccounts") return } @@ -3397,8 +3489,8 @@ func (z *ParticipationUpdates) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "AbsentParticipationAccounts") return } - if zb0011 > config.MaxMarkAbsent { - err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxMarkAbsent)) + if zb0011 > bounds.MaxMarkAbsent { + err = msgp.ErrOverflow(uint64(zb0011), uint64(bounds.MaxMarkAbsent)) err = msgp.WrapError(err, "AbsentParticipationAccounts") return } @@ -3459,10 +3551,10 @@ func (z *ParticipationUpdates) MsgIsZero() bool { func ParticipationUpdatesMaxSize() (s int) { s = 1 + 11 // Calculating size of slice: z.ExpiredParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize())) s += 11 // Calculating size of slice: z.AbsentParticipationAccounts - s += msgp.ArrayHeaderSize + ((config.MaxMarkAbsent) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxMarkAbsent) * (basics.AddressMaxSize())) return } @@ -3871,8 +3963,8 @@ func StateProofTrackingDataMaxSize() (s int) { func (z *TxnCommitments) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(2) - var zb0001Mask uint8 /* 3 bits */ + zb0001Len := uint32(3) + var zb0001Mask uint8 /* 4 bits */ if (*z).NativeSha512_256Commitment.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2 @@ -3881,6 +3973,10 @@ func (z *TxnCommitments) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x4 } + if (*z).Sha512Commitment.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -3894,6 +3990,11 @@ func (z *TxnCommitments) MarshalMsg(b []byte) (o []byte) { o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).Sha256Commitment.MarshalMsg(o) } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "txn512" + o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x35, 0x31, 0x32) + o = (*z).Sha512Commitment.MarshalMsg(o) + } } return } @@ -3937,6 
+4038,14 @@ func (z *TxnCommitments) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalStat return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sha512Commitment") + return + } + } if zb0001 > 0 { err = msgp.ErrTooManyArrayFields(zb0001) if err != nil { @@ -3972,6 +4081,12 @@ func (z *TxnCommitments) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalStat err = msgp.WrapError(err, "Sha256Commitment") return } + case "txn512": + bts, err = (*z).Sha512Commitment.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Sha512Commitment") + return + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -3995,18 +4110,18 @@ func (_ *TxnCommitments) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *TxnCommitments) Msgsize() (s int) { - s = 1 + 4 + (*z).NativeSha512_256Commitment.Msgsize() + 7 + (*z).Sha256Commitment.Msgsize() + s = 1 + 4 + (*z).NativeSha512_256Commitment.Msgsize() + 7 + (*z).Sha256Commitment.Msgsize() + 7 + (*z).Sha512Commitment.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *TxnCommitments) MsgIsZero() bool { - return ((*z).NativeSha512_256Commitment.MsgIsZero()) && ((*z).Sha256Commitment.MsgIsZero()) + return ((*z).NativeSha512_256Commitment.MsgIsZero()) && ((*z).Sha256Commitment.MsgIsZero()) && ((*z).Sha512Commitment.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func TxnCommitmentsMaxSize() (s int) { - s = 1 + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + s = 1 + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 7 + crypto.Sha512DigestMaxSize() return } diff --git a/data/bookkeeping/txn_merkle.go b/data/bookkeeping/txn_merkle.go index cb00037b78..66e3d60213 100644 --- a/data/bookkeeping/txn_merkle.go +++ b/data/bookkeeping/txn_merkle.go @@ -26,7 +26,7 @@ import ( ) // TxnMerkleTree returns a cryptographic commitment to the transactions in the -// block, along with their ApplyData, as a Merkle tree. This allows the +// block, along with their ApplyData, as a Merkle tree using SHA-512/256. This allows the // caller to either extract the root hash (for inclusion in the block // header), or to generate proofs of membership for transactions that are // in this block. @@ -35,7 +35,7 @@ func (block Block) TxnMerkleTree() (*merklearray.Tree, error) { } // TxnMerkleTreeSHA256 returns a cryptographic commitment to the transactions in the -// block, along with their ApplyData, as a Merkle tree vector commitment, using SHA256. This allows the +// block, along with their ApplyData, as a Merkle tree vector commitment, using SHA-256. This allows the // caller to either extract the root hash (for inclusion in the block // header), or to generate proofs of membership for transactions that are // in this block. @@ -43,6 +43,15 @@ func (block Block) TxnMerkleTreeSHA256() (*merklearray.Tree, error) { return merklearray.BuildVectorCommitmentTree(&txnMerkleArray{block: block, hashType: crypto.Sha256}, crypto.HashFactory{HashType: crypto.Sha256}) } +// TxnMerkleTreeSHA512 returns a cryptographic commitment to the transactions in the +// block, along with their ApplyData, as a Merkle tree vector commitment, using SHA-512. 
This allows the +// caller to either extract the root hash (for inclusion in the block +// header), or to generate proofs of membership for transactions that are +// in this block. +func (block Block) TxnMerkleTreeSHA512() (*merklearray.Tree, error) { + return merklearray.BuildVectorCommitmentTree(&txnMerkleArray{block: block, hashType: crypto.Sha512}, crypto.HashFactory{HashType: crypto.Sha512}) +} + // txnMerkleArray is a representation of the transactions in this block, // along with their ApplyData, as an array for the merklearray package. type txnMerkleArray struct { diff --git a/data/ledger_test.go b/data/ledger_test.go index 50efbd0339..00d633e232 100644 --- a/data/ledger_test.go +++ b/data/ledger_test.go @@ -722,6 +722,10 @@ func getEmptyBlock(afterRound basics.Round, l *ledger.Ledger, genesisID string, blk.BlockHeader.GenesisHash = crypto.Hash([]byte(genesisID)) } + if proto.EnableSha512BlockHash { + blk.BlockHeader.Branch512 = lastBlock.Hash512() + } + blk.RewardsPool = testPoolAddr blk.FeeSink = testSinkAddr blk.CurrentProtocol = lastBlock.CurrentProtocol diff --git a/data/stateproofmsg/message.go b/data/stateproofmsg/message.go index 88f3493d69..765786423e 100644 --- a/data/stateproofmsg/message.go +++ b/data/stateproofmsg/message.go @@ -19,6 +19,7 @@ package stateproofmsg import ( "github.com/algorand/go-algorand/crypto" sp "github.com/algorand/go-algorand/crypto/stateproof" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" ) @@ -29,11 +30,11 @@ import ( type Message struct { _struct struct{} `codec:",omitempty,omitemptyarray"` // BlockHeadersCommitment contains a commitment on all light block headers within a state proof interval. - BlockHeadersCommitment []byte `codec:"b,allocbound=crypto.Sha256Size"` - VotersCommitment []byte `codec:"v,allocbound=crypto.SumhashDigestSize"` - LnProvenWeight uint64 `codec:"P"` - FirstAttestedRound uint64 `codec:"f"` - LastAttestedRound uint64 `codec:"l"` + BlockHeadersCommitment []byte `codec:"b,allocbound=crypto.Sha256Size"` + VotersCommitment []byte `codec:"v,allocbound=crypto.SumhashDigestSize"` + LnProvenWeight uint64 `codec:"P"` + FirstAttestedRound basics.Round `codec:"f"` + LastAttestedRound basics.Round `codec:"l"` } // ToBeHashed returns the bytes of the message. 
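(Reading aid, not part of the patch: the generated code above threads three new SHA-512 items through the block header — the `prev512`/`Branch512` field, the `txn512`/`Sha512Commitment` field, and the `TxnMerkleTreeSHA512` helper. The sketch below shows, under stated assumptions, how a block producer might populate them when the consensus flag `EnableSha512BlockHash` seen in the ledger_test.go hunk is set. The helper name `applySha512Commitments` is hypothetical, and the `copy` into `Sha512Commitment[:]` assumes that field is a fixed-size digest array mirroring the existing SHA-256 path, as the generated `MaxSize` code using `crypto.Sha512DigestMaxSize()` suggests.)

package bookkeeping

// applySha512Commitments is an illustrative sketch, not code from this change.
// It mirrors the existing SHA-256 commitment flow for the new SHA-512 fields.
func applySha512Commitments(blk *Block, prev Block, enableSha512 bool) error {
	if !enableSha512 {
		// Older protocols leave Branch512 and Sha512Commitment as zero values,
		// which the generated omitempty logic above drops from the encoding.
		return nil
	}
	// Build the SHA-512 vector commitment over the block's transactions,
	// using the TxnMerkleTreeSHA512 helper added in txn_merkle.go above.
	tree, err := blk.TxnMerkleTreeSHA512()
	if err != nil {
		return err
	}
	// Assumption: Sha512Commitment is a fixed-size digest array, so copying
	// the tree root into it follows the same pattern as Sha256Commitment.
	copy(blk.BlockHeader.TxnCommitments.Sha512Commitment[:], tree.Root())
	// Chain the SHA-512 hash of the previous block, as in ledger_test.go above.
	blk.BlockHeader.Branch512 = prev.Hash512()
	return nil
}
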
diff --git a/data/stateproofmsg/msgp_gen.go b/data/stateproofmsg/msgp_gen.go index c6d6396a2b..ed245dad0d 100644 --- a/data/stateproofmsg/msgp_gen.go +++ b/data/stateproofmsg/msgp_gen.go @@ -6,6 +6,7 @@ import ( "github.com/algorand/msgp/msgp" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" ) // The following msgp objects are implemented in this file: @@ -34,11 +35,11 @@ func (z *Message) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x4 } - if (*z).FirstAttestedRound == 0 { + if (*z).FirstAttestedRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x8 } - if (*z).LastAttestedRound == 0 { + if (*z).LastAttestedRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x10 } @@ -62,12 +63,12 @@ func (z *Message) MarshalMsg(b []byte) (o []byte) { if (zb0001Mask & 0x8) == 0 { // if not empty // string "f" o = append(o, 0xa1, 0x66) - o = msgp.AppendUint64(o, (*z).FirstAttestedRound) + o = (*z).FirstAttestedRound.MarshalMsg(o) } if (zb0001Mask & 0x10) == 0 { // if not empty // string "l" o = append(o, 0xa1, 0x6c) - o = msgp.AppendUint64(o, (*z).LastAttestedRound) + o = (*z).LastAttestedRound.MarshalMsg(o) } if (zb0001Mask & 0x20) == 0 { // if not empty // string "v" @@ -147,7 +148,7 @@ func (z *Message) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ } if zb0001 > 0 { zb0001-- - (*z).FirstAttestedRound, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).FirstAttestedRound.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "FirstAttestedRound") return @@ -155,7 +156,7 @@ func (z *Message) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ } if zb0001 > 0 { zb0001-- - (*z).LastAttestedRound, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).LastAttestedRound.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "struct-from-array", "LastAttestedRound") return @@ -223,13 +224,13 @@ func (z *Message) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ return } case "f": - (*z).FirstAttestedRound, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).FirstAttestedRound.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "FirstAttestedRound") return } case "l": - (*z).LastAttestedRound, bts, err = msgp.ReadUint64Bytes(bts) + bts, err = (*z).LastAttestedRound.UnmarshalMsgWithState(bts, st) if err != nil { err = msgp.WrapError(err, "LastAttestedRound") return @@ -257,17 +258,17 @@ func (_ *Message) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Message) Msgsize() (s int) { - s = 1 + 2 + msgp.BytesPrefixSize + len((*z).BlockHeadersCommitment) + 2 + msgp.BytesPrefixSize + len((*z).VotersCommitment) + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + s = 1 + 2 + msgp.BytesPrefixSize + len((*z).BlockHeadersCommitment) + 2 + msgp.BytesPrefixSize + len((*z).VotersCommitment) + 2 + msgp.Uint64Size + 2 + (*z).FirstAttestedRound.Msgsize() + 2 + (*z).LastAttestedRound.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *Message) MsgIsZero() bool { - return (len((*z).BlockHeadersCommitment) == 0) && (len((*z).VotersCommitment) == 0) && ((*z).LnProvenWeight == 0) && ((*z).FirstAttestedRound == 0) && ((*z).LastAttestedRound == 0) + return (len((*z).BlockHeadersCommitment) == 0) && (len((*z).VotersCommitment) == 0) && ((*z).LnProvenWeight == 0) && ((*z).FirstAttestedRound.MsgIsZero()) && 
((*z).LastAttestedRound.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func MessageMaxSize() (s int) { - s = 1 + 2 + msgp.BytesPrefixSize + crypto.Sha256Size + 2 + msgp.BytesPrefixSize + crypto.SumhashDigestSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + s = 1 + 2 + msgp.BytesPrefixSize + crypto.Sha256Size + 2 + msgp.BytesPrefixSize + crypto.SumhashDigestSize + 2 + msgp.Uint64Size + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() return } diff --git a/data/transactions/application.go b/data/transactions/application.go index 21793b1976..db43130aad 100644 --- a/data/transactions/application.go +++ b/data/transactions/application.go @@ -109,7 +109,7 @@ type ApplicationCallTxnFields struct { // ApplicationArgs are arguments accessible to the executing // ApprovalProgram or ClearStateProgram. - ApplicationArgs [][]byte `codec:"apaa,allocbound=encodedMaxApplicationArgs,maxtotalbytes=config.MaxAppTotalArgLen"` + ApplicationArgs [][]byte `codec:"apaa,allocbound=encodedMaxApplicationArgs,maxtotalbytes=bounds.MaxAppTotalArgLen"` // Accounts are accounts whose balance records are accessible // by the executing ApprovalProgram or ClearStateProgram. To @@ -152,14 +152,14 @@ type ApplicationCallTxnFields struct { // except for those where OnCompletion is equal to ClearStateOC. If // this program fails, the transaction is rejected. This program may // read and write local and global state for this application. - ApprovalProgram []byte `codec:"apap,allocbound=config.MaxAvailableAppProgramLen"` + ApprovalProgram []byte `codec:"apap,allocbound=bounds.MaxAvailableAppProgramLen"` // ClearStateProgram is the stateful TEAL bytecode that executes on // ApplicationCall transactions associated with this application when // OnCompletion is equal to ClearStateOC. This program will not cause // the transaction to be rejected, even if it fails. This program may // read and write local and global state for this application. - ClearStateProgram []byte `codec:"apsu,allocbound=config.MaxAvailableAppProgramLen"` + ClearStateProgram []byte `codec:"apsu,allocbound=bounds.MaxAvailableAppProgramLen"` // ExtraProgramPages specifies the additional app program len requested in pages. // A page is MaxAppProgramLen bytes. 
This field enables execution of app programs @@ -179,7 +179,7 @@ type BoxRef struct { _struct struct{} `codec:",omitempty,omitemptyarray"` Index uint64 `codec:"i"` - Name []byte `codec:"n,allocbound=config.MaxBytesKeyValueLen"` + Name []byte `codec:"n,allocbound=bounds.MaxBytesKeyValueLen"` } // Empty indicates whether or not all the fields in the diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index a8448a3194..e058a3687c 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -33,6 +33,7 @@ import ( "strings" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -52,7 +53,7 @@ const maxStringSize = 4096 const maxByteMathSize = 64 // maxLogSize is the limit of total log size from n log calls in a program -const maxLogSize = config.MaxEvalDeltaTotalLogSize +const maxLogSize = bounds.MaxEvalDeltaTotalLogSize // maxLogCalls is the limit of total log calls during a program execution const maxLogCalls = 32 @@ -66,7 +67,7 @@ var maxAppCallDepth = 8 // maxStackDepth should not change unless controlled by an AVM version change const maxStackDepth = 1000 -// maxTxGroupSize is the same as config.MaxTxGroupSize, but is a constant so +// maxTxGroupSize is the same as bounds.MaxTxGroupSize, but is a constant so // that we can declare an array of this size. A unit test confirms that they // match. const maxTxGroupSize = 16 @@ -294,10 +295,10 @@ type UnnamedResourcePolicy interface { // EvalConstants contains constant parameters that are used by opcodes during evaluation (including both real-execution and simulation). type EvalConstants struct { // MaxLogSize is the limit of total log size from n log calls in a program - MaxLogSize uint64 + MaxLogSize int // MaxLogCalls is the limit of total log calls during a program execution - MaxLogCalls uint64 + MaxLogCalls int // UnnamedResources, if provided, allows resources to be used without being named according to // this policy. @@ -307,8 +308,8 @@ type EvalConstants struct { // RuntimeEvalConstants gives a set of const params used in normal runtime of opcodes func RuntimeEvalConstants() EvalConstants { return EvalConstants{ - MaxLogSize: uint64(maxLogSize), - MaxLogCalls: uint64(maxLogCalls), + MaxLogSize: maxLogSize, + MaxLogCalls: maxLogCalls, } } @@ -627,15 +628,9 @@ func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) { } ep.available.createdAsas[aid] = struct{}{} } - if aid := ad.ApplicationID; aid != 0 { - if ep.available == nil { - ep.available = ep.computeAvailability() - } - if ep.available.createdApps == nil { - ep.available.createdApps = make(map[basics.AppIndex]struct{}) - } - ep.available.createdApps[aid] = struct{}{} - } + // we don't need to add ad.ApplicationID to createdApps, because that is + // done at the beginning of app execution now, so that newly created apps + // will already have their appID present. 
} type frame struct { @@ -1151,12 +1146,10 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam } } // and add the appID to `createdApps` - if cx.EvalParams.Proto.LogicSigVersion >= sharedResourcesVersion { - if cx.EvalParams.available.createdApps == nil { - cx.EvalParams.available.createdApps = make(map[basics.AppIndex]struct{}) - } - cx.EvalParams.available.createdApps[cx.appID] = struct{}{} + if cx.EvalParams.available.createdApps == nil { + cx.EvalParams.available.createdApps = make(map[basics.AppIndex]struct{}) } + cx.EvalParams.available.createdApps[cx.appID] = struct{}{} } // Check the I/O budget for reading if this is the first top-level app call @@ -4284,8 +4277,7 @@ func (cx *EvalContext) availableAccount(addr basics.Address) bool { // Allow an address for an app that was created in group if cx.version >= createdResourcesVersion { for appID := range cx.available.createdApps { - createdAddress := cx.GetApplicationAddress(appID) - if addr == createdAddress { + if addr == cx.GetApplicationAddress(appID) { return true } } @@ -5108,12 +5100,12 @@ func opOnlineStake(cx *EvalContext) error { func opLog(cx *EvalContext) error { last := len(cx.Stack) - 1 - if uint64(len(cx.txn.EvalDelta.Logs)) >= cx.MaxLogCalls { + if len(cx.txn.EvalDelta.Logs) >= cx.MaxLogCalls { return fmt.Errorf("too many log calls in program. up to %d is allowed", cx.MaxLogCalls) } log := cx.Stack[last] cx.logSize += len(log.Bytes) - if uint64(cx.logSize) > cx.MaxLogSize { + if cx.logSize > cx.MaxLogSize { return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, cx.MaxLogSize) } cx.txn.EvalDelta.Logs = append(cx.txn.EvalDelta.Logs, string(log.Bytes)) diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go index 5c06495973..3008d517d6 100644 --- a/data/transactions/logic/evalAppTxn_test.go +++ b/data/transactions/logic/evalAppTxn_test.go @@ -1379,7 +1379,7 @@ int 1 }) } -func TestCreateOldAppFails(t *testing.T) { +func TestCreateOldAppErrs(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -2600,7 +2600,7 @@ func TestGtixn(t *testing.T) { ledger.NewApp(tx.Receiver, 888, basics.AppParams{}) ledger.NewAccount(appAddr(888), 50_000) - tx.ForeignApps = []basics.AppIndex{basics.AppIndex(222), basics.AppIndex(333), basics.AppIndex(444)} + tx.ForeignApps = []basics.AppIndex{222, 333, 444} TestApp(t, ` itxn_begin @@ -2683,7 +2683,7 @@ func TestGtxnLog(t *testing.T) { ledger.NewApp(tx.Receiver, 888, basics.AppParams{}) ledger.NewAccount(appAddr(888), 50_000) - tx.ForeignApps = []basics.AppIndex{basics.AppIndex(222), basics.AppIndex(333)} + tx.ForeignApps = []basics.AppIndex{222, 333} TestApp(t, `itxn_begin int appl; itxn_field TypeEnum diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index f41b537692..4f7f0d3aea 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -1456,16 +1456,16 @@ func TestAppDisambiguation(t *testing.T) { ep.UnnamedResources = &mockUnnamedResourcePolicy{allowEverything: true} } // make apps with identifiable properties, so we can tell what we get - makeIdentifiableApp := func(appID uint64) { - ledger.NewApp(tx.Sender, basics.AppIndex(appID), basics.AppParams{ + makeIdentifiableApp := func(appID basics.AppIndex) { + ledger.NewApp(tx.Sender, appID, basics.AppParams{ GlobalState: map[string]basics.TealValue{"a": { Type: basics.TealUintType, - Uint: appID, + Uint: uint64(appID), }}, 
ExtraProgramPages: uint32(appID), }) ledger.NewLocals(tx.Sender, appID) - ledger.NewLocal(tx.Sender, appID, "x", basics.TealValue{Type: basics.TealUintType, Uint: appID * 10}) + ledger.NewLocal(tx.Sender, appID, "x", basics.TealValue{Type: basics.TealUintType, Uint: uint64(appID) * 10}) } makeIdentifiableApp(1) makeIdentifiableApp(20) diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index f5bb656ef2..923279dd81 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -32,6 +32,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -4589,7 +4590,7 @@ func TestLinearOpcodes(t *testing.T) { } } -func TestRekeyFailsOnOldVersion(t *testing.T) { +func TestRekeyErrsOnOldVersion(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -6226,5 +6227,5 @@ func TestMaxTxGroup(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - require.Equal(t, config.MaxTxGroupSize, maxTxGroupSize) + require.Equal(t, bounds.MaxTxGroupSize, maxTxGroupSize) } diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go index 7a43019ecc..eb55406075 100644 --- a/data/transactions/logic/ledger_test.go +++ b/data/transactions/logic/ledger_test.go @@ -170,41 +170,41 @@ func (l *Ledger) Counter() uint64 { } // NewHolding sets the ASA balance of a given account. -func (l *Ledger) NewHolding(addr basics.Address, assetID uint64, amount uint64, frozen bool) { +func (l *Ledger) NewHolding(addr basics.Address, assetID basics.AssetIndex, amount uint64, frozen bool) { br, ok := l.balances[addr] if !ok { br = newBalanceRecord(addr, 0) } - br.holdings[basics.AssetIndex(assetID)] = basics.AssetHolding{Amount: amount, Frozen: frozen} + br.holdings[assetID] = basics.AssetHolding{Amount: amount, Frozen: frozen} l.balances[addr] = br } // NewLocals essentially "opts in" an address to an app id. -func (l *Ledger) NewLocals(addr basics.Address, appID uint64) { +func (l *Ledger) NewLocals(addr basics.Address, appID basics.AppIndex) { if _, ok := l.balances[addr]; !ok { l.balances[addr] = newBalanceRecord(addr, 0) } - l.balances[addr].locals[basics.AppIndex(appID)] = basics.TealKeyValue{} + l.balances[addr].locals[appID] = basics.TealKeyValue{} } // NewLocal sets a local value of an app on an address -func (l *Ledger) NewLocal(addr basics.Address, appID uint64, key string, value basics.TealValue) { - l.balances[addr].locals[basics.AppIndex(appID)][key] = value +func (l *Ledger) NewLocal(addr basics.Address, appID basics.AppIndex, key string, value basics.TealValue) { + l.balances[addr].locals[appID][key] = value } // NoLocal removes a key from an address locals for an app. 
-func (l *Ledger) NoLocal(addr basics.Address, appID uint64, key string) { - delete(l.balances[addr].locals[basics.AppIndex(appID)], key) +func (l *Ledger) NoLocal(addr basics.Address, appID basics.AppIndex, key string) { + delete(l.balances[addr].locals[appID], key) } // NewGlobal sets a global value for an app -func (l *Ledger) NewGlobal(appID uint64, key string, value basics.TealValue) { - l.applications[basics.AppIndex(appID)].GlobalState[key] = value +func (l *Ledger) NewGlobal(appID basics.AppIndex, key string, value basics.TealValue) { + l.applications[appID].GlobalState[key] = value } // NoGlobal removes a global key for an app -func (l *Ledger) NoGlobal(appID uint64, key string) { - delete(l.applications[basics.AppIndex(appID)].GlobalState, key) +func (l *Ledger) NoGlobal(appID basics.AppIndex, key string) { + delete(l.applications[appID].GlobalState, key) } // Rekey sets the authAddr for an address. @@ -996,9 +996,9 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy // SetKey creates a new key-value in {addr, aidx, global} storage func (l *Ledger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error { if global { - l.NewGlobal(uint64(aidx), key, value) + l.NewGlobal(aidx, key, value) } else { - l.NewLocal(addr, uint64(aidx), key, value) + l.NewLocal(addr, aidx, key, value) } return nil } @@ -1006,9 +1006,9 @@ func (l *Ledger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, // DelKey removes a key from {addr, aidx, global} storage func (l *Ledger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error { if global { - l.NoGlobal(uint64(aidx), key) + l.NoGlobal(aidx, key) } else { - l.NoLocal(addr, uint64(aidx), key) + l.NoLocal(addr, aidx, key) } return nil } diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go index df21245de1..c1210ee1ee 100644 --- a/data/transactions/logic/mocktracer/scenarios.go +++ b/data/transactions/logic/mocktracer/scenarios.go @@ -325,13 +325,13 @@ type TestScenario struct { Outcome TestScenarioOutcome Program string ExpectedError string - FailedAt []uint64 + FailedAt []int ExpectedEvents []Event ExpectedSimulationAD transactions.ApplyData ExpectedStateDelta ledgercore.StateDelta - AppBudgetAdded uint64 - AppBudgetConsumed uint64 - TxnAppBudgetConsumed []uint64 + AppBudgetAdded int + AppBudgetConsumed int + TxnAppBudgetConsumed []int } // TestScenarioGenerator is a function which instantiates a TestScenario @@ -405,7 +405,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 2100, AppBudgetConsumed: 35, - TxnAppBudgetConsumed: []uint64{0, 35}, + TxnAppBudgetConsumed: []int{0, 35}, } } @@ -451,7 +451,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: outcome, Program: program, ExpectedError: failureMessage, - FailedAt: []uint64{0}, + FailedAt: []int{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -467,7 +467,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 700, AppBudgetConsumed: 4, - TxnAppBudgetConsumed: []uint64{0, 4}, + TxnAppBudgetConsumed: []int{0, 4}, } } scenarios[beforeInnersName] = beforeInners @@ -496,7 +496,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: outcome, Program: program, ExpectedError: failureMessage, - 
FailedAt: []uint64{0, 0}, + FailedAt: []int{0, 0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -523,7 +523,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 1400, AppBudgetConsumed: 15, - TxnAppBudgetConsumed: []uint64{0, 15}, + TxnAppBudgetConsumed: []int{0, 15}, } } scenarios[firstInnerName] = firstInner @@ -551,7 +551,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: outcome, Program: program, ExpectedError: failureMessage, - FailedAt: []uint64{0}, + FailedAt: []int{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -581,7 +581,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 1400, AppBudgetConsumed: 19, - TxnAppBudgetConsumed: []uint64{0, 19}, + TxnAppBudgetConsumed: []int{0, 19}, } } scenarios[betweenInnersName] = betweenInners @@ -611,7 +611,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: ErrorOutcome, Program: program, ExpectedError: "overspend", - FailedAt: []uint64{0, 1}, + FailedAt: []int{0, 1}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -647,7 +647,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 2100, AppBudgetConsumed: 32, - TxnAppBudgetConsumed: []uint64{0, 32}, + TxnAppBudgetConsumed: []int{0, 32}, } } scenarios[secondInnerName] = secondInner @@ -677,7 +677,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: ErrorOutcome, Program: program, ExpectedError: "overspend", - FailedAt: []uint64{0, 2}, + FailedAt: []int{0, 2}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -715,7 +715,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 2100, AppBudgetConsumed: 32, - TxnAppBudgetConsumed: []uint64{0, 32}, + TxnAppBudgetConsumed: []int{0, 32}, } } scenarios[thirdInnerName] = thirdInner @@ -740,7 +740,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { Outcome: outcome, Program: program, ExpectedError: failureMessage, - FailedAt: []uint64{0}, + FailedAt: []int{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), @@ -781,7 +781,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { ExpectedStateDelta: expectedDelta, AppBudgetAdded: 2100, AppBudgetConsumed: 35, - TxnAppBudgetConsumed: []uint64{0, 35}, + TxnAppBudgetConsumed: []int{0, 35}, } } scenarios[afterInnersName] = afterInners diff --git a/data/transactions/logic/resources.go b/data/transactions/logic/resources.go index 95c693c85a..d093d97492 100644 --- a/data/transactions/logic/resources.go +++ b/data/transactions/logic/resources.go @@ -104,11 +104,11 @@ func (cx *EvalContext) allows(tx *transactions.Transaction, calleeVer uint64) er // if the caller is pre-sharing, it can't prepare transactions with // resources that are not available, so `tx` is surely legal. if cx.version < sharedResourcesVersion { - // this is important, not just an optimization, because a pre-sharing - // creation txn has access to the app and app account it is currently - // creating (and therefore can pass that access down), but cx.available - // doesn't track that properly until v9's protocol upgrade. See - // TestInnerAppCreateAndOptin for an example. 
+ // this is just an optimization, from the perspective of properly + // evaluating transactions in "normal" mode. However, it is an + // important short-circuit for simulation. Simulation does not + // understand how to handle missing cross-products in non-sharing + // program versions. return nil } switch tx.Type { @@ -177,11 +177,7 @@ func (cx *EvalContext) allowsHolding(addr basics.Address, ai basics.AssetIndex) return cx.availableAsset(ai) } } - // If the current txn is a creation, the new appID won't be in r.createdApps - // yet, but it should get the same special treatment. - if cx.txn.Txn.ApplicationID == 0 && cx.GetApplicationAddress(cx.appID) == addr { - return cx.availableAsset(ai) - } + if cx.UnnamedResources != nil { // Ensure that the account and asset are available before consulting cx.UnnamedResources.AllowsHolding. // This way cx.UnnamedResources.AllowsHolding only needs to make a decision about the asset holding @@ -201,9 +197,6 @@ func (cx *EvalContext) allowsLocals(addr basics.Address, ai basics.AppIndex) boo if _, ok := r.createdApps[ai]; ok { return cx.availableAccount(addr) } - if cx.txn.Txn.ApplicationID == 0 && cx.appID == ai { - return cx.availableAccount(addr) - } // All locals of created app accounts are available for created := range r.createdApps { @@ -211,9 +204,7 @@ func (cx *EvalContext) allowsLocals(addr basics.Address, ai basics.AppIndex) boo return cx.availableApp(ai) } } - if cx.txn.Txn.ApplicationID == 0 && cx.GetApplicationAddress(cx.appID) == addr { - return cx.availableApp(ai) - } + if cx.UnnamedResources != nil { // Ensure that the account and app are available before consulting cx.UnnamedResources.AllowsLocal. // This way cx.UnnamedResources.AllowsLocal only needs to make a decision about the app local diff --git a/data/transactions/logicsig.go b/data/transactions/logicsig.go index 165e38a277..a3883e55ba 100644 --- a/data/transactions/logicsig.go +++ b/data/transactions/logicsig.go @@ -38,13 +38,13 @@ type LogicSig struct { _struct struct{} `codec:",omitempty,omitemptyarray"` // Logic signed by Sig or Msig, OR hashed to be the Address of an account. 
- Logic []byte `codec:"l,allocbound=config.MaxLogicSigMaxSize"` + Logic []byte `codec:"l,allocbound=bounds.MaxLogicSigMaxSize"` Sig crypto.Signature `codec:"sig"` Msig crypto.MultisigSig `codec:"msig"` // Args are not signed, but checked by Logic - Args [][]byte `codec:"arg,allocbound=EvalMaxArgs,allocbound=MaxLogicSigArgSize,maxtotalbytes=config.MaxLogicSigMaxSize"` + Args [][]byte `codec:"arg,allocbound=EvalMaxArgs,allocbound=MaxLogicSigArgSize,maxtotalbytes=bounds.MaxLogicSigMaxSize"` } // Blank returns true if there is no content in this LogicSig diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go index fe1d52680e..9a800fd36d 100644 --- a/data/transactions/msgp_gen.go +++ b/data/transactions/msgp_gen.go @@ -7,7 +7,7 @@ import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" @@ -595,8 +595,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Name") return } - if zb0019 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxBytesKeyValueLen)) + if zb0019 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0019), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name) @@ -641,8 +641,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Name") return } - if zb0020 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxBytesKeyValueLen)) + if zb0020 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0020), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name) @@ -714,8 +714,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram") return } - if zb0023 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxAvailableAppProgramLen)) + if zb0023 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0023), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -732,8 +732,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram") return } - if zb0024 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxAvailableAppProgramLen)) + if zb0024 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0024), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -924,8 +924,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Name") return } - if zb0036 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0036), uint64(config.MaxBytesKeyValueLen)) + if zb0036 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0036), 
uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name) @@ -970,8 +970,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "Boxes", zb0004, "Name") return } - if zb0037 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0037), uint64(config.MaxBytesKeyValueLen)) + if zb0037 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0037), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name) @@ -1035,8 +1035,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "ApprovalProgram") return } - if zb0040 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0040), uint64(config.MaxAvailableAppProgramLen)) + if zb0040 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0040), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -1051,8 +1051,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "ClearStateProgram") return } - if zb0041 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0041), uint64(config.MaxAvailableAppProgramLen)) + if zb0041 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0041), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -1128,7 +1128,7 @@ func (z *ApplicationCallTxnFields) MsgIsZero() bool { func ApplicationCallTxnFieldsMaxSize() (s int) { s = 1 + 5 + basics.AppIndexMaxSize() + 5 + msgp.Uint64Size + 5 // Calculating size of slice: z.ApplicationArgs - s += msgp.ArrayHeaderSize + config.MaxAppTotalArgLen + 5 + s += msgp.ArrayHeaderSize + bounds.MaxAppTotalArgLen + 5 // Calculating size of slice: z.Accounts s += msgp.ArrayHeaderSize + ((encodedMaxAccounts) * (basics.AddressMaxSize())) s += 5 @@ -1140,7 +1140,7 @@ func ApplicationCallTxnFieldsMaxSize() (s int) { s += 5 // Calculating size of slice: z.ForeignAssets s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize())) - s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 5 + msgp.Uint64Size + s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 5 + msgp.Uint64Size return } @@ -2016,8 +2016,8 @@ func (z *BoxRef) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "struct-from-array", "Name") return } - if zb0003 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0003), uint64(config.MaxBytesKeyValueLen)) + if zb0003 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0003), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name) @@ -2062,8 +2062,8 @@ func (z *BoxRef) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "Name") return } - if zb0004 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0004), 
uint64(config.MaxBytesKeyValueLen)) + if zb0004 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name) @@ -2105,7 +2105,7 @@ func (z *BoxRef) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func BoxRefMaxSize() (s int) { - s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + config.MaxBytesKeyValueLen + s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + bounds.MaxBytesKeyValueLen return } @@ -2243,8 +2243,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "LocalDeltas") return } - if zb0008 > config.MaxEvalDeltaAccounts { - err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxEvalDeltaAccounts)) + if zb0008 > bounds.MaxEvalDeltaAccounts { + err = msgp.ErrOverflow(uint64(zb0008), uint64(bounds.MaxEvalDeltaAccounts)) err = msgp.WrapError(err, "struct-from-array", "LocalDeltas") return } @@ -2279,8 +2279,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "SharedAccts") return } - if zb0010 > config.MaxEvalDeltaAccounts { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxEvalDeltaAccounts)) + if zb0010 > bounds.MaxEvalDeltaAccounts { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxEvalDeltaAccounts)) err = msgp.WrapError(err, "struct-from-array", "SharedAccts") return } @@ -2308,8 +2308,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "Logs") return } - if zb0012 > config.MaxLogCalls { - err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxLogCalls)) + if zb0012 > bounds.MaxLogCalls { + err = msgp.ErrOverflow(uint64(zb0012), uint64(bounds.MaxLogCalls)) err = msgp.WrapError(err, "struct-from-array", "Logs") return } @@ -2337,8 +2337,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "InnerTxns") return } - if zb0014 > config.MaxInnerTransactionsPerDelta { - err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxInnerTransactionsPerDelta)) + if zb0014 > bounds.MaxInnerTransactionsPerDelta { + err = msgp.ErrOverflow(uint64(zb0014), uint64(bounds.MaxInnerTransactionsPerDelta)) err = msgp.WrapError(err, "struct-from-array", "InnerTxns") return } @@ -2394,8 +2394,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "LocalDeltas") return } - if zb0016 > config.MaxEvalDeltaAccounts { - err = msgp.ErrOverflow(uint64(zb0016), uint64(config.MaxEvalDeltaAccounts)) + if zb0016 > bounds.MaxEvalDeltaAccounts { + err = msgp.ErrOverflow(uint64(zb0016), uint64(bounds.MaxEvalDeltaAccounts)) err = msgp.WrapError(err, "LocalDeltas") return } @@ -2428,8 +2428,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "SharedAccts") return } - if zb0018 > config.MaxEvalDeltaAccounts { - err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxEvalDeltaAccounts)) + if zb0018 > bounds.MaxEvalDeltaAccounts { + err = msgp.ErrOverflow(uint64(zb0018), uint64(bounds.MaxEvalDeltaAccounts)) err = msgp.WrapError(err, "SharedAccts") return } @@ -2455,8 +2455,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "Logs") return } - if zb0020 > 
config.MaxLogCalls { - err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxLogCalls)) + if zb0020 > bounds.MaxLogCalls { + err = msgp.ErrOverflow(uint64(zb0020), uint64(bounds.MaxLogCalls)) err = msgp.WrapError(err, "Logs") return } @@ -2482,8 +2482,8 @@ func (z *EvalDelta) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "InnerTxns") return } - if zb0022 > config.MaxInnerTransactionsPerDelta { - err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxInnerTransactionsPerDelta)) + if zb0022 > bounds.MaxInnerTransactionsPerDelta { + err = msgp.ErrOverflow(uint64(zb0022), uint64(bounds.MaxInnerTransactionsPerDelta)) err = msgp.WrapError(err, "InnerTxns") return } @@ -2557,17 +2557,17 @@ func EvalDeltaMaxSize() (s int) { s = 1 + 3 + basics.StateDeltaMaxSize() + 3 s += msgp.MapHeaderSize // Adding size of map keys for z.LocalDeltas - s += config.MaxEvalDeltaAccounts * (msgp.Uint64Size) + s += bounds.MaxEvalDeltaAccounts * (msgp.Uint64Size) // Adding size of map values for z.LocalDeltas - s += config.MaxEvalDeltaAccounts * (basics.StateDeltaMaxSize()) + s += bounds.MaxEvalDeltaAccounts * (basics.StateDeltaMaxSize()) s += 3 // Calculating size of slice: z.SharedAccts - s += msgp.ArrayHeaderSize + ((config.MaxEvalDeltaAccounts) * (basics.AddressMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxEvalDeltaAccounts) * (basics.AddressMaxSize())) s += 3 // Calculating size of slice: z.Logs - s += msgp.ArrayHeaderSize + (config.MaxLogCalls * msgp.StringPrefixSize) + config.MaxEvalDeltaTotalLogSize + 4 + s += msgp.ArrayHeaderSize + (bounds.MaxLogCalls * msgp.StringPrefixSize) + bounds.MaxEvalDeltaTotalLogSize + 4 // Calculating size of slice: z.InnerTxns - s += msgp.ArrayHeaderSize + ((config.MaxInnerTransactionsPerDelta) * (SignedTxnWithADMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxInnerTransactionsPerDelta) * (SignedTxnWithADMaxSize())) return } @@ -2737,8 +2737,8 @@ func (z *Header) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "struct-from-array", "Note") return } - if zb0004 > config.MaxTxnNoteBytes { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxTxnNoteBytes)) + if zb0004 > bounds.MaxTxnNoteBytes { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxTxnNoteBytes)) return } (*z).Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Note) @@ -2755,8 +2755,8 @@ func (z *Header) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0005 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxGenesisIDLen)) + if zb0005 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxGenesisIDLen)) return } (*z).GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -2851,8 +2851,8 @@ func (z *Header) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "Note") return } - if zb0006 > config.MaxTxnNoteBytes { - err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxTxnNoteBytes)) + if zb0006 > bounds.MaxTxnNoteBytes { + err = msgp.ErrOverflow(uint64(zb0006), uint64(bounds.MaxTxnNoteBytes)) return } (*z).Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Note) @@ -2867,8 +2867,8 @@ func (z *Header) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [] err = msgp.WrapError(err, "GenesisID") return } - if zb0007 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen)) + if zb0007 > 
bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxGenesisIDLen)) return } (*z).GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -2934,7 +2934,7 @@ func (z *Header) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func HeaderMaxSize() (s int) { - s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3 + s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + bounds.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3 // Calculating size of array: z.Lease s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) s += 6 + basics.AddressMaxSize() @@ -3498,8 +3498,8 @@ func (z *LogicSig) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "struct-from-array", "Logic") return } - if zb0004 > config.MaxLogicSigMaxSize { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxLogicSigMaxSize)) + if zb0004 > bounds.MaxLogicSigMaxSize { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxLogicSigMaxSize)) return } (*z).Logic, bts, err = msgp.ReadBytesBytes(bts, (*z).Logic) @@ -3593,8 +3593,8 @@ func (z *LogicSig) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o err = msgp.WrapError(err, "Logic") return } - if zb0008 > config.MaxLogicSigMaxSize { - err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxLogicSigMaxSize)) + if zb0008 > bounds.MaxLogicSigMaxSize { + err = msgp.ErrOverflow(uint64(zb0008), uint64(bounds.MaxLogicSigMaxSize)) return } (*z).Logic, bts, err = msgp.ReadBytesBytes(bts, (*z).Logic) @@ -3688,9 +3688,9 @@ func (z *LogicSig) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func LogicSigMaxSize() (s int) { - s = 1 + 2 + msgp.BytesPrefixSize + config.MaxLogicSigMaxSize + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 4 + s = 1 + 2 + msgp.BytesPrefixSize + bounds.MaxLogicSigMaxSize + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 4 // Calculating size of slice: z.Args - s += msgp.ArrayHeaderSize + config.MaxLogicSigMaxSize + s += msgp.ArrayHeaderSize + bounds.MaxLogicSigMaxSize return } @@ -5801,8 +5801,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "Note") return } - if zb0009 > config.MaxTxnNoteBytes { - err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxTxnNoteBytes)) + if zb0009 > bounds.MaxTxnNoteBytes { + err = msgp.ErrOverflow(uint64(zb0009), uint64(bounds.MaxTxnNoteBytes)) return } (*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note) @@ -5819,8 +5819,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "GenesisID") return } - if zb0010 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxGenesisIDLen)) + if zb0010 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0010), uint64(bounds.MaxGenesisIDLen)) return } (*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -6175,8 +6175,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, 
st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Name") return } - if zb0022 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxBytesKeyValueLen)) + if zb0022 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0022), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name) @@ -6221,8 +6221,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Name") return } - if zb0023 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxBytesKeyValueLen)) + if zb0023 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0023), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name) @@ -6294,8 +6294,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram") return } - if zb0026 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0026), uint64(config.MaxAvailableAppProgramLen)) + if zb0026 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0026), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram) @@ -6312,8 +6312,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram") return } - if zb0027 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0027), uint64(config.MaxAvailableAppProgramLen)) + if zb0027 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0027), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram) @@ -6441,8 +6441,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Note") return } - if zb0028 > config.MaxTxnNoteBytes { - err = msgp.ErrOverflow(uint64(zb0028), uint64(config.MaxTxnNoteBytes)) + if zb0028 > bounds.MaxTxnNoteBytes { + err = msgp.ErrOverflow(uint64(zb0028), uint64(bounds.MaxTxnNoteBytes)) return } (*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note) @@ -6457,8 +6457,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "GenesisID") return } - if zb0029 > config.MaxGenesisIDLen { - err = msgp.ErrOverflow(uint64(zb0029), uint64(config.MaxGenesisIDLen)) + if zb0029 > bounds.MaxGenesisIDLen { + err = msgp.ErrOverflow(uint64(zb0029), uint64(bounds.MaxGenesisIDLen)) return } (*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts) @@ -6753,8 +6753,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Name") return } - if zb0041 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0041), uint64(config.MaxBytesKeyValueLen)) + if zb0041 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0041), 
uint64(bounds.MaxBytesKeyValueLen)) return } (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name) @@ -6799,8 +6799,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Boxes", zb0005, "Name") return } - if zb0042 > config.MaxBytesKeyValueLen { - err = msgp.ErrOverflow(uint64(zb0042), uint64(config.MaxBytesKeyValueLen)) + if zb0042 > bounds.MaxBytesKeyValueLen { + err = msgp.ErrOverflow(uint64(zb0042), uint64(bounds.MaxBytesKeyValueLen)) return } (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name) @@ -6864,8 +6864,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "ApprovalProgram") return } - if zb0045 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0045), uint64(config.MaxAvailableAppProgramLen)) + if zb0045 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0045), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram) @@ -6880,8 +6880,8 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "ClearStateProgram") return } - if zb0046 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0046), uint64(config.MaxAvailableAppProgramLen)) + if zb0046 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0046), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram) @@ -6995,12 +6995,12 @@ func (z *Transaction) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func TransactionMaxSize() (s int) { - s = 3 + 5 + protocol.TxTypeMaxSize() + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3 + s = 3 + 5 + protocol.TxTypeMaxSize() + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + bounds.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + bounds.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3 // Calculating size of array: z.Header.Lease s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) s += 6 + basics.AddressMaxSize() + 8 + crypto.OneTimeSignatureVerifierMaxSize() + 7 + crypto.VRFVerifierMaxSize() + 8 + merklesignature.CommitmentMaxSize() + 8 + basics.RoundMaxSize() + 8 + basics.RoundMaxSize() + 7 + msgp.Uint64Size + 8 + msgp.BoolSize + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 6 + basics.AddressMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + basics.AssetParamsMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + msgp.Uint64Size + 5 + basics.AddressMaxSize() + 5 + basics.AddressMaxSize() + 7 + basics.AddressMaxSize() + 5 + basics.AddressMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + msgp.BoolSize + 5 + basics.AppIndexMaxSize() + 5 + msgp.Uint64Size + 5 // Calculating size of slice: z.ApplicationCallTxnFields.ApplicationArgs - s += 
msgp.ArrayHeaderSize + config.MaxAppTotalArgLen + 5 + s += msgp.ArrayHeaderSize + bounds.MaxAppTotalArgLen + 5 // Calculating size of slice: z.ApplicationCallTxnFields.Accounts s += msgp.ArrayHeaderSize + ((encodedMaxAccounts) * (basics.AddressMaxSize())) s += 5 @@ -7012,7 +7012,7 @@ func TransactionMaxSize() (s int) { s += 5 // Calculating size of slice: z.ApplicationCallTxnFields.ForeignAssets s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize())) - s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 5 + msgp.Uint64Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + 3 + s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 5 + msgp.Uint64Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + 3 s += HeartbeatTxnFieldsMaxSize() return } @@ -7078,8 +7078,8 @@ func (z *TxGroup) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ err = msgp.WrapError(err, "struct-from-array", "TxGroupHashes") return } - if zb0004 > config.MaxTxGroupSize { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxTxGroupSize)) + if zb0004 > bounds.MaxTxGroupSize { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxTxGroupSize)) err = msgp.WrapError(err, "struct-from-array", "TxGroupHashes") return } @@ -7129,8 +7129,8 @@ func (z *TxGroup) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o [ err = msgp.WrapError(err, "TxGroupHashes") return } - if zb0006 > config.MaxTxGroupSize { - err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxTxGroupSize)) + if zb0006 > bounds.MaxTxGroupSize { + err = msgp.ErrOverflow(uint64(zb0006), uint64(bounds.MaxTxGroupSize)) err = msgp.WrapError(err, "TxGroupHashes") return } @@ -7187,7 +7187,7 @@ func (z *TxGroup) MsgIsZero() bool { func TxGroupMaxSize() (s int) { s = 1 + 7 // Calculating size of slice: z.TxGroupHashes - s += msgp.ArrayHeaderSize + ((config.MaxTxGroupSize) * (crypto.DigestMaxSize())) + s += msgp.ArrayHeaderSize + ((bounds.MaxTxGroupSize) * (crypto.DigestMaxSize())) return } diff --git a/data/transactions/payset.go b/data/transactions/payset.go index beff43ae6e..8519f361f9 100644 --- a/data/transactions/payset.go +++ b/data/transactions/payset.go @@ -21,11 +21,10 @@ import ( "github.com/algorand/go-algorand/protocol" ) -type ( - // A Payset represents a common, unforgeable, consistent, ordered set of SignedTxn objects. - //msgp:allocbound Payset 100000 - Payset []SignedTxnInBlock -) +// Payset represents a common, unforgeable, consistent, ordered set of SignedTxn objects. +// +//msgp:allocbound Payset 100000 +type Payset []SignedTxnInBlock // CommitFlat returns a commitment to the Payset, as a flat array. func (payset Payset) CommitFlat() crypto.Digest { diff --git a/data/transactions/teal.go b/data/transactions/teal.go index b5c3e9034b..075466ba79 100644 --- a/data/transactions/teal.go +++ b/data/transactions/teal.go @@ -35,18 +35,18 @@ type EvalDelta struct { // When decoding EvalDeltas, the integer key represents an offset into // [txn.Sender, txn.Accounts[0], txn.Accounts[1], ..., SharedAccts[0], SharedAccts[1], ...] 
- LocalDeltas map[uint64]basics.StateDelta `codec:"ld,allocbound=config.MaxEvalDeltaAccounts"` + LocalDeltas map[uint64]basics.StateDelta `codec:"ld,allocbound=bounds.MaxEvalDeltaAccounts"` // If a program modifies the local of an account that is not the Sender, or // in txn.Accounts, it must be recorded here, so that the key in LocalDeltas // can refer to it. - SharedAccts []basics.Address `codec:"sa,allocbound=config.MaxEvalDeltaAccounts"` + SharedAccts []basics.Address `codec:"sa,allocbound=bounds.MaxEvalDeltaAccounts"` - // The total allocbound calculation here accounts for the worse possible case of having config.MaxLogCalls individual log entries - // with the legnth of all of them summing up to config.MaxEvalDeltaTotalLogSize which is the limit for the sum of individual log lengths - Logs []string `codec:"lg,allocbound=config.MaxLogCalls,maxtotalbytes=(config.MaxLogCalls*msgp.StringPrefixSize) + config.MaxEvalDeltaTotalLogSize"` + // The total allocbound calculation here accounts for the worse possible case of having bounds.MaxLogCalls individual log entries + // with the length of all of them summing up to bounds.MaxEvalDeltaTotalLogSize which is the limit for the sum of individual log lengths + Logs []string `codec:"lg,allocbound=bounds.MaxLogCalls,maxtotalbytes=(bounds.MaxLogCalls*msgp.StringPrefixSize) + bounds.MaxEvalDeltaTotalLogSize"` - InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=config.MaxInnerTransactionsPerDelta"` + InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=bounds.MaxInnerTransactionsPerDelta"` } // Equal compares two EvalDeltas and returns whether or not they are diff --git a/data/transactions/teal_test.go b/data/transactions/teal_test.go index f7c1adedf5..56edec8d13 100644 --- a/data/transactions/teal_test.go +++ b/data/transactions/teal_test.go @@ -202,7 +202,7 @@ func TestUnchangedAllocBounds(t *testing.T) { partitiontest.PartitionTest(t) delta := &EvalDelta{} - max := 256 // Hardcodes config.MaxEvalDeltaAccounts + max := 256 // Hardcodes bounds.MaxEvalDeltaAccounts for i := 0; i < max; i++ { delta.InnerTxns = append(delta.InnerTxns, SignedTxnWithAD{}) msg := delta.MarshalMsg(nil) @@ -215,7 +215,7 @@ func TestUnchangedAllocBounds(t *testing.T) { require.Error(t, err) delta = &EvalDelta{} - max = 2048 // Hardcodes config.MaxLogCalls, currently MaxAppProgramLen + max = 2048 // Hardcodes bounds.MaxLogCalls, currently MaxAppProgramLen for i := 0; i < max; i++ { delta.Logs = append(delta.Logs, "junk") msg := delta.MarshalMsg(nil) @@ -228,7 +228,7 @@ func TestUnchangedAllocBounds(t *testing.T) { require.Error(t, err) delta = &EvalDelta{} - max = 256 // Hardcodes config.MaxInnerTransactionsPerDelta + max = 256 // Hardcodes bounds.MaxInnerTransactionsPerDelta for i := 0; i < max; i++ { delta.InnerTxns = append(delta.InnerTxns, SignedTxnWithAD{}) msg := delta.MarshalMsg(nil) @@ -244,7 +244,7 @@ func TestUnchangedAllocBounds(t *testing.T) { // MaxAppTxnAccounts (4) + 1, since the key must be an index in the static // array of touchable accounts. delta = &EvalDelta{LocalDeltas: make(map[uint64]basics.StateDelta)} - max = 2048 // Hardcodes config.MaxEvalDeltaAccounts + max = 2048 // Hardcodes bounds.MaxEvalDeltaAccounts for i := 0; i < max; i++ { delta.LocalDeltas[uint64(i)] = basics.StateDelta{} msg := delta.MarshalMsg(nil) @@ -260,7 +260,7 @@ func TestUnchangedAllocBounds(t *testing.T) { // globals, but I don't know what happens if you set and delete 65 (or way // more) keys in a single transaction. 
delta = &EvalDelta{GlobalDelta: make(basics.StateDelta)} - max = 2048 // Hardcodes config.MaxStateDeltaKeys + max = 2048 // Hardcodes bounds.MaxStateDeltaKeys for i := 0; i < max; i++ { delta.GlobalDelta[fmt.Sprintf("%d", i)] = basics.ValueDelta{} msg := delta.MarshalMsg(nil) diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index 9223b3640a..70b2068f3c 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -58,8 +58,8 @@ type Header struct { Fee basics.MicroAlgos `codec:"fee"` FirstValid basics.Round `codec:"fv"` LastValid basics.Round `codec:"lv"` - Note []byte `codec:"note,allocbound=config.MaxTxnNoteBytes"` // Uniqueness or app-level data about txn - GenesisID string `codec:"gen,allocbound=config.MaxGenesisIDLen"` + Note []byte `codec:"note,allocbound=bounds.MaxTxnNoteBytes"` // Uniqueness or app-level data about txn + GenesisID string `codec:"gen,allocbound=bounds.MaxGenesisIDLen"` GenesisHash crypto.Digest `codec:"gh"` // Group specifies that this transaction is part of a @@ -169,7 +169,7 @@ type TxGroup struct { // valid. Each hash in the list is a hash of a transaction with // the `Group` field omitted. // These are all `Txid` which is equivalent to `crypto.Digest` - TxGroupHashes []crypto.Digest `codec:"txlist,allocbound=config.MaxTxGroupSize"` + TxGroupHashes []crypto.Digest `codec:"txlist,allocbound=bounds.MaxTxGroupSize"` } // ToBeHashed implements the crypto.Hashable interface. diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 089875cc60..ec4c112810 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -489,7 +489,7 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea return tasksCtx.Err() case worksets <- struct{}{}: if len(nextWorkset) > 0 { - err := verificationPool.EnqueueBacklog(ctx, func(arg interface{}) interface{} { + err1 := verificationPool.EnqueueBacklog(ctx, func(arg interface{}) interface{} { var grpErr error // check if we've canceled the request while this was in the queue. 
if tasksCtx.Err() != nil { @@ -514,8 +514,8 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea cache.AddPayset(txnGroups, groupCtxs) return nil }, nextWorkset, worksDoneCh) - if err != nil { - return err + if err1 != nil { + return err1 } processing++ nextWorkset = nil diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index 9206b0a46c..4c5b5bc86d 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -932,9 +932,9 @@ func TestTxnHeartbeat(t *testing.T) { verifyGroup(t, txnGroups, &blkHdr, breakHbProofFunc, restoreHbProofFunc, crypto.ErrBatchHasFailedSigs.Error()) } -// TestTxnGroupCacheUpdateFailLogic test makes sure that a payment transaction contains a logic (and no signature) +// TestTxnGroupCacheUpdateRejLogic test makes sure that a payment transaction contains a logic (and no signature) // is valid (and added to the cache) only if logic passes -func TestTxnGroupCacheUpdateFailLogic(t *testing.T) { +func TestTxnGroupCacheUpdateRejLogic(t *testing.T) { partitiontest.PartitionTest(t) _, signedTxn, _, _ := generateTestObjects(100, 20, 0, 50) diff --git a/data/txDupCache.go b/data/txDupCache.go index b9981a4574..130fe3c5b5 100644 --- a/data/txDupCache.go +++ b/data/txDupCache.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-deadlock" @@ -263,6 +263,6 @@ var saltedPool = sync.Pool{ // 2 x MaxAvailableAppProgramLen that covers // max approve + clear state programs with max args for app create txn. // other transactions are much smaller. - return make([]byte, 2*config.MaxAvailableAppProgramLen) + return make([]byte, 2*bounds.MaxAvailableAppProgramLen) }, } diff --git a/data/txHandler.go b/data/txHandler.go index 59194210c5..c3108f05c7 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -26,6 +26,7 @@ import ( "time" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/pools" @@ -781,7 +782,7 @@ func decodeMsg(data []byte) (unverifiedTxGroup []transactions.SignedTxn, consume } consumed = dec.Consumed() ntx++ - if ntx >= config.MaxTxGroupSize { + if ntx >= bounds.MaxTxGroupSize { // max ever possible group size reached, done reading input. 
if dec.Remaining() > 0 { // if something else left in the buffer - this is an error, drop @@ -797,7 +798,7 @@ func decodeMsg(data []byte) (unverifiedTxGroup []transactions.SignedTxn, consume unverifiedTxGroup = unverifiedTxGroup[:ntx] - if ntx == config.MaxTxGroupSize { + if ntx == bounds.MaxTxGroupSize { transactionMessageTxGroupFull.Inc(nil) } diff --git a/data/txHandler_test.go b/data/txHandler_test.go index 28ad141020..9a07218c6e 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -39,6 +39,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -619,11 +620,11 @@ func TestTxHandlerProcessIncomingGroup(t *testing.T) { action network.ForwardingPolicy } var checks = []T{} - for i := 1; i <= config.MaxTxGroupSize; i++ { + for i := 1; i <= bounds.MaxTxGroupSize; i++ { checks = append(checks, T{i, i, network.Ignore}) } for i := 1; i < 10; i++ { - checks = append(checks, T{config.MaxTxGroupSize + i, 0, network.Disconnect}) + checks = append(checks, T{bounds.MaxTxGroupSize + i, 0, network.Disconnect}) } for _, check := range checks { @@ -728,8 +729,8 @@ func TestTxHandlerProcessIncomingCensoring(t *testing.T) { t.Run("group", func(t *testing.T) { handler := makeTestTxHandlerOrphanedWithContext(context.Background(), txBacklogSize, txBacklogSize, txHandlerConfig{true, true}, 0) - num := rand.Intn(config.MaxTxGroupSize-1) + 2 // 2..config.MaxTxGroupSize - require.LessOrEqual(t, num, config.MaxTxGroupSize) + num := rand.Intn(bounds.MaxTxGroupSize-1) + 2 // 2..bounds.MaxTxGroupSize + require.LessOrEqual(t, num, bounds.MaxTxGroupSize) stxns, blob := makeRandomTransactions(num) action := handler.processIncomingTxn(network.IncomingMessage{Data: blob}) require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action) @@ -2187,7 +2188,7 @@ func TestTxHandlerRememberReportErrorsWithTxPool(t *testing.T) { //nolint:parall const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - cfg.TxPoolSize = config.MaxTxGroupSize + 1 + cfg.TxPoolSize = bounds.MaxTxGroupSize + 1 ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -2247,7 +2248,7 @@ func TestTxHandlerRememberReportErrorsWithTxPool(t *testing.T) { //nolint:parall // trigger group too large error wi.unverifiedTxGroup = []transactions.SignedTxn{txn1.Sign(secrets[0])} - for i := 0; i < config.MaxTxGroupSize; i++ { + for i := 0; i < bounds.MaxTxGroupSize; i++ { txn := txn1 crypto.RandBytes(txn.Note[:]) wi.unverifiedTxGroup = append(wi.unverifiedTxGroup, txn.Sign(secrets[0])) diff --git a/docker/Dockerfile b/docker/Dockerfile index db01e6fad8..6a74444bc7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG GOLANG_VERSION ENV DEBIAN_FRONTEND noninteractive diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile index 5d096cbabb..f42825451a 100644 --- a/docker/build/Dockerfile +++ b/docker/build/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:24.04 ARG GOLANG_VERSION RUN apt-get update && apt-get install -y git wget autoconf build-essential shellcheck diff --git a/docker/build/Dockerfile-deploy b/docker/build/Dockerfile-deploy index 
ac1fb5b4e0..26a917b945 100644 --- a/docker/build/Dockerfile-deploy +++ b/docker/build/Dockerfile-deploy @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 ubuntu:20.04 +FROM --platform=linux/amd64 ubuntu:24.04 ARG GOLANG_VERSION RUN apt-get update && apt-get install -y git wget autoconf jq bsdmainutils shellcheck make python3 libtool g++ diff --git a/docker/build/aptly.Dockerfile b/docker/build/aptly.Dockerfile index 24982e7412..a5ce3817ca 100644 --- a/docker/build/aptly.Dockerfile +++ b/docker/build/aptly.Dockerfile @@ -1,8 +1,12 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG ARCH=amd64 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install aptly awscli binutils build-essential curl gnupg2 -y +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install aptly binutils build-essential curl gnupg2 -y +RUN apt-get install -y curl unzip +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +RUN unzip awscliv2.zip +RUN ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update WORKDIR /root COPY .aptly.conf . diff --git a/docker/build/cicd.centos9.Dockerfile b/docker/build/cicd.centos10.Dockerfile similarity index 91% rename from docker/build/cicd.centos9.Dockerfile rename to docker/build/cicd.centos10.Dockerfile index e0d53e467e..44f820e48a 100644 --- a/docker/build/cicd.centos9.Dockerfile +++ b/docker/build/cicd.centos10.Dockerfile @@ -1,9 +1,9 @@ ARG ARCH="amd64" -FROM quay.io/centos/centos:stream9 +FROM quay.io/centos/centos:stream10 ARG GOLANG_VERSION ARG ARCH="amd64" -RUN dnf install -y epel-release epel-next-release && dnf config-manager --set-enabled crb && \ +RUN dnf install -y epel-release && dnf config-manager --set-enabled crb && \ dnf update -y && \ dnf install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel expect jq \ libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which \ diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile index 0b9fce77d4..8943c6545f 100644 --- a/docker/build/cicd.ubuntu.Dockerfile +++ b/docker/build/cicd.ubuntu.Dockerfile @@ -1,26 +1,29 @@ ARG ARCH="amd64" -FROM ${ARCH}/ubuntu:20.04 +FROM ${ARCH}/ubuntu:24.04 ARG GOLANG_VERSION ARG ARCH="amd64" ARG GOARCH="amd64" -ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update && apt-get install -y build-essential git wget autoconf jq bsdmainutils shellcheck awscli libtool +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get -y update +RUN apt-get install -y build-essential git wget autoconf jq bsdmainutils shellcheck libtool +RUN apt-get install -y curl unzip +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +RUN unzip awscliv2.zip +RUN ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update WORKDIR /root RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz \ && tar -xvf go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz && \ mv go /usr/local +RUN mkdir -p /app/go ENV GOROOT=/usr/local/go \ - GOPATH=$HOME/go \ + GOPATH=/app/go \ ARCH_TYPE=${ARCH} -RUN mkdir -p $GOPATH/src/github.com/algorand -COPY . $GOPATH/src/github.com/algorand/go-algorand +WORKDIR /app +COPY . 
/app ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct -WORKDIR $GOPATH/src/github.com/algorand/go-algorand RUN git config --global --add safe.directory '*' RUN make clean -RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ - mkdir -p $GOPATH/src/github.com/algorand/go-algorand RUN echo "vm.max_map_count = 262144" >> /etc/sysctl.conf -CMD ["/bin/bash"] +CMD ["/bin/bash"] \ No newline at end of file diff --git a/docker/build/docker.ubuntu.Dockerfile b/docker/build/docker.ubuntu.Dockerfile index e82dba3d71..12b0a01bca 100644 --- a/docker/build/docker.ubuntu.Dockerfile +++ b/docker/build/docker.ubuntu.Dockerfile @@ -1,6 +1,6 @@ ARG ARCH="amd64" -FROM ${ARCH}/ubuntu:20.04 +FROM ${ARCH}/ubuntu:24.04 ARG GOLANG_VERSION ARG ARCH="amd64" RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install curl python python3.7 python3-pip build-essential apt-transport-https ca-certificates software-properties-common -y && \ diff --git a/docker/build/releases-page.Dockerfile b/docker/build/releases-page.Dockerfile index c96f332d58..11743eadfd 100644 --- a/docker/build/releases-page.Dockerfile +++ b/docker/build/releases-page.Dockerfile @@ -1,8 +1,11 @@ -FROM ubuntu:20.04 +FROM ubuntu:24.04 ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install git python3 python3-pip -y && \ - pip3 install awscli boto3 +RUN apt-get update && apt-get install git python3 python3-pip python3-boto3 -y +RUN apt-get install -y curl unzip +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +RUN unzip awscliv2.zip +RUN ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update WORKDIR /root diff --git a/docker/releases/build_releases.sh b/docker/releases/build_releases.sh index 11376eba48..e9c40b15f6 100755 --- a/docker/releases/build_releases.sh +++ b/docker/releases/build_releases.sh @@ -76,7 +76,7 @@ case $NETWORK in esac IFS='' read -r -d '' DOCKERFILE < d.VoteLastValid { // Online expired: insert or overwrite the old data in expiredAccounts. - oadata := d.OnlineAccountData(rewardsParams, rewardsLevel) + oadata := d.OnlineAccountData(rewardsParams.RewardUnit, rewardsLevel) expiredAccounts[addr] = &oadata } else { // addr went offline not expired, so do not report as an expired ONLINE account. 
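A minimal sketch of the call-site pattern that the ledger hunks here and below converge on, assuming only the signatures visible in this diff: rewards-aware account helpers now take the protocol's RewardUnit value rather than the whole ConsensusParams struct.

// Illustrative sketch only; mirrors the +/- lines in acctonline.go and acctupdates.go, not an exact excerpt.
proto := config.Consensus[protocol.ConsensusCurrentVersion]

// before: oadata := d.OnlineAccountData(rewardsParams, rewardsLevel)
oadata := d.OnlineAccountData(proto.RewardUnit, rewardsLevel)

// before: data = data.WithUpdatedRewards(rewardsProto, rewardsLevel)
data = data.WithUpdatedRewards(proto.RewardUnit, rewardsLevel)
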
diff --git a/ledger/acctonline_expired_test.go b/ledger/acctonline_expired_test.go index c43f5fabcf..3e46c41ecb 100644 --- a/ledger/acctonline_expired_test.go +++ b/ledger/acctonline_expired_test.go @@ -657,27 +657,28 @@ func BenchmarkExpiredOnlineCirculation(b *testing.B) { return addr } - var blockCounter, acctCounter uint64 + var blockCounter basics.Round + var acctCounter uint64 for i := 0; i < totalAccounts/maxKeyregPerBlock; i++ { blockCounter++ for j := 0; j < maxKeyregPerBlock; j++ { acctCounter++ // go online for a random number of rounds, from 400 to 1600 - validFor := 400 + uint64(rand.Intn(1200)) - m.goOnline(addrFromUint64(acctCounter), basics.Round(blockCounter), basics.Round(blockCounter+validFor)) + validFor := 400 + basics.Round(rand.Intn(1200)) + m.goOnline(addrFromUint64(acctCounter), blockCounter, blockCounter+validFor) } b.Log("built block", blockCounter, "accts", acctCounter) m.nextRound() } // then advance ~1K rounds to exercise the exercise accounts going offline - m.advanceToRound(basics.Round(blockCounter + 1000)) + m.advanceToRound(blockCounter + 1000) b.Log("advanced to round", m.currentRound()) b.ResetTimer() - for i := uint64(0); i < uint64(b.N); i++ { + for i := range basics.Round(b.N) { // query expired circulation across the available range (last 320 rounds, from ~680 to ~1000) startRnd := m.currentRound() - 320 - offset := basics.Round(i % 320) + offset := i % 320 _, err := m.dl.validator.expiredOnlineCirculation(startRnd+offset, startRnd+offset+320) require.NoError(b, err) //total, err := m.dl.validator.OnlineTotalStake(startRnd + offset) diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index 0335a20f6f..d45f7da550 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -1392,7 +1392,7 @@ func compareTopAccounts(a *require.Assertions, testingResult []*ledgercore.Onlin Address: expectedAccountsBalances[i].Addr, MicroAlgos: expectedAccountsBalances[i].MicroAlgos, RewardsBase: 0, - NormalizedOnlineBalance: expectedAccountsBalances[i].NormalizedOnlineBalance(config.Consensus[protocol.ConsensusCurrentVersion]), + NormalizedOnlineBalance: expectedAccountsBalances[i].AccountData.NormalizedOnlineBalance(config.Consensus[protocol.ConsensusCurrentVersion].RewardUnit), VoteFirstValid: expectedAccountsBalances[i].VoteFirstValid, VoteLastValid: expectedAccountsBalances[i].VoteLastValid}) } @@ -2158,7 +2158,7 @@ func TestAcctOnline_ExpiredOnlineCirculation(t *testing.T) { if err != nil { return err } - expiredAccts, err = reader.ExpiredOnlineAccountsForRound(rnd-1, targetVoteRnd, params, 0) + expiredAccts, err = reader.ExpiredOnlineAccountsForRound(rnd-1, targetVoteRnd, params.RewardUnit, 0) if err != nil { return err } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index ca9b0c28e3..6c6c428ddf 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -989,7 +989,7 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account if err == nil { ledgercore.AssignAccountData(&data, ad) withoutRewards = data.MicroAlgos // record balance before updating rewards - data = data.WithUpdatedRewards(rewardsProto, rewardsLevel) + data = data.WithUpdatedRewards(rewardsProto.RewardUnit, rewardsLevel) } }() withRewards = false diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index e72dea9444..892cf43766 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -87,7 +87,7 @@ func accumulateTotals(t testing.TB, consensusVersion protocol.ConsensusVersion, 
totals.RewardsLevel = rewardLevel for _, ar := range accts { for _, data := range ar { - totals.AddAccount(proto, data, &ot) + totals.AddAccount(proto.RewardUnit, data, &ot) } } require.False(t, ot.Overflowed) @@ -109,7 +109,7 @@ func setupAccts(niter int) []map[basics.Address]basics.AccountData { return accts } -func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, l logging.Logger) *mockLedgerForTracker { +func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBlocksCount basics.Round, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, l logging.Logger) *mockLedgerForTracker { dbs, fileName := sqlitedriver.OpenForTesting(t, inMemory) blocks := randomInitChain(consensusVersion, initialBlocksCount) @@ -145,7 +145,7 @@ func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBloc } -func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData) *mockLedgerForTracker { +func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount basics.Round, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData) *mockLedgerForTracker { dblogger := logging.TestingLog(t) dblogger.SetLevel(logging.Info) @@ -356,7 +356,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base var validThrough basics.Round _, validThrough, err = au.LookupWithoutRewards(latest+1, ledgertesting.RandomAddress()) require.Error(t, err) - require.Equal(t, basics.Round(0), validThrough) + require.Zero(t, validThrough) if base > 0 && base >= basics.Round(ao.maxBalLookback()) { rnd := base - basics.Round(ao.maxBalLookback()) @@ -365,7 +365,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base _, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress()) require.Error(t, err) - require.Equal(t, basics.Round(0), validThrough) + require.Zero(t, validThrough) } roundsRanges := []struct { @@ -548,7 +548,7 @@ func testAcctUpdates(t *testing.T, conf config.Local) { accts := setupAccts(20) rewardsLevels := []uint64{0} - initialBlocksCount := int(lookback) + initialBlocksCount := basics.Round(lookback) ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts) defer ml.Close() @@ -557,12 +557,12 @@ func testAcctUpdates(t *testing.T, conf config.Local) { // cover 10 genesis blocks rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { + for i := 1; i < int(initialBlocksCount); i++ { accts = append(accts, accts[0]) rewardsLevels = append(rewardsLevels, rewardLevel) } - checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount-1), accts, rewardsLevels, proto) + checkAcctUpdates(t, au, ao, 0, initialBlocksCount-1, accts, rewardsLevels, proto) // lastCreatableID stores asset or app max used index to get rid of conflicts lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) @@ -672,16 +672,16 @@ func BenchmarkBalancesChanges(b *testing.B) { } protocolVersion := protocol.ConsensusCurrentVersion - initialRounds := uint64(1) + const initialRounds basics.Round = 1 accountsCount := 5000 accts := setupAccts(accountsCount) rewardsLevels := []uint64{0} - ml := makeMockLedgerForTracker(b, true, int(initialRounds), protocolVersion, accts) 
+ ml := makeMockLedgerForTracker(b, true, initialRounds, protocolVersion, accts) defer ml.Close() conf := config.GetDefaultLocal() - maxAcctLookback := conf.MaxAcctLookback + maxAcctLookback := basics.Round(conf.MaxAcctLookback) au, _ := newAcctUpdates(b, ml, conf) // accountUpdates and onlineAccounts are closed via: ml.Close() -> ml.trackers.close() @@ -692,12 +692,12 @@ func BenchmarkBalancesChanges(b *testing.B) { rewardsLevels = append(rewardsLevels, rewardLevel) } - for i := basics.Round(initialRounds); i < basics.Round(maxAcctLookback+uint64(b.N)); i++ { + for i := initialRounds; i < maxAcctLookback+basics.Round(b.N); i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta accountChanges := 0 - if i <= basics.Round(initialRounds)+basics.Round(b.N) { - accountChanges = accountsCount - 2 - int(basics.Round(maxAcctLookback+uint64(b.N))+i) + if i <= initialRounds+basics.Round(b.N) { + accountChanges = accountsCount - 2 - int(maxAcctLookback+basics.Round(b.N)+i) } updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) @@ -733,10 +733,10 @@ func BenchmarkBalancesChanges(b *testing.B) { ml.trackers.waitAccountsWriting() b.ResetTimer() startTime := time.Now() - for i := maxAcctLookback + initialRounds; i < maxAcctLookback+uint64(b.N); i++ { + for i := maxAcctLookback + initialRounds; i < maxAcctLookback+basics.Round(b.N); i++ { // Clear the timer to ensure a flush ml.trackers.lastFlushTime = time.Time{} - ml.trackers.committedUpTo(basics.Round(i)) + ml.trackers.committedUpTo(i) } ml.trackers.waitAccountsWriting() deltaTime := time.Since(startTime) @@ -744,7 +744,7 @@ func BenchmarkBalancesChanges(b *testing.B) { return } // we want to fake the N to reflect the time it took us, if we were to wait an entire second. - singleIterationTime := deltaTime / time.Duration(uint64(b.N)-initialRounds) + singleIterationTime := deltaTime / time.Duration(uint64(basics.Round(b.N)-initialRounds)) b.N = int(time.Second / singleIterationTime) // and now, wait for the reminder of the second. time.Sleep(time.Second - deltaTime) @@ -877,7 +877,7 @@ func testAcctUpdatesUpdatesCorrectness(t *testing.T, cfg config.Local) { // we might get an error like "round 2 before dbRound 5", which is the success case, so we'll ignore it. roundOffsetError := &RoundOffsetError{} if errors.As(err, &roundOffsetError) { - require.Equal(t, basics.Round(0), validThrough) + require.Zero(t, validThrough) // verify it's the expected error and not anything else. 
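// Illustrative sketch, not part of the patch: the require.Zero swaps above rely on
// testify's Zero assertion, which compares a value against the zero value of its own
// type, so the explicit basics.Round(0) constant adds nothing. A minimal, self-contained
// example of the two equivalent forms (Round here is a stand-in for basics.Round):
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type Round uint64 // stand-in for basics.Round

func TestZeroValidThrough(t *testing.T) {
	var validThrough Round
	require.Zero(t, validThrough)            // asserts the zero value of Round
	require.Equal(t, Round(0), validThrough) // equivalent, but needs the typed constant
}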
require.Less(t, int64(roundOffsetError.round), int64(roundOffsetError.dbRound)) if testback > 1 { @@ -936,7 +936,7 @@ func TestBoxNamesByAppIDs(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - initialBlocksCount := 1 + const initialBlocksCount = 1 accts := make(map[basics.Address]basics.AccountData) protoParams := config.Consensus[protocol.ConsensusCurrentVersion] @@ -1058,7 +1058,7 @@ func TestKVCache(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - initialBlocksCount := 1 + const initialBlocksCount = 1 accts := make(map[basics.Address]basics.AccountData) protoParams := config.Consensus[protocol.ConsensusCurrentVersion] @@ -1327,7 +1327,7 @@ func TestCompactDeltas(t *testing.T) { stateDeltas[0].Creatables[100] = ledgercore.ModifiedCreatable{Creator: addrs[2], Created: true} var baseAccounts lruAccounts baseAccounts.init(nil, 100, 80) - outAccountDeltas := makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts) + outAccountDeltas := makeCompactAccountDeltas(stateDeltas, 1, true, baseAccounts) outCreatableDeltas := compactCreatableDeltas(stateDeltas) require.Equal(t, stateDeltas[0].Accts.Len(), outAccountDeltas.len()) @@ -1360,7 +1360,7 @@ func TestCompactDeltas(t *testing.T) { baseAccounts.write(trackerdb.PersistedAccountData{Addr: addrs[0], AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 1}}}) baseAccounts.write(trackerdb.PersistedAccountData{Addr: addrs[3], AccountData: trackerdb.BaseAccountData{}}) - outAccountDeltas = makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts) + outAccountDeltas = makeCompactAccountDeltas(stateDeltas, 1, true, baseAccounts) outCreatableDeltas = compactCreatableDeltas(stateDeltas) require.Equal(t, 2, outAccountDeltas.len()) @@ -1449,7 +1449,7 @@ func TestCompactDeltasResources(t *testing.T) { baseResources.init(nil, 100, 80) - outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) + outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, 1, true, baseAccounts, baseResources) // 6 entries are missing: same app (asset) params and local state are combined into a single entry require.Equal(t, 6, len(outResourcesDeltas.misses)) require.Equal(t, 6, len(outResourcesDeltas.deltas)) @@ -1509,7 +1509,7 @@ func TestCompactDeltasResources(t *testing.T) { } } - outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) + outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, 1, true, baseAccounts, baseResources) require.Equal(t, 0, len(outResourcesDeltas.misses)) require.Equal(t, 6, len(outResourcesDeltas.deltas)) @@ -1564,12 +1564,12 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) { protocolVersion := protocol.ConsensusCurrentVersion - initialRounds := uint64(1) + const initialRounds = 1 accountsCount := 5 rewardsLevels := []uint64{0} accts := setupAccts(accountsCount) - ml := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion, accts) + ml := makeMockLedgerForTracker(t, true, initialRounds, protocolVersion, accts) ml.log.SetLevel(logging.Warn) defer ml.Close() @@ -1611,7 +1611,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) { delta.Accts.MergeAccounts(updates) delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel) ml.addBlock(blockEntry{block: blk}, delta) - ml.trackers.committedUpTo(basics.Round(i)) + ml.trackers.committedUpTo(i) 
ml.trackers.waitAccountsWriting() accts = append(accts, newAccts) rewardsLevels = append(rewardsLevels, rewardLevel) @@ -1622,7 +1622,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) { accts = []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)} // create another mocked ledger, but this time with a fresh new tracker database. - ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion, accts) + ml2 := makeMockLedgerForTracker(t, true, initialRounds, protocolVersion, accts) ml2.log.SetLevel(logging.Warn) defer ml2.Close() @@ -1648,13 +1648,13 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) { initProtocolVersion := protocol.ConsensusV20 - initialRounds := uint64(1) + const initialRounds = 1 accountsCount := 5 rewardsLevels := []uint64{0} accts := setupAccts(accountsCount) - ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion, accts) + ml := makeMockLedgerForTracker(t, true, initialRounds, initProtocolVersion, accts) ml.log.SetLevel(logging.Warn) defer ml.Close() @@ -1669,7 +1669,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) { rewardsLevels = append(rewardsLevels, rewardLevel) } - extraRounds := uint64(39) + extraRounds := basics.Round(39) // write the extraRounds rounds so that we will fill up the queue. for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ { @@ -1690,7 +1690,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -1704,12 +1704,12 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) { rewardsLevels = append(rewardsLevels, rewardLevel) } - newVersionBlocksCount := uint64(47) + newVersionBlocksCount := basics.Round(47) newVersion := protocol.ConsensusV21 - maxAcctLookback := conf.MaxAcctLookback + maxAcctLookback := basics.Round(conf.MaxAcctLookback) // add 47 more rounds that contains blocks using a newer consensus version, and stuff it with maxAcctLookback - lastRoundToWrite := basics.Round(initialRounds + maxAcctLookback + extraRounds + newVersionBlocksCount) - for i := basics.Round(initialRounds + extraRounds); i < lastRoundToWrite; i++ { + lastRoundToWrite := initialRounds + maxAcctLookback + extraRounds + newVersionBlocksCount + for i := initialRounds + extraRounds; i < lastRoundToWrite; i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta accountChanges := 2 @@ -1727,7 +1727,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -1754,12 +1754,12 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { initProtocolVersion := protocol.ConsensusV20 - initialRounds := uint64(1) + const initialRounds basics.Round = 1 accountsCount := 5 rewardsLevels := []uint64{0} accts := setupAccts(accountsCount) - ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion, accts) + ml := makeMockLedgerForTracker(t, true, initialRounds, initProtocolVersion, accts) ml.log.SetLevel(logging.Warn) defer ml.Close() @@ -1774,10 +1774,10 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { rewardsLevels = append(rewardsLevels, rewardLevel) } - extraRounds := uint64(39) + const extraRounds 
basics.Round = 39 // write extraRounds rounds so that we will fill up the queue. - for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ { + for i := initialRounds; i < initialRounds+extraRounds; i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta accountChanges := 2 @@ -1810,10 +1810,10 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { } newVersion := protocol.ConsensusV21 - maxAcctLockback := conf.MaxAcctLookback + maxAcctLockback := basics.Round(conf.MaxAcctLookback) // add maxAcctLockback-extraRounds more rounds that contains blocks using a newer consensus version. - endOfFirstNewProtocolSegment := basics.Round(initialRounds + extraRounds + maxAcctLockback) - for i := basics.Round(initialRounds + extraRounds); i <= endOfFirstNewProtocolSegment; i++ { + endOfFirstNewProtocolSegment := initialRounds + extraRounds + maxAcctLockback + for i := initialRounds + extraRounds; i <= endOfFirstNewProtocolSegment; i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta accountChanges := 2 @@ -1831,7 +1831,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -1850,7 +1850,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound) // write additional extraRounds elements and verify these can be flushed. - for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+maxAcctLockback); i++ { + for i := endOfFirstNewProtocolSegment + 1; i <= initialRounds+2*extraRounds+maxAcctLockback; i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta accountChanges := 2 @@ -1868,7 +1868,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -1883,7 +1883,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) { } ml.trackers.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds)) ml.trackers.waitAccountsWriting() - require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.cachedDBRound) + require.Equal(t, initialRounds+2*extraRounds, au.cachedDBRound) } // TestAcctUpdatesResources checks that created, deleted, and created resource keep @@ -1996,7 +1996,7 @@ func TestAcctUpdatesResources(t *testing.T) { blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -2137,7 +2137,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, rewardsLevels := []uint64{0} conf := config.GetDefaultLocal() - initialBlocksCount := int(conf.MaxAcctLookback) + initialBlocksCount := basics.Round(conf.MaxAcctLookback) ml := makeMockLedgerForTracker(t, false, initialBlocksCount, testProtocolVersion, accts) defer ml.Close() @@ -2146,7 +2146,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, // cover 10 genesis blocks rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { + for i := basics.Round(1); i < initialBlocksCount; i++ { accts = append(accts, accts[0]) rewardsLevels = append(rewardsLevels, rewardLevel) } @@ -2157,7 +2157,7 @@ 
func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) knownCreatables := make(map[basics.CreatableIndex]bool) - for i := basics.Round(initialBlocksCount); i < basics.Round(conf.MaxAcctLookback+15); i++ { + for i := initialBlocksCount; i < basics.Round(conf.MaxAcctLookback+15); i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta var updates ledgercore.AccountDeltas @@ -2177,7 +2177,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, blk := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), + Round: i, }, } blk.RewardsLevel = rewardLevel @@ -2203,8 +2203,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, } // flush a couple of rounds (indirectly schedules commitSyncer) - flushRound(basics.Round(0)) - flushRound(basics.Round(1)) + flushRound(0) + flushRound(1) // add stallingTracker to list of trackers stallingTracker := &blockingTracker{ @@ -2263,7 +2263,7 @@ func TestAcctUpdatesLookupLatestRetry(t *testing.T) { // issue a LookupWithoutRewards while persistedData.round != au.cachedDBRound d, validThrough, withoutRewards, err := au.lookupLatest(addr) require.NoError(t, err) - require.Equal(t, accts[validThrough][addr].WithUpdatedRewards(proto, rewardsLevels[validThrough]), d) + require.Equal(t, accts[validThrough][addr].WithUpdatedRewards(proto.RewardUnit, rewardsLevels[validThrough]), d) require.Equal(t, accts[validThrough][addr].MicroAlgos, withoutRewards) require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), "validThrough: %v rnd :%v", validThrough, rnd) }) @@ -2574,7 +2574,7 @@ func TestAcctUpdatesLookupResources(t *testing.T) { func TestAcctUpdatesLookupStateDelta(t *testing.T) { partitiontest.PartitionTest(t) - initialBlocksCount := 1 + const initialBlocksCount = 1 accts := setupAccts(1) testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesLookupStateDelta") @@ -2670,21 +2670,21 @@ func TestAcctUpdatesLookupStateDelta(t *testing.T) { rnd := au.latest() require.Equal(t, currentRound, rnd) if uint64(currentRound) > conf.MaxAcctLookback { - require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound) + require.Equal(t, currentRound-basics.Round(conf.MaxAcctLookback), au.cachedDBRound) } else { require.Equal(t, basics.Round(0), au.cachedDBRound) } // Iterate backwards through deltas, ensuring proper data exists in StateDelta - for j := uint64(rnd); j > uint64(au.cachedDBRound); j-- { + for j := rnd; j > au.cachedDBRound; j-- { // fetch StateDelta - actualDelta, err := au.lookupStateDelta(basics.Round(j)) + actualDelta, err := au.lookupStateDelta(j) require.NoError(t, err) actualAccountDeltas := actualDelta.Accts actualKvDeltas := actualDelta.KvMods // Make sure we know about the expected changes for the delta's round - expectedAccountDeltas, has := updatesI[basics.Round(j)] + expectedAccountDeltas, has := updatesI[j] require.True(t, has) // Do basic checking on the size and existence of accounts in deltas require.Equal(t, expectedAccountDeltas.Len(), actualAccountDeltas.Len()) @@ -2702,8 +2702,8 @@ func TestAcctUpdatesLookupStateDelta(t *testing.T) { require.Equal(t, len(expectedAccountDeltas.AppResources), len(actualAccountDeltas.AppResources)) // Validate KvDeltas contains updates w/ new/old values. 
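// Illustrative sketch, not part of the patch: many hunks in this patch replace loose
// int/uint64 round counters with the defined basics.Round type, which drops the
// basics.Round(...) casts at call sites and, on Go 1.22+, lets range-over-int iterate
// rounds directly (randomInitChain later in the patch now ranges over a basics.Round).
// The pattern in isolation, with Round standing in for basics.Round:
package example

type Round uint64 // stand-in for basics.Round

// firstRounds returns rounds 0..n-1; the loop variable is typed Round, so no casts.
func firstRounds(n Round) []Round {
	out := make([]Round, 0, n)
	for i := range n {
		out = append(out, i)
	}
	return out
}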
- startKV := (j - 1) * uint64(kvsPerBlock) - expectedKvDeltas, has := roundMods[basics.Round(j)] + startKV := (uint64(j) - 1) * uint64(kvsPerBlock) + expectedKvDeltas, has := roundMods[j] require.True(t, has) for kv := 0; kv < kvsPerBlock; kv++ { name := fmt.Sprintf("%d", startKV+uint64(kv)) diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go index b0f04ad00a..a11578bd59 100644 --- a/ledger/apply/application_test.go +++ b/ledger/apply/application_test.go @@ -441,7 +441,7 @@ func TestAppCallCreate(t *testing.T) { // no balance record appIdx, err := createApplication(&ac, b, creator, txnCounter) a.Error(err) - a.Equal(basics.AppIndex(0), appIdx) + a.Zero(appIdx) b.balances = make(map[basics.Address]basics.AccountData) b.balances[creator] = basics.AccountData{} diff --git a/ledger/apply/challenge_test.go b/ledger/apply/challenge_test.go index 4949f11144..24389cf3e8 100644 --- a/ledger/apply/challenge_test.go +++ b/ledger/apply/challenge_test.go @@ -51,7 +51,7 @@ func TestBitsMatch(t *testing.T) { require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9)) } -func TestFailsChallenge(t *testing.T) { +func TestUnsuccessfulChallenge(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() a := assert.New(t) diff --git a/ledger/apply/keyreg_test.go b/ledger/apply/keyreg_test.go index 16b0bfbcdf..2acf9fb615 100644 --- a/ledger/apply/keyreg_test.go +++ b/ledger/apply/keyreg_test.go @@ -269,8 +269,8 @@ func createTestKeyregWithPeriod(t *testing.T, src basics.Address, sigVerifier cr Header: transactions.Header{ Sender: src, Fee: basics.MicroAlgos{Raw: 1}, - FirstValid: basics.Round(defaultParticipationFirstRound), - LastValid: basics.Round(defaultParticipationLastRound), + FirstValid: defaultParticipationFirstRound, + LastValid: defaultParticipationLastRound, }, KeyregTxnFields: transactions.KeyregTxnFields{ VotePK: crypto.OneTimeSignatureVerifier(sigVerifier), diff --git a/ledger/apply/stateproof_test.go b/ledger/apply/stateproof_test.go index 8e287708db..a09ef8a61c 100644 --- a/ledger/apply/stateproof_test.go +++ b/ledger/apply/stateproof_test.go @@ -106,7 +106,7 @@ func TestApplyStateProofV34(t *testing.T) { stateProofTx.StateProofType = protocol.StateProofBasic // stateproof txn doesn't confirm the next state proof round. expected is in the past validate = true - stateProofTx.Message.LastAttestedRound = uint64(16) + stateProofTx.Message.LastAttestedRound = 16 applier.SetStateProofNextRound(8) err = StateProof(stateProofTx, atRound, applier, validate) a.ErrorIs(err, ErrExpectedDifferentStateProofRound) @@ -114,7 +114,7 @@ func TestApplyStateProofV34(t *testing.T) { // stateproof txn doesn't confirm the next state proof round. 
expected is in the future validate = true - stateProofTx.Message.LastAttestedRound = uint64(16) + stateProofTx.Message.LastAttestedRound = 16 applier.SetStateProofNextRound(32) err = StateProof(stateProofTx, atRound, applier, validate) a.ErrorIs(err, ErrExpectedDifferentStateProofRound) @@ -152,7 +152,7 @@ func TestApplyStateProofV34(t *testing.T) { spHdr.Round = 15 blocks[spHdr.Round] = spHdr - stateProofTx.Message.LastAttestedRound = uint64(spHdr.Round) + stateProofTx.Message.LastAttestedRound = spHdr.Round applier.SetStateProofNextRound(15) blockErr[13] = noBlockErr err = StateProof(stateProofTx, atRound, applier, validate) @@ -179,7 +179,7 @@ func TestApplyStateProofV34(t *testing.T) { atRoundBlock.CurrentProtocol = version blocks[atRound] = atRoundBlock - stateProofTx.Message.LastAttestedRound = 2 * config.Consensus[version].StateProofInterval + stateProofTx.Message.LastAttestedRound = 2 * basics.Round(config.Consensus[version].StateProofInterval) stateProofTx.StateProof.SignedWeight = 100 applier.SetStateProofNextRound(basics.Round(2 * config.Consensus[version].StateProofInterval)) @@ -220,7 +220,7 @@ func TestApplyStateProof(t *testing.T) { stateProofTx.StateProofType = protocol.StateProofBasic // stateproof txn doesn't confirm the next state proof round. expected is in the past validate = true - stateProofTx.Message.LastAttestedRound = uint64(16) + stateProofTx.Message.LastAttestedRound = 16 applier.SetStateProofNextRound(8) err = StateProof(stateProofTx, atRound, applier, validate) a.ErrorIs(err, ErrExpectedDifferentStateProofRound) @@ -228,7 +228,7 @@ func TestApplyStateProof(t *testing.T) { // stateproof txn doesn't confirm the next state proof round. expected is in the future validate = true - stateProofTx.Message.LastAttestedRound = uint64(16) + stateProofTx.Message.LastAttestedRound = 16 applier.SetStateProofNextRound(32) err = StateProof(stateProofTx, atRound, applier, validate) a.ErrorIs(err, ErrExpectedDifferentStateProofRound) diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go index 4509b66c0b..d338382169 100644 --- a/ledger/apptxn_test.go +++ b/ledger/apptxn_test.go @@ -984,21 +984,22 @@ func TestInnerAppCreateAndOptin(t *testing.T) { // TestParentGlobals tests that a newly created app can call an inner app, and // the inner app will have access to the parent globals, even if the originally // created app ID isn't passed down, because the rule is that "pending" created -// apps are available, starting from v38 +// apps are available. We added this rule in v38, but because it is more +// lenient, not more restrictive, we removed the consensus gated code. So it now +// works from v31 on. func TestParentGlobals(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - // v38 allows parent access, but we start with v31 to make sure we don't mistakenly change it ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() - // helper app, is called during the creation of an app. this app tries - // to access its parent's globals, by using `global CallerApplicationID` - helper := dl.fundedApp(addrs[0], 1_000_000, + // checkParent is called during the creation of an app. 
It tries to + // access its parent's globals, by using `global CallerApplicationID` + checkParent := dl.fundedApp(addrs[0], 1_000_000, main(` global CallerApplicationID byte "X" @@ -1009,7 +1010,7 @@ func TestParentGlobals(t *testing.T) { createProgram := ` itxn_begin int appl; itxn_field TypeEnum - int ` + strconv.Itoa(int(helper)) + `; itxn_field ApplicationID + int ` + strconv.Itoa(int(checkParent)) + `; itxn_field ApplicationID itxn_submit int 1 ` @@ -1018,15 +1019,11 @@ func TestParentGlobals(t *testing.T) { Sender: addrs[0], Fee: 2 * 1000, // to pay for self and call to helper ApprovalProgram: createProgram, - ForeignApps: []basics.AppIndex{helper}, + ForeignApps: []basics.AppIndex{checkParent}, } var creator basics.AppIndex - if ver >= 38 { - creator = dl.txn(&createapp).ApplyData.ApplicationID - require.NotZero(t, creator) - } else { - dl.txn(&createapp, "unavailable App") - } + creator = dl.txn(&createapp).ApplyData.ApplicationID + require.NotZero(t, creator) // Now, test the same pattern, but do it all inside of yet another outer // app, to show that the parent is available even if it was, itself @@ -1041,12 +1038,13 @@ func TestParentGlobals(t *testing.T) { ApprovalProgram: ` itxn_begin int appl; itxn_field TypeEnum + txna Applications 1; itxn_field Applications; // We are checking some versions from before resource sharing byte 0x` + hex.EncodeToString(createapp.SignedTxn().Txn.ApprovalProgram) + `; itxn_field ApprovalProgram byte 0x` + hex.EncodeToString(createapp.SignedTxn().Txn.ClearStateProgram) + `; itxn_field ClearStateProgram itxn_submit int 1 `, - ForeignApps: []basics.AppIndex{creator, helper}, + ForeignApps: []basics.AppIndex{checkParent, creator}, } fund := txntest.Txn{ Type: "pay", @@ -1054,12 +1052,7 @@ func TestParentGlobals(t *testing.T) { Sender: addrs[0], Receiver: outerAppAddress, } - if ver >= 38 { - dl.txgroup("", &fund, &outer) - } else { - dl.txn(&createapp, "unavailable App") - } - + dl.txgroup("", &fund, &outer) }) } @@ -1725,7 +1718,7 @@ assert }) } -func TestAbortWhenInnerAppCallFails(t *testing.T) { +func TestAbortWhenInnerAppCallErrs(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/ledger/archival_test.go b/ledger/archival_test.go index 2e2d5c408a..6c83d74ce2 100644 --- a/ledger/archival_test.go +++ b/ledger/archival_test.go @@ -268,18 +268,18 @@ func makeUnsignedAssetCreateTx(firstValid, lastValid basics.Round, total uint64, return tx, nil } -func makeUnsignedAssetDestroyTx(firstValid, lastValid basics.Round, assetIndex uint64) (transactions.Transaction, error) { +func makeUnsignedAssetDestroyTx(firstValid, lastValid basics.Round, assetIndex basics.AssetIndex) (transactions.Transaction, error) { var txn transactions.Transaction txn.Type = protocol.AssetConfigTx - txn.ConfigAsset = basics.AssetIndex(assetIndex) + txn.ConfigAsset = assetIndex txn.FirstValid = firstValid txn.LastValid = lastValid return txn, nil } -func makeUnsignedApplicationCallTx(appIdx uint64, onCompletion transactions.OnCompletion) (tx transactions.Transaction, err error) { +func makeUnsignedApplicationCallTx(appIdx basics.AppIndex, onCompletion transactions.OnCompletion) (tx transactions.Transaction, err error) { tx.Type = protocol.ApplicationCallTx - tx.ApplicationID = basics.AppIndex(appIdx) + tx.ApplicationID = appIdx tx.OnCompletion = onCompletion // If creating, set programs @@ -405,10 +405,10 @@ func TestArchivalCreatables(t *testing.T) { if i >= maxBlocks/2 && i < (3*(maxBlocks/4)) { switch creatableIdxs[createdIdx] { case AssetCreated: - tx, err = 
makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, uint64(createdIdx)) + tx, err = makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, basics.AssetIndex(createdIdx)) creatableIdxs[createdIdx] = AssetDeleted case AppCreated: - tx, err = makeUnsignedApplicationCallTx(uint64(createdIdx), transactions.DeleteApplicationOC) + tx, err = makeUnsignedApplicationCallTx(basics.AppIndex(createdIdx), transactions.DeleteApplicationOC) creatableIdxs[createdIdx] = AppDeleted default: panic("unknown action") @@ -513,10 +513,10 @@ func TestArchivalCreatables(t *testing.T) { var tx0 transactions.Transaction switch creatableIdxs[creatableToDelete] { case AssetCreated: - tx0, err = makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, uint64(creatableToDelete)) + tx0, err = makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, basics.AssetIndex(creatableToDelete)) creatableIdxs[creatableToDelete] = AssetDeleted case AppCreated: - tx0, err = makeUnsignedApplicationCallTx(uint64(creatableToDelete), transactions.DeleteApplicationOC) + tx0, err = makeUnsignedApplicationCallTx(basics.AppIndex(creatableToDelete), transactions.DeleteApplicationOC) creatableIdxs[creatableToDelete] = AppDeleted default: panic("unknown action") @@ -531,10 +531,10 @@ func TestArchivalCreatables(t *testing.T) { var tx1 transactions.Transaction switch creatableIdxs[creatableToDelete] { case AssetCreated: - tx1, err = makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, uint64(creatableToDelete)) + tx1, err = makeUnsignedAssetDestroyTx(blk.BlockHeader.Round-1, blk.BlockHeader.Round+3, basics.AssetIndex(creatableToDelete)) creatableIdxs[creatableToDelete] = AssetDeleted case AppCreated: - tx1, err = makeUnsignedApplicationCallTx(uint64(creatableToDelete), transactions.DeleteApplicationOC) + tx1, err = makeUnsignedApplicationCallTx(basics.AppIndex(creatableToDelete), transactions.DeleteApplicationOC) creatableIdxs[creatableToDelete] = AppDeleted default: panic("unknown action") diff --git a/ledger/blockqueue_test.go b/ledger/blockqueue_test.go index a60c231cd2..2084a67971 100644 --- a/ledger/blockqueue_test.go +++ b/ledger/blockqueue_test.go @@ -56,10 +56,10 @@ func randomBlock(r basics.Round) blockEntry { } } -func randomInitChain(proto protocol.ConsensusVersion, nblock int) []blockEntry { +func randomInitChain(proto protocol.ConsensusVersion, nblock basics.Round) []blockEntry { res := make([]blockEntry, 0) - for i := 0; i < nblock; i++ { - blkent := randomBlock(basics.Round(i)) + for i := range nblock { + blkent := randomBlock(i) blkent.cert = agreement.Certificate{} blkent.block.CurrentProtocol = proto res = append(res, blkent) diff --git a/ledger/catchpointfilewriter_test.go b/ledger/catchpointfilewriter_test.go index 72557466f6..1ff274c8fd 100644 --- a/ledger/catchpointfilewriter_test.go +++ b/ledger/catchpointfilewriter_test.go @@ -430,7 +430,7 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { accts := ledgertesting.RandomAccounts(1, false) // force acct to have overflowing number of resources - assetIndex := 1000 + assetIndex := basics.AssetIndex(1000) for addr, acct := range accts { if acct.AssetParams == nil { acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0) @@ -438,7 +438,7 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { } for i := uint64(0); i < 20; i++ { ap := ledgertesting.RandomAssetParams() - acct.AssetParams[basics.AssetIndex(assetIndex)] = ap 
+ acct.AssetParams[assetIndex] = ap assetIndex++ } } @@ -526,7 +526,7 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { accts := ledgertesting.RandomAccounts(5, false) // force each acct to have overflowing number of resources - assetIndex := 1000 + assetIndex := basics.AssetIndex(1000) for addr, acct := range accts { if acct.AssetParams == nil { acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0) @@ -534,7 +534,7 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { } for i := uint64(0); i < 20; i++ { ap := ledgertesting.RandomAssetParams() - acct.AssetParams[basics.AssetIndex(assetIndex)] = ap + acct.AssetParams[assetIndex] = ap assetIndex++ } } @@ -643,7 +643,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { acctData, validThrough, _, err := l.LookupLatest(addr) require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr) require.Equal(t, acct, acctData) - require.Equal(t, basics.Round(0), validThrough) + require.Zero(t, validThrough) } // TODO: uncomment if we want to test re-initializing the ledger fully @@ -840,7 +840,7 @@ func TestFullCatchpointWriter(t *testing.T) { acctData, validThrough, _, err := l.LookupLatest(addr) require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr) require.Equal(t, acct, acctData) - require.Equal(t, basics.Round(0), validThrough) + require.Zero(t, validThrough) } } @@ -1199,6 +1199,12 @@ assert require.Empty(t, vb.Block().AbsentParticipationAccounts) } + require.Eventually(t, func() bool { + gr, _ := dl.generator.LatestCommitted() + vr, _ := dl.validator.LatestCommitted() + return gr == vr + }, 1*time.Second, 50*time.Millisecond) + // wait for tracker to flush testCatchpointFlushRound(dl.generator) testCatchpointFlushRound(dl.validator) diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index 3bbc70cadd..a26e0c6f5d 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -1892,7 +1892,7 @@ func TestCatchpointFastUpdates(t *testing.T) { conf := config.GetDefaultLocal() conf.CatchpointInterval = 1 conf.CatchpointTracking = 1 - initialBlocksCount := int(conf.MaxAcctLookback) + initialBlocksCount := basics.Round(conf.MaxAcctLookback) ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusFuture, accts) defer ml.Close() @@ -1913,7 +1913,7 @@ func TestCatchpointFastUpdates(t *testing.T) { // cover 10 genesis blocks rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { + for i := basics.Round(1); i < initialBlocksCount; i++ { accts = append(accts, accts[0]) rewardsLevels = append(rewardsLevels, rewardLevel) } @@ -2007,7 +2007,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) { conf.CatchpointInterval = 32 conf.CatchpointTracking = 1 conf.Archival = true - initialBlocksCount := int(conf.MaxAcctLookback) + initialBlocksCount := basics.Round(conf.MaxAcctLookback) ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion, accts) defer ml.Close() @@ -2024,7 +2024,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) { // cover 10 genesis blocks rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { + for i := basics.Round(1); i < initialBlocksCount; i++ { accts = append(accts, accts[0]) rewardsLevels = append(rewardsLevels, rewardLevel) } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index dd64e46d1e..114dd7f91a 100644 --- 
a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -531,7 +531,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte return fmt.Errorf("processStagingBalances received a chunk with no accounts") } - normalizedAccountBalances, err = prepareNormalizedBalancesV5(balances.Balances, c.ledger.GenesisProto()) + normalizedAccountBalances, err = prepareNormalizedBalancesV5(balances.Balances, c.ledger.GenesisProto().RewardUnit) expectingMoreEntries = make([]bool, len(balances.Balances)) case CatchpointFileVersionV6: @@ -978,8 +978,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro wg.Wait() select { - case err := <-errChan: - return err + case err1 := <-errChan: + return err1 default: } diff --git a/ledger/eval/appcow.go b/ledger/eval/appcow.go index 83aa739629..83603acb22 100644 --- a/ledger/eval/appcow.go +++ b/ledger/eval/appcow.go @@ -415,7 +415,7 @@ func (cb *roundCowState) delKey(addr basics.Address, aidx basics.AppIndex, globa // Write the value delta associated with deleting this key lsd, err := cb.ensureStorageDelta(addr, aidx, global, remainAllocAction, accountIdx) if err != nil { - return nil + return err } vdelta, ok := lsd.kvCow[key] diff --git a/ledger/eval/cow.go b/ledger/eval/cow.go index edeaa93da4..382ff9d25d 100644 --- a/ledger/eval/cow.go +++ b/ledger/eval/cow.go @@ -385,8 +385,8 @@ func (cb *roundCowState) CalculateTotals() error { if lookupError != nil { return fmt.Errorf("roundCowState.CalculateTotals unable to load account data for address %v", accountAddr) } - totals.DelAccount(cb.proto, previousAccountData, &ot) - totals.AddAccount(cb.proto, updatedAccountData, &ot) + totals.DelAccount(cb.proto.RewardUnit, previousAccountData, &ot) + totals.AddAccount(cb.proto.RewardUnit, updatedAccountData, &ot) } if ot.Overflowed { diff --git a/ledger/eval/cow_test.go b/ledger/eval/cow_test.go index d0a75fef21..a3af28b1ec 100644 --- a/ledger/eval/cow_test.go +++ b/ledger/eval/cow_test.go @@ -338,20 +338,20 @@ func TestCowStateProof(t *testing.T) { c0.SetStateProofNextRound(firstStateproof) stateproofTxn := transactions.StateProofTxnFields{ StateProofType: protocol.StateProofBasic, - Message: stateproofmsg.Message{LastAttestedRound: uint64(firstStateproof) + version.StateProofInterval}, + Message: stateproofmsg.Message{LastAttestedRound: firstStateproof + basics.Round(version.StateProofInterval)}, } // can not apply state proof for 3*version.StateProofInterval when we expect 2*version.StateProofInterval err := apply.StateProof(stateproofTxn, firstStateproof+1, c0, false) a.ErrorIs(err, apply.ErrExpectedDifferentStateProofRound) - stateproofTxn.Message.LastAttestedRound = uint64(firstStateproof) + stateproofTxn.Message.LastAttestedRound = firstStateproof err = apply.StateProof(stateproofTxn, firstStateproof+1, c0, false) a.NoError(err) a.Equal(3*basics.Round(version.StateProofInterval), c0.GetStateProofNextRound()) // try to apply the next stateproof 3*version.StateProofInterval - stateproofTxn.Message.LastAttestedRound = 3 * version.StateProofInterval + stateproofTxn.Message.LastAttestedRound = 3 * basics.Round(version.StateProofInterval) err = apply.StateProof(stateproofTxn, firstStateproof+1, c0, false) a.NoError(err) a.Equal(4*basics.Round(version.StateProofInterval), c0.GetStateProofNextRound()) diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go index 8b422af70f..a0f64a9438 100644 --- a/ledger/eval/eval.go +++ b/ledger/eval/eval.go @@ -551,7 +551,7 @@ func (cs *roundCowState) Get(addr basics.Address, 
withPendingRewards bool) (ledg return ledgercore.AccountData{}, err } if withPendingRewards { - acct = acct.WithUpdatedRewards(cs.proto, cs.rewardsLevel()) + acct = acct.WithUpdatedRewards(cs.proto.RewardUnit, cs.rewardsLevel()) } return acct, nil } @@ -588,7 +588,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if err != nil { return err } - fromBalNew := fromBal.WithUpdatedRewards(cs.proto, rewardlvl) + fromBalNew := fromBal.WithUpdatedRewards(cs.proto.RewardUnit, rewardlvl) if fromRewards != nil { var ot basics.OverflowTracker @@ -600,7 +600,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics } // Only write the change if it's meaningful (or required by old code). - if !amt.IsZero() || fromBal.MicroAlgos.RewardUnits(cs.proto) > 0 || !cs.proto.UnfundedSenders { + if !amt.IsZero() || fromBal.MicroAlgos.RewardUnits(cs.proto.RewardUnit) > 0 || !cs.proto.UnfundedSenders { var overflowed bool fromBalNew.MicroAlgos, overflowed = basics.OSubA(fromBalNew.MicroAlgos, amt) if overflowed { @@ -617,7 +617,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if err != nil { return err } - toBalNew := toBal.WithUpdatedRewards(cs.proto, rewardlvl) + toBalNew := toBal.WithUpdatedRewards(cs.proto.RewardUnit, rewardlvl) if toRewards != nil { var ot basics.OverflowTracker @@ -629,7 +629,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics } // Only write the change if it's meaningful (or required by old code). - if !amt.IsZero() || toBal.MicroAlgos.RewardUnits(cs.proto) > 0 || !cs.proto.UnfundedSenders { + if !amt.IsZero() || toBal.MicroAlgos.RewardUnits(cs.proto.RewardUnit) > 0 || !cs.proto.UnfundedSenders { var overflowed bool toBalNew.MicroAlgos, overflowed = basics.OAddA(toBalNew.MicroAlgos, amt) if overflowed { @@ -810,7 +810,7 @@ func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts } // this is expected to be a no-op, but update the rewards on the rewards pool if it was configured to receive rewards ( unlike mainnet ). - rewardsPoolData = rewardsPoolData.WithUpdatedRewards(prevProto, eval.prevHeader.RewardsLevel) + rewardsPoolData = rewardsPoolData.WithUpdatedRewards(prevProto.RewardUnit, eval.prevHeader.RewardsLevel) if evalOpts.Generate { if eval.proto.SupportGenesisHash { @@ -1151,7 +1151,7 @@ func (eval *BlockEvaluator) checkMinBalance(cow *roundCowState) error { continue } - dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl) + dataNew := data.WithUpdatedRewards(eval.proto.RewardUnit, rewardlvl) effectiveMinBalance := dataNew.MinBalance(&eval.proto) if dataNew.MicroAlgos.Raw < effectiveMinBalance.Raw { return fmt.Errorf("account %v balance %d below min %d (%d assets)", @@ -1713,7 +1713,7 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList(participating []bas Status: acctData.Status, LastProposed: acctData.LastProposed, LastHeartbeat: acctData.LastHeartbeat, - MicroAlgosWithRewards: acctData.WithUpdatedRewards(eval.proto, eval.state.rewardsLevel()).MicroAlgos, + MicroAlgosWithRewards: acctData.WithUpdatedRewards(eval.proto.RewardUnit, eval.state.rewardsLevel()).MicroAlgos, IncentiveEligible: acctData.IncentiveEligible, } } diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go index b90530b1e3..c66cc9e182 100644 --- a/ledger/eval/eval_test.go +++ b/ledger/eval/eval_test.go @@ -716,7 +716,7 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTest // calculate the accounts totals. 
var ot basics.OverflowTracker for _, acctData := range balances.Balances { - l.latestTotals.AddAccount(proto, ledgercore.ToAccountData(acctData), &ot) + l.latestTotals.AddAccount(proto.RewardUnit, ledgercore.ToAccountData(acctData), &ot) } l.genesisProto = proto l.genesisProtoVersion = protoVersion @@ -1247,8 +1247,8 @@ func (p *failRoundCowParent) lookup(basics.Address) (ledgercore.AccountData, err return ledgercore.AccountData{}, fmt.Errorf("disk I/O fail (on purpose)") } -// TestExpiredAccountGenerationWithDiskFailure tests edge cases where disk failures can lead to ledger look up failures -func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) { +// TestExpiredAccountGenerationWithDiskErr tests edge cases where disk failures can lead to ledger look up failures +func TestExpiredAccountGenerationWithDiskErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go index b7817e4e36..a60311f127 100644 --- a/ledger/eval_simple_test.go +++ b/ledger/eval_simple_test.go @@ -1409,10 +1409,11 @@ func TestMinBalanceChanges(t *testing.T) { proto := l.GenesisProto() // Check balance and min balance requirement changes - require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee - require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create - require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee - require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin + require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee + reqs := proto.BalanceRequirements() + require.Equal(t, ad0init.MinBalance(reqs).Raw, ad0new.MinBalance(reqs).Raw-100000) // create + require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee + require.Equal(t, ad5init.MinBalance(reqs).Raw, ad5new.MinBalance(reqs).Raw-100000) // optin optOutTxn := txntest.Txn{ Type: "axfer", @@ -1437,8 +1438,8 @@ func TestMinBalanceChanges(t *testing.T) { ad5final, _, _, err := l.LookupLatest(addrs[5]) require.NoError(t, err) // Check we got our balance "back" - require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto)) - require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto)) + require.Equal(t, ad0final.MinBalance(reqs), ad0init.MinBalance(reqs)) + require.Equal(t, ad5final.MinBalance(reqs), ad5init.MinBalance(reqs)) } // TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go index f2f0e848e2..a9cc6d4fad 100644 --- a/ledger/evalbench_test.go +++ b/ledger/evalbench_test.go @@ -176,7 +176,7 @@ func (g *benchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Addre GenesisHash: gh, }, ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: basics.AppIndex(appIdx), + ApplicationID: appIdx, OnCompletion: transactions.OptInOC, }, } diff --git a/ledger/fullblock_perf_test.go b/ledger/fullblock_perf_test.go index f3ffcd51a3..b2c86c20f8 100644 --- a/ledger/fullblock_perf_test.go +++ b/ledger/fullblock_perf_test.go @@ -44,8 +44,8 @@ import ( ) type benchConfig struct { - txnCount uint64 - round uint64 + txnCount int + round basics.Round b *testing.B creator basics.Address accts []basics.Address @@ -54,9 +54,9 @@ type benchConfig struct { l0 *Ledger l1 *Ledger eval *eval.BlockEvaluator - numPay uint64 - numAst uint64 - numApp uint64 + numPay int + numAst int + numApp int 
blocks []bookkeeping.Block } @@ -241,7 +241,7 @@ func sendAssetTo(bc *benchConfig, from, to basics.Address, assIdx basics.AssetIn } func payTo(bc *benchConfig, from, to basics.Address, amt uint64) { - tx := createPaymentTransaction(uint64(bc.txnCount), bc.round, from, to, amt) + tx := createPaymentTransaction(bc.txnCount, bc.round, from, to, amt) var stxn transactions.SignedTxn stxn.Txn = tx stxn.Sig = crypto.Signature{1} @@ -390,7 +390,7 @@ func BenchmarkBlockValidationMix(b *testing.B) { func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb float64, numAccts int) { bc := setupEnv(b, numAccts) - numBlocks := uint64(b.N) + numBlocks := basics.Round(b.N) cert := agreement.Certificate{} fmt.Printf("Preparing... /%d: ", numBlocks) s3 := time.Now() @@ -417,7 +417,7 @@ func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb flo } fmt.Printf("\nSummary %d blocks and %d txns: pay %d/blk (%d%%) assets %d/blk (%d%%) apps %d/blk (%d%%)\n", - numBlocks, bc.txnCount, bc.numPay/numBlocks, bc.numPay*100/bc.txnCount, bc.numAst/numBlocks, bc.numAst*100/bc.txnCount, bc.numApp/numBlocks, bc.numApp*100/bc.txnCount) + numBlocks, bc.txnCount, bc.numPay/b.N, bc.numPay*100/bc.txnCount, bc.numAst/b.N, bc.numAst*100/bc.txnCount, bc.numApp/b.N, bc.numApp*100/bc.txnCount) // eval + add all the (valid) blocks to the second ledger, measuring it this time vc := verify.GetMockedCache(true) @@ -433,21 +433,21 @@ func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb flo } func createPaymentTransaction( - counter uint64, - round uint64, + counter int, + round basics.Round, sender basics.Address, receiver basics.Address, amount uint64) (txn transactions.Transaction) { note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) txn = transactions.Transaction{ Type: protocol.PaymentTx, Header: transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisHash: crypto.Digest{1}, Note: note, }, @@ -461,19 +461,19 @@ func createPaymentTransaction( // prepares a create asset transaction func createAssetTransaction( - counter uint64, - round uint64, + counter int, + round basics.Round, sender basics.Address) (assetTx transactions.Transaction) { note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) assetTx = transactions.Transaction{ Type: protocol.AssetConfigTx, Header: transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisHash: crypto.Digest{1}, Note: note, }, @@ -490,22 +490,22 @@ func createAssetTransaction( // prepares a send asset transaction func sendAssetTransaction( - counter uint64, - round uint64, + counter int, + round basics.Round, sender basics.Address, receiver basics.Address, assetID basics.AssetIndex, amt uint64) (tx transactions.Transaction) { note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) tx = transactions.Transaction{ Type: protocol.AssetTransferTx, Header: transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: 
config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisHash: crypto.Digest{1}, Note: note, }, @@ -519,12 +519,10 @@ func sendAssetTransaction( } func makeAppTransaction( - counter uint64, - round uint64, + counter int, + round basics.Round, sender basics.Address) (appTx transactions.Transaction, err error) { - progCounter := uint64(1) - progCounter = counter prog := fmt.Sprintf(`#pragma version 2 // a simple global and local calls counter app byte b64 Y291bnRlcg== // counter @@ -548,7 +546,7 @@ int 1 // increment + app_local_put int 1 -`, progCounter) +`, counter) approvalOps, err := logic.AssembleString(prog) if err != nil { @@ -572,7 +570,7 @@ int 1 appTx.LocalStateSchema = schema note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) appTx.Header = transactions.Header{ Sender: sender, @@ -588,23 +586,23 @@ int 1 // prepares a opt-in app transaction func makeOptInAppTransaction( - counter uint64, + counter int, appIdx basics.AppIndex, - round uint64, + round basics.Round, sender basics.Address) (appTx transactions.Transaction) { note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) appTx = transactions.Transaction{} - appTx.ApplicationID = basics.AppIndex(appIdx) + appTx.ApplicationID = appIdx appTx.OnCompletion = transactions.OptInOC appTx.Header = transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisHash: crypto.Digest{1}, Note: note, } @@ -614,16 +612,16 @@ func makeOptInAppTransaction( // prepare app call transaction func callAppTransaction( - counter uint64, + counter int, appIdx basics.AppIndex, - round uint64, + round basics.Round, sender basics.Address) (appTx transactions.Transaction) { note := make([]byte, 8) - binary.LittleEndian.PutUint64(note, counter) + binary.LittleEndian.PutUint64(note, uint64(counter)) appTx = transactions.Transaction{} - appTx.ApplicationID = basics.AppIndex(appIdx) + appTx.ApplicationID = appIdx appTx.OnCompletion = transactions.NoOpOC appTx.Header = transactions.Header{ diff --git a/ledger/ledger.go b/ledger/ledger.go index 8e0114f222..908548a0b2 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -557,7 +557,7 @@ func (l *Ledger) LookupLatest(addr basics.Address) (basics.AccountData, basics.R // Intentionally apply (pending) rewards up to rnd. 
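// Illustrative sketch, not part of the patch: the recurring proto -> proto.RewardUnit
// change narrows reward helpers to the single consensus field they actually read.
// A simplified model of that shape (ignoring overflow tracking and account status),
// using hypothetical stand-in types rather than the real basics/ledgercore ones:
package example

// MicroAlgos is a stand-in for basics.MicroAlgos.
type MicroAlgos struct{ Raw uint64 }

// rewardUnits mirrors the idea of MicroAlgos.RewardUnits: whole reward units held by the balance.
func rewardUnits(a MicroAlgos, rewardUnit uint64) uint64 {
	return a.Raw / rewardUnit
}

// withUpdatedRewards applies rewards earned between rewardsBase and rewardsLevel.
// Only the reward unit is needed from the consensus parameters, hence the narrowed argument.
func withUpdatedRewards(a MicroAlgos, rewardsBase, rewardsLevel, rewardUnit uint64) MicroAlgos {
	pending := rewardUnits(a, rewardUnit) * (rewardsLevel - rewardsBase)
	return MicroAlgos{Raw: a.Raw + pending}
}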
data, rnd, withoutRewards, err := l.accts.lookupLatest(addr) if err != nil { - return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err + return basics.AccountData{}, 0, basics.MicroAlgos{}, err } return data, rnd, withoutRewards, nil } @@ -579,7 +579,7 @@ func (l *Ledger) LookupAccount(round basics.Round, addr basics.Address) (data le // Intentionally apply (pending) rewards up to rnd, remembering the old value withoutRewards = data.MicroAlgos - data = data.WithUpdatedRewards(config.Consensus[rewardsVersion], rewardsLevel) + data = data.WithUpdatedRewards(config.Consensus[rewardsVersion].RewardUnit, rewardsLevel) return data, rnd, withoutRewards, nil } diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go index d8c698b250..80ecbf227f 100644 --- a/ledger/ledger_perf_test.go +++ b/ledger/ledger_perf_test.go @@ -56,14 +56,14 @@ var testCases map[string]testParams var asaClearStateProgram []byte var asaAppovalProgram []byte -func makeUnsignedApplicationCallTxPerf(appIdx uint64, params testParams, onCompletion transactions.OnCompletion, round int) transactions.Transaction { +func makeUnsignedApplicationCallTxPerf(appIdx basics.AppIndex, params testParams, onCompletion transactions.OnCompletion, round basics.Round) transactions.Transaction { var tx transactions.Transaction tx.Type = protocol.ApplicationCallTx - tx.ApplicationID = basics.AppIndex(appIdx) + tx.ApplicationID = appIdx tx.OnCompletion = onCompletion - tx.Header.FirstValid = basics.Round(round) - tx.Header.LastValid = basics.Round(round + 1000) + tx.Header.FirstValid = round + tx.Header.LastValid = round + 1000 tx.Header.Fee = basics.MicroAlgos{Raw: 1000} // If creating, set programs @@ -81,13 +81,13 @@ func makeUnsignedApplicationCallTxPerf(appIdx uint64, params testParams, onCompl return tx } -func makeUnsignedASATx(appIdx uint64, creator basics.Address, round int) transactions.Transaction { +func makeUnsignedASATx(appIdx basics.AppIndex, creator basics.Address, round basics.Round) transactions.Transaction { var tx transactions.Transaction tx.Type = protocol.ApplicationCallTx - tx.ApplicationID = basics.AppIndex(appIdx) - tx.Header.FirstValid = basics.Round(round) - tx.Header.LastValid = basics.Round(round + 1000) + tx.ApplicationID = appIdx + tx.Header.FirstValid = round + tx.Header.LastValid = round + 1000 tx.Header.Fee = basics.MicroAlgos{Raw: 1000} if appIdx == 0 { @@ -115,12 +115,12 @@ func makeUnsignedASATx(appIdx uint64, creator basics.Address, round int) transac return tx } -func makeUnsignedPaymentTx(sender basics.Address, round int) transactions.Transaction { +func makeUnsignedPaymentTx(sender basics.Address, round basics.Round) transactions.Transaction { return transactions.Transaction{ Type: protocol.PaymentTx, Header: transactions.Header{ - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, Fee: basics.MicroAlgos{Raw: 1000}, }, PaymentTxnFields: transactions.PaymentTxnFields{ @@ -182,13 +182,13 @@ func benchmarkFullBlocks(params testParams, b *testing.B) { blk := genesisInitState.Block - numBlocks := b.N + numBlocks := basics.Round(b.N) cert := agreement.Certificate{} var blocks []bookkeeping.Block - var createdAppIdx uint64 + var createdAppIdx basics.AppIndex var txPerBlock int onCompletion := transactions.OptInOC - for i := 0; i < numBlocks+2; i++ { + for i := range numBlocks + 2 { blk.BlockHeader.Round++ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) blk.BlockHeader.GenesisID = "x" @@ -271,7 
+271,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) { // First block just creates app + opts in accts if asa test if i == 1 { onCompletion = transactions.NoOpOC - createdAppIdx = eval.TestingTxnCounter() + createdAppIdx = basics.AppIndex(eval.TestingTxnCounter()) // On first block, opt in all accts to asa (accts is empty if not asa test) k := 0 diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 633fc4397d..8c6bdf7b7e 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -29,6 +29,7 @@ import ( "sort" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/agreement" @@ -132,7 +133,7 @@ func makeNewEmptyBlock(t *testing.T, l *Ledger, GenesisID string, initAccounts m require.NotNil(t, initAccounts) for _, acctdata := range initAccounts { if acctdata.Status != basics.NotParticipating { - totalRewardUnits += acctdata.MicroAlgos.RewardUnits(proto) + totalRewardUnits += acctdata.MicroAlgos.RewardUnits(proto.RewardUnit) } } } else { @@ -159,6 +160,9 @@ func makeNewEmptyBlock(t *testing.T, l *Ledger, GenesisID string, initAccounts m if proto.Payouts.Enabled { blk.BlockHeader.Proposer = basics.Address{0x01} // Must be set to _something_. } + if proto.EnableSha512BlockHash { + blk.BlockHeader.Branch512 = lastBlock.Hash512() + } blk.TxnCommitments, err = blk.PaysetCommit() require.NoError(t, err) @@ -241,163 +245,195 @@ func TestLedgerBasic(t *testing.T) { func TestLedgerBlockHeaders(t *testing.T) { partitiontest.PartitionTest(t) - a := require.New(t) - - genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100) - const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true - l, err := OpenLedger(logging.Base(), t.Name(), inMem, genesisInitState, cfg) - a.NoError(err, "could not open ledger") - defer l.Close() - - lastBlock, err := l.Block(l.Latest()) - a.NoError(err, "could not get last block") + a := assert.New(t) + + for _, cv := range []protocol.ConsensusVersion{ + protocol.ConsensusV25, // some oldish version to test against backward compatibility + protocol.ConsensusCurrentVersion, + protocol.ConsensusFuture, + } { + genesisInitState, _ := ledgertesting.GenerateInitState(t, cv, 100) + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + l, err := OpenLedger(logging.Base(), t.Name()+string(cv), inMem, genesisInitState, cfg) + a.NoError(err, "could not open ledger") + defer l.Close() + + lastBlock, err := l.Block(l.Latest()) + a.NoError(err, "could not get last block") + + proto := config.Consensus[genesisInitState.Block.CurrentProtocol] + poolAddr := testPoolAddr + var totalRewardUnits uint64 + for _, acctdata := range genesisInitState.Accounts { + totalRewardUnits += acctdata.MicroAlgos.RewardUnits(proto.RewardUnit) + } + poolBal, _, _, err := l.LookupLatest(poolAddr) + a.NoError(err, "could not get incentive pool balance") + + correctHeader := bookkeeping.BlockHeader{ + Round: l.Latest() + 1, + Branch: lastBlock.Hash(), + // Seed: does not matter, + Bonus: bookkeeping.NextBonus(lastBlock.BlockHeader, &proto), + TimeStamp: 0, + GenesisID: t.Name(), + RewardsState: lastBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()), + UpgradeState: lastBlock.UpgradeState, + // UpgradeVote: empty, + } + if proto.Payouts.Enabled { + correctHeader.Proposer = basics.Address{0x01} // Must be set to _something_. 
+ } + + emptyBlock := bookkeeping.Block{ + BlockHeader: correctHeader, + } + correctHeader.TxnCommitments, err = emptyBlock.PaysetCommit() + require.NoError(t, err) - proto := config.Consensus[protocol.ConsensusCurrentVersion] - poolAddr := testPoolAddr - var totalRewardUnits uint64 - for _, acctdata := range genesisInitState.Accounts { - totalRewardUnits += acctdata.MicroAlgos.RewardUnits(proto) - } - poolBal, _, _, err := l.LookupLatest(poolAddr) - a.NoError(err, "could not get incentive pool balance") + correctHeader.RewardsPool = testPoolAddr + correctHeader.FeeSink = testSinkAddr - correctHeader := bookkeeping.BlockHeader{ - Round: l.Latest() + 1, - Branch: lastBlock.Hash(), - // Seed: does not matter, - Bonus: bookkeeping.NextBonus(lastBlock.BlockHeader, &proto), - Proposer: basics.Address{0x01}, // Must be set to _something_. - TimeStamp: 0, - GenesisID: t.Name(), - RewardsState: lastBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()), - UpgradeState: lastBlock.UpgradeState, - // UpgradeVote: empty, - } - - emptyBlock := bookkeeping.Block{ - BlockHeader: correctHeader, - } - correctHeader.TxnCommitments, err = emptyBlock.PaysetCommit() - require.NoError(t, err) + if proto.SupportGenesisHash { + correctHeader.GenesisHash = crypto.Hash([]byte(t.Name())) + } + if proto.EnableSha512BlockHash { + correctHeader.Branch512 = lastBlock.Hash512() + } - correctHeader.RewardsPool = testPoolAddr - correctHeader.FeeSink = testSinkAddr + initNextBlockHeader(&correctHeader, lastBlock, proto) - if proto.SupportGenesisHash { - correctHeader.GenesisHash = crypto.Hash([]byte(t.Name())) - } + var badBlock bookkeeping.Block - initNextBlockHeader(&correctHeader, lastBlock, proto) + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Round++ + a.ErrorContains(l.appendUnvalidated(badBlock), "ledger does not have entry") - var badBlock bookkeeping.Block + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Round-- + a.ErrorIs(l.appendUnvalidated(badBlock), eval.ErrRoundZero) - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.Round++ - a.ErrorContains(l.appendUnvalidated(badBlock), "ledger does not have entry") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Round = 0 + a.ErrorIs(l.appendUnvalidated(badBlock), eval.ErrRoundZero) - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.Round-- - a.ErrorIs(l.appendUnvalidated(badBlock), eval.ErrRoundZero) + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.GenesisID = "" + a.ErrorContains(l.appendUnvalidated(badBlock), "genesis ID missing") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.Round = 0 - a.ErrorIs(l.appendUnvalidated(badBlock), eval.ErrRoundZero) + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.GenesisID = "incorrect" + a.ErrorContains(l.appendUnvalidated(badBlock), "genesis ID mismatch") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.GenesisID = "" - a.ErrorContains(l.appendUnvalidated(badBlock), "genesis ID missing") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.UpgradePropose = "invalid" + a.ErrorContains(l.appendUnvalidated(badBlock), "proposed upgrade wait rounds 0") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.GenesisID = "incorrect" - 
a.ErrorContains(l.appendUnvalidated(badBlock), "genesis ID mismatch") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.UpgradePropose = "invalid" + badBlock.BlockHeader.UpgradeDelay = 20000 + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.UpgradePropose = "invalid" - a.ErrorContains(l.appendUnvalidated(badBlock), "proposed upgrade wait rounds 0") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.UpgradeApprove = true + a.ErrorContains(l.appendUnvalidated(badBlock), "approval without an active proposal") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.UpgradePropose = "invalid" - badBlock.BlockHeader.UpgradeDelay = 20000 - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.CurrentProtocol = "incorrect" + a.ErrorContains(l.appendUnvalidated(badBlock), "protocol not supported") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.UpgradeApprove = true - a.ErrorContains(l.appendUnvalidated(badBlock), "approval without an active proposal") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.CurrentProtocol = "" + a.ErrorContains(l.appendUnvalidated(badBlock), "protocol not supported", "header with empty current protocol") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.CurrentProtocol = "incorrect" - a.ErrorContains(l.appendUnvalidated(badBlock), "protocol not supported") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + var wrongVersion protocol.ConsensusVersion + for ver := range config.Consensus { + if ver != correctHeader.CurrentProtocol { + wrongVersion = ver + break + } + } + a.NotEmpty(wrongVersion) + badBlock.BlockHeader.CurrentProtocol = wrongVersion + // Handle Branch512 field mismatch between correctHeader and wrongVersion's expectations + // We want to set the Branch512 header to match wrongVersion so that PreCheck will reach + // the intended "UpgradeState mismatch" error, which happens after the Branch512 check. 
+ if !proto.EnableSha512BlockHash && config.Consensus[wrongVersion].EnableSha512BlockHash { + // correctHeader has empty Branch512, but wrongVersion expects it during validation + badBlock.BlockHeader.Branch512 = lastBlock.Hash512() + } else if proto.EnableSha512BlockHash && !config.Consensus[wrongVersion].EnableSha512BlockHash { + // correctHeader has non-zero Branch512, but wrongVersion doesn't support it + badBlock.BlockHeader.Branch512 = crypto.Sha512Digest{} + } + // Otherwise, Branch512 is already correct (both support or both don't support SHA512) + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.CurrentProtocol = "" - a.ErrorContains(l.appendUnvalidated(badBlock), "protocol not supported", "header with empty current protocol") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.NextProtocol = "incorrect" + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - var wrongVersion protocol.ConsensusVersion - for ver := range config.Consensus { - if ver != correctHeader.CurrentProtocol { - wrongVersion = ver - break - } - } - a.NotEmpty(wrongVersion) - badBlock.BlockHeader.CurrentProtocol = wrongVersion - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.NextProtocolApprovals++ + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect number of upgrade approvals") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.NextProtocol = "incorrect" - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.NextProtocolVoteBefore++ + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol vote deadline") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.NextProtocolApprovals++ - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect number of upgrade approvals") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.NextProtocolSwitchOn++ + a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol switch round") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.NextProtocolVoteBefore++ - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol vote deadline") + // TODO test upgrade cases with a valid upgrade in progress - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.NextProtocolSwitchOn++ - a.ErrorContains(l.appendUnvalidated(badBlock), "UpgradeState mismatch", "added block header with incorrect next protocol switch round") + // TODO test timestamp bounds - // TODO test upgrade cases with a valid upgrade in progress + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Branch = bookkeeping.BlockHash{} + a.ErrorContains(l.appendUnvalidated(badBlock), "block branch incorrect") - // TODO test timestamp bounds + badBlock 
= bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Branch[0]++ + a.ErrorContains(l.appendUnvalidated(badBlock), "block branch incorrect") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.Branch = bookkeeping.BlockHash{} - a.ErrorContains(l.appendUnvalidated(badBlock), "block branch incorrect") + if proto.EnableSha512BlockHash { + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Branch512 = crypto.Sha512Digest{} + a.ErrorContains(l.appendUnvalidated(badBlock), "block branch512 incorrect") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.Branch[0]++ - a.ErrorContains(l.appendUnvalidated(badBlock), "block branch incorrect") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.Branch512[0]++ + a.ErrorContains(l.appendUnvalidated(badBlock), "block branch512 incorrect") + } - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.RewardsLevel++ - a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.RewardsLevel++ + a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.RewardsRate++ - a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.RewardsRate++ + a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.RewardsResidue++ - a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.RewardsResidue++ + a.ErrorContains(l.appendUnvalidated(badBlock), "bad rewards state") - // TODO test rewards cases with changing poolAddr money, with changing round, and with changing total reward units + // TODO test rewards cases with changing poolAddr money, with changing round, and with changing total reward units - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.TxnCommitments.NativeSha512_256Commitment = crypto.Hash([]byte{0}) - a.ErrorContains(l.appendUnvalidated(badBlock), "txn root wrong") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.TxnCommitments.NativeSha512_256Commitment = crypto.Hash([]byte{0}) + a.ErrorContains(l.appendUnvalidated(badBlock), "txn root wrong") - badBlock = bookkeeping.Block{BlockHeader: correctHeader} - badBlock.BlockHeader.TxnCommitments.NativeSha512_256Commitment[0]++ - a.ErrorContains(l.appendUnvalidated(badBlock), "txn root wrong") + badBlock = bookkeeping.Block{BlockHeader: correctHeader} + badBlock.BlockHeader.TxnCommitments.NativeSha512_256Commitment[0]++ + a.ErrorContains(l.appendUnvalidated(badBlock), "txn root wrong") - correctBlock := bookkeeping.Block{BlockHeader: correctHeader} - a.NoError(l.appendUnvalidated(correctBlock), "could not add block with correct header") + correctBlock := bookkeeping.Block{BlockHeader: correctHeader} + a.NoError(l.appendUnvalidated(correctBlock), "could not add block with correct header") + } } func TestLedgerSingleTx(t *testing.T) { @@ -1272,7 +1308,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion var totalRewardUnits uint64 for _, acctdata := range initAccounts { - totalRewardUnits += 
acctdata.MicroAlgos.RewardUnits(proto) + totalRewardUnits += acctdata.MicroAlgos.RewardUnits(proto.RewardUnit) } poolBal, _, _, err := l.LookupLatest(testPoolAddr) a.NoError(err, "could not get incentive pool balance") @@ -1296,6 +1332,9 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion if proto.SupportGenesisHash { correctHeader.GenesisHash = crypto.Hash([]byte(t.Name())) } + if proto.EnableSha512BlockHash { + correctHeader.Branch512 = lastBlock.Hash512() + } initNextBlockHeader(&correctHeader, lastBlock, proto) diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go index 2136baafe5..5d11b7563e 100644 --- a/ledger/ledgercore/accountdata.go +++ b/ledger/ledgercore/accountdata.go @@ -113,9 +113,9 @@ func AssignAccountData(a *basics.AccountData, acct AccountData) { } // WithUpdatedRewards calls basics account data WithUpdatedRewards -func (u AccountData) WithUpdatedRewards(proto config.ConsensusParams, rewardsLevel uint64) AccountData { +func (u AccountData) WithUpdatedRewards(rewardUnit uint64, rewardsLevel uint64) AccountData { u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase = basics.WithUpdatedRewards( - proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, + rewardUnit, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, ) return u } @@ -149,10 +149,10 @@ func (u AccountData) LastSeen() basics.Round { // storage the account is allowed to store on disk. func (u AccountData) MinBalance(proto *config.ConsensusParams) basics.MicroAlgos { return basics.MinBalance( - proto, - uint64(u.TotalAssets), + proto.BalanceRequirements(), + u.TotalAssets, u.TotalAppSchema, - uint64(u.TotalAppParams), uint64(u.TotalAppLocalStates), + u.TotalAppParams, u.TotalAppLocalStates, uint64(u.TotalExtraAppPages), u.TotalBoxes, u.TotalBoxBytes, ) @@ -173,20 +173,20 @@ func (u AccountData) IsZero() bool { } // Money is similar to basics account data Money function -func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money basics.MicroAlgos, rewards basics.MicroAlgos) { - e := u.WithUpdatedRewards(proto, rewardsLevel) +func (u AccountData) Money(rewardUnit uint64, rewardsLevel uint64) (money basics.MicroAlgos, rewards basics.MicroAlgos) { + e := u.WithUpdatedRewards(rewardUnit, rewardsLevel) return e.MicroAlgos, e.RewardedMicroAlgos } // OnlineAccountData calculates the online account data given an AccountData, by adding the rewards. 
-func (u AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) basics.OnlineAccountData { +func (u AccountData) OnlineAccountData(rewardUnit uint64, rewardsLevel uint64) basics.OnlineAccountData { if u.Status != basics.Online { // if the account is not Online and agreement requests it for some reason, clear it out return basics.OnlineAccountData{} } microAlgos, _, _ := basics.WithUpdatedRewards( - proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, + rewardUnit, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel, ) return basics.OnlineAccountData{ MicroAlgosWithRewards: microAlgos, @@ -198,6 +198,6 @@ func (u AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLeve } // NormalizedOnlineBalance wraps basics.NormalizedOnlineAccountBalance -func (u *AccountData) NormalizedOnlineBalance(genesisProto config.ConsensusParams) uint64 { - return basics.NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, genesisProto) +func (u *AccountData) NormalizedOnlineBalance(rewardUnit uint64) uint64 { + return basics.NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, rewardUnit) } diff --git a/ledger/ledgercore/totals.go b/ledger/ledgercore/totals.go index 1407098b15..31ac5a44dc 100644 --- a/ledger/ledgercore/totals.go +++ b/ledger/ledgercore/totals.go @@ -17,7 +17,6 @@ package ledgercore import ( - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -79,19 +78,19 @@ func (at *AccountTotals) statusField(status basics.Status) *AlgoCount { } // AddAccount adds an account algos from the total money -func (at *AccountTotals) AddAccount(proto config.ConsensusParams, data AccountData, ot *basics.OverflowTracker) { +func (at *AccountTotals) AddAccount(rewardUnit uint64, data AccountData, ot *basics.OverflowTracker) { sum := at.statusField(data.Status) - algos, _ := data.Money(proto, at.RewardsLevel) + algos, _ := data.Money(rewardUnit, at.RewardsLevel) sum.Money = ot.AddA(sum.Money, algos) - sum.RewardUnits = ot.Add(sum.RewardUnits, data.MicroAlgos.RewardUnits(proto)) + sum.RewardUnits = ot.Add(sum.RewardUnits, data.MicroAlgos.RewardUnits(rewardUnit)) } // DelAccount removes an account algos from the total money -func (at *AccountTotals) DelAccount(proto config.ConsensusParams, data AccountData, ot *basics.OverflowTracker) { +func (at *AccountTotals) DelAccount(rewardUnit uint64, data AccountData, ot *basics.OverflowTracker) { sum := at.statusField(data.Status) - algos, _ := data.Money(proto, at.RewardsLevel) + algos, _ := data.Money(rewardUnit, at.RewardsLevel) sum.Money = ot.SubA(sum.Money, algos) - sum.RewardUnits = ot.Sub(sum.RewardUnits, data.MicroAlgos.RewardUnits(proto)) + sum.RewardUnits = ot.Sub(sum.RewardUnits, data.MicroAlgos.RewardUnits(rewardUnit)) } // ApplyRewards adds the reward to the account totals based on the new rewards level diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go index 048f9cbf80..05ce84f91d 100644 --- a/ledger/ledgercore/votersForRound.go +++ b/ledger/ledgercore/votersForRound.go @@ -136,7 +136,7 @@ func (tr *VotersForRound) LoadTree(onlineAccountsFetcher OnlineAccountsFetcher, for i, acct := range top { var ot basics.OverflowTracker - rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel) + rewards := basics.PendingRewards(&ot, tr.Proto.RewardUnit, 
acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel) money := ot.AddA(acct.MicroAlgos, rewards) if ot.Overflowed { return fmt.Errorf("votersTracker.LoadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards) diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go index c2b1b64726..acd3ad232a 100644 --- a/ledger/simulation/simulation_eval_test.go +++ b/ledger/simulation/simulation_eval_test.go @@ -136,7 +136,7 @@ func validateSimulationResult(t *testing.T, result simulation.Result) { } } -const ignoreAppBudgetConsumed = math.MaxUint64 +const ignoreAppBudgetConsumed = math.MaxInt func simulationTest(t *testing.T, f func(env simulationtesting.Environment) simulationTestCase) { t.Helper() @@ -174,7 +174,7 @@ func runSimulationTestCase(t *testing.T, env simulationtesting.Environment, test if testcase.expected.TxnGroups[i].AppBudgetConsumed == ignoreAppBudgetConsumed { // This test does not care about the app budget consumed. Replace it with the actual value. // But let's still ensure it's the sum of budgets consumed in this group. - var sum uint64 + var sum int for _, txn := range actual.TxnGroups[i].Txns { sum += txn.AppBudgetConsumed } @@ -566,7 +566,7 @@ btoi`) name string arguments [][]byte expectedError string - cost uint64 + cost int }{ { name: "approval", @@ -623,7 +623,7 @@ int 1`, expectedSuccess := len(testCase.expectedError) == 0 var expectedAppCallAD transactions.ApplyData expectedFailedAt := simulation.TxnPath{1} - var AppBudgetConsumed, AppBudgetAdded uint64 + var AppBudgetConsumed, AppBudgetAdded int if expectedSuccess { expectedAppCallAD = transactions.ApplyData{ ApplicationID: 1002, @@ -1219,7 +1219,7 @@ func TestAppCallWithExtraBudget(t *testing.T) { signedCreateTxn := createTxn.Txn().Sign(sender.Sk) signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk) - extraOpcodeBudget := uint64(100) + extraOpcodeBudget := 100 return simulationTestCase{ input: simulation.Request{ @@ -1293,7 +1293,7 @@ func TestAppCallWithExtraBudgetReturningPC(t *testing.T) { signedCreateTxn := createTxn.Txn().Sign(sender.Sk) signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk) - extraOpcodeBudget := uint64(100) + extraOpcodeBudget := 100 commonLeadingSteps := []simulation.OpcodeTraceUnit{ {PC: 1}, {PC: 4}, {PC: 6}, @@ -1308,7 +1308,7 @@ func TestAppCallWithExtraBudgetReturningPC(t *testing.T) { secondTrace := make([]simulation.OpcodeTraceUnit, len(commonLeadingSteps)) copy(secondTrace, commonLeadingSteps[:]) for i := 9; i <= 1409; i++ { - secondTrace = append(secondTrace, simulation.OpcodeTraceUnit{PC: uint64(i)}) + secondTrace = append(secondTrace, simulation.OpcodeTraceUnit{PC: i}) } return simulationTestCase{ @@ -1396,7 +1396,7 @@ func TestAppCallWithExtraBudgetOverBudget(t *testing.T) { signedCreateTxn := createTxn.Txn().Sign(sender.Sk) signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk) // Add a small bit of extra budget, but not enough - extraBudget := uint64(5) + extraBudget := 5 return simulationTestCase{ input: simulation.Request{ @@ -1832,7 +1832,7 @@ int 1` signedCreateTxn := createTxn.Txn().Sign(sender.Sk) signedCallsABunchLogs := callsABunchLogs.Txn().Sign(sender.Sk) - expectedMaxLogCalls, expectedMaxLogSize := uint64(2048), uint64(65536) + expectedMaxLogCalls, expectedMaxLogSize := 2048, 65536 expectedLog := make([]string, LogTimes) for i := 0; i < LogTimes; i++ { expectedLog[i] = LogLongLine @@ -1924,7 +1924,7 @@ int 1` signedCreateTxn := createTxn.Txn().Sign(sender.Sk) signedCallsABunchLogs := 
callsABunchLogs.Txn().Sign(sender.Sk) - expectedMaxLogCalls, expectedMaxLogSize := uint64(2048), uint64(65536) + expectedMaxLogCalls, expectedMaxLogSize := 2048, 65536 actualLogTimes := 65536 / len(LogLongLine) expectedLog := make([]string, actualLogTimes) for i := 0; i < actualLogTimes; i++ { @@ -2647,7 +2647,7 @@ byte "hello"; log; int 1`, }) } -func TestFailingLogicSigPCandStack(t *testing.T) { +func TestInvalidLogicSigPCandStack(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -2768,7 +2768,7 @@ byte "hello"; log; int 1`, }) } -func TestFailingApp(t *testing.T) { +func TestInvalidApp(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -4448,7 +4448,7 @@ app_local_put } } -func TestGlobalStateTypeChangeFailure(t *testing.T) { +func TestGlobalStateTypeChangeErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -4827,7 +4827,7 @@ int 1`, for i, boxOp := range testcase.boxOpsForSimulate { txnResults[i] = boxOpToSimResult(boxOp) } - totalConsumed := uint64(0) + totalConsumed := 0 for _, txnResult := range txnResults { totalConsumed += txnResult.AppBudgetConsumed } @@ -4871,7 +4871,7 @@ int 1`, TxnGroups: []simulation.TxnGroupResult{ { Txns: txnResults, - AppBudgetAdded: 700 * uint64(len(txnResults)), + AppBudgetAdded: 700 * len(txnResults), AppBudgetConsumed: totalConsumed, }, }, @@ -5174,7 +5174,7 @@ int 1 for i, boxOp := range testcase.boxOpsForSimulate { txnResults[i] = boxOpToSimResult(boxOp) } - totalConsumed := uint64(0) + totalConsumed := 0 for _, txnResult := range txnResults { totalConsumed += txnResult.AppBudgetConsumed } @@ -5218,7 +5218,7 @@ int 1 TxnGroups: []simulation.TxnGroupResult{ { Txns: txnResults, - AppBudgetAdded: 700 * uint64(len(txnResults)), + AppBudgetAdded: 700 * len(txnResults), AppBudgetConsumed: totalConsumed, }, }, @@ -5472,7 +5472,7 @@ int 1`, newlyCreatedGlobalKeySet.Add(string(txnArgs[1])) } - totalConsumed := uint64(0) + totalConsumed := 0 for _, txnResult := range txnResults { totalConsumed += txnResult.AppBudgetConsumed } @@ -5498,7 +5498,7 @@ int 1`, TxnGroups: []simulation.TxnGroupResult{ { Txns: txnResults, - AppBudgetAdded: 700 * uint64(len(txnResults)), + AppBudgetAdded: 700 * len(txnResults), AppBudgetConsumed: totalConsumed, }, }, @@ -5839,7 +5839,7 @@ int 1`, newlyCreatedLocalStates[acctAddress].Add(string(instruction.appArgs[1])) } - totalConsumed := uint64(0) + totalConsumed := 0 for _, txnResult := range txnResults { totalConsumed += txnResult.AppBudgetConsumed } @@ -5870,7 +5870,7 @@ int 1`, TxnGroups: []simulation.TxnGroupResult{ { Txns: txnResults, - AppBudgetAdded: 700 * uint64(len(txnResults)), + AppBudgetAdded: 700 * len(txnResults), AppBudgetConsumed: totalConsumed, }, }, @@ -6151,7 +6151,7 @@ int 1`, }, } - totalConsumed := uint64(0) + totalConsumed := 0 for _, txnResult := range txnResults { totalConsumed += txnResult.AppBudgetConsumed } @@ -6177,7 +6177,7 @@ int 1`, TxnGroups: []simulation.TxnGroupResult{ { Txns: txnResults, - AppBudgetAdded: 700 * uint64(len(txnResults)), + AppBudgetAdded: 700 * len(txnResults), AppBudgetConsumed: totalConsumed, }, }, @@ -6561,7 +6561,7 @@ func makeProgramToCallInner(t *testing.T, program string) string { return wrapCodeWithVersionAndReturn(itxnSubmitCode) } -func TestAppCallInnerTxnApplyDataOnFail(t *testing.T) { +func TestAppCallInnerTxnApplyDataOnErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -6671,7 +6671,7 @@ byte "finished asset create" log ` -func TestNonAppCallInnerTxnApplyDataOnFail(t *testing.T) { +func 
TestNonAppCallInnerTxnApplyDataOnErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -6769,7 +6769,7 @@ byte "finished asset config" log ` -func TestInnerTxnNonAppCallFailure(t *testing.T) { +func TestInnerTxnNonAppCallErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { @@ -7170,7 +7170,7 @@ func TestUnnamedResources(t *testing.T) { UnnamedResourcesAccessed: expectedUnnamedResourceTxnAssignment, }, }, - AppBudgetAdded: 700 + 700*uint64(innerCount), + AppBudgetAdded: 700 + 700*innerCount, AppBudgetConsumed: ignoreAppBudgetConsumed, UnnamedResourcesAccessed: expectedUnnamedResourceGroupAssignment, }, @@ -7653,7 +7653,7 @@ func testUnnamedBoxOperations(t *testing.T, env simulationtesting.Environment, a var failedAt simulation.TxnPath if expected.FailureMessage != "" { - failedAt = simulation.TxnPath{uint64(expected.FailingIndex)} + failedAt = simulation.TxnPath{expected.FailingIndex} } proto := env.TxnInfo.CurrentProtocolParams() @@ -7686,7 +7686,7 @@ func testUnnamedBoxOperations(t *testing.T, env simulationtesting.Environment, a TxnGroups: []simulation.TxnGroupResult{ { Txns: expectedTxnResults, - AppBudgetAdded: uint64(700 * len(boxOps)), + AppBudgetAdded: 700 * len(boxOps), AppBudgetConsumed: ignoreAppBudgetConsumed, UnnamedResourcesAccessed: expectedUnnamedResources, FailedAt: failedAt, @@ -8455,7 +8455,7 @@ func boxNamesToRefs(app basics.AppIndex, names []string) []logic.BoxRef { return refs } -func testUnnamedResourceLimits(t *testing.T, env simulationtesting.Environment, appVersion int, app basics.AppIndex, resources unnamedResourceArguments, otherTxns []txntest.Txn, extraBudget uint64, expectedError string) { +func testUnnamedResourceLimits(t *testing.T, env simulationtesting.Environment, appVersion int, app basics.AppIndex, resources unnamedResourceArguments, otherTxns []txntest.Txn, extraBudget int, expectedError string) { t.Helper() maxGroupSize := env.TxnInfo.CurrentProtocolParams().MaxTxGroupSize txns := make([]*txntest.Txn, maxGroupSize) @@ -8574,7 +8574,7 @@ func testUnnamedResourceLimits(t *testing.T, env simulationtesting.Environment, TxnGroups: []simulation.TxnGroupResult{ { Txns: expectedTxnResults, - AppBudgetAdded: uint64(700) + extraBudget, + AppBudgetAdded: 700 + extraBudget, AppBudgetConsumed: ignoreAppBudgetConsumed, UnnamedResourcesAccessed: expectedGroupResources, FailedAt: failedAt, @@ -8952,7 +8952,7 @@ func TestFixSigners(t *testing.T) { innerProgram := fmt.Sprintf(`#pragma version 9 txn ApplicationID bz end - + // Rekey to the the innerRekeyAddr itxn_begin int pay @@ -8962,7 +8962,7 @@ func TestFixSigners(t *testing.T) { addr %s itxn_field RekeyTo itxn_submit - + end: int 1 `, innerRekeyAddr) diff --git a/ledger/simulation/simulator.go b/ledger/simulation/simulator.go index 758275528f..47e4f4b819 100644 --- a/ledger/simulation/simulator.go +++ b/ledger/simulation/simulator.go @@ -39,7 +39,7 @@ type Request struct { AllowEmptySignatures bool AllowMoreLogging bool AllowUnnamedResources bool - ExtraOpcodeBudget uint64 + ExtraOpcodeBudget int TraceConfig ExecTraceConfig FixSigners bool } @@ -321,7 +321,7 @@ func (s Simulator) Simulate(simulateRequest Request) (Result, error) { return Result{}, InvalidRequestError{SimulatorError{err}} } simulatorTracer.result.TxnGroups[0].FailureMessage = verifyError.Error() - simulatorTracer.result.TxnGroups[0].FailedAt = TxnPath{uint64(verifyError.GroupIndex)} + simulatorTracer.result.TxnGroups[0].FailedAt = 
TxnPath{verifyError.GroupIndex} case errors.As(err, &EvalFailureError{}): simulatorTracer.result.TxnGroups[0].FailureMessage = err.Error() simulatorTracer.result.TxnGroups[0].FailedAt = simulatorTracer.failedAt @@ -350,7 +350,7 @@ func (s Simulator) Simulate(simulateRequest Request) (Result, error) { simulatorTracer.result.Block = block // Update total cost by aggregating individual txn costs - totalCost := uint64(0) + totalCost := 0 for _, txn := range simulatorTracer.result.TxnGroups[0].Txns { totalCost += txn.AppBudgetConsumed } diff --git a/ledger/simulation/trace.go b/ledger/simulation/trace.go index 055fc2da0f..7808be54d9 100644 --- a/ledger/simulation/trace.go +++ b/ledger/simulation/trace.go @@ -19,7 +19,7 @@ package simulation import ( "fmt" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" @@ -29,13 +29,13 @@ import ( // TxnPath is a "transaction path": e.g. [0, 0, 1] means the second inner txn of the first inner txn of the first txn. // You can use this transaction path to find the txn data in the `TxnResults` list. -type TxnPath []uint64 +type TxnPath []int // TxnResult contains the simulation result for a single transaction type TxnResult struct { Txn transactions.SignedTxnWithAD - AppBudgetConsumed uint64 - LogicSigBudgetConsumed uint64 + AppBudgetConsumed int + LogicSigBudgetConsumed int Trace *TransactionTrace // UnnamedResourcesAccessed is present if all of the following are true: @@ -60,9 +60,9 @@ type TxnGroupResult struct { // FailedAt is the path to the txn that failed inside of this group FailedAt TxnPath // AppBudgetAdded is the total opcode budget for this group - AppBudgetAdded uint64 + AppBudgetAdded int // AppBudgetConsumed is the total opcode cost used for this group - AppBudgetConsumed uint64 + AppBudgetConsumed int // UnnamedResourcesAccessed will be present if AllowUnnamedResources is true. 
In that case, it // will be populated with the unnamed resources accessed by this transaction group from @@ -91,24 +91,24 @@ const ResultLatestVersion = uint64(2) type ResultEvalOverrides struct { AllowEmptySignatures bool AllowUnnamedResources bool - MaxLogCalls *uint64 - MaxLogSize *uint64 - ExtraOpcodeBudget uint64 + MaxLogCalls *int + MaxLogSize *int + ExtraOpcodeBudget int FixSigners bool } // LogBytesLimit hardcode limit of how much bytes one can log per transaction during simulation (with AllowMoreLogging) -const LogBytesLimit = uint64(65536) +const LogBytesLimit = 65536 // MaxExtraOpcodeBudget hardcode limit of how much extra budget one can add to one transaction group (which is group-size * logic-sig-budget) -const MaxExtraOpcodeBudget = uint64(20000 * 16) +const MaxExtraOpcodeBudget = 20000 * 16 // AllowMoreLogging method modify the log limits from lift option: // - if lift log limits, then overload result from local Config // - otherwise, set `LogLimits` field to be nil func (eo ResultEvalOverrides) AllowMoreLogging(allow bool) ResultEvalOverrides { if allow { - maxLogCalls, maxLogSize := uint64(config.MaxLogCalls), LogBytesLimit + maxLogCalls, maxLogSize := bounds.MaxLogCalls, LogBytesLimit eo.MaxLogCalls = &maxLogCalls eo.MaxLogSize = &maxLogSize } @@ -231,7 +231,7 @@ func makeSimulationResult(lastRound basics.Round, request Request, developerAPI // ScratchChange represents a write operation into a scratch slot type ScratchChange struct { // Slot stands for the scratch slot id get written to - Slot uint64 + Slot int // NewValue is the stack value written to scratch slot NewValue basics.TealValue @@ -263,7 +263,7 @@ type StateOperation struct { // OpcodeTraceUnit contains the trace effects of a single opcode evaluation. type OpcodeTraceUnit struct { // The PC of the opcode being evaluated - PC uint64 + PC int // SpawnedInners contains the indexes of traces for inner transactions spawned by this opcode, // if any. These indexes refer to the InnerTraces array of the TransactionTrace object containing @@ -274,7 +274,7 @@ type OpcodeTraceUnit struct { StackAdded []basics.TealValue // deleted element number from stack - StackPopCount uint64 + StackPopCount int // ScratchSlotChanges stands for write operations into scratch slots ScratchSlotChanges []ScratchChange diff --git a/ledger/simulation/tracer.go b/ledger/simulation/tracer.go index 072f6433e7..ea03718f0b 100644 --- a/ledger/simulation/tracer.go +++ b/ledger/simulation/tracer.go @@ -61,9 +61,9 @@ func (tracer *cursorEvalTracer) AfterTxnGroup(ep *logic.EvalParams, deltas *ledg func (tracer *cursorEvalTracer) absolutePath() TxnPath { path := make(TxnPath, len(tracer.relativeCursor)) for i, relativeGroupIndex := range tracer.relativeCursor { - absoluteIndex := uint64(relativeGroupIndex) + absoluteIndex := relativeGroupIndex if i > 0 { - absoluteIndex += uint64(tracer.previousInnerTxns[i-1]) + absoluteIndex += tracer.previousInnerTxns[i-1] } path[i] = absoluteIndex } @@ -97,7 +97,7 @@ type evalTracer struct { // scratchSlots are the scratch slots changed on current opcode (currently either `store` or `stores`). // NOTE: this field scratchSlots is used only for scratch change exposure. 
- scratchSlots []uint64 + scratchSlots []int groups [][]transactions.SignedTxnWithAD } @@ -126,7 +126,7 @@ func (tracer *evalTracer) getApplyDataAtPath(path TxnPath) (*transactions.ApplyD for _, index := range path[1:] { innerTxns := applyDataCursor.EvalDelta.InnerTxns - if index >= uint64(len(innerTxns)) { + if index >= len(innerTxns) { return nil, fmt.Errorf("simulator debugger error: index %d out of range with length %d. Full path: %v", index, len(innerTxns), path) } applyDataCursor = &innerTxns[index].ApplyData @@ -153,19 +153,19 @@ func (tracer *evalTracer) BeforeTxnGroup(ep *logic.EvalParams) { if ep.GetCaller() != nil { // If this is an inner txn group, save the txns tracer.populateInnerTransactions(ep.TxnGroup) - tracer.result.TxnGroups[0].AppBudgetAdded += uint64(ep.Proto.MaxAppProgramCost) + tracer.result.TxnGroups[0].AppBudgetAdded += ep.Proto.MaxAppProgramCost } tracer.cursorEvalTracer.BeforeTxnGroup(ep) // Currently only supports one (first) txn group if ep.PooledApplicationBudget != nil && tracer.result.TxnGroups[0].AppBudgetAdded == 0 { - tracer.result.TxnGroups[0].AppBudgetAdded = uint64(*ep.PooledApplicationBudget) + tracer.result.TxnGroups[0].AppBudgetAdded = *ep.PooledApplicationBudget } // Override transaction group budget if specified in request, retrieve from tracer.result if ep.PooledApplicationBudget != nil { tracer.result.TxnGroups[0].AppBudgetAdded += tracer.result.EvalOverrides.ExtraOpcodeBudget - *ep.PooledApplicationBudget += int(tracer.result.EvalOverrides.ExtraOpcodeBudget) + *ep.PooledApplicationBudget += tracer.result.EvalOverrides.ExtraOpcodeBudget } if ep.GetCaller() == nil { @@ -270,12 +270,12 @@ func (tracer *evalTracer) saveEvalDelta(evalDelta transactions.EvalDelta, appIDT } func (tracer *evalTracer) makeOpcodeTraceUnit(cx *logic.EvalContext) OpcodeTraceUnit { - return OpcodeTraceUnit{PC: uint64(cx.PC())} + return OpcodeTraceUnit{PC: cx.PC()} } func (o *OpcodeTraceUnit) computeStackValueDeletions(cx *logic.EvalContext, tracer *evalTracer) { tracer.popCount, tracer.addCount = cx.GetOpSpec().StackExplain(cx) - o.StackPopCount = uint64(tracer.popCount) + o.StackPopCount = tracer.popCount stackHeight := len(cx.Stack) tracer.stackHeightAfterDeletion = stackHeight - int(o.StackPopCount) @@ -354,8 +354,8 @@ func (tracer *evalTracer) recordChangedScratchSlots(cx *logic.EvalContext) { switch currentOpcodeName { case "store": - slot := uint64(cx.GetProgram()[cx.PC()+1]) - tracer.scratchSlots = append(tracer.scratchSlots, slot) + slot := cx.GetProgram()[cx.PC()+1] + tracer.scratchSlots = append(tracer.scratchSlots, int(slot)) case "stores": prev := last - 1 slot := cx.Stack[prev].Uint @@ -365,7 +365,7 @@ func (tracer *evalTracer) recordChangedScratchSlots(cx *logic.EvalContext) { if slot >= uint64(len(cx.Scratch)) { return } - tracer.scratchSlots = append(tracer.scratchSlots, slot) + tracer.scratchSlots = append(tracer.scratchSlots, int(slot)) } } @@ -492,7 +492,7 @@ func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, pass bool, evalErr if cx.RunMode() == logic.ModeSig { // Report cost for LogicSig program and exit - tracer.result.TxnGroups[0].Txns[groupIndex].LogicSigBudgetConsumed = uint64(cx.Cost()) + tracer.result.TxnGroups[0].Txns[groupIndex].LogicSigBudgetConsumed = cx.Cost() if tracer.result.ReturnTrace() { tracer.result.TxnGroups[0].Txns[groupIndex].Trace.programTraceRef = nil } @@ -501,7 +501,7 @@ func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, pass bool, evalErr // Report cost of this program. 
// If it is an inner app call, roll up its cost to the top level transaction. - tracer.result.TxnGroups[0].Txns[tracer.relativeCursor[0]].AppBudgetConsumed += uint64(cx.Cost()) + tracer.result.TxnGroups[0].Txns[tracer.relativeCursor[0]].AppBudgetConsumed += cx.Cost() if cx.TxnGroup[groupIndex].Txn.ApplicationCallTxnFields.OnCompletion == transactions.ClearStateOC { if tracer.result.ReturnTrace() && (!pass || evalError != nil) { diff --git a/ledger/store/trackerdb/data.go b/ledger/store/trackerdb/data.go index b548f7d6bb..3629fff186 100644 --- a/ledger/store/trackerdb/data.go +++ b/ledger/store/trackerdb/data.go @@ -19,7 +19,6 @@ package trackerdb import ( "context" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" @@ -112,8 +111,8 @@ type ResourcesData struct { KeyValue basics.TealKeyValue `codec:"p"` // application global params ( basics.AppParams ) - ApprovalProgram []byte `codec:"q,allocbound=config.MaxAvailableAppProgramLen"` - ClearStateProgram []byte `codec:"r,allocbound=config.MaxAvailableAppProgramLen"` + ApprovalProgram []byte `codec:"q,allocbound=bounds.MaxAvailableAppProgramLen"` + ClearStateProgram []byte `codec:"r,allocbound=bounds.MaxAvailableAppProgramLen"` GlobalState basics.TealKeyValue `codec:"s"` LocalStateSchemaNumUint uint64 `codec:"t"` LocalStateSchemaNumByteSlice uint64 `codec:"u"` @@ -282,8 +281,8 @@ func (prd *PersistedResourcesData) AccountResource() ledgercore.AccountResource } // NormalizedOnlineBalance getter for normalized online balance. -func (ba *BaseAccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 { - return basics.NormalizedOnlineAccountBalance(ba.Status, ba.RewardsBase, ba.MicroAlgos, proto) +func (ba *BaseAccountData) NormalizedOnlineBalance(rewardUnit uint64) uint64 { + return basics.NormalizedOnlineAccountBalance(ba.Status, ba.RewardsBase, ba.MicroAlgos, rewardUnit) } // SetCoreAccountData setter for core account data. @@ -482,9 +481,9 @@ func (bo *BaseOnlineAccountData) GetOnlineAccount(addr basics.Address, normBalan // GetOnlineAccountData returns basics.OnlineAccountData for lookup agreement // TODO: unify with GetOnlineAccount/ledgercore.OnlineAccount -func (bo *BaseOnlineAccountData) GetOnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) basics.OnlineAccountData { +func (bo *BaseOnlineAccountData) GetOnlineAccountData(rewardUnit uint64, rewardsLevel uint64) basics.OnlineAccountData { microAlgos, _, _ := basics.WithUpdatedRewards( - proto, basics.Online, bo.MicroAlgos, basics.MicroAlgos{}, bo.RewardsBase, rewardsLevel, + rewardUnit, basics.Online, bo.MicroAlgos, basics.MicroAlgos{}, bo.RewardsBase, rewardsLevel, ) return basics.OnlineAccountData{ @@ -504,8 +503,8 @@ func (bo *BaseOnlineAccountData) GetOnlineAccountData(proto config.ConsensusPara } // NormalizedOnlineBalance getter for normalized online balance. -func (bo *BaseOnlineAccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 { - return basics.NormalizedOnlineAccountBalance(basics.Online, bo.RewardsBase, bo.MicroAlgos, proto) +func (bo *BaseOnlineAccountData) NormalizedOnlineBalance(rewardUnit uint64) uint64 { + return basics.NormalizedOnlineAccountBalance(basics.Online, bo.RewardsBase, bo.MicroAlgos, rewardUnit) } // SetCoreAccountData setter for core account data. 
diff --git a/ledger/store/trackerdb/data_test.go b/ledger/store/trackerdb/data_test.go index 16ed8c2694..ba4efd94ef 100644 --- a/ledger/store/trackerdb/data_test.go +++ b/ledger/store/trackerdb/data_test.go @@ -1190,8 +1190,8 @@ func TestBaseOnlineAccountDataGettersSetters(t *testing.T) { require.Equal(t, data.VoteKeyDilution, ba.VoteKeyDilution) require.Equal(t, data.StateProofID, ba.StateProofID) - normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, proto) - require.Equal(t, normBalance, ba.NormalizedOnlineBalance(proto)) + normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, proto.RewardUnit) + require.Equal(t, normBalance, ba.NormalizedOnlineBalance(proto.RewardUnit)) oa := ba.GetOnlineAccount(addr, normBalance) require.Equal(t, addr, oa.Address) @@ -1204,9 +1204,9 @@ func TestBaseOnlineAccountDataGettersSetters(t *testing.T) { rewardsLevel := uint64(1) microAlgos, _, _ := basics.WithUpdatedRewards( - proto, basics.Online, oa.MicroAlgos, basics.MicroAlgos{}, ba.RewardsBase, rewardsLevel, + proto.RewardUnit, basics.Online, oa.MicroAlgos, basics.MicroAlgos{}, ba.RewardsBase, rewardsLevel, ) - oad := ba.GetOnlineAccountData(proto, rewardsLevel) + oad := ba.GetOnlineAccountData(proto.RewardUnit, rewardsLevel) require.Equal(t, microAlgos, oad.MicroAlgosWithRewards) require.Equal(t, ba.VoteID, oad.VoteID) diff --git a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go index 28e7855a35..07bf8f2244 100644 --- a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go +++ b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go @@ -19,7 +19,6 @@ package dualdriver import ( "context" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -73,9 +72,9 @@ func (ar *accountsReaderExt) AccountsOnlineRoundParams() (onlineRoundParamsData } // AccountsOnlineTop implements trackerdb.AccountsReaderExt -func (ar *accountsReaderExt) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (onlineAccounts map[basics.Address]*ledgercore.OnlineAccount, err error) { - onlineAccountsP, errP := ar.primary.AccountsOnlineTop(rnd, offset, n, proto) - onlineAccountsS, errS := ar.secondary.AccountsOnlineTop(rnd, offset, n, proto) +func (ar *accountsReaderExt) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, rewardUnit uint64) (onlineAccounts map[basics.Address]*ledgercore.OnlineAccount, err error) { + onlineAccountsP, errP := ar.primary.AccountsOnlineTop(rnd, offset, n, rewardUnit) + onlineAccountsS, errS := ar.secondary.AccountsOnlineTop(rnd, offset, n, rewardUnit) // coalesce errors err = coalesceErrors(errP, errS) if err != nil { @@ -284,9 +283,9 @@ func (ar *accountsReaderExt) OnlineAccountsAll(maxAccounts uint64) (accounts []t } // ExpiredOnlineAccountsForRound implements trackerdb.AccountsReaderExt -func (ar *accountsReaderExt) ExpiredOnlineAccountsForRound(rnd basics.Round, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (expAccounts map[basics.Address]*basics.OnlineAccountData, err error) { - expAccountsP, errP := ar.primary.ExpiredOnlineAccountsForRound(rnd, voteRnd, proto, rewardsLevel) - expAccountsS, errS := ar.secondary.ExpiredOnlineAccountsForRound(rnd, voteRnd, proto, rewardsLevel) +func (ar *accountsReaderExt) 
ExpiredOnlineAccountsForRound(rnd basics.Round, voteRnd basics.Round, rewardUnit uint64, rewardsLevel uint64) (expAccounts map[basics.Address]*basics.OnlineAccountData, err error) { + expAccountsP, errP := ar.primary.ExpiredOnlineAccountsForRound(rnd, voteRnd, rewardUnit, rewardsLevel) + expAccountsS, errS := ar.secondary.ExpiredOnlineAccountsForRound(rnd, voteRnd, rewardUnit, rewardsLevel) // coalesce errors err = coalesceErrors(errP, errS) if err != nil { diff --git a/ledger/store/trackerdb/dualdriver/transaction_for_testing.go b/ledger/store/trackerdb/dualdriver/transaction_for_testing.go index 375b2bd82c..a5aea91797 100644 --- a/ledger/store/trackerdb/dualdriver/transaction_for_testing.go +++ b/ledger/store/trackerdb/dualdriver/transaction_for_testing.go @@ -20,7 +20,6 @@ import ( "context" "testing" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" @@ -32,9 +31,9 @@ type writerForTesting struct { } // AccountsInitLightTest implements trackerdb.WriterTestExt -func (tx *writerForTesting) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabse bool, err error) { - newDatabaseP, errP := tx.primary.AccountsInitLightTest(tb, initAccounts, proto) - newDatabaseS, errS := tx.secondary.AccountsInitLightTest(tb, initAccounts, proto) +func (tx *writerForTesting) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabse bool, err error) { + newDatabaseP, errP := tx.primary.AccountsInitLightTest(tb, initAccounts, rewardUnit) + newDatabaseS, errS := tx.secondary.AccountsInitLightTest(tb, initAccounts, rewardUnit) // coalesce errors err = coalesceErrors(errP, errS) if err != nil { diff --git a/ledger/store/trackerdb/generickv/accounts_ext_reader.go b/ledger/store/trackerdb/generickv/accounts_ext_reader.go index 13294913bc..2624a400e6 100644 --- a/ledger/store/trackerdb/generickv/accounts_ext_reader.go +++ b/ledger/store/trackerdb/generickv/accounts_ext_reader.go @@ -22,7 +22,6 @@ import ( "encoding/binary" "fmt" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -162,7 +161,7 @@ func (r *accountsReader) LookupOnlineAccountDataByAddress(addr basics.Address) ( ref = onlineAccountRef{ addr: addr, round: rnd, - normBalance: oa.NormalizedOnlineBalance(r.proto), + normBalance: oa.NormalizedOnlineBalance(r.proto.RewardUnit), } } else { err = trackerdb.ErrNotFound @@ -181,7 +180,7 @@ func (r *accountsReader) LookupOnlineAccountDataByAddress(addr basics.Address) ( // // Note that this does not check if the accounts have a vote key valid for any // particular round (past, present, or future). 
-func (r *accountsReader) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (data map[basics.Address]*ledgercore.OnlineAccount, err error) { +func (r *accountsReader) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, rewardUnit uint64) (data map[basics.Address]*ledgercore.OnlineAccount, err error) { // The SQL before the impl // SELECT // address, normalizedonlinebalance, data, max(updround) FROM onlineaccounts @@ -239,7 +238,7 @@ func (r *accountsReader) AccountsOnlineTop(rnd basics.Round, offset uint64, n ui Address: addr, MicroAlgos: oa.MicroAlgos, RewardsBase: oa.RewardsBase, - NormalizedOnlineBalance: oa.NormalizedOnlineBalance(proto), + NormalizedOnlineBalance: oa.NormalizedOnlineBalance(rewardUnit), VoteFirstValid: oa.VoteFirstValid, VoteLastValid: oa.VoteLastValid, StateProofID: oa.StateProofID, @@ -340,7 +339,7 @@ func (r *accountsReader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.Pers return nil, err } // set ref - normBalance := pitem.AccountData.NormalizedOnlineBalance(r.proto) + normBalance := pitem.AccountData.NormalizedOnlineBalance(r.proto.RewardUnit) pitem.Ref = onlineAccountRef{addr, normBalance, pitem.UpdRound} // if maxAccounts is supplied, potentially stop reading data if we've collected enough if maxAccounts > 0 { @@ -363,7 +362,7 @@ func (r *accountsReader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.Pers } // ExpiredOnlineAccountsForRound implements trackerdb.AccountsReaderExt -func (r *accountsReader) ExpiredOnlineAccountsForRound(rnd basics.Round, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (data map[basics.Address]*basics.OnlineAccountData, err error) { +func (r *accountsReader) ExpiredOnlineAccountsForRound(rnd basics.Round, voteRnd basics.Round, rewardUnit uint64, rewardsLevel uint64) (data map[basics.Address]*basics.OnlineAccountData, err error) { // The SQL at the time of writing: // // SELECT address, data, max(updround) @@ -425,7 +424,7 @@ func (r *accountsReader) ExpiredOnlineAccountsForRound(rnd basics.Round, voteRnd } // load the data as a ledgercore OnlineAccount - oadata := oa.GetOnlineAccountData(proto, rewardsLevel) + oadata := oa.GetOnlineAccountData(rewardUnit, rewardsLevel) data[addr] = &oadata } diff --git a/ledger/store/trackerdb/generickv/init_accounts.go b/ledger/store/trackerdb/generickv/init_accounts.go index fa9e1aebfc..b85b6f531c 100644 --- a/ledger/store/trackerdb/generickv/init_accounts.go +++ b/ledger/store/trackerdb/generickv/init_accounts.go @@ -20,7 +20,6 @@ import ( "context" "testing" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" @@ -48,10 +47,10 @@ func AccountsInitTest(tb testing.TB, db dbForInit, initAccounts map[basics.Addre // // This is duplicate due to a specific legacy test in accdeltas_test.go. // TODO: remove the need for this. -func AccountsInitLightTest(tb testing.TB, db dbForInit, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { +func AccountsInitLightTest(tb testing.TB, db dbForInit, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) { params := trackerdb.Params{ InitAccounts: initAccounts, - // TODO: how do we get the correct version from the proto arg? + // TODO: how do we get the correct version? 
InitProto: protocol.ConsensusCurrentVersion, } _, err = RunMigrations(context.Background(), db, params, trackerdb.AccountDBVersion) diff --git a/ledger/store/trackerdb/generickv/migrations.go b/ledger/store/trackerdb/generickv/migrations.go index 5f7d9823c0..0613daa4d0 100644 --- a/ledger/store/trackerdb/generickv/migrations.go +++ b/ledger/store/trackerdb/generickv/migrations.go @@ -132,7 +132,7 @@ func (m *migrator) setVersion(ctx context.Context, version int32) error { } func (m *migrator) initialVersion(ctx context.Context) error { - proto := config.Consensus[m.params.InitProto] + rewardUnit := config.Consensus[m.params.InitProto].RewardUnit // TODO: make this a batch scope err := m.db.TransactionContext(ctx, func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { @@ -168,7 +168,7 @@ func (m *migrator) initialVersion(ctx context.Context) error { var bad trackerdb.BaseAccountData bad.SetAccountData(&account) // insert the account - _, err = aow.InsertAccount(addr, account.NormalizedOnlineBalance(proto), bad) + _, err = aow.InsertAccount(addr, account.NormalizedOnlineBalance(rewardUnit), bad) if err != nil { return err } @@ -176,7 +176,7 @@ func (m *migrator) initialVersion(ctx context.Context) error { // build a ledgercore.AccountData to track the totals ad := ledgercore.ToAccountData(account) // track the totals - totals.AddAccount(proto, ad, &ot) + totals.AddAccount(rewardUnit, ad, &ot) // insert online account (if online) if bad.Status == basics.Online { @@ -185,7 +185,7 @@ func (m *migrator) initialVersion(ctx context.Context) error { baseOnlineAD.MicroAlgos = bad.MicroAlgos baseOnlineAD.RewardsBase = bad.RewardsBase - _, err = oaow.InsertOnlineAccount(addr, account.NormalizedOnlineBalance(proto), baseOnlineAD, uint64(updRound), uint64(baseOnlineAD.VoteLastValid)) + _, err = oaow.InsertOnlineAccount(addr, account.NormalizedOnlineBalance(rewardUnit), baseOnlineAD, uint64(updRound), uint64(baseOnlineAD.VoteLastValid)) if err != nil { return err } diff --git a/ledger/store/trackerdb/generickv/onlineaccounts_reader.go b/ledger/store/trackerdb/generickv/onlineaccounts_reader.go index e991d5eee1..2d693f68ff 100644 --- a/ledger/store/trackerdb/generickv/onlineaccounts_reader.go +++ b/ledger/store/trackerdb/generickv/onlineaccounts_reader.go @@ -72,7 +72,7 @@ func (r *accountsReader) LookupOnline(addr basics.Address, rnd basics.Round) (da return } - normBalance := data.AccountData.NormalizedOnlineBalance(r.proto) + normBalance := data.AccountData.NormalizedOnlineBalance(r.proto.RewardUnit) data.Ref = onlineAccountRef{addr, normBalance, rnd} // we have the record, we can leave @@ -122,7 +122,7 @@ func (r *accountsReader) LookupOnlineHistory(addr basics.Address) (result []trac } // set the ref - pitem.Ref = onlineAccountRef{addr, pitem.AccountData.NormalizedOnlineBalance(r.proto), pitem.UpdRound} + pitem.Ref = onlineAccountRef{addr, pitem.AccountData.NormalizedOnlineBalance(r.proto.RewardUnit), pitem.UpdRound} // append entry to accum result = append(result, pitem) diff --git a/ledger/store/trackerdb/generickv/writer.go b/ledger/store/trackerdb/generickv/writer.go index d67fd3790e..0dac0082d6 100644 --- a/ledger/store/trackerdb/generickv/writer.go +++ b/ledger/store/trackerdb/generickv/writer.go @@ -18,11 +18,11 @@ package generickv import ( "context" - "github.com/algorand/go-algorand/config" + "testing" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" - "testing" ) type writer 
struct { @@ -71,7 +71,7 @@ type writerForTesting struct { } // AccountsInitLightTest implements trackerdb.WriterTestExt -func (w *writerForTesting) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { +func (w *writerForTesting) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) { panic("unimplemented") } diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go index 90f0ab5f63..d067556171 100644 --- a/ledger/store/trackerdb/interface.go +++ b/ledger/store/trackerdb/interface.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/encoded" @@ -130,9 +129,9 @@ type AccountsReaderExt interface { TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) AccountsRound() (rnd basics.Round, err error) LookupOnlineAccountDataByAddress(addr basics.Address) (ref OnlineAccountRef, data []byte, err error) - AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) + AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, rewardUnit uint64) (map[basics.Address]*ledgercore.OnlineAccount, error) AccountsOnlineRoundParams() (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error) - ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (map[basics.Address]*basics.OnlineAccountData, error) + ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, rewardUnit uint64, rewardsLevel uint64) (map[basics.Address]*basics.OnlineAccountData, error) OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) LoadAllFullAccounts(ctx context.Context, balancesTable string, resourcesTable string, acctCb func(basics.Address, basics.AccountData)) (count int, err error) diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index c120d20e25..9e522d305d 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -5,7 +5,7 @@ package trackerdb import ( "github.com/algorand/msgp/msgp" - "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" @@ -2155,8 +2155,8 @@ func (z *ResourcesData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram") return } - if zb0004 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxAvailableAppProgramLen)) + if zb0004 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0004), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -2173,8 +2173,8 @@ func (z *ResourcesData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram") return } - if zb0005 > 
config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxAvailableAppProgramLen)) + if zb0005 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0005), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -2385,8 +2385,8 @@ func (z *ResourcesData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "ApprovalProgram") return } - if zb0007 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxAvailableAppProgramLen)) + if zb0007 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0007), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram) @@ -2401,8 +2401,8 @@ func (z *ResourcesData) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState err = msgp.WrapError(err, "ClearStateProgram") return } - if zb0008 > config.MaxAvailableAppProgramLen { - err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxAvailableAppProgramLen)) + if zb0008 > bounds.MaxAvailableAppProgramLen { + err = msgp.ErrOverflow(uint64(zb0008), uint64(bounds.MaxAvailableAppProgramLen)) return } (*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram) @@ -2511,7 +2511,7 @@ func ResourcesDataMaxSize() (s int) { s += 2 // Calculating size of array: z.MetadataHash s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) - s += 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.BoolSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 2 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint8Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + s += 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.BoolSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 2 + msgp.BytesPrefixSize + bounds.MaxAvailableAppProgramLen + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint8Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size return } diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go index 31b3038b19..967832fb49 100644 --- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -24,7 +24,6 @@ import ( "strings" "testing" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -203,7 +202,7 @@ func (r *accountsV2Reader) AccountsHashRound(ctx context.Context) (hashrnd basic // // Note that this does not check if the accounts have a vote key valid for any // particular round (past, present, or future). 
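The reader-side signatures below continue the same narrowing seen in migrations.go: queries such as AccountsOnlineTop and ExpiredOnlineAccountsForRound only needed ConsensusParams.RewardUnit, so they now accept that single uint64. A minimal caller-side sketch, assuming a trackerdb.AccountsReaderExt value named ar and the current consensus version (the helper name and variables are illustrative, not part of this diff):

package example

import (
	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/store/trackerdb"
	"github.com/algorand/go-algorand/protocol"
)

// onlineTop shows the post-refactor call shape: the caller derives the reward
// unit once from the consensus table and passes only that value to the store.
func onlineTop(ar trackerdb.AccountsReaderExt, rnd basics.Round, n uint64) error {
	rewardUnit := config.Consensus[protocol.ConsensusCurrentVersion].RewardUnit
	_, err := ar.AccountsOnlineTop(rnd, 0, n, rewardUnit)
	return err
}
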
-func (r *accountsV2Reader) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) { +func (r *accountsV2Reader) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, rewardUnit uint64) (map[basics.Address]*ledgercore.OnlineAccount, error) { // onlineaccounts has historical data ordered by updround for both online and offline accounts. // This means some account A might have norm balance != 0 at round N and norm balance == 0 at some round K > N. // For online top query one needs to find entries not fresher than X with norm balance != 0. @@ -250,7 +249,7 @@ ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?`, rnd, n, o // The original implementation uses current proto to recalculate norm balance // In the same time, in accountsNewRound genesis protocol is used to fill norm balance value // In order to be consistent with the original implementation recalculate the balance with current proto - normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, proto) + normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, rewardUnit) oa := data.GetOnlineAccount(addr, normBalance) res[addr] = &oa } @@ -303,7 +302,7 @@ func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.Pe } // ExpiredOnlineAccountsForRound returns all online accounts known at `rnd` that will be expired by `voteRnd`. -func (r *accountsV2Reader) ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (map[basics.Address]*basics.OnlineAccountData, error) { +func (r *accountsV2Reader) ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, rewardUnit uint64, rewardsLevel uint64) (map[basics.Address]*basics.OnlineAccountData, error) { // This relies on SQLite's handling of max(updround) and bare columns not in the GROUP BY. // The values of votelastvalid, votefirstvalid, and data will all be from the same row as max(updround) rows, err := r.q.Query(`SELECT address, data, max(updround) @@ -337,7 +336,7 @@ ORDER BY address`, rnd, voteRnd) if err != nil { return nil, err } - oadata := baseData.GetOnlineAccountData(proto, rewardsLevel) + oadata := baseData.GetOnlineAccountData(rewardUnit, rewardsLevel) if _, ok := ret[addr]; ok { return nil, fmt.Errorf("duplicate address in expired online accounts: %s", addr.String()) } diff --git a/ledger/store/trackerdb/sqlitedriver/schema.go b/ledger/store/trackerdb/sqlitedriver/schema.go index c1e9d976d7..6993c0f72b 100644 --- a/ledger/store/trackerdb/sqlitedriver/schema.go +++ b/ledger/store/trackerdb/sqlitedriver/schema.go @@ -182,7 +182,7 @@ var accountsResetExprs = []string{ // // accountsInit returns nil if either it has initialized the database // correctly, or if the database has already been initialized. 
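accountsInit and accountsAddNormalizedBalance follow the same pattern on the write path: genesis totals and normalized online balances are both derived from the reward unit alone. A short sketch of that derivation, assuming the post-refactor signatures shown in this hunk (the helper name is hypothetical):

package example

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/ledgercore"
)

// addGenesisAccount folds one genesis account into the running totals and
// returns its normalized online balance, consuming only the reward unit.
func addGenesisAccount(totals *ledgercore.AccountTotals, acct basics.AccountData, rewardUnit uint64, ot *basics.OverflowTracker) uint64 {
	ad := ledgercore.ToAccountData(acct)
	totals.AddAccount(rewardUnit, ad, ot)
	// Online accounts are also indexed by a balance normalized against the
	// same reward unit, mirroring the accountsAddNormalizedBalance migration.
	return acct.NormalizedOnlineBalance(rewardUnit)
}
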
-func accountsInit(e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { +func accountsInit(e db.Executable, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) { for _, tableCreate := range accountsSchema { _, err = e.Exec(tableCreate) if err != nil { @@ -218,7 +218,7 @@ func accountsInit(e db.Executable, initAccounts map[basics.Address]basics.Accoun } ad := ledgercore.ToAccountData(data) - totals.AddAccount(proto, ad, &ot) + totals.AddAccount(rewardUnit, ad, &ot) } if ot.Overflowed { @@ -246,7 +246,7 @@ func accountsInit(e db.Executable, initAccounts map[basics.Address]basics.Accoun // accountsAddNormalizedBalance adds the normalizedonlinebalance column // to the accountbase table. -func accountsAddNormalizedBalance(e db.Executable, proto config.ConsensusParams) error { +func accountsAddNormalizedBalance(e db.Executable, rewardUnit uint64) error { var exists bool err := e.QueryRow("SELECT 1 FROM pragma_table_info('accountbase') WHERE name='normalizedonlinebalance'").Scan(&exists) if err == nil { @@ -284,7 +284,7 @@ func accountsAddNormalizedBalance(e db.Executable, proto config.ConsensusParams) return err } - normBalance := data.NormalizedOnlineBalance(proto) + normBalance := data.NormalizedOnlineBalance(rewardUnit) if normBalance > 0 { _, err = e.Exec("UPDATE accountbase SET normalizedonlinebalance=? WHERE address=?", normBalance, addrbuf) if err != nil { diff --git a/ledger/store/trackerdb/sqlitedriver/schema_test.go b/ledger/store/trackerdb/sqlitedriver/schema_test.go index b8a982b293..b2a8cf3aca 100644 --- a/ledger/store/trackerdb/sqlitedriver/schema_test.go +++ b/ledger/store/trackerdb/sqlitedriver/schema_test.go @@ -191,10 +191,10 @@ func TestRemoveStrayStateProofID(t *testing.T) { // this is the same seq as AccountsInitTest makes but it stops // before the online accounts table creation to generate a trie and commit it - _, err = accountsInit(tx, accounts, config.Consensus[protocol.ConsensusCurrentVersion]) + _, err = accountsInit(tx, accounts, config.Consensus[protocol.ConsensusCurrentVersion].RewardUnit) require.NoError(t, err) - err = accountsAddNormalizedBalance(tx, config.Consensus[protocol.ConsensusCurrentVersion]) + err = accountsAddNormalizedBalance(tx, config.Consensus[protocol.ConsensusCurrentVersion].RewardUnit) require.NoError(t, err) err = accountsCreateResourceTable(context.Background(), tx) diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go index d5542f925e..a0fec44233 100644 --- a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go +++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go @@ -23,7 +23,6 @@ import ( "testing" "time" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" @@ -251,8 +250,8 @@ func (w *sqlWriter) Testing() trackerdb.WriterTestExt { } // AccountsInitLightTest implements trackerdb.WriterTestExt -func (w *sqlWriter) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { - return AccountsInitLightTest(tb, w.e, initAccounts, proto) +func (w *sqlWriter) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) { + return 
AccountsInitLightTest(tb, w.e, initAccounts, rewardUnit) } // AccountsInitTest implements trackerdb.WriterTestExt diff --git a/ledger/store/trackerdb/sqlitedriver/testing.go b/ledger/store/trackerdb/sqlitedriver/testing.go index 33a4303942..f3f096166b 100644 --- a/ledger/store/trackerdb/sqlitedriver/testing.go +++ b/ledger/store/trackerdb/sqlitedriver/testing.go @@ -46,8 +46,8 @@ func OpenForTesting(t testing.TB, inMemory bool) (trackerdb.Store, string) { // AccountsInitLightTest initializes an empty database for testing without the extra methods being called. // implements Testing interface, test function only -func AccountsInitLightTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { - newDB, err := accountsInit(e, initAccounts, proto) +func AccountsInitLightTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) { + newDB, err := accountsInit(e, initAccounts, rewardUnit) require.NoError(tb, err) return newDB, err } @@ -61,11 +61,11 @@ func modifyAcctBaseTest(e db.Executable) error { // AccountsInitTest initializes an empty database for testing. // implements Testing interface, test function only -func AccountsInitTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) { - newDB, err := accountsInit(e, initAccounts, config.Consensus[proto]) +func AccountsInitTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, cv protocol.ConsensusVersion) (newDatabase bool) { + newDB, err := accountsInit(e, initAccounts, config.Consensus[cv].RewardUnit) require.NoError(tb, err) - err = accountsAddNormalizedBalance(e, config.Consensus[proto]) + err = accountsAddNormalizedBalance(e, config.Consensus[cv].RewardUnit) require.NoError(tb, err) err = accountsCreateResourceTable(context.Background(), e) @@ -92,7 +92,7 @@ func AccountsInitTest(tb testing.TB, e db.Executable, initAccounts map[basics.Ad err = accountsCreateOnlineRoundParamsTable(context.Background(), e) require.NoError(tb, err) - err = performOnlineRoundParamsTailMigration(context.Background(), e, db.Accessor{}, true, proto) + err = performOnlineRoundParamsTailMigration(context.Background(), e, db.Accessor{}, true, cv) require.NoError(tb, err) err = accountsCreateBoxTable(context.Background(), e) diff --git a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go index 6c00113ea0..c59873d7f9 100644 --- a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go +++ b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go @@ -187,7 +187,7 @@ func (tu trackerDBSchemaInitializer) version() int32 { // The acctrounds would get updated to indicate that the balance matches round 0 func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, e db.Executable) (err error) { tu.log.Infof("upgradeDatabaseSchema0 initializing schema") - tu.newDatabase, err = accountsInit(e, tu.InitAccounts, config.Consensus[tu.InitProto]) + tu.newDatabase, err = accountsInit(e, tu.InitAccounts, config.Consensus[tu.InitProto].RewardUnit) if err != nil { return fmt.Errorf("upgradeDatabaseSchema0 unable to initialize schema : %v", err) } @@ -274,7 +274,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context // upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4, // 
adding the normalizedonlinebalance column to the accountbase table. func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, e db.Executable) (err error) { - err = accountsAddNormalizedBalance(e, config.Consensus[tu.InitProto]) + err = accountsAddNormalizedBalance(e, config.Consensus[tu.InitProto].RewardUnit) if err != nil { return err } diff --git a/ledger/store/trackerdb/testinterface.go b/ledger/store/trackerdb/testinterface.go index 1186171a0a..14e73b5c86 100644 --- a/ledger/store/trackerdb/testinterface.go +++ b/ledger/store/trackerdb/testinterface.go @@ -20,7 +20,6 @@ import ( "context" "testing" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" @@ -36,7 +35,7 @@ import ( // WriterTestExt is an interface to extend Writer with test-only methods type WriterTestExt interface { AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) - AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) + AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, rewardUnit uint64) (newDatabase bool, err error) AccountsUpdateSchemaTest(ctx context.Context) (err error) ModifyAcctBaseTest() error } diff --git a/ledger/store/trackerdb/testsuite/accounts_ext_kv_test.go b/ledger/store/trackerdb/testsuite/accounts_ext_kv_test.go index 0a49ce8cfd..f6d6852b26 100644 --- a/ledger/store/trackerdb/testsuite/accounts_ext_kv_test.go +++ b/ledger/store/trackerdb/testsuite/accounts_ext_kv_test.go @@ -238,7 +238,7 @@ func CustomTestAccountLookupByRowID(t *customT) { dataA := trackerdb.BaseAccountData{ RewardsBase: 1000, } - normBalanceA := dataA.NormalizedOnlineBalance(t.proto) + normBalanceA := dataA.NormalizedOnlineBalance(t.proto.RewardUnit) refA, err := aow.InsertAccount(addrA, normBalanceA, dataA) require.NoError(t, err) @@ -267,7 +267,7 @@ func CustomTestResourceLookupByRowID(t *customT) { // generate some test data addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // generate some test data diff --git a/ledger/store/trackerdb/testsuite/accounts_kv_test.go b/ledger/store/trackerdb/testsuite/accounts_kv_test.go index 48476fd774..9759517c5a 100644 --- a/ledger/store/trackerdb/testsuite/accounts_kv_test.go +++ b/ledger/store/trackerdb/testsuite/accounts_kv_test.go @@ -61,7 +61,7 @@ func CustomTestAccountsCrud(t *customT) { } // insert the account - normBalanceA := dataA.NormalizedOnlineBalance(t.proto) + normBalanceA := dataA.NormalizedOnlineBalance(t.proto.RewardUnit) refA, err := aow.InsertAccount(addrA, normBalanceA, dataA) require.NoError(t, err) @@ -80,7 +80,7 @@ func CustomTestAccountsCrud(t *customT) { // update the account dataA.RewardsBase = 98287 - normBalanceA = dataA.NormalizedOnlineBalance(t.proto) + normBalanceA = dataA.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = aow.UpdateAccount(refA, normBalanceA, dataA) require.NoError(t, err) @@ -126,7 +126,7 @@ func CustomTestResourcesCrud(t *customT) { // account addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 
1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // @@ -209,7 +209,7 @@ func CustomTestResourcesQueryAll(t *customT) { // account A addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // resource A-0 @@ -261,13 +261,13 @@ func CustomTestResourcesQueryAllLimited(t *customT) { // account A - will own creatables addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // account B - will opt into creatables addrB := RandomAddress() accDataB := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccB, err := aow.InsertAccount(addrB, accDataB.NormalizedOnlineBalance(t.proto), accDataB) + refAccB, err := aow.InsertAccount(addrB, accDataB.NormalizedOnlineBalance(t.proto.RewardUnit), accDataB) require.NoError(t, err) // asset A-0 for accounts A and B @@ -450,7 +450,7 @@ func CustomTestAppKVCrud(t *customT) { // account addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // resource resDataA0 := trackerdb.ResourcesData{} @@ -524,7 +524,7 @@ func CustomTestCreatablesCrud(t *customT) { // account A addrA := RandomAddress() accDataA := trackerdb.BaseAccountData{RewardsBase: 1000} - refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto), accDataA) + refAccA, err := aow.InsertAccount(addrA, accDataA.NormalizedOnlineBalance(t.proto.RewardUnit), accDataA) require.NoError(t, err) // resource A-0 diff --git a/ledger/store/trackerdb/testsuite/dbsemantics_test.go b/ledger/store/trackerdb/testsuite/dbsemantics_test.go index d76e6d100e..b1cdf9956f 100644 --- a/ledger/store/trackerdb/testsuite/dbsemantics_test.go +++ b/ledger/store/trackerdb/testsuite/dbsemantics_test.go @@ -43,7 +43,7 @@ func CustomTestTransaction(t *customT) { } // insert the account - normBalanceA := dataA.NormalizedOnlineBalance(t.proto) + normBalanceA := dataA.NormalizedOnlineBalance(t.proto.RewardUnit) refA, err := aow.InsertAccount(addrA, normBalanceA, dataA) require.NoError(t, err) @@ -67,7 +67,7 @@ func CustomTestTransaction(t *customT) { // update the account dataA.RewardsBase = 98287 - normBalanceA = dataA.NormalizedOnlineBalance(t.proto) + normBalanceA = dataA.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = aow.UpdateAccount(refA, normBalanceA, dataA) require.NoError(t, err) diff --git a/ledger/store/trackerdb/testsuite/dual_test.go b/ledger/store/trackerdb/testsuite/dual_test.go index d2f2236286..2ded73fe81 100644 --- a/ledger/store/trackerdb/testsuite/dual_test.go +++ b/ledger/store/trackerdb/testsuite/dual_test.go @@ -24,6 +24,7 @@ import ( ) func TestDualEngines(t *testing.T) { + // partitiontest.PartitionTest(t) // partitioning inside subtest dbFactory := func(proto 
config.ConsensusParams) dbForTests { db := testdb.OpenForTesting(t, true) seedDb(t, db) diff --git a/ledger/store/trackerdb/testsuite/mockdb_test.go b/ledger/store/trackerdb/testsuite/mockdb_test.go index 024dbf2287..2aebd87e58 100644 --- a/ledger/store/trackerdb/testsuite/mockdb_test.go +++ b/ledger/store/trackerdb/testsuite/mockdb_test.go @@ -23,6 +23,7 @@ import ( ) func TestMockDB(t *testing.T) { + // partitiontest.PartitionTest(t) // partitioning inside subtest dbFactory := func(proto config.ConsensusParams) dbForTests { db := makeMockDB(proto) diff --git a/ledger/store/trackerdb/testsuite/onlineaccounts_kv_test.go b/ledger/store/trackerdb/testsuite/onlineaccounts_kv_test.go index 6639a7b14c..9c0be20a60 100644 --- a/ledger/store/trackerdb/testsuite/onlineaccounts_kv_test.go +++ b/ledger/store/trackerdb/testsuite/onlineaccounts_kv_test.go @@ -60,7 +60,7 @@ func CustomTestOnlineAccountsWriteRead(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, RewardsBase: uint64(200), } - normalizedBalA := dataA.NormalizedOnlineBalance(t.proto) + normalizedBalA := dataA.NormalizedOnlineBalance(t.proto.RewardUnit) // write refA, err := oaw.InsertOnlineAccount(addrA, normalizedBalA, dataA, updRoundA, lastValidA) @@ -77,7 +77,7 @@ func CustomTestOnlineAccountsWriteRead(t *customT) { // write a new version dataA.MicroAlgos = basics.MicroAlgos{Raw: uint64(321)} - normalizedBalA = dataA.NormalizedOnlineBalance(t.proto) + normalizedBalA = dataA.NormalizedOnlineBalance(t.proto.RewardUnit) updRoundA = uint64(450) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA, dataA, updRoundA, lastValidA) require.NoError(t, err) @@ -124,7 +124,7 @@ func CustomTestOnlineAccountHistory(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(20)}, RewardsBase: uint64(200), } - normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto) + normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto.RewardUnit) refA1, err := oaw.InsertOnlineAccount(addrA, normalizedBalA1, dataA1, uint64(2), uint64(2)) require.NoError(t, err) @@ -135,7 +135,7 @@ func CustomTestOnlineAccountHistory(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, RewardsBase: uint64(200), } - normalizedBalA2 := dataA2.NormalizedOnlineBalance(t.proto) + normalizedBalA2 := dataA2.NormalizedOnlineBalance(t.proto.RewardUnit) refA2, err := oaw.InsertOnlineAccount(addrA, normalizedBalA2, dataA2, uint64(3), uint64(3)) require.NoError(t, err) @@ -147,7 +147,7 @@ func CustomTestOnlineAccountHistory(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(75)}, RewardsBase: uint64(200), } - normalizedBalB1 := dataB1.NormalizedOnlineBalance(t.proto) + normalizedBalB1 := dataB1.NormalizedOnlineBalance(t.proto.RewardUnit) refB1, err := oaw.InsertOnlineAccount(addrB, normalizedBalB1, dataB1, uint64(3), uint64(3)) require.NoError(t, err) @@ -192,27 +192,27 @@ func CustomTestOnlineAccountsAll(t *customT) { dataA0 := trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: uint64(200)}, } - _, err = oaw.InsertOnlineAccount(addrA, dataA0.NormalizedOnlineBalance(t.proto), dataA0, 0, voteLastValid) + _, err = oaw.InsertOnlineAccount(addrA, dataA0.NormalizedOnlineBalance(t.proto.RewardUnit), dataA0, 0, voteLastValid) require.NoError(t, err) dataA1 := trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: uint64(250)}, } - _, err = oaw.InsertOnlineAccount(addrA, dataA1.NormalizedOnlineBalance(t.proto), dataA1, 1, voteLastValid) + _, err = oaw.InsertOnlineAccount(addrA, dataA1.NormalizedOnlineBalance(t.proto.RewardUnit), dataA1, 1, 
voteLastValid) require.NoError(t, err) addrB := basics.Address(crypto.Hash([]byte("b"))) dataB := trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, } - _, err = oaw.InsertOnlineAccount(addrB, dataB.NormalizedOnlineBalance(t.proto), dataB, 0, voteLastValid) + _, err = oaw.InsertOnlineAccount(addrB, dataB.NormalizedOnlineBalance(t.proto.RewardUnit), dataB, 0, voteLastValid) require.NoError(t, err) addrC := basics.Address(crypto.Hash([]byte("c"))) dataC := trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: uint64(30)}, } - _, err = oaw.InsertOnlineAccount(addrC, dataC.NormalizedOnlineBalance(t.proto), dataC, 0, voteLastValid) + _, err = oaw.InsertOnlineAccount(addrC, dataC.NormalizedOnlineBalance(t.proto.RewardUnit), dataC, 0, voteLastValid) require.NoError(t, err) // @@ -257,7 +257,7 @@ func CustomTestAccountsOnlineTop(t *customT) { MicroAlgos: microAlgos, RewardsBase: rewardBase, } - normalizedBal := data.NormalizedOnlineBalance(t.proto) + normalizedBal := data.NormalizedOnlineBalance(t.proto.RewardUnit) // write _, err := oaw.InsertOnlineAccount(addr, normalizedBal, data, updRound, lastValid) @@ -267,13 +267,13 @@ func CustomTestAccountsOnlineTop(t *customT) { } // read (all) - poA, err := ar.AccountsOnlineTop(basics.Round(0), 0, 10, t.proto) + poA, err := ar.AccountsOnlineTop(basics.Round(0), 0, 10, t.proto.RewardUnit) require.NoError(t, err) require.Contains(t, poA, testData[9]) // most money require.Contains(t, poA, testData[0]) // least money // read (just a few) - poA, err = ar.AccountsOnlineTop(basics.Round(0), 1, 2, t.proto) + poA, err = ar.AccountsOnlineTop(basics.Round(0), 1, 2, t.proto.RewardUnit) require.NoError(t, err) require.Len(t, poA, 2) require.Contains(t, poA, testData[8]) // (second most money, we skipped 1) @@ -296,7 +296,7 @@ func CustomTestLookupOnlineAccountDataByAddress(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, RewardsBase: uint64(200), } - normalizedBalA := dataA.NormalizedOnlineBalance(t.proto) + normalizedBalA := dataA.NormalizedOnlineBalance(t.proto.RewardUnit) refA, err := oaw.InsertOnlineAccount(addrA, normalizedBalA, dataA, updRoundA, lastValidA) require.NoError(t, err) @@ -357,7 +357,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(20)}, RewardsBase: uint64(200), } - normalizedBalA0 := dataA0.NormalizedOnlineBalance(t.proto) + normalizedBalA0 := dataA0.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA0, dataA0, uint64(0), uint64(21)) require.NoError(t, err) @@ -369,7 +369,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(75)}, RewardsBase: uint64(200), } - normalizedBalB2 := dataB0.NormalizedOnlineBalance(t.proto) + normalizedBalB2 := dataB0.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrB, normalizedBalB2, dataB0, uint64(0), uint64(2)) require.NoError(t, err) @@ -382,7 +382,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, RewardsBase: uint64(200), } - normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto) + normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA1, dataA1, uint64(1), uint64(21)) require.NoError(t, err) @@ -395,7 +395,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(187)}, RewardsBase: uint64(200), } - normalizedBalA2 := 
dataA1.NormalizedOnlineBalance(t.proto) + normalizedBalA2 := dataA1.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA2, dataA2, uint64(2), uint64(21)) require.NoError(t, err) @@ -407,7 +407,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(721)}, RewardsBase: uint64(200), } - normalizedBalC2 := dataC2.NormalizedOnlineBalance(t.proto) + normalizedBalC2 := dataC2.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrC, normalizedBalC2, dataC2, uint64(2), uint64(21)) require.NoError(t, err) @@ -422,7 +422,7 @@ func CustomTestOnlineAccountsDelete(t *customT) { // check accounts // expected: A touched [2], C touched [2] - oas, err := ar.AccountsOnlineTop(basics.Round(4), 0, 99, t.proto) + oas, err := ar.AccountsOnlineTop(basics.Round(4), 0, 99, t.proto.RewardUnit) require.NoError(t, err) require.Len(t, oas, 3) require.Equal(t, oas[addrA].MicroAlgos, dataA2.MicroAlgos) // check item @@ -459,7 +459,7 @@ func CustomTestAccountsOnlineExpired(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(20)}, RewardsBase: uint64(0), } - normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto) + normalizedBalA1 := dataA1.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA1, dataA1, uint64(0), uint64(2)) require.NoError(t, err) @@ -470,7 +470,7 @@ func CustomTestAccountsOnlineExpired(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, RewardsBase: uint64(0), } - normalizedBalA2 := dataA2.NormalizedOnlineBalance(t.proto) + normalizedBalA2 := dataA2.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrA, normalizedBalA2, dataA2, uint64(1), uint64(5)) require.NoError(t, err) @@ -483,7 +483,7 @@ func CustomTestAccountsOnlineExpired(t *customT) { MicroAlgos: basics.MicroAlgos{Raw: uint64(75)}, RewardsBase: uint64(0), } - normalizedBalB1 := dataB1.NormalizedOnlineBalance(t.proto) + normalizedBalB1 := dataB1.NormalizedOnlineBalance(t.proto.RewardUnit) _, err = oaw.InsertOnlineAccount(addrB, normalizedBalB1, dataB1, uint64(2), uint64(7)) require.NoError(t, err) @@ -498,33 +498,33 @@ func CustomTestAccountsOnlineExpired(t *customT) { // // read (none) - expAccts, err := ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(0), t.proto, 0) + expAccts, err := ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(0), t.proto.RewardUnit, 0) require.NoError(t, err) require.Empty(t, expAccts) // read (at acct round, voteRnd > lastValid) - expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(4), t.proto, 0) + expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(4), t.proto.RewardUnit, 0) require.NoError(t, err) require.Len(t, expAccts, 1) require.Equal(t, expAccts[addrA].MicroAlgosWithRewards, basics.MicroAlgos{Raw: uint64(20)}) // check item // read (at acct round, voteRnd = lastValid) - expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(2), t.proto, 0) + expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(2), t.proto.RewardUnit, 0) require.NoError(t, err) require.Empty(t, expAccts) // read (at acct round, voteRnd < lastValid) - expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(1), t.proto, 0) + expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(0), basics.Round(1), t.proto.RewardUnit, 0) require.NoError(t, err) require.Empty(t, expAccts) // read (take 
latest exp value) - expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(1), basics.Round(4), t.proto, 0) + expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(1), basics.Round(4), t.proto.RewardUnit, 0) require.NoError(t, err) require.Len(t, expAccts, 0) // read (all) - expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(3), basics.Round(20), t.proto, 0) + expAccts, err = ar.ExpiredOnlineAccountsForRound(basics.Round(3), basics.Round(20), t.proto.RewardUnit, 0) require.Len(t, expAccts, 2) require.Equal(t, expAccts[addrA].MicroAlgosWithRewards, basics.MicroAlgos{Raw: uint64(100)}) // check item require.Equal(t, expAccts[addrB].MicroAlgosWithRewards, basics.MicroAlgos{Raw: uint64(75)}) // check item diff --git a/ledger/store/trackerdb/testsuite/pebbledb_test.go b/ledger/store/trackerdb/testsuite/pebbledb_test.go index 6b54bce720..39f04f7e94 100644 --- a/ledger/store/trackerdb/testsuite/pebbledb_test.go +++ b/ledger/store/trackerdb/testsuite/pebbledb_test.go @@ -27,6 +27,7 @@ import ( ) func TestPebbleDB(t *testing.T) { + // partitiontest.PartitionTest(t) // partitioning inside subtest dbFactory := func(proto config.ConsensusParams) dbForTests { // create a tmp dir for the db, the testing runtime will clean it up automatically dir := fmt.Sprintf("%s/db", t.TempDir()) diff --git a/ledger/store/trackerdb/testsuite/sqlitedb_test.go b/ledger/store/trackerdb/testsuite/sqlitedb_test.go index 38a0a4f61a..c0cf86e8d0 100644 --- a/ledger/store/trackerdb/testsuite/sqlitedb_test.go +++ b/ledger/store/trackerdb/testsuite/sqlitedb_test.go @@ -27,6 +27,7 @@ import ( ) func TestSqliteDB(t *testing.T) { + // partitiontest.PartitionTest(t) // partitioning inside subtest dbFactory := func(config.ConsensusParams) dbForTests { // create a tmp dir for the db, the testing runtime will clean it up automatically fn := fmt.Sprintf("%s/tracker-db.sqlite", t.TempDir()) diff --git a/ledger/testing/accountsTotals.go b/ledger/testing/accountsTotals.go index 02f9526bfe..d9e2239087 100644 --- a/ledger/testing/accountsTotals.go +++ b/ledger/testing/accountsTotals.go @@ -34,8 +34,8 @@ func CalculateNewRoundAccountTotals(t *gotesting.T, newRoundDeltas ledgercore.Ac for i := 0; i < newRoundDeltas.Len(); i++ { addr, ad := newRoundDeltas.GetByIdx(i) prevBal := ledgercore.ToAccountData(prevRoundBalances[addr]) - newTotals.DelAccount(newRoundConsensusParams, prevBal, &ot) - newTotals.AddAccount(newRoundConsensusParams, ad, &ot) + newTotals.DelAccount(newRoundConsensusParams.RewardUnit, prevBal, &ot) + newTotals.AddAccount(newRoundConsensusParams.RewardUnit, ad, &ot) } require.False(t, ot.Overflowed) return diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go index da711198af..0b0dffbd8c 100644 --- a/ledger/testing/randomAccounts.go +++ b/ledger/testing/randomAccounts.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" @@ -174,8 +175,8 @@ func RandomAppParams() basics.AppParams { } ap := basics.AppParams{ - ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen), - ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen), + ApprovalProgram: make([]byte, int(crypto.RandUint63())%bounds.MaxAppProgramLen), + ClearStateProgram: make([]byte, int(crypto.RandUint63())%bounds.MaxAppProgramLen), GlobalState: make(basics.TealKeyValue), 
StateSchemas: schemas, ExtraProgramPages: uint32(crypto.RandUint64() % 4), @@ -214,7 +215,7 @@ func RandomAppParams() basics.AppParams { var bytes []byte if crypto.RandUint64()%5 != 0 { - bytes = make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(keyName))) + bytes = make([]byte, crypto.RandUint64()%uint64(bounds.MaxBytesKeyValueLen-len(keyName))) crypto.RandBytes(bytes[:]) } @@ -260,7 +261,7 @@ func RandomAppLocalState() basics.AppLocalState { } var bytes []byte if crypto.RandUint64()%5 != 0 { - bytes = make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(keyName))) + bytes = make([]byte, crypto.RandUint64()%uint64(bounds.MaxBytesKeyValueLen-len(keyName))) crypto.RandBytes(bytes[:]) } @@ -334,7 +335,7 @@ func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.Creatabl break } } - data.AppLocalStates[basics.AppIndex(aidx)] = ap + data.AppLocalStates[aidx] = ap } } @@ -388,7 +389,7 @@ func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rew // RandomDeltasImpl generates a random set of accounts delta func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) { - proto := config.Consensus[protocol.ConsensusCurrentVersion] + rewardUnit := config.Consensus[protocol.ConsensusCurrentVersion].RewardUnit totals = make(map[basics.Address]ledgercore.AccountData) updates = ledgercore.MakeAccountDeltas(len(base)) @@ -507,7 +508,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew updates.UpsertAssetResource(addr, aidx, res.Params, res.Holding) } } - imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw) + imbalance += int64(old.WithUpdatedRewards(rewardUnit, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw) totals[addr] = new } } @@ -561,7 +562,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew updates.UpsertAssetResource(addr, aidx, res.Params, res.Holding) } } - imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw) + imbalance += int64(old.WithUpdatedRewards(rewardUnit, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw) totals[addr] = new } diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 00c57d7959..70ef924f0a 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -83,7 +83,7 @@ func TestTxTailCheckdup(t *testing.T) { Note: []byte{byte(rnd % 256), byte(rnd / 256), byte(1)}, }, } - err := tail.checkDup(proto, basics.Round(0), basics.Round(0), rnd+txvalidity, Txn.ID(), ledgercore.Txlease{}) + err := tail.checkDup(proto, 0, 0, rnd+txvalidity, Txn.ID(), ledgercore.Txlease{}) require.Errorf(t, err, "round %d", rnd) if rnd < lastRound-lookback-txvalidity-1 { var missingRoundErr *errTxTailMissingRound diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index fa0630a4d7..c3fe8d4d42 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -46,7 +46,7 @@ import ( // defaultKMDTimeoutSecs is the default number of seconds after which kmd will // kill itself if there are no requests. 
This can be overridden with // SetKMDStartArgs -const defaultKMDTimeoutSecs = 60 +const defaultKMDTimeoutSecs = 180 // DefaultKMDDataDir is the name of the directory within the algod data directory where kmd data goes const DefaultKMDDataDir = nodecontrol.DefaultKMDDataDir @@ -227,7 +227,7 @@ func getDataDir(dataDir string) (string, error) { func getNodeController(binDir, dataDir string) (nc nodecontrol.NodeController, err error) { dataDir, err = getDataDir(dataDir) if err != nil { - return nodecontrol.NodeController{}, nil + return nodecontrol.NodeController{}, err } return nodecontrol.MakeNodeController(binDir, dataDir), nil @@ -508,7 +508,7 @@ func (c *Client) signAndBroadcastTransactionWithWallet(walletHandle, pw []byte, // 0 | N | lastValid // M | 0 | first + validRounds - 1 // M | M | error -func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (first, last, latest uint64, err error) { +func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds basics.Round) (first, last, latest basics.Round, err error) { params, err := c.cachedSuggestedParams() if err != nil { return 0, 0, 0, err @@ -522,7 +522,8 @@ func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64 return first, last, params.LastRound, err } -func computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife uint64) (uint64, uint64, error) { +func computeValidityRounds(firstValid, lastValid, validRounds, lastRound basics.Round, maxTxnLife uint64) (basics.Round, basics.Round, error) { + lifeAsRounds := basics.Round(maxTxnLife) if validRounds != 0 && lastValid != 0 { return 0, 0, fmt.Errorf("cannot construct transaction: ambiguous input: lastValid = %d, validRounds = %d", lastValid, validRounds) } @@ -544,17 +545,17 @@ func computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxn if validRounds != 0 { // MaxTxnLife is the maximum difference between LastValid and FirstValid // so that validRounds = maxTxnLife+1 gives lastValid = firstValid + validRounds - 1 = firstValid + maxTxnLife - if validRounds > maxTxnLife+1 { + if validRounds > lifeAsRounds+1 { return 0, 0, fmt.Errorf("cannot construct transaction: txn validity period %d is greater than protocol max txn lifetime %d", validRounds-1, maxTxnLife) } lastValid = firstValid + validRounds - 1 } else if lastValid == 0 { - lastValid = firstValid + maxTxnLife + lastValid = firstValid + lifeAsRounds } if firstValid > lastValid { return 0, 0, fmt.Errorf("cannot construct transaction: txn would first be valid on round %d which is after last valid round %d", firstValid, lastValid) - } else if lastValid-firstValid > maxTxnLife { + } else if lastValid-firstValid > lifeAsRounds { return 0, 0, fmt.Errorf("cannot construct transaction: txn validity period ( %d to %d ) is greater than protocol max txn lifetime %d", firstValid, lastValid, maxTxnLife) } @@ -591,7 +592,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by if !ok { return transactions.Transaction{}, fmt.Errorf("ConstructPayment: unknown consensus protocol %s", params.ConsensusVersion) } - fv, lv, err := computeValidityRounds(uint64(firstValid), uint64(lastValid), 0, params.LastRound, cp.MaxTxnLife) + fv, lv, err := computeValidityRounds(firstValid, lastValid, 0, params.LastRound, cp.MaxTxnLife) if err != nil { return transactions.Transaction{}, err } @@ -601,8 +602,8 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by Header: transactions.Header{ Sender: fromAddr, 
Fee: basics.MicroAlgos{Raw: fee}, - FirstValid: basics.Round(fv), - LastValid: basics.Round(lv), + FirstValid: fv, + LastValid: lv, Lease: lease, Note: note, }, @@ -674,7 +675,7 @@ func (c *Client) AccountAssetsInformation(account string, next *string, limit *u } // AccountApplicationInformation gets account information about a given app. -func (c *Client) AccountApplicationInformation(accountAddress string, applicationID uint64) (resp model.AccountApplicationResponse, err error) { +func (c *Client) AccountApplicationInformation(accountAddress string, applicationID basics.AppIndex) (resp model.AccountApplicationResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.AccountApplicationInformation(accountAddress, applicationID) @@ -683,7 +684,7 @@ func (c *Client) AccountApplicationInformation(accountAddress string, applicatio } // RawAccountApplicationInformation gets account information about a given app. -func (c *Client) RawAccountApplicationInformation(accountAddress string, applicationID uint64) (accountResource modelV2.AccountApplicationModel, err error) { +func (c *Client) RawAccountApplicationInformation(accountAddress string, applicationID basics.AppIndex) (accountResource modelV2.AccountApplicationModel, err error) { algod, err := c.ensureAlgodClient() if err == nil { var resp []byte @@ -696,7 +697,7 @@ func (c *Client) RawAccountApplicationInformation(accountAddress string, applica } // AccountAssetInformation gets account information about a given asset. -func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) (resp model.AccountAssetResponse, err error) { +func (c *Client) AccountAssetInformation(accountAddress string, assetID basics.AssetIndex) (resp model.AccountAssetResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.AccountAssetInformation(accountAddress, assetID) @@ -704,19 +705,6 @@ func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) return } -// RawAccountAssetInformation gets account information about a given asset. 
-func (c *Client) RawAccountAssetInformation(accountAddress string, assetID uint64) (accountResource modelV2.AccountAssetModel, err error) { - algod, err := c.ensureAlgodClient() - if err == nil { - var resp []byte - resp, err = algod.RawAccountAssetInformation(accountAddress, assetID) - if err == nil { - err = protocol.Decode(resp, &accountResource) - } - } - return -} - // AccountData takes an address and returns its basics.AccountData func (c *Client) AccountData(account string) (accountData basics.AccountData, err error) { algod, err := c.ensureAlgodClient() @@ -731,7 +719,7 @@ func (c *Client) AccountData(account string) (accountData basics.AccountData, er } // AssetInformation takes an asset's index and returns its information -func (c *Client) AssetInformation(index uint64) (resp model.Asset, err error) { +func (c *Client) AssetInformation(index basics.AssetIndex) (resp model.Asset, err error) { algod, err := c.ensureAlgodClient() if err != nil { return @@ -765,7 +753,7 @@ func (c *Client) AssetInformation(index uint64) (resp model.Asset, err error) { } // ApplicationInformation takes an app's index and returns its information -func (c *Client) ApplicationInformation(index uint64) (resp model.Application, err error) { +func (c *Client) ApplicationInformation(index basics.AppIndex) (resp model.Application, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.ApplicationInformation(index) @@ -774,7 +762,7 @@ func (c *Client) ApplicationInformation(index uint64) (resp model.Application, e } // ApplicationBoxes takes an app's index and returns the names of boxes under it -func (c *Client) ApplicationBoxes(appID uint64, maxBoxNum uint64) (resp model.BoxesResponse, err error) { +func (c *Client) ApplicationBoxes(appID basics.AppIndex, maxBoxNum uint64) (resp model.BoxesResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.ApplicationBoxes(appID, maxBoxNum) @@ -784,7 +772,7 @@ func (c *Client) ApplicationBoxes(appID uint64, maxBoxNum uint64) (resp model.Bo // GetApplicationBoxByName takes an app's index and box name and returns its value. // The box name should be of the form `encoding:value`. See apps.AppCallBytes for more information. 
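Beyond the removed RawAccountAssetInformation helper, the libgoal changes in this file are largely mechanical: round numbers become basics.Round and application/asset identifiers become basics.AppIndex and basics.AssetIndex. A caller-side sketch of the effect, with hypothetical values; untyped constants still convert implicitly, but uint64 variables now need an explicit conversion:

package example

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/libgoal"
)

// fetchAppAndBlock illustrates the typed identifiers now expected by Client.
func fetchAppAndBlock(c *libgoal.Client, rawAppID uint64) error {
	appID := basics.AppIndex(rawAppID) // explicit conversion from a uint64 variable
	if _, err := c.ApplicationInformation(appID); err != nil {
		return err
	}
	_, err := c.Block(1000) // an untyped constant is still accepted as basics.Round
	return err
}
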
-func (c *Client) GetApplicationBoxByName(index uint64, name string) (resp model.BoxResponse, err error) { +func (c *Client) GetApplicationBoxByName(index basics.AppIndex, name string) (resp model.BoxResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.GetApplicationBoxByName(index, name) @@ -819,7 +807,7 @@ func (c *Client) ParsedPendingTransaction(txid string) (txn v2.PreEncodedTxInfo, } // Block takes a round and returns its block -func (c *Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) { +func (c *Client) Block(round basics.Round) (resp v2.BlockResponseJSON, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.Block(round) @@ -828,7 +816,7 @@ func (c *Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) { } // RawBlock takes a round and returns its block -func (c *Client) RawBlock(round uint64) (resp []byte, err error) { +func (c *Client) RawBlock(round basics.Round) (resp []byte, err error) { algod, err := c.ensureAlgodClient() if err != nil { return @@ -837,7 +825,7 @@ func (c *Client) RawBlock(round uint64) (resp []byte, err error) { } // BookkeepingBlock takes a round and returns its block -func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) { +func (c *Client) BookkeepingBlock(round basics.Round) (block bookkeeping.Block, err error) { algod, err := c.ensureAlgodClient() if err != nil { return @@ -861,7 +849,7 @@ func (c *Client) HealthCheck() error { // WaitForRound takes a round, waits up to one minute, for it to appear and // returns the node status. This function blocks and fails if the block does not // appear in one minute. -func (c *Client) WaitForRound(round uint64) (resp model.NodeStatusResponse, err error) { +func (c *Client) WaitForRound(round basics.Round) (resp model.NodeStatusResponse, err error) { algod, err := c.ensureAlgodClient() if err != nil { return @@ -897,16 +885,17 @@ func (c Client) LedgerSupply() (resp model.SupplyResponse, err error) { } // CurrentRound returns the current known round -func (c Client) CurrentRound() (lastRound uint64, err error) { +func (c Client) CurrentRound() (basics.Round, error) { // Get current round algod, err := c.ensureAlgodClient() - if err == nil { - resp, err := algod.Status() - if err == nil { - lastRound = resp.LastRound - } + if err != nil { + return 0, err } - return + resp, err := algod.Status() + if err != nil { + return 0, err + } + return resp.LastRound, nil } // SuggestedFee returns the suggested fee per byte by the network @@ -1092,7 +1081,7 @@ func (c *Client) ExportKey(walletHandle []byte, password, account string) (resp } // ConsensusParams returns the consensus parameters for the protocol active at the specified round -func (c *Client) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) { +func (c *Client) ConsensusParams(round basics.Round) (consensus config.ConsensusParams, err error) { block, err := c.BookkeepingBlock(round) if err != nil { return @@ -1226,14 +1215,14 @@ func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, other } else { // otherwise need to fetch app state var app model.Application - if app, err = client.ApplicationInformation(uint64(appIdx)); err != nil { + if app, err = client.ApplicationInformation(appIdx); err != nil { return } appParams = app.Params accounts = append(accounts, appIdx.Address()) } dr.Apps = append(dr.Apps, model.Application{ - Id: uint64(appIdx), + Id: appIdx, Params: appParams, }) } @@ 
-1255,7 +1244,7 @@ func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, other if b, err = client.BookkeepingBlock(dr.Round); err != nil { return } - dr.LatestTimestamp = uint64(b.BlockHeader.TimeStamp) + dr.LatestTimestamp = b.BlockHeader.TimeStamp } } return @@ -1295,7 +1284,7 @@ func (c *Client) SimulateTransactions(request v2.PreEncodedSimulateRequest) (res } // TransactionProof returns a Merkle proof for a transaction in a block. -func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.HashType) (resp model.TransactionProofResponse, err error) { +func (c *Client) TransactionProof(txid string, round basics.Round, hashType crypto.HashType) (resp model.TransactionProofResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.TransactionProof(txid, round, hashType) @@ -1304,7 +1293,7 @@ func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.Has } // LightBlockHeaderProof returns a Merkle proof for a block. -func (c *Client) LightBlockHeaderProof(round uint64) (resp model.LightBlockHeaderProofResponse, err error) { +func (c *Client) LightBlockHeaderProof(round basics.Round) (resp model.LightBlockHeaderProofResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.LightBlockHeaderProof(round) @@ -1313,7 +1302,7 @@ func (c *Client) LightBlockHeaderProof(round uint64) (resp model.LightBlockHeade } // SetSyncRound sets the sync round on a node w/ EnableFollowMode -func (c *Client) SetSyncRound(round uint64) (err error) { +func (c *Client) SetSyncRound(round basics.Round) (err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.SetSyncRound(round) @@ -1331,7 +1320,7 @@ func (c *Client) GetSyncRound() (rep model.GetSyncRoundResponse, err error) { } // GetLedgerStateDelta gets the LedgerStateDelta on a node w/ EnableFollowMode -func (c *Client) GetLedgerStateDelta(round uint64) (rep ledgercore.StateDelta, err error) { +func (c *Client) GetLedgerStateDelta(round basics.Round) (rep ledgercore.StateDelta, err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.GetLedgerStateDelta(round) @@ -1340,7 +1329,7 @@ func (c *Client) GetLedgerStateDelta(round uint64) (rep ledgercore.StateDelta, e } // BlockLogs returns all the logs in a block for a given round -func (c *Client) BlockLogs(round uint64) (resp model.BlockLogsResponse, err error) { +func (c *Client) BlockLogs(round basics.Round) (resp model.BlockLogsResponse, err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.BlockLogs(round) diff --git a/libgoal/libgoal_test.go b/libgoal/libgoal_test.go index 746f40a0cf..e2795ec998 100644 --- a/libgoal/libgoal_test.go +++ b/libgoal/libgoal_test.go @@ -19,6 +19,7 @@ package libgoal import ( "testing" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) @@ -28,10 +29,10 @@ func TestValidRounds(t *testing.T) { t.Parallel() a := require.New(t) - var firstValid, lastValid, validRounds uint64 + var firstValid, lastValid, validRounds, lastRound basics.Round - lastRound := uint64(1) - maxTxnLife := uint64(1000) + lastRound = 1 + const maxTxnLife = 1000 firstValid = 0 lastValid = 0 @@ -82,8 +83,8 @@ func TestValidRounds(t *testing.T) { validRounds = 0 fv, lv, err = computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife) a.NoError(err) - a.Equal(uint64(1), fv) - a.Equal(maxTxnLife+1, lv) + a.EqualValues(1, fv) + 
a.EqualValues(maxTxnLife+1, lv) firstValid = 0 lastValid = lastRound + 1 @@ -114,30 +115,30 @@ func TestValidRounds(t *testing.T) { validRounds = 1 fv, lv, err = computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife) a.NoError(err) - a.Equal(uint64(1), fv) - a.Equal(uint64(1), lv) + a.EqualValues(1, fv) + a.EqualValues(1, lv) firstValid = 1 lastValid = 1 validRounds = 0 fv, lv, err = computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife) a.NoError(err) - a.Equal(uint64(1), fv) - a.Equal(uint64(1), lv) + a.EqualValues(1, fv) + a.EqualValues(1, lv) firstValid = 100 lastValid = 0 validRounds = maxTxnLife fv, lv, err = computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife) a.NoError(err) - a.Equal(uint64(100), fv) - a.Equal(100+maxTxnLife-1, lv) + a.EqualValues(100, fv) + a.EqualValues(100+maxTxnLife-1, lv) firstValid = 100 lastValid = maxTxnLife validRounds = 0 fv, lv, err = computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife) a.NoError(err) - a.Equal(uint64(100), fv) - a.Equal(maxTxnLife, lv) + a.EqualValues(100, fv) + a.EqualValues(maxTxnLife, lv) } diff --git a/libgoal/participation.go b/libgoal/participation.go index a23f7831f9..48a08be64d 100644 --- a/libgoal/participation.go +++ b/libgoal/participation.go @@ -18,6 +18,7 @@ package libgoal import ( "fmt" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" @@ -33,14 +34,14 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round) } // Loop through each of the participation keys; pick the one that expires farthest in the future. - var expiry uint64 = 0 + expiry := basics.Round(0) for _, info := range parts { // Choose the Participation valid for this round that relates to the passed address // that expires farthest in the future. // Note that algod will sign votes with all possible Participations. so any should work // in the short-term. // In the future we should allow the user to specify exactly which partkeys to register. 
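The TestValidRounds changes above keep the same expectations while dropping the uint64 plumbing. A worked example of the widest-window case those assertions cover, under the assumed inputs lastRound = 1 and maxTxnLife = 1000 (the helper name is hypothetical and mirrors the test's expected values rather than the implementation):

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
)

// widestWindow mirrors the firstValid = lastValid = validRounds = 0 case from
// the test: the window starts at the node's last round and spans maxTxnLife.
func widestWindow(lastRound basics.Round, maxTxnLife uint64) (basics.Round, basics.Round) {
	firstValid := lastRound
	lastValid := firstValid + basics.Round(maxTxnLife)
	return firstValid, lastValid
}

func main() {
	fv, lv := widestWindow(1, 1000)
	fmt.Println(fv, lv) // 1 1001, matching a.EqualValues(1, fv) and a.EqualValues(maxTxnLife+1, lv)
}
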
- if info.Key.VoteFirstValid <= uint64(round) && uint64(round) <= info.Key.VoteLastValid && info.Address == address.String() && info.Key.VoteLastValid > expiry { + if info.Key.VoteFirstValid <= round && round <= info.Key.VoteLastValid && info.Address == address.String() && info.Key.VoteLastValid > expiry { part = info expiry = part.Key.VoteLastValid } @@ -56,7 +57,7 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round) // GenParticipationKeys creates a .partkey database for a given address, fills // it with keys, and installs it in the right place -func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, keyDilution uint64) (part account.Participation, filePath string, err error) { +func (c *Client) GenParticipationKeys(address string, firstValid, lastValid basics.Round, keyDilution uint64) (part account.Participation, filePath string, err error) { installFunc := func(keyPath string) error { _, err := c.AddParticipationKey(keyPath) return err diff --git a/libgoal/participation/participation.go b/libgoal/participation/participation.go index 4abf94a601..2f93b5f9fa 100644 --- a/libgoal/participation/participation.go +++ b/libgoal/participation/participation.go @@ -38,7 +38,7 @@ func participationKeysPath(dataDir string, address basics.Address, firstValid, l // GenParticipationKeysTo creates a .partkey database for a given address, fills // it with keys, and saves it in the specified output directory. If the output // directory is empty, the key will be installed. -func GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string, installFunc func(keyPath string) error) (part account.Participation, filePath string, err error) { +func GenParticipationKeysTo(address string, firstValid, lastValid basics.Round, keyDilution uint64, outDir string, installFunc func(keyPath string) error) (part account.Participation, filePath string, err error) { install := outDir == "" if install && installFunc == nil { @@ -51,20 +51,18 @@ func GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution u return } - firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid) - // If we are installing, generate in the temp dir if install { outDir = os.TempDir() } // Connect to the database - partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound) + partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstValid, lastValid) if err != nil { return } _, err = os.Stat(partKeyPath) if err == nil { - err = fmt.Errorf("ParticipationKeys exist for the range %d to %d", firstRound, lastRound) + err = fmt.Errorf("ParticipationKeys exist for the range %d to %d", firstValid, lastValid) return } else if !os.IsNotExist(err) { err = fmt.Errorf("participation key file '%s' cannot be accessed : %w", partKeyPath, err) @@ -85,11 +83,11 @@ func GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution u } if keyDilution == 0 { - keyDilution = account.DefaultKeyDilution(firstRound, lastRound) + keyDilution = account.DefaultKeyDilution(firstValid, lastValid) } // Fill the database with new participation keys - newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution) + newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstValid, lastValid, keyDilution) part = newPart.Participation partdb.Close() diff --git a/libgoal/participation/participation_test.go b/libgoal/participation/participation_test.go index 
44c8990d92..66f44224e7 100644 --- a/libgoal/participation/participation_test.go +++ b/libgoal/participation/participation_test.go @@ -72,8 +72,8 @@ func TestGenParticipationKeysTo_DefaultKeyDilution(t *testing.T) { var addr basics.Address addr[1] = 1 - first := uint64(1000) - last := uint64(2000) + const first = 1000 + const last = 2000 testcases := []struct { name string @@ -83,7 +83,7 @@ func TestGenParticipationKeysTo_DefaultKeyDilution(t *testing.T) { { name: "default", dilution: 0, - expected: account.DefaultKeyDilution(basics.Round(first), basics.Round(last)), + expected: account.DefaultKeyDilution(first, last), }, { name: "override", dilution: 5, diff --git a/libgoal/transactions.go b/libgoal/transactions.go index 3c80b69dea..14196e78c2 100644 --- a/libgoal/transactions.go +++ b/libgoal/transactions.go @@ -208,7 +208,7 @@ func (c *Client) SignAndBroadcastTransaction(walletHandle, pw []byte, utx transa } // WaitForConfirmedTxn waits for a transaction to be confirmed, returing information about it. -func (c *Client) WaitForConfirmedTxn(roundTimeout uint64, txid string) (txn v2.PreEncodedTxInfo, err error) { +func (c *Client) WaitForConfirmedTxn(roundTimeout basics.Round, txid string) (txn v2.PreEncodedTxInfo, err error) { algod, err := c.ensureAlgodClient() if err != nil { return @@ -264,15 +264,15 @@ func generateRegistrationTransaction(part model.ParticipationKey, fee basics.Mic StateProofPK: stateProofPk, }, } - t.KeyregTxnFields.VoteFirst = basics.Round(part.Key.VoteFirstValid) - t.KeyregTxnFields.VoteLast = basics.Round(part.Key.VoteLastValid) + t.KeyregTxnFields.VoteFirst = part.Key.VoteFirstValid + t.KeyregTxnFields.VoteLast = part.Key.VoteLastValid t.KeyregTxnFields.VoteKeyDilution = part.Key.VoteKeyDilution return t, nil } // MakeRegistrationTransactionWithGenesisID Generates a Registration transaction with the genesis ID set from the suggested parameters of the client -func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) { +func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee uint64, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) { // Get current round, protocol, genesis ID params, err := c.cachedSuggestedParams() @@ -292,8 +292,7 @@ func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participa goOnlineTx := part.GenerateRegistrationTransaction( basics.MicroAlgos{Raw: fee}, - basics.Round(txnFirstValid), - basics.Round(txnLastValid), + txnFirstValid, txnLastValid, leaseBytes, includeStateProofKeys) goOnlineTx.Header.GenesisID = params.GenesisId @@ -307,7 +306,7 @@ func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participa } // MakeUnsignedGoOnlineTx creates a transaction that will bring an address online using available participation keys -func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid basics.Round, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { @@ -332,16 +331,14 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, f // Choose which 
participation keys to go online with; // need to do this after filling in the round number. - part, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid)) + part, err := c.chooseParticipation(parsedAddr, firstValid) if err != nil { return transactions.Transaction{}, err } - parsedFrstValid := basics.Round(firstValid) - parsedLastValid := basics.Round(lastValid) parsedFee := basics.MicroAlgos{Raw: fee} - goOnlineTransaction, err := generateRegistrationTransaction(part, parsedFee, parsedFrstValid, parsedLastValid, leaseBytes) + goOnlineTransaction, err := generateRegistrationTransaction(part, parsedFee, firstValid, lastValid, leaseBytes) if err != nil { return transactions.Transaction{}, err } @@ -365,7 +362,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, f } // MakeUnsignedGoOfflineTx creates a transaction that will bring an address offline -func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid basics.Round, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { @@ -387,8 +384,6 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid, return transactions.Transaction{}, err } - parsedFirstRound := basics.Round(firstValid) - parsedLastRound := basics.Round(lastValid) parsedFee := basics.MicroAlgos{Raw: fee} goOfflineTransaction := transactions.Transaction{ @@ -396,8 +391,8 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid, Header: transactions.Header{ Sender: parsedAddr, Fee: parsedFee, - FirstValid: parsedFirstRound, - LastValid: parsedLastRound, + FirstValid: firstValid, + LastValid: lastValid, Lease: leaseBytes, }, } @@ -420,7 +415,7 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid, } // MakeUnsignedBecomeNonparticipatingTx creates a transaction that will mark an account as non-participating -func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid, lastValid, fee uint64) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid, lastValid basics.Round, fee uint64) (transactions.Transaction, error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { @@ -442,8 +437,6 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid return transactions.Transaction{}, err } - parsedFirstRound := basics.Round(firstValid) - parsedLastRound := basics.Round(lastValid) parsedFee := basics.MicroAlgos{Raw: fee} becomeNonparticipatingTransaction := transactions.Transaction{ @@ -451,8 +444,8 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid Header: transactions.Header{ Sender: parsedAddr, Fee: parsedFee, - FirstValid: parsedFirstRound, - LastValid: parsedLastRound, + FirstValid: firstValid, + LastValid: lastValid, }, } if cparams.SupportGenesisHash { @@ -475,7 +468,7 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid } // FillUnsignedTxTemplate fills in header fields in a partially-filled-in transaction. 
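// --- Aside (not part of this diff): with firstValid/lastValid now typed as
// basics.Round, callers of the key-registration helpers pass rounds through
// directly instead of converting from uint64 at the call site. A minimal
// usage sketch; the address, fee and validity window are illustrative values.
package sketch

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/libgoal"
)

func goOnlineSketch(c *libgoal.Client, addr string, current basics.Round) error {
	firstValid := current
	lastValid := current + 1000 // arbitrary window for illustration
	var lease [32]byte
	tx, err := c.MakeUnsignedGoOnlineTx(addr, firstValid, lastValid, 1000 /* fee, microAlgos */, lease)
	if err != nil {
		return err
	}
	_ = tx // sign and broadcast elsewhere
	return nil
}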
-func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fee uint64, tx transactions.Transaction) (transactions.Transaction, error) { +func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid basics.Round, fee uint64, tx transactions.Transaction) (transactions.Transaction, error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(sender) if err != nil { @@ -501,8 +494,8 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe tx.Header.Sender = parsedAddr tx.Header.Fee = parsedFee - tx.Header.FirstValid = basics.Round(firstValid) - tx.Header.LastValid = basics.Round(lastValid) + tx.Header.FirstValid = firstValid + tx.Header.LastValid = lastValid if cparams.SupportGenesisHash { var genHash crypto.Digest @@ -530,47 +523,47 @@ func (c *Client) MakeUnsignedAppCreateTx(onComplete transactions.OnCompletion, a } // MakeUnsignedAppUpdateTx makes a transaction for updating an application's programs -func (c *Client) MakeUnsignedAppUpdateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, approvalProg []byte, clearProg []byte, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppUpdateTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, approvalProg []byte, clearProg []byte, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.UpdateApplicationOC, approvalProg, clearProg, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedAppDeleteTx makes a transaction for deleting an application -func (c *Client) MakeUnsignedAppDeleteTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppDeleteTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.DeleteApplicationOC, nil, nil, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedAppOptInTx makes a transaction for opting in to (allocating // some account-specific state for) an application -func (c *Client) MakeUnsignedAppOptInTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppOptInTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.OptInOC, nil, nil, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedAppCloseOutTx makes a transaction for closing out of // (deallocating all account-specific state for) an application -func (c *Client) MakeUnsignedAppCloseOutTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, 
boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppCloseOutTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.CloseOutOC, nil, nil, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedAppClearStateTx makes a transaction for clearing out all // account-specific state for an application. It may not be rejected by the // application's logic. -func (c *Client) MakeUnsignedAppClearStateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppClearStateTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.ClearStateOC, nil, nil, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedAppNoOpTx makes a transaction for interacting with an existing // application, potentially updating any account-specific local state and // global state associated with it. -func (c *Client) MakeUnsignedAppNoOpTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedAppNoOpTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, rejectVersion uint64) (tx transactions.Transaction, err error) { return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.NoOpOC, nil, nil, emptySchema, emptySchema, 0, rejectVersion) } // MakeUnsignedApplicationCallTx is a helper for the above ApplicationCall // transaction constructors. A fully custom ApplicationCall transaction may // be constructed using this method. 
-func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32, rejectVersion uint64) (tx transactions.Transaction, err error) { +func (c *Client) MakeUnsignedApplicationCallTx(appIdx basics.AppIndex, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32, rejectVersion uint64) (tx transactions.Transaction, err error) { tx.Type = protocol.ApplicationCallTx - tx.ApplicationID = basics.AppIndex(appIdx) + tx.ApplicationID = appIdx tx.OnCompletion = onCompletion tx.RejectVersion = rejectVersion @@ -713,10 +706,10 @@ func (c *Client) MakeUnsignedAssetCreateTx(total uint64, defaultFrozen bool, man // // Call FillUnsignedTxTemplate afterwards to fill out common fields in // the resulting transaction template. -func (c *Client) MakeUnsignedAssetDestroyTx(index uint64) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedAssetDestroyTx(index basics.AssetIndex) (transactions.Transaction, error) { var tx transactions.Transaction tx.Type = protocol.AssetConfigTx - tx.ConfigAsset = basics.AssetIndex(index) + tx.ConfigAsset = index return tx, nil } @@ -727,7 +720,7 @@ func (c *Client) MakeUnsignedAssetDestroyTx(index uint64) (transactions.Transact // // Call FillUnsignedTxTemplate afterwards to fill out common fields in // the resulting transaction template. -func (c *Client) MakeUnsignedAssetConfigTx(creator string, index uint64, newManager *string, newReserve *string, newFreeze *string, newClawback *string) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedAssetConfigTx(creator string, index basics.AssetIndex, newManager *string, newReserve *string, newFreeze *string, newClawback *string) (transactions.Transaction, error) { var tx transactions.Transaction var err error @@ -759,7 +752,7 @@ func (c *Client) MakeUnsignedAssetConfigTx(creator string, index uint64, newMana } tx.Type = protocol.AssetConfigTx - tx.ConfigAsset = basics.AssetIndex(index) + tx.ConfigAsset = index if *newManager != "" { tx.AssetParams.Manager, err = basics.UnmarshalChecksumAddress(*newManager) @@ -797,13 +790,13 @@ func (c *Client) MakeUnsignedAssetConfigTx(creator string, index uint64, newMana // // Call FillUnsignedTxTemplate afterwards to fill out common fields in // the resulting transaction template. -func (c *Client) MakeUnsignedAssetSendTx(index uint64, amount uint64, recipient string, closeTo string, senderForClawback string) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedAssetSendTx(index basics.AssetIndex, amount uint64, recipient string, closeTo string, senderForClawback string) (transactions.Transaction, error) { var tx transactions.Transaction var err error tx.Type = protocol.AssetTransferTx tx.AssetAmount = amount - tx.XferAsset = basics.AssetIndex(index) + tx.XferAsset = index if recipient != "" { tx.AssetReceiver, err = basics.UnmarshalChecksumAddress(recipient) @@ -833,12 +826,12 @@ func (c *Client) MakeUnsignedAssetSendTx(index uint64, amount uint64, recipient // // Call FillUnsignedTxTemplate afterwards to fill out common fields in // the resulting transaction template. 
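// --- Aside (not part of this diff): application and asset helpers now take
// typed basics.AppIndex / basics.AssetIndex arguments, and the template filler
// takes basics.Round validity bounds. A minimal usage sketch; the app ID and
// sender are placeholders, and passing zero rounds/fee is assumed to make the
// client derive them from the node's suggested parameters.
package sketch

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/transactions"
	"github.com/algorand/go-algorand/libgoal"
)

func appCallSketch(c *libgoal.Client, sender string) (transactions.Transaction, error) {
	appID := basics.AppIndex(123) // hypothetical application ID
	tx, err := c.MakeUnsignedAppNoOpTx(appID, nil, nil, nil, nil, nil, 0)
	if err != nil {
		return transactions.Transaction{}, err
	}
	return c.FillUnsignedTxTemplate(sender, basics.Round(0), basics.Round(0), 0, tx)
}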
-func (c *Client) MakeUnsignedAssetFreezeTx(index uint64, accountToChange string, newFreezeSetting bool) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedAssetFreezeTx(index basics.AssetIndex, accountToChange string, newFreezeSetting bool) (transactions.Transaction, error) { var tx transactions.Transaction var err error tx.Type = protocol.AssetFreezeTx - tx.FreezeAsset = basics.AssetIndex(index) + tx.FreezeAsset = index tx.FreezeAccount, err = basics.UnmarshalChecksumAddress(accountToChange) if err != nil { diff --git a/logging/cyclicWriter.go b/logging/cyclicWriter.go index e860ffc9e3..978052d105 100644 --- a/logging/cyclicWriter.go +++ b/logging/cyclicWriter.go @@ -143,23 +143,23 @@ func (cyclic *CyclicFileWriter) Write(p []byte) (n int, err error) { now := time.Now() // we don't have enough space to write the entry, so archive data cyclic.writer.Close() - var err error + globPath := cyclic.getArchiveGlob() - oldarchives, err := filepath.Glob(globPath) - if err != nil && !os.IsNotExist(err) { - fmt.Fprintf(os.Stderr, "%s: glob err: %s\n", globPath, err) + oldarchives, err1 := filepath.Glob(globPath) + if err1 != nil && !os.IsNotExist(err1) { + fmt.Fprintf(os.Stderr, "%s: glob err: %s\n", globPath, err1) } else if cyclic.maxLogAge != 0 { tooOld := now.Add(-cyclic.maxLogAge) for _, path := range oldarchives { - finfo, err := os.Stat(path) - if err != nil { - fmt.Fprintf(os.Stderr, "%s: stat: %s\n", path, err) + finfo, err2 := os.Stat(path) + if err2 != nil { + fmt.Fprintf(os.Stderr, "%s: stat: %s\n", path, err2) continue } if finfo.ModTime().Before(tooOld) { - err = os.Remove(path) - if err != nil { - fmt.Fprintf(os.Stderr, "%s: rm: %s\n", path, err) + err2 = os.Remove(path) + if err2 != nil { + fmt.Fprintf(os.Stderr, "%s: rm: %s\n", path, err2) } } } @@ -174,30 +174,30 @@ func (cyclic *CyclicFileWriter) Write(p []byte) (n int, err error) { shouldBz2 = true archivePath = archivePath[:len(archivePath)-4] } - if err = util.MoveFile(cyclic.liveLog, archivePath); err != nil { - panic(fmt.Sprintf("CyclicFileWriter: cannot archive full log %v", err)) + if err1 = util.MoveFile(cyclic.liveLog, archivePath); err1 != nil { + panic(fmt.Sprintf("CyclicFileWriter: cannot archive full log %v", err1)) } if shouldGz { cmd := exec.Command("gzip", archivePath) - err = cmd.Start() - if err != nil { - fmt.Fprintf(os.Stderr, "%s: could not gzip: %s", archivePath, err) + err1 = cmd.Start() + if err1 != nil { + fmt.Fprintf(os.Stderr, "%s: could not gzip: %s", archivePath, err1) } else { go procWait(cmd, archivePath) } } else if shouldBz2 { cmd := exec.Command("bzip2", archivePath) - err = cmd.Start() - if err != nil { - fmt.Fprintf(os.Stderr, "%s: could not bzip2: %s", archivePath, err) + err1 = cmd.Start() + if err1 != nil { + fmt.Fprintf(os.Stderr, "%s: could not bzip2: %s", archivePath, err1) } else { go procWait(cmd, archivePath) } } cyclic.logStart = now - cyclic.writer, err = os.OpenFile(cyclic.liveLog, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) - if err != nil { - panic(fmt.Sprintf("CyclicFileWriter: cannot open log file %v", err)) + cyclic.writer, err1 = os.OpenFile(cyclic.liveLog, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err1 != nil { + panic(fmt.Sprintf("CyclicFileWriter: cannot open log file %v", err1)) } cyclic.nextWrite = 0 } diff --git a/logging/telemetry.go b/logging/telemetry.go index fa4af559ba..acdd3f4ddf 100644 --- a/logging/telemetry.go +++ b/logging/telemetry.go @@ -18,7 +18,6 @@ package logging import ( "context" - "fmt" "io" "os" "path/filepath" @@ -27,7 +26,6 @@ 
import ( "github.com/sirupsen/logrus" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/util/uuid" ) @@ -91,7 +89,7 @@ func makeTelemetryStateContext(ctx context.Context, cfg TelemetryConfig, hookFac } // ReadTelemetryConfigOrDefault reads telemetry config from file or defaults if no config file found. -func ReadTelemetryConfigOrDefault(dataDir string, genesisID string) (cfg TelemetryConfig, err error) { +func ReadTelemetryConfigOrDefault(dataDir string, globalDir string) (cfg TelemetryConfig, err error) { err = nil dataDirProvided := dataDir != "" var configPath string @@ -108,14 +106,8 @@ func ReadTelemetryConfigOrDefault(dataDir string, genesisID string) (cfg Telemet // If the reason is because the directory doesn't exist or we didn't provide a data directory then... if (err != nil && os.IsNotExist(err)) || !dataDirProvided { - configPath, err = config.GetConfigFilePath(TelemetryConfigFilename) - if err != nil { - // If the path could not be opened do nothing, the IsNotExist error - // is handled below. - } else { - // Load the telemetry from the default config path - cfg, err = LoadTelemetryConfig(configPath) - } + configPath = filepath.Join(globalDir, TelemetryConfigFilename) + cfg, err = LoadTelemetryConfig(configPath) } // If there was some error loading the configuration from the config path... @@ -131,35 +123,26 @@ func ReadTelemetryConfigOrDefault(dataDir string, genesisID string) (cfg Telemet return } } - ver := config.GetCurrentVersion() - ch := ver.Channel - // Should not happen, but default to "dev" if channel is unspecified. - if ch == "" { - ch = "dev" - } - cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) - cfg.Version = ver.String() return cfg, err } // EnsureTelemetryConfig creates a new TelemetryConfig structure with a generated GUID and the appropriate Telemetry endpoint // Err will be non-nil if the file doesn't exist, or if error loading. // Cfg will always be valid. -func EnsureTelemetryConfig(dataDir *string, genesisID string) (TelemetryConfig, error) { - cfg, _, err := EnsureTelemetryConfigCreated(dataDir, genesisID) +func EnsureTelemetryConfig(dataDir *string, globalDir *string) (TelemetryConfig, error) { + cfg, _, err := EnsureTelemetryConfigCreated(dataDir, globalDir) return cfg, err } // EnsureTelemetryConfigCreated is the same as EnsureTelemetryConfig but it also returns a bool indicating // whether EnsureTelemetryConfig had to create the config. -func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryConfig, bool, error) { +func EnsureTelemetryConfigCreated(dataDir *string, globalDir *string) (TelemetryConfig, bool, error) { /* Our logic should be as follows: - - We first look inside the provided data-directory. If a config file is there, load it - and return it - - Otherwise, look in the global directory. If a config file is there, load it and return it. - - Otherwise, if a data-directory was provided then save the config file there. - - Otherwise, save the config file in the global directory + - We first look inside dataDir. If a config file is there, load and return it + - Otherwise, look in the globalDir. If a config file is there, load and return it. + - Otherwise, if dataDir was provided then save a default config there. 
+ - Otherwise, save it to globalDir */ @@ -176,13 +159,13 @@ func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryC } } if configPath == "" { - configPath, err = config.GetConfigFilePath(TelemetryConfigFilename) - if err != nil { + if globalDir == nil { cfg := createTelemetryConfig() // Since GetConfigFilePath failed, there is no chance that we // can save the next config files return cfg, true, err } + configPath = filepath.Join(*globalDir, TelemetryConfigFilename) cfg, err = LoadTelemetryConfig(configPath) } created := false @@ -194,11 +177,11 @@ func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryC /* There could be a scenario where a data directory was supplied that doesn't exist. - In that case, we don't want to create the directory, just save in the global one + In that case, we don't want to create the directory, just save in cfgDir */ // If the directory exists... - if _, err := os.Stat(*dataDir); err == nil { + if _, err1 := os.Stat(*dataDir); err1 == nil { // Remember, if we had a data directory supplied we want to save the config there configPath = filepath.Join(*dataDir, TelemetryConfigFilename) @@ -212,15 +195,6 @@ func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryC err = cfg.Save(configPath) } - ver := config.GetCurrentVersion() - ch := ver.Channel - // Should not happen, but default to "dev" if channel is unspecified. - if ch == "" { - ch = "dev" - } - cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) - cfg.Version = ver.String() - return cfg, created, err } diff --git a/logging/telemetryCommon.go b/logging/telemetryCommon.go index cbcd1c4e1c..ac187f7857 100644 --- a/logging/telemetryCommon.go +++ b/logging/telemetryCommon.go @@ -56,6 +56,7 @@ type TelemetryConfig struct { Version string `json:"-"` UserName string Password string + DataDirectory string `json:"-"` // distinguishes instances on the same node } // MarshalingTelemetryConfig is used for json serialization of the TelemetryConfig diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go index f8c34d0e3a..9c69c77d88 100644 --- a/logging/telemetryConfig.go +++ b/logging/telemetryConfig.go @@ -25,7 +25,6 @@ import ( "github.com/sirupsen/logrus" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/util/uuid" ) @@ -73,11 +72,6 @@ func createTelemetryConfig() TelemetryConfig { } } -// LoadTelemetryConfig loads the TelemetryConfig from the config file -func LoadTelemetryConfig(configPath string) (TelemetryConfig, error) { - return loadTelemetryConfig(configPath) -} - // Save saves the TelemetryConfig to the config file func (cfg TelemetryConfig) Save(configPath string) error { f, err := os.Create(configPath) @@ -116,10 +110,9 @@ func (cfg TelemetryConfig) getHostGUID() string { // getInstanceName allows us to distinguish between multiple instances running on the same node. func (cfg TelemetryConfig) getInstanceName() string { - p := config.GetCurrentVersion().DataDirectory hash := sha256.New() hash.Write([]byte(cfg.GUID)) - hash.Write([]byte(p)) + hash.Write([]byte(cfg.DataDirectory)) pathHash := sha256.Sum256(hash.Sum(nil)) pathHashStr := base64.StdEncoding.EncodeToString(pathHash[:]) @@ -138,8 +131,9 @@ func SanitizeTelemetryString(input string, maxParts int) string { return input } -// Returns err if os.Open fails or if config is mal-formed -func loadTelemetryConfig(path string) (TelemetryConfig, error) { +// LoadTelemetryConfig loads the TelemetryConfig from the config file. 
It +// returns err if os.Open fails or if config is mal-formed +func LoadTelemetryConfig(path string) (TelemetryConfig, error) { f, err := os.Open(path) if err != nil { return createTelemetryConfig(), err diff --git a/logging/telemetryConfig_test.go b/logging/telemetryConfig_test.go index 5b7f18de1e..379b4b7f20 100644 --- a/logging/telemetryConfig_test.go +++ b/logging/telemetryConfig_test.go @@ -45,7 +45,7 @@ func Test_loadTelemetryConfig(t *testing.T) { a.NoError(err) configsPath := filepath.Join(ourPath, "../test/testdata/configs/logging/logging.config.example") - config, err := loadTelemetryConfig(configsPath) + config, err := LoadTelemetryConfig(configsPath) a.NoError(err) a.Equal(sample.Enable, config.Enable) @@ -75,7 +75,7 @@ func Test_CreateSaveLoadTelemetryConfig(t *testing.T) { err := config1.Save(configsPath) a.NoError(err) - config2, err := loadTelemetryConfig(configsPath) + config2, err := LoadTelemetryConfig(configsPath) a.NoError(err) a.Equal(config1.Enable, config2.Enable) @@ -115,7 +115,7 @@ func Test_SanitizeTelemetryString(t *testing.T) { func TestLoadTelemetryConfig(t *testing.T) { partitiontest.PartitionTest(t) testLoggingConfigFileName := "../test/testdata/configs/logging/logging.config.test1" - tc, err := loadTelemetryConfig(testLoggingConfigFileName) + tc, err := LoadTelemetryConfig(testLoggingConfigFileName) require.NoError(t, err) require.Equal(t, true, tc.Enable) // make sure the user name was loaded from the specified file @@ -128,7 +128,7 @@ func TestLoadTelemetryConfig(t *testing.T) { func TestLoadTelemetryConfigBlankUsernamePassword(t *testing.T) { partitiontest.PartitionTest(t) testLoggingConfigFileName := "../test/testdata/configs/logging/logging.config.test2" - tc, err := loadTelemetryConfig(testLoggingConfigFileName) + tc, err := LoadTelemetryConfig(testLoggingConfigFileName) require.NoError(t, err) // make sure the user name was loaded from the specified file require.Equal(t, defaultTelemetryUsername, tc.UserName) diff --git a/logging/telemetry_test.go b/logging/telemetry_test.go index a5a17c703b..6975038eaf 100644 --- a/logging/telemetry_test.go +++ b/logging/telemetry_test.go @@ -28,7 +28,6 @@ import ( "github.com/algorand/go-deadlock" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -341,12 +340,9 @@ func TestReadTelemetryConfigOrDefaultNoDataDir(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) tempDir := os.TempDir() - originalGlobalConfigFileRoot, _ := config.GetGlobalConfigFileRoot() - config.SetGlobalConfigFileRoot(tempDir) - cfg, err := ReadTelemetryConfigOrDefault("", "") + cfg, err := ReadTelemetryConfigOrDefault("", tempDir) defaultCfgSettings := createTelemetryConfig() - config.SetGlobalConfigFileRoot(originalGlobalConfigFileRoot) a.Nil(err) a.NotNil(cfg) diff --git a/logging/telemetryhook_test.go b/logging/telemetryhook_test.go index 3c5329cc61..417118696a 100644 --- a/logging/telemetryhook_test.go +++ b/logging/telemetryhook_test.go @@ -24,7 +24,6 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -45,13 +44,9 @@ func TestLoadDefaultConfig(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) - currentRoot := config.SetGlobalConfigFileRoot(t.TempDir()) - defer config.SetGlobalConfigFileRoot(currentRoot) - - _, err := EnsureTelemetryConfig(nil, "") - + temp := t.TempDir() + _, 
err := EnsureTelemetryConfig(nil, &temp) a.Nil(err) - } func isDefault(cfg TelemetryConfig) bool { @@ -68,10 +63,8 @@ func TestLoggingConfigDataDirFirst(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) - globalConfigRoot := t.TempDir() - oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot) - defer config.SetGlobalConfigFileRoot(oldConfigRoot) - globalLoggingPath := filepath.Join(globalConfigRoot, TelemetryConfigFilename) + temp := t.TempDir() + globalLoggingPath := filepath.Join(temp, TelemetryConfigFilename) dataDir := t.TempDir() dataDirLoggingPath := filepath.Join(dataDir, TelemetryConfigFilename) @@ -89,7 +82,7 @@ func TestLoggingConfigDataDirFirst(t *testing.T) { fout.Write([]byte("{\"Enable\":true}")) fout.Close() - cfg, err := EnsureTelemetryConfig(&dataDir, "") + cfg, err := EnsureTelemetryConfig(&dataDir, &temp) a.Nil(err) _, err = os.Stat(globalLoggingPath) @@ -99,7 +92,6 @@ func TestLoggingConfigDataDirFirst(t *testing.T) { a.Equal(cfg.FilePath, dataDirLoggingPath) a.NotEqual(cfg.GUID, defaultCfg.GUID) - a.NotEmpty(cfg.Version) // We got this from the tiny file we wrote to earlier. a.True(cfg.Enable) @@ -113,16 +105,13 @@ func TestLoggingConfigGlobalSecond(t *testing.T) { a := require.New(t) globalConfigRoot := t.TempDir() - oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot) - defer config.SetGlobalConfigFileRoot(oldConfigRoot) globalLoggingPath := filepath.Join(globalConfigRoot, TelemetryConfigFilename) _, err := os.Stat(globalLoggingPath) a.True(os.IsNotExist(err)) cfgPath := "/missing-directory" - cfg, err := EnsureTelemetryConfig(&cfgPath, "") - + cfg, err := EnsureTelemetryConfig(&cfgPath, &globalConfigRoot) a.Nil(err) _, err = os.Stat(globalLoggingPath) a.Nil(err) @@ -132,7 +121,6 @@ func TestLoggingConfigGlobalSecond(t *testing.T) { defaultCfg := createTelemetryConfig() a.Equal(cfg.FilePath, globalLoggingPath) a.NotEqual(cfg.GUID, defaultCfg.GUID) - a.NotEmpty(cfg.Version) a.True(isDefault(cfg)) @@ -145,14 +133,14 @@ func TestSaveLoadConfig(t *testing.T) { a := require.New(t) globalConfigRoot := t.TempDir() - oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot) - defer config.SetGlobalConfigFileRoot(oldConfigRoot) configDir := t.TempDir() err := os.Mkdir(configDir, 0777) - cfg, err := EnsureTelemetryConfig(&configDir, "") + cfg, err := EnsureTelemetryConfig(&configDir, &globalConfigRoot) cfg.Name = "testname" + cfg.ChainID = "would end up set" + cfg.Version = "would also be set" err = cfg.Save(cfg.FilePath) a.NoError(err) diff --git a/netdeploy/network.go b/netdeploy/network.go index ec93b3800e..b49c9a2797 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -325,14 +325,14 @@ func (n Network) Start(binDir string, redirectOutput bool) error { PeerAddress: relayAddress, // on the first iteration it would be empty, which is ok. subsequent iterations would link all the relays. 
} - _, err := nc.StartAlgod(args) - if err != nil { - return err + _, err1 := nc.StartAlgod(args) + if err1 != nil { + return err1 } - relayAddress, err = n.getRelayAddress(nc) - if err != nil { - return err + relayAddress, err1 = n.getRelayAddress(nc) + if err1 != nil { + return err1 } relayNameToAddress[relayDir] = relayAddress } @@ -342,19 +342,18 @@ func (n Network) Start(binDir string, redirectOutput bool) error { } // retry fetching the relay address -func (n Network) getRelayAddress(nc nodecontrol.NodeController) (relayAddress string, err error) { +func (n Network) getRelayAddress(nc nodecontrol.NodeController) (string, error) { for i := 1; ; i++ { - relayAddress, err = nc.GetListeningAddress() + relayAddress, err := nc.GetListeningAddress() if err == nil { - return + return relayAddress, nil } if i <= maxGetRelayAddressRetry { time.Sleep(100 * time.Millisecond) } else { - break + return "", err } } - return } // GetPeerAddresses returns an array of Relay addresses, if any; to be used to start nodes diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index a02f396d6d..9ba0e1a02f 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -567,9 +567,9 @@ func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64, } for _, stxn := range stxns { - txib, err := block.EncodeSignedTxn(stxn, transactions.ApplyData{}) - if err != nil { - return bookkeeping.Block{}, err + txib, err1 := block.EncodeSignedTxn(stxn, transactions.ApplyData{}) + if err1 != nil { + return bookkeeping.Block{}, err1 } txibs = append(txibs, txib) } diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go index 5298a260a7..f22f8b73f9 100644 --- a/netdeploy/remote/nodecfg/nodeDir.go +++ b/netdeploy/remote/nodecfg/nodeDir.go @@ -284,11 +284,18 @@ func (nd *nodeDir) configureAdminAPIToken(token string) (err error) { } func (nd *nodeDir) configureTelemetry(enable bool) (err error) { - cfg, created, cfgErr := logging.EnsureTelemetryConfigCreated(nil, "") + root, err := config.GetGlobalConfigFileRoot() + var cfgDir *string + if err == nil { + cfgDir = &root + } + cfg, created, cfgErr := logging.EnsureTelemetryConfigCreated(nil, cfgDir) if cfgErr != nil { return cfgErr } + config.AnnotateTelemetry(&cfg, nd.configurator.genesisData.ID()) + // Override default enabling of new telemetry config if created { cfg.Enable = false diff --git a/network/addr.go b/network/addr.go index 19208c8b50..8fcc140883 100644 --- a/network/addr.go +++ b/network/addr.go @@ -34,6 +34,6 @@ func (wn *WebsocketNetwork) addrToGossipAddr(a string) (string, error) { if parsedURL.Scheme == "" { parsedURL.Scheme = "ws" } - parsedURL.Path = strings.Replace(path.Join(parsedURL.Path, GossipNetworkPath), "{genesisID}", wn.genesisID, -1) + parsedURL.Path = strings.Replace(path.Join(parsedURL.Path, GossipNetworkPath), "{genesisID}", wn.GetGenesisID(), -1) return parsedURL.String(), nil } diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index 3f514c7267..57ef3bd078 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -32,14 +32,14 @@ import ( type HybridP2PNetwork struct { p2pNetwork *P2PNetwork wsNetwork *WebsocketNetwork - genesisID string useP2PAddress bool + mesher mesher } // NewHybridP2PNetwork constructs a GossipNode that combines P2PNetwork and WebsocketNetwork // Hybrid mode requires both P2P and WS to be running in server (NetAddress set) or client (NetAddress empty) mode. 
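// --- Aside (not part of this diff): the logging package no longer reaches
// into config for the global directory or the ChainID/Version annotation, so
// callers resolve the global config root and annotate the loaded config
// themselves, as the nodeDir.go hunk above does. A minimal sketch of that
// call order; config.AnnotateTelemetry's signature is assumed from its use
// there.
package sketch

import (
	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/logging"
)

func loadTelemetrySketch(dataDir *string, genesisID string) (logging.TelemetryConfig, error) {
	var globalDir *string
	if root, err := config.GetGlobalConfigFileRoot(); err == nil {
		globalDir = &root
	}
	cfg, err := logging.EnsureTelemetryConfig(dataDir, globalDir)
	if err != nil {
		return cfg, err
	}
	// ChainID and Version are now stamped by the caller, not by logging.
	config.AnnotateTelemetry(&cfg, genesisID)
	return cfg, nil
}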
-func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (*HybridP2PNetwork, error) { +func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisInfo GenesisInfo, nodeInfo NodeInfo, meshCreator MeshCreator) (*HybridP2PNetwork, error) { if err := cfg.ValidateP2PHybridConfig(); err != nil { return nil, err } @@ -48,7 +48,21 @@ func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, p p2pcfg.NetAddress = cfg.P2PHybridNetAddress p2pcfg.IncomingConnectionsLimit = cfg.P2PHybridIncomingConnectionsLimit identityTracker := NewIdentityTracker() - p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisID, networkID, nodeInfo, &identityOpts{tracker: identityTracker}) + + var childWsNetMeshCreator MeshCreator = meshCreator + var childP2PNetMeshCreator MeshCreator = meshCreator + var hybridMeshCreator MeshCreator = noopMeshCreator{} + _, isHybridMeshCreator := meshCreator.(hybridRelayMeshCreator) + if meshCreator == nil && cfg.IsHybridServer() || isHybridMeshCreator { + // no mesh creator provided and this node is a listening/relaying node + // then override and use hybrid relay meshing + // or, if a hybrid relay meshing requested explicitly, do the same + childWsNetMeshCreator = noopMeshCreator{} + childP2PNetMeshCreator = noopMeshPubSubFilteredCreator{} + hybridMeshCreator = hybridRelayMeshCreator{} + } + + p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisInfo, nodeInfo, &identityOpts{tracker: identityTracker}, childP2PNetMeshCreator) if err != nil { return nil, err } @@ -57,15 +71,25 @@ func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, p tracker: identityTracker, scheme: NewIdentityChallengeScheme(NetIdentityDedupNames(cfg.PublicAddress, p2pnet.PeerID().String()), NetIdentitySigner(p2pnet.PeerIDSigner())), } - wsnet, err := NewWebsocketNetwork(log, cfg, phonebookAddresses, genesisID, networkID, nodeInfo, &identOpts) + wsnet, err := NewWebsocketNetwork(log, cfg, phonebookAddresses, genesisInfo, nodeInfo, &identOpts, childWsNetMeshCreator) if err != nil { return nil, err } - return &HybridP2PNetwork{ + + hybridMesh, err := hybridMeshCreator.create( + withWebsocketNetwork(wsnet), + withP2PNetwork(p2pnet)) + if err != nil { + return nil, fmt.Errorf("failed to create hybrid mesh: %w", err) + } + + hn := &HybridP2PNetwork{ p2pNetwork: p2pnet, wsNetwork: wsnet, - genesisID: genesisID, - }, nil + mesher: hybridMesh, + } + + return hn, nil } // Address implements GossipNode @@ -179,7 +203,11 @@ func (n *HybridP2PNetwork) Start() error { err := n.runParallel(func(net GossipNode) error { return net.Start() }) - return err + if err != nil { + return err + } + n.mesher.start() + return nil } // Stop implements GossipNode @@ -188,6 +216,8 @@ func (n *HybridP2PNetwork) Stop() { net.Stop() return nil }) + + n.mesher.stop() } // RegisterHandlers adds to the set of given message handlers. @@ -236,7 +266,7 @@ func (n *HybridP2PNetwork) OnNetworkAdvance() { // GetGenesisID returns the network-specific genesisID. 
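// --- Aside (not part of this diff): NewHybridP2PNetwork now takes a
// GenesisInfo value and a MeshCreator instead of separate genesisID/networkID
// arguments. A minimal construction sketch written in the style of the
// package's tests below; the GenesisInfo field order and the nil-MeshCreator
// default (hybrid relay meshing for hybrid servers) are assumptions based on
// those tests and the hunk above.
package network

import (
	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/logging"
)

func newHybridSketch(log logging.Logger, cfg config.Local, ni NodeInfo) (*HybridP2PNetwork, error) {
	gi := GenesisInfo{"test-v1", "testnet"} // genesis ID and network ID, illustrative values
	// A nil MeshCreator lets the constructor choose: hybrid relay meshing when
	// cfg.IsHybridServer(), otherwise the networks' own default meshers.
	return NewHybridP2PNetwork(log, cfg, "" /* datadir */, nil /* phonebook */, gi, ni, nil)
}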
func (n *HybridP2PNetwork) GetGenesisID() string { - return n.genesisID + return n.wsNetwork.GetGenesisID() } // called from wsPeer to report that it has closed diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go index 1eac45e6de..911db9f410 100644 --- a/network/hybridNetwork_test.go +++ b/network/hybridNetwork_test.go @@ -43,10 +43,11 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { const p2pKeyDir = "" identDiscValue := networkPeerIdentityDisconnect.GetUint64Value() + genesisInfo := GenesisInfo{genesisID, "net"} relayCfg := cfg relayCfg.ForceRelayMessages = true - netA, err := NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) + netA, err := NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.NoError(t, err) err = netA.Start() @@ -65,7 +66,7 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { relayCfg.NetAddress = addr relayCfg.PublicAddress = addr relayCfg.P2PHybridNetAddress = "127.0.0.1:0" - netA, err = NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) + netA, err = NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.NoError(t, err) err = netA.Start() @@ -86,14 +87,14 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { phoneBookAddresses := []string{multiAddrStr, addr} - netB, err := NewHybridP2PNetwork(log.With("node", "netB"), cfg, "", phoneBookAddresses, genesisID, "net", &nopeNodeInfo{}) + netB, err := NewHybridP2PNetwork(log.With("node", "netB"), cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.NoError(t, err) // for netB start the p2p network first err = netB.p2pNetwork.Start() require.NoError(t, err) defer netB.Stop() - netC, err := NewHybridP2PNetwork(log.With("node", "netC"), cfg, "", phoneBookAddresses, genesisID, "net", &nopeNodeInfo{}) + netC, err := NewHybridP2PNetwork(log.With("node", "netC"), cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.NoError(t, err) // for netC start the ws network first err = netC.wsNetwork.Start() @@ -190,12 +191,93 @@ func TestHybridNetwork_ValidateConfig(t *testing.T) { cfg.EnableP2PHybridMode = true cfg.NetAddress = ":0" cfg.P2PHybridNetAddress = "" + genesisInfo := GenesisInfo{genesisID, "net"} - _, err := NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisID, "net", &nopeNodeInfo{}) + _, err := NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.ErrorContains(t, err, "both NetAddress and P2PHybridNetAddress") cfg.NetAddress = "" cfg.P2PHybridNetAddress = ":0" - _, err = NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisID, "net", &nopeNodeInfo{}) + _, err = NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisInfo, &nopeNodeInfo{}, &baseMeshCreator{}) require.ErrorContains(t, err, "both NetAddress and P2PHybridNetAddress") } + +func TestHybridNetwork_HybridRelayStrategy(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.EnableP2PHybridMode = true + log := logging.TestingLog(t) + + genesisInfo := GenesisInfo{genesisID, "net"} + + startNewRelayNode := func(name string, phonebook []string) (*HybridP2PNetwork, []string) { + relayCfg := cfg + relayCfg.ForceRelayMessages = true + // no phonebook addresses since we 
start and and stop it to collect the ws address + net, err := NewHybridP2PNetwork(log.With("node", name), relayCfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil) + require.NoError(t, err) + + err = net.Start() + require.NoError(t, err) + + // collect ws address + addr, portListen := net.wsNetwork.Address() + require.True(t, portListen) + require.NotZero(t, addr) + parsed, err := url.Parse(addr) + require.NoError(t, err) + addr = parsed.Host + net.Stop() + + // make it net address and restart the node + relayCfg.NetAddress = addr + relayCfg.PublicAddress = addr + relayCfg.P2PHybridNetAddress = "127.0.0.1:0" + net, err = NewHybridP2PNetwork(log.With("node", name), relayCfg, "", phonebook, genesisInfo, &nopeNodeInfo{}, nil) + require.NoError(t, err) + + err = net.Start() + require.NoError(t, err) + + // collect relay address and prepare nodes phonebook + peerInfo := net.p2pNetwork.service.AddrInfo() + addrsP2P, err := peer.AddrInfoToP2pAddrs(&peerInfo) + require.NoError(t, err) + require.NotZero(t, addrsP2P[0]) + multiAddrStr := addrsP2P[0].String() + + fullAddr, portListen := net.wsNetwork.Address() + require.True(t, portListen) + require.NotZero(t, addr) + require.Contains(t, fullAddr, addr) + + return net, []string{multiAddrStr, addr} + } + + netA, netAddrs := startNewRelayNode("netA", nil) + defer netA.Stop() + + phoneBookAddresses := append([]string{}, netAddrs...) + + netB, netAddrs := startNewRelayNode("netB", phoneBookAddresses) + defer netB.Stop() + + phoneBookAddresses = append(phoneBookAddresses, netAddrs...) + + netC, _ := startNewRelayNode("netC", phoneBookAddresses) + defer netC.Stop() + + // ensure initial connections are done + require.Eventually(t, func() bool { + return len(netA.GetPeers(PeersConnectedIn, PeersConnectedOut)) == 2 && + len(netB.GetPeers(PeersConnectedIn, PeersConnectedOut)) == 2 + }, 3*time.Second, 100*time.Millisecond) + + // make sure all are connected via ws net + wsPeersA := netA.wsNetwork.GetPeers(PeersConnectedIn, PeersConnectedOut) + wsPeersB := netB.wsNetwork.GetPeers(PeersConnectedIn, PeersConnectedOut) + require.Len(t, wsPeersA, 2) + require.Len(t, wsPeersB, 2) +} diff --git a/network/limitlistener/rejectingLimitListener_test.go b/network/limitlistener/rejectingLimitListener_test.go index a3b955fc5e..dbfbbacef2 100644 --- a/network/limitlistener/rejectingLimitListener_test.go +++ b/network/limitlistener/rejectingLimitListener_test.go @@ -24,10 +24,8 @@ func TestRejectingLimitListenerBasic(t *testing.T) { partitiontest.PartitionTest(t) const limit = 5 - attempts := (maxOpenFiles() - limit) / 2 - if attempts > 256 { // maximum length of accept queue is 128 by default - attempts = 256 - } + // maximum length of accept queue is 128 by default + attempts := min((maxOpenFiles()-limit)/2, 256) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { diff --git a/network/mesh.go b/network/mesh.go new file mode 100644 index 0000000000..7b020c610a --- /dev/null +++ b/network/mesh.go @@ -0,0 +1,299 @@ +// Copyright (C) 2019-2025 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package network + +import ( + "context" + "errors" + "math/rand" + "sync" + "time" + + "github.com/algorand/go-algorand/network/p2p" + "github.com/libp2p/go-libp2p/p2p/discovery/backoff" +) + +const meshThreadInterval = time.Minute + +type mesher interface { + start() + stop() +} + +type baseMesher struct { + wg sync.WaitGroup + meshConfig +} + +type meshConfig struct { + ctx context.Context + meshUpdateRequests chan meshRequest + meshThreadInterval time.Duration + backoff backoff.BackoffStrategy + netMeshFn func() bool + peerStatReporter func() + closer func() + + // wsnet and p2pnet are used in hybrid relay mode + wsnet *WebsocketNetwork + p2pnet *P2PNetwork +} + +type meshOption func(*meshConfig) + +func withMeshExpJitterBackoff() meshOption { + return func(cfg *meshConfig) { + // Add exponential backoff with jitter to the mesh thread to handle new networks startup + // when no DNS or DHT peers are available. + // The parameters produce approximate the following delays (although they are random but the sequence give the idea): + // 2 2.4 4.6 9 20 19.5 28 24 14 14 35 60 60 + ebf := backoff.NewExponentialDecorrelatedJitter(2*time.Second, meshThreadInterval, 3.0, rand.NewSource(rand.Int63())) + eb := ebf() + cfg.backoff = eb + } +} +func withMeshNetMeshFn(netMeshFn func() bool) meshOption { + return func(cfg *meshConfig) { + cfg.netMeshFn = netMeshFn + } +} +func withMeshPeerStatReporter(peerStatReporter func()) meshOption { + return func(cfg *meshConfig) { + cfg.peerStatReporter = peerStatReporter + } +} +func withMeshCloser(closer func()) meshOption { + return func(cfg *meshConfig) { + cfg.closer = closer + } +} + +func withMeshUpdateRequest(ch chan meshRequest) meshOption { + return func(cfg *meshConfig) { + cfg.meshUpdateRequests = ch + } +} + +func withMeshUpdateInterval(d time.Duration) meshOption { + return func(cfg *meshConfig) { + cfg.meshThreadInterval = d + } +} + +func withContext(ctx context.Context) meshOption { + return func(cfg *meshConfig) { + cfg.ctx = ctx + } +} + +func withWebsocketNetwork(wsnet *WebsocketNetwork) meshOption { + return func(cfg *meshConfig) { + cfg.wsnet = wsnet + } +} + +func withP2PNetwork(p2pnet *P2PNetwork) meshOption { + return func(cfg *meshConfig) { + cfg.p2pnet = p2pnet + } +} + +func newBaseMesher(opts ...meshOption) (*baseMesher, error) { + var cfg meshConfig + for _, opt := range opts { + opt(&cfg) + } + if cfg.ctx == nil { + return nil, errors.New("context is not set") + } + if cfg.netMeshFn == nil { + return nil, errors.New("mesh function is not set") + } + if cfg.meshUpdateRequests == nil { + return nil, errors.New("mesh update requests channel is not set") + } + if cfg.meshThreadInterval == 0 { + cfg.meshThreadInterval = meshThreadInterval + } + + return &baseMesher{ + meshConfig: cfg, + }, nil +} + +func (m *baseMesher) meshThread() { + defer m.wg.Done() + + timer := time.NewTicker(m.meshThreadInterval) + defer timer.Stop() + for { + var request meshRequest + select { + case <-timer.C: + request.done = nil + case request = <-m.meshUpdateRequests: + case <-m.ctx.Done(): + return + } + + hasPeers := m.netMeshFn() + if m.backoff != nil { + if hasPeers { + // found something, reset timer to the configured value + timer.Reset(m.meshThreadInterval) + m.backoff.Reset() + } else { + // no peers found, backoff + timer.Reset(m.backoff.Delay()) + } + } + if 
request.done != nil { + close(request.done) + } + + // send the currently connected peers information to the + // telemetry server; that would allow the telemetry server + // to construct a cross-node map of all the nodes interconnections. + m.peerStatReporter() + } +} + +func (m *baseMesher) start() { + m.wg.Add(1) + go m.meshThread() +} + +func (m *baseMesher) stop() { + m.wg.Wait() + if m.closer != nil { + m.closer() + } +} + +type networkConfig struct { + pubsubOpts []p2p.PubSubOption // at the moment only pubsub configuration options only +} + +// MeshCreator is an interface for creating mesh strategies. +type MeshCreator interface { + create(opts ...meshOption) (mesher, error) + makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig +} + +// baseMeshCreator is a creator for the base mesh strategy used in our standard WS or P2P implementations: +// run a mesh thread that periodically checks for new peers. +type baseMeshCreator struct{} + +func (c baseMeshCreator) create(opts ...meshOption) (mesher, error) { + return newBaseMesher(opts...) +} + +func (c baseMeshCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig { + return networkConfig{} +} + +// hybridRelayMeshCreator is a creator for the hybrid relay mesh strategy used in hybrid relays: +// always use wsnet nodes +type hybridRelayMeshCreator struct{} + +func (c hybridRelayMeshCreator) create(opts ...meshOption) (mesher, error) { + var cfg meshConfig + for _, opt := range opts { + opt(&cfg) + } + + if cfg.wsnet == nil || cfg.p2pnet == nil { + return nil, errors.New("both websocket and p2p networks must be provided") + } + + out := make(chan meshRequest, 5) + var wg sync.WaitGroup + + ctx := cfg.wsnet.ctx + mesh, err := newBaseMesher( + withContext(ctx), + withMeshNetMeshFn(cfg.wsnet.meshThreadInner), + withMeshPeerStatReporter(func() { + cfg.p2pnet.peerStater.sendPeerConnectionsTelemetryStatus(cfg.wsnet) + cfg.p2pnet.peerStater.sendPeerConnectionsTelemetryStatus(cfg.p2pnet) + }), + withMeshCloser(func() { + wg.Wait() + close(out) + }), + withMeshUpdateRequest(out), + withMeshUpdateInterval(meshThreadInterval), + ) + if err != nil { + return nil, err + } + + wg.Add(2) + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + return + case req := <-cfg.wsnet.meshUpdateRequests: + out <- req + } + }() + + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + return + case req := <-cfg.p2pnet.meshUpdateRequests: + out <- req + } + }() + + return mesh, nil +} + +func (c hybridRelayMeshCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig { + return networkConfig{} +} + +type noopMeshCreator struct{} + +func (c noopMeshCreator) create(opts ...meshOption) (mesher, error) { + return &noopMesh{}, nil +} +func (c noopMeshCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig { + return networkConfig{} +} + +type noopMesh struct{} + +func (m *noopMesh) start() {} +func (m *noopMesh) stop() {} + +type noopMeshPubSubFilteredCreator struct{} + +func (c noopMeshPubSubFilteredCreator) create(opts ...meshOption) (mesher, error) { + return &noopMesh{}, nil +} +func (c noopMeshPubSubFilteredCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig { + return networkConfig{ + pubsubOpts: []p2p.PubSubOption{ + p2p.DisablePubSubPeerExchange(), + p2p.SetPubSubPeerFilter(p2pnet.p2pRelayPeerFilter, p2pnet.pstore), + }, + } +} diff --git a/network/metrics.go b/network/metrics.go index 1a0857a349..aa3fc23a9f 100644 --- 
a/network/metrics.go +++ b/network/metrics.go @@ -34,6 +34,7 @@ func init() { } networkSentBytesByTag = metrics.NewTagCounterFiltered("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages", tagStringList, "UNK") networkReceivedBytesByTag = metrics.NewTagCounterFiltered("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages", tagStringList, "UNK") + networkReceivedUncompressedBytesByTag = metrics.NewTagCounterFiltered("algod_network_received_uncompressed_bytes_{TAG}", "Number of bytes after decompression that were received from the network for {TAG} messages", tagStringList, "UNK") networkMessageReceivedByTag = metrics.NewTagCounterFiltered("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages", tagStringList, "UNK") networkMessageSentByTag = metrics.NewTagCounterFiltered("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages", tagStringList, "UNK") networkHandleCountByTag = metrics.NewTagCounterFiltered("algod_network_rx_handle_countbytag_{TAG}", "count of handler calls in the receive thread for {TAG} messages", tagStringList, "UNK") @@ -41,6 +42,7 @@ func init() { networkP2PSentBytesByTag = metrics.NewTagCounterFiltered("algod_network_p2p_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages", tagStringList, "UNK") networkP2PReceivedBytesByTag = metrics.NewTagCounterFiltered("algod_network_p2p_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages", tagStringList, "UNK") + networkP2PReceivedUncompressedBytesByTag = metrics.NewTagCounterFiltered("algod_network_p2p_received_uncompressed_bytes_{TAG}", "Number of bytes after decompression that were received from the network for {TAG} messages", tagStringList, "UNK") networkP2PMessageReceivedByTag = metrics.NewTagCounterFiltered("algod_network_p2p_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages", tagStringList, "UNK") networkP2PMessageSentByTag = metrics.NewTagCounterFiltered("algod_network_p2p_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages", tagStringList, "UNK") } @@ -53,6 +55,8 @@ var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytes var networkP2PReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkP2PReceivedBytesTotal) var networkReceivedBytesByTag *metrics.TagCounter var networkP2PReceivedBytesByTag *metrics.TagCounter +var networkReceivedUncompressedBytesByTag *metrics.TagCounter +var networkP2PReceivedUncompressedBytesByTag *metrics.TagCounter var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal) var networkP2PMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkP2PMessageReceivedTotal) diff --git a/network/msgp_gen.go b/network/msgp_gen.go index 125fa61660..5451e5e53a 100644 --- a/network/msgp_gen.go +++ b/network/msgp_gen.go @@ -1261,22 +1261,22 @@ func (z peerMetaHeaders) MarshalMsg(b []byte) (o []byte) { } else { o = msgp.AppendMapHeader(o, uint32(len(z))) } - za0005_keys := make([]string, 0, len(z)) - for za0005 := range z { - za0005_keys = append(za0005_keys, za0005) + za0006_keys := make([]string, 0, len(z)) + for za0006 := range z { + za0006_keys = append(za0006_keys, za0006) } - sort.Sort(SortString(za0005_keys)) - for _, za0005 := range 
za0005_keys { - za0006 := z[za0005] - _ = za0006 - o = msgp.AppendString(o, za0005) - if za0006 == nil { + sort.Sort(SortString(za0006_keys)) + for _, za0006 := range za0006_keys { + za0007 := z[za0006] + _ = za0007 + o = msgp.AppendString(o, za0006) + if za0007 == nil { o = msgp.AppendNil(o) } else { - o = msgp.AppendArrayHeader(o, uint32(len(za0006))) + o = msgp.AppendArrayHeader(o, uint32(len(za0007))) } - for za0007 := range za0006 { - o = msgp.AppendString(o, za0006[za0007]) + for za0008 := range za0007 { + o = msgp.AppendString(o, za0007[za0008]) } } return @@ -1367,12 +1367,12 @@ func (_ *peerMetaHeaders) CanUnmarshalMsg(z interface{}) bool { func (z peerMetaHeaders) Msgsize() (s int) { s = msgp.MapHeaderSize if z != nil { - for za0005, za0006 := range z { - _ = za0005 + for za0006, za0007 := range z { _ = za0006 - s += 0 + msgp.StringPrefixSize + len(za0005) + msgp.ArrayHeaderSize - for za0007 := range za0006 { - s += msgp.StringPrefixSize + len(za0006[za0007]) + _ = za0007 + s += 0 + msgp.StringPrefixSize + len(za0006) + msgp.ArrayHeaderSize + for za0008 := range za0007 { + s += msgp.StringPrefixSize + len(za0007[za0008]) } } } @@ -1389,12 +1389,12 @@ func PeerMetaHeadersMaxSize() (s int) { s += msgp.MapHeaderSize // Adding size of map keys for z s += maxHeaderKeys - panic("Unable to determine max size: String type za0005 is unbounded") + panic("Unable to determine max size: String type za0006 is unbounded") // Adding size of map values for z s += maxHeaderKeys - // Calculating size of slice: za0006 + // Calculating size of slice: za0007 s += msgp.ArrayHeaderSize - panic("Unable to determine max size: String type is unbounded for za0006[za0007]") + panic("Unable to determine max size: String type is unbounded for za0007[za0008]") return } diff --git a/network/p2p/capabilities.go b/network/p2p/capabilities.go index 71a63d4c0c..fafa4ee6b1 100644 --- a/network/p2p/capabilities.go +++ b/network/p2p/capabilities.go @@ -18,15 +18,18 @@ package p2p import ( "context" + randv1 "math/rand" "math/rand/v2" "sync" "time" dht "github.com/libp2p/go-libp2p-kad-dht" + kbucket "github.com/libp2p/go-libp2p-kbucket" "github.com/libp2p/go-libp2p/core/discovery" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libpeerstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" @@ -109,6 +112,8 @@ func (c *CapabilitiesDiscovery) PeersForCapability(capability Capability, n int) return peers, nil } +const capAdvertisementInitialDelay = time.Second / 10000 + // AdvertiseCapabilities periodically runs the Advertiser interface on the DHT // If a capability fails to advertise we will retry every 100 seconds until full success // This gets rerun every at the minimum ttl or the maxAdvertisementInterval. 
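The hunk below replaces the fixed 100-second retry with go-libp2p's decorrelated-jitter backoff: the delay between failed advertisement rounds starts near one second, grows by roughly 3x with jitter, is capped at 100 seconds, and is reset once a round fully succeeds. A minimal standalone sketch of that strategy, using the same backoff package and parameters as the change (the print loop is illustrative only, not part of the patch):

package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
)

func main() {
	// Same parameters as the advertisement retry path: ~1s initial delay,
	// 100s cap, 3.0 multiplier, decorrelated jitter.
	factory := backoff.NewExponentialDecorrelatedJitter(
		1*time.Second, 100*time.Second, 3.0, rand.NewSource(rand.Int63()))
	strategy := factory()

	// Each failed advertisement round waits strategy.Delay() before retrying.
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d: next retry in %v\n", attempt, strategy.Delay())
	}

	// A successful round resets the schedule back to the initial delay.
	strategy.Reset()
}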
@@ -116,10 +121,13 @@ func (c *CapabilitiesDiscovery) AdvertiseCapabilities(capabilities ...Capability c.wg.Add(1) go func() { // Run the initial Advertisement immediately - nextExecution := time.After(time.Second / 10000) + nextExecution := time.After(capAdvertisementInitialDelay) defer func() { c.wg.Done() }() + // Create a exp jitter backoff strategy to use for retrying failed advertisements + ebf := backoff.NewExponentialDecorrelatedJitter(1*time.Second, 100*time.Second, 3.0, randv1.NewSource(randv1.Int63())) + eb := ebf() for { // shuffle capabilities to advertise in random order @@ -141,7 +149,12 @@ func (c *CapabilitiesDiscovery) AdvertiseCapabilities(capabilities ...Capability ttl, err0 := c.advertise(c.dht.Context(), string(capa)) if err0 != nil { err = err0 - c.log.Warnf("failed to advertise for capability %s: %v", capa, err0) + loggerFn := c.log.Errorf + if err0 == kbucket.ErrLookupFailure { + // No peers in a routing table, it is typical for startup and not an error + loggerFn = c.log.Debugf + } + loggerFn("failed to advertise for capability %s: %v", capa, err0) break } if ttl < advertisementInterval { @@ -149,12 +162,13 @@ func (c *CapabilitiesDiscovery) AdvertiseCapabilities(capabilities ...Capability } c.log.Infof("advertised capability %s", capa) } - // If we failed to advertise, retry every 100 seconds until successful + // If we failed to advertise, retry every according to exp jitter delays until successful if err != nil { - nextExecution = time.After(time.Second * 100) + nextExecution = time.After(eb.Delay()) } else { // Otherwise, ensure we're at the correct interval nextExecution = time.After(advertisementInterval) + eb.Reset() } } } diff --git a/network/p2p/capabilities_test.go b/network/p2p/capabilities_test.go index 508e5d37d6..c084ac1a24 100644 --- a/network/p2p/capabilities_test.go +++ b/network/p2p/capabilities_test.go @@ -17,6 +17,7 @@ package p2p import ( + "bytes" "context" "math/rand" "sync" @@ -315,3 +316,53 @@ func TestCapabilities_ExcludesSelf(t *testing.T) { require.NoError(t, err) disc[0].wg.Wait() } + +// TestCapabilities_NoPeers makes sure no errors logged when no peers in routing table on advertise +func TestCapabilities_NoPeers(t *testing.T) { + partitiontest.PartitionTest(t) + + // create a single host/DHT => no peers in routing table + cfg := config.GetDefaultLocal() + tmpdir := t.TempDir() + pk, err := GetPrivKey(cfg, tmpdir) + require.NoError(t, err) + ps, err := peerstore.NewPeerStore(nil, "") + require.NoError(t, err) + h, err := libp2p.New( + libp2p.ListenAddrStrings("/dns4/localhost/tcp/0"), + libp2p.Identity(pk), + libp2p.Peerstore(ps)) + require.NoError(t, err) + defer h.Close() + + ht, err := algodht.MakeDHT(context.Background(), h, "devtestnet", cfg, func() []peer.AddrInfo { return nil }) + require.NoError(t, err) + err = ht.Bootstrap(context.Background()) + require.NoError(t, err) + defer ht.Close() + + disc, err := algodht.MakeDiscovery(ht) + require.NoError(t, err) + + var buf bytes.Buffer + log := logging.NewLogger() + log.SetLevel(logging.Info) + log.SetOutput(&buf) + + cd := &CapabilitiesDiscovery{ + disc: disc, + dht: ht, + log: log, + } + defer cd.Close() + + cd.AdvertiseCapabilities(Archival) + + // sleep 3x capAdvertisementInitialDelay to allow for the log messages to be generated + time.Sleep(3 * capAdvertisementInitialDelay) + + logData := buf.String() + require.NotContains(t, logData, "advertised capability") + require.NotContains(t, logData, "failed to advertise for capability") + require.NotContains(t, logData, "failed to 
find any peer in table") +} diff --git a/network/p2p/dnsaddr/resolve_test.go b/network/p2p/dnsaddr/resolve_test.go index 1582515f84..0e259240ae 100644 --- a/network/p2p/dnsaddr/resolve_test.go +++ b/network/p2p/dnsaddr/resolve_test.go @@ -85,7 +85,7 @@ func (f *failureResolver) LookupTXT(context.Context, string) ([]string, error) { return nil, fmt.Errorf("always errors") } -func TestMultiaddrsFromResolverDnsFailure(t *testing.T) { +func TestMultiaddrsFromResolverDnsErr(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 23cb74c9fd..2c297f8479 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -83,7 +83,6 @@ type serviceImpl struct { host host.Host streams *streamManager pubsub *pubsub.PubSub - pubsubCtx context.Context privKey crypto.PrivKey topics map[string]*pubsub.Topic @@ -191,8 +190,35 @@ type StreamHandlerPair struct { // StreamHandlers is an ordered list of StreamHandlerPair type StreamHandlers []StreamHandlerPair +// PubSubOption is a function that modifies the pubsub options +type PubSubOption func(opts *[]pubsub.Option) + +// DisablePubSubPeerExchange disables PX (peer exchange) in pubsub +func DisablePubSubPeerExchange() PubSubOption { + return func(opts *[]pubsub.Option) { + *opts = append(*opts, pubsub.WithPeerExchange(false)) + } +} + +// SetPubSubMetricsTracer sets a pubsub.RawTracer for metrics collection +func SetPubSubMetricsTracer(metricsTracer pubsub.RawTracer) PubSubOption { + return func(opts *[]pubsub.Option) { + *opts = append(*opts, pubsub.WithRawTracer(metricsTracer)) + } +} + +// SetPubSubPeerFilter sets a pubsub.PeerFilter for peers filtering out +func SetPubSubPeerFilter(filter func(checker pstore.RoleChecker, pid peer.ID) bool, checker pstore.RoleChecker) PubSubOption { + return func(opts *[]pubsub.Option) { + f := func(pid peer.ID, topic string) bool { + return filter(checker, pid) + } + *opts = append(*opts, pubsub.WithPeerFilter(f)) + } +} + // MakeService creates a P2P service instance -func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandlers StreamHandlers, metricsTracer pubsub.RawTracer) (*serviceImpl, error) { +func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandlers StreamHandlers, pubsubOptions ...PubSubOption) (*serviceImpl, error) { sm := makeStreamManager(ctx, log, h, wsStreamHandlers, cfg.EnableGossipService) h.Network().Notify(sm) @@ -201,7 +227,12 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho h.SetStreamHandler(pair.ProtoID, sm.streamHandler) } - ps, err := makePubSub(ctx, cfg, h, metricsTracer) + pubsubOpts := []pubsub.Option{} + for _, opt := range pubsubOptions { + opt(&pubsubOpts) + } + + ps, err := makePubSub(ctx, cfg, h, pubsubOpts...) 
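MakeService now accepts variadic PubSubOption values instead of a single metrics tracer; each option simply appends to the []pubsub.Option slice handed to makePubSub. A sketch of how a caller composes them, assuming a trivial RoleChecker stand-in and an illustrative filter predicate (the production code passes the PeerStore, which implements HasRole, and a filter such as p2pRelayPeerFilter):

package main

import (
	"fmt"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/algorand/go-algorand/network/p2p"
	"github.com/algorand/go-algorand/network/p2p/peerstore"
	"github.com/algorand/go-algorand/network/phonebook"
)

// allowAll is an illustrative RoleChecker that treats every peer as having every role.
type allowAll struct{}

func (allowAll) HasRole(peer.ID, phonebook.Role) bool { return true }

func main() {
	// Fold PubSubOption values into a plain []pubsub.Option slice,
	// the same accumulation MakeService performs before calling makePubSub.
	var opts []pubsub.Option
	for _, o := range []p2p.PubSubOption{
		p2p.DisablePubSubPeerExchange(),
		p2p.SetPubSubPeerFilter(func(c peerstore.RoleChecker, pid peer.ID) bool {
			// illustrative predicate: accept peers the checker marks as relays
			return c.HasRole(pid, phonebook.RelayRole)
		}, allowAll{}),
	} {
		o(&opts)
	}
	fmt.Println("assembled pubsub options:", len(opts))
}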
if err != nil { return nil, err } @@ -211,7 +242,6 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho host: h, streams: sm, pubsub: ps, - pubsubCtx: ctx, privKey: h.Peerstore().PrivKey(h.ID()), topics: make(map[string]*pubsub.Topic), }, nil diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 9e255f7f33..86e8e299de 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -21,12 +21,16 @@ import ( "net" "testing" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -241,3 +245,50 @@ func TestP2PMakeHostAddressFilter(t *testing.T) { host.Close() } } + +func TestP2PPubSubOptions(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var opts []pubsub.Option + option := DisablePubSubPeerExchange() + option(&opts) + require.Len(t, opts, 1) + + tracer := &mockRawTracer{} + option = SetPubSubMetricsTracer(tracer) + option(&opts) + require.Len(t, opts, 2) + + filterFunc := func(roleChecker peerstore.RoleChecker, pid peer.ID) bool { + return roleChecker.HasRole(pid, phonebook.RelayRole) + } + checker := &mockRoleChecker{} + option = SetPubSubPeerFilter(filterFunc, checker) + option(&opts) + require.Len(t, opts, 3) +} + +type mockRawTracer struct{} + +func (m *mockRawTracer) AddPeer(p peer.ID, proto protocol.ID) {} +func (m *mockRawTracer) RemovePeer(p peer.ID) {} +func (m *mockRawTracer) Join(topic string) {} +func (m *mockRawTracer) Leave(topic string) {} +func (m *mockRawTracer) Graft(p peer.ID, topic string) {} +func (m *mockRawTracer) Prune(p peer.ID, topic string) {} +func (m *mockRawTracer) ValidateMessage(msg *pubsub.Message) {} +func (m *mockRawTracer) DeliverMessage(msg *pubsub.Message) {} +func (m *mockRawTracer) RejectMessage(msg *pubsub.Message, reason string) {} +func (m *mockRawTracer) DuplicateMessage(msg *pubsub.Message) {} +func (m *mockRawTracer) ThrottlePeer(p peer.ID) {} +func (m *mockRawTracer) RecvRPC(rpc *pubsub.RPC) {} +func (m *mockRawTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {} +func (m *mockRawTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {} +func (m *mockRawTracer) UndeliverableMessage(msg *pubsub.Message) {} + +type mockRoleChecker struct{} + +func (m *mockRoleChecker) HasRole(pid peer.ID, role phonebook.Role) bool { + return true +} diff --git a/network/p2p/peerstore/peerstore.go b/network/p2p/peerstore/peerstore.go index 9bb5a46e88..5d0b2b24ca 100644 --- a/network/p2p/peerstore/peerstore.go +++ b/network/p2p/peerstore/peerstore.go @@ -289,6 +289,16 @@ func (ps *PeerStore) AddPersistentPeers(addrInfo []*peer.AddrInfo, networkName s } } +// HasRole checks if the peer has the given role. 
+func (ps *PeerStore) HasRole(peerID peer.ID, role phonebook.Role) bool { + data, err := ps.Get(peerID, addressDataKey) + if err != nil || data == nil { + return false + } + ad := data.(addressData) + return ad.roles.Has(role) +} + // Length returns the number of addrs in peerstore func (ps *PeerStore) Length() int { return len(ps.Peers()) @@ -381,3 +391,8 @@ func shuffleSelect(set []*peer.AddrInfo, n int) []*peer.AddrInfo { func shuffleAddrInfos(set []*peer.AddrInfo) { rand.Shuffle(len(set), func(i, j int) { set[i], set[j] = set[j], set[i] }) } + +// RoleChecker is an interface that checks if a peer has a specific role. +type RoleChecker interface { + HasRole(peerID peer.ID, role phonebook.Role) bool +} diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index 71eb57160f..568dc9b031 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -58,7 +58,7 @@ const TXTopicName = "algotx01" const incomingThreads = 20 // matches to number wsNetwork workers -func makePubSub(ctx context.Context, cfg config.Local, host host.Host, metricsTracer pubsub.RawTracer) (*pubsub.PubSub, error) { +func makePubSub(ctx context.Context, cfg config.Local, host host.Host, opts ...pubsub.Option) (*pubsub.PubSub, error) { //defaultParams := pubsub.DefaultGossipSubParams() options := []pubsub.Option{ @@ -103,10 +103,7 @@ func makePubSub(ctx context.Context, cfg config.Local, host host.Host, metricsTr pubsub.WithValidateWorkers(incomingThreads), } - if metricsTracer != nil { - options = append(options, pubsub.WithRawTracer(metricsTracer)) - } - + options = append(options, opts...) return pubsub.NewGossipSub(ctx, host, options...) } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 051cf2cbae..09db008e65 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -18,7 +18,7 @@ package network import ( "context" - "math/rand" + "fmt" "net/http" "strings" "sync" @@ -40,7 +40,6 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -56,8 +55,7 @@ type P2PNetwork struct { service p2p.Service log logging.Logger config config.Local - genesisID string - networkID protocol.NetworkID + genesisInfo GenesisInfo ctx context.Context ctxCancel context.CancelFunc peerStats map[peer.ID]*p2pPeerStats @@ -78,6 +76,10 @@ type P2PNetwork struct { wsPeersConnectivityCheckTicker *time.Ticker peerStater peerConnectionStater + meshUpdateRequests chan meshRequest + mesher mesher + meshCreator MeshCreator // save parameter to use in setup() + relayMessages bool // True if we should relay messages from other nodes (nominally true for relays, false otherwise) wantTXGossip atomic.Bool @@ -213,7 +215,7 @@ var gossipSubTags = map[protocol.Tag]string{ } // NewP2PNetwork returns an instance of GossipNode that uses the p2p.Service -func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, node NodeInfo, identityOpts *identityOpts) (*P2PNetwork, error) { +func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisInfo GenesisInfo, node NodeInfo, identityOpts *identityOpts, meshCreator MeshCreator) (*P2PNetwork, error) { const readBufferLen = 2048 // create Peerstore and add phonebook addresses @@ -221,7 +223,7 @@ func NewP2PNetwork(log 
logging.Logger, cfg config.Local, datadir string, phonebo for malAddr, malErr := range malformedAddrs { log.Infof("Ignoring malformed phonebook address %s: %s", malAddr, malErr) } - pstore, err := peerstore.NewPeerStore(addrInfo, string(networkID)) + pstore, err := peerstore.NewPeerStore(addrInfo, string(genesisInfo.NetworkID)) if err != nil { return nil, err } @@ -230,8 +232,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo net := &P2PNetwork{ log: log, config: cfg, - genesisID: genesisID, - networkID: networkID, + genesisInfo: genesisInfo, topicTags: gossipSubTags, wsPeers: make(map[peer.ID]*wsPeer), wsPeersToIDs: make(map[*wsPeer]peer.ID), @@ -239,6 +240,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo nodeInfo: node, pstore: pstore, relayMessages: relayMessages, + meshCreator: meshCreator, peerStater: peerConnectionStater{ log: log, peerConnectionsUpdateInterval: time.Duration(cfg.PeerConnectionsUpdateInterval) * time.Second, @@ -290,6 +292,13 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo } log.Infof("P2P host created: peer ID %s addrs %s", h.ID(), h.Addrs()) + var extraOpts networkConfig + if meshCreator != nil { + extraOpts = meshCreator.makeConfig(nil, net) + } + + opts := append([]p2p.PubSubOption{p2p.SetPubSubMetricsTracer(pubsubMetricsTracer{})}, extraOpts.pubsubOpts...) + // TODO: remove after consensus v41 takes effect. // ordered list of supported protocol versions hm := p2p.StreamHandlers{} @@ -304,7 +313,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo Handler: net.wsStreamHandlerV1, }) // END TODO - net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, hm, pubsubMetricsTracer{}) + net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, hm, opts...) 
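NewP2PNetwork now asks the supplied MeshCreator for extra network configuration: whatever makeConfig returns in pubsubOpts is appended after the default metrics tracer option before MakeService is called. Because the MeshCreator methods are unexported, implementations live inside package network; a hypothetical creator that keeps the default mesh loop but turns off pubsub peer exchange might look like the following (sketch only, not part of the change):

// pxDisabledMeshCreator is a hypothetical creator inside package network.
type pxDisabledMeshCreator struct{}

// create reuses the standard periodic mesh maintenance thread.
func (pxDisabledMeshCreator) create(opts ...meshOption) (mesher, error) {
	return newBaseMesher(opts...)
}

// makeConfig contributes extra pubsub options; NewP2PNetwork appends them
// after p2p.SetPubSubMetricsTracer before calling p2p.MakeService.
func (pxDisabledMeshCreator) makeConfig(wsnet *WebsocketNetwork, p2pnet *P2PNetwork) networkConfig {
	return networkConfig{
		pubsubOpts: []p2p.PubSubOption{p2p.DisablePubSubPeerExchange()},
	}
}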
if err != nil { return nil, err } @@ -317,7 +326,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo } bootstrapper := &bootstrapper{ cfg: cfg, - networkID: networkID, + networkID: net.genesisInfo.NetworkID, phonebookPeers: addrInfos, resolveController: dnsaddr.NewMultiaddrDNSResolveController(cfg.DNSSecurityTXTEnforced(), ""), log: net.log, @@ -326,7 +335,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo net.bootstrapperStop = bootstrapper.stop if cfg.EnableDHTProviders { - disc, err0 := p2p.MakeCapabilitiesDiscovery(net.ctx, cfg, h, networkID, net.log, bootstrapper.BootstrapFunc) + disc, err0 := p2p.MakeCapabilitiesDiscovery(net.ctx, cfg, h, net.genesisInfo.NetworkID, net.log, bootstrapper.BootstrapFunc) if err0 != nil { log.Errorf("Failed to create dht node capabilities discovery: %v", err) return nil, err @@ -336,8 +345,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo net.httpServer = p2p.MakeHTTPServer(h) - err = net.setup() - if err != nil { + if err = net.setup(); err != nil { return nil, err } @@ -348,9 +356,33 @@ func (n *P2PNetwork) setup() error { if n.broadcaster.slowWritingPeerMonitorInterval == 0 { n.broadcaster.slowWritingPeerMonitorInterval = slowWritingPeerMonitorInterval } + n.meshUpdateRequests = make(chan meshRequest, 5) + meshCreator := n.meshCreator + if meshCreator == nil { + meshCreator = baseMeshCreator{} + } + var err error + n.mesher, err = meshCreator.create( + withContext(n.ctx), + withMeshExpJitterBackoff(), + withMeshNetMeshFn(n.meshThreadInner), + withMeshPeerStatReporter(func() { + n.peerStater.sendPeerConnectionsTelemetryStatus(n) + }), + withMeshUpdateRequest(n.meshUpdateRequests), + withMeshUpdateInterval(meshThreadInterval), + ) + if err != nil { + return fmt.Errorf("failed to create mesh: %w", err) + } + return nil } +func (n *P2PNetwork) p2pRelayPeerFilter(checker peerstore.RoleChecker, pid peer.ID) bool { + return !checker.HasRole(pid, phonebook.RelayRole) +} + // PeerID returns this node's peer ID. func (n *P2PNetwork) PeerID() p2p.PeerID { return p2p.PeerID(n.service.ID()) @@ -395,8 +427,8 @@ func (n *P2PNetwork) Start() error { n.wg.Add(1) go n.broadcaster.broadcastThread(&n.wg, n, "network", "P2PNetwork") - n.wg.Add(1) - go n.meshThread() + n.meshUpdateRequests <- meshRequest{} + n.mesher.start() if n.capabilitiesDiscovery != nil { n.capabilitiesDiscovery.AdvertiseCapabilities(n.nodeInfo.Capabilities()...) 
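With this change, setup delegates the mesh maintenance loop to a mesher assembled from functional options, and Start only enqueues an initial meshRequest before calling mesher.start. A hypothetical package-internal helper showing how those options compose; connect stands in for any connectivity pass that reports whether it found peers (a sketch under those assumptions, not the algod wiring itself):

// startCustomMesh is a hypothetical sketch: it wires a baseMesher around an
// arbitrary connectivity function.
func startCustomMesh(ctx context.Context, connect func() bool) (mesher, chan meshRequest, error) {
	updates := make(chan meshRequest, 5)
	m, err := newBaseMesher(
		withContext(ctx),
		withMeshExpJitterBackoff(),          // back off between passes while connect() returns false
		withMeshNetMeshFn(connect),          // the actual connectivity pass
		withMeshPeerStatReporter(func() {}), // no telemetry in this sketch
		withMeshUpdateRequest(updates),
		withMeshUpdateInterval(meshThreadInterval),
	)
	if err != nil {
		return nil, nil, err
	}
	m.start()
	updates <- meshRequest{} // run the first pass immediately, as P2PNetwork.Start does
	return m, updates, nil
}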
@@ -433,6 +465,7 @@ func (n *P2PNetwork) Stop() { n.service.Close() n.bootstrapperStop() n.httpServer.Close() + n.mesher.stop() n.wg.Wait() } @@ -457,12 +490,12 @@ func (n *P2PNetwork) innerStop() { } // meshThreadInner fetches nodes from DHT and attempts to connect to them -func (n *P2PNetwork) meshThreadInner() int { +func (n *P2PNetwork) meshThreadInner() bool { defer n.service.DialPeersUntilTargetCount(n.config.GossipFanout) // fetch peers from DNS var dnsPeers, dhtPeers []peer.AddrInfo - dnsPeers = dnsLookupBootstrapPeers(n.log, n.config, n.networkID, dnsaddr.NewMultiaddrDNSResolveController(n.config.DNSSecurityTXTEnforced(), "")) + dnsPeers = dnsLookupBootstrapPeers(n.log, n.config, n.genesisInfo.NetworkID, dnsaddr.NewMultiaddrDNSResolveController(n.config.DNSSecurityTXTEnforced(), "")) // discover peers from DHT if n.capabilitiesDiscovery != nil { @@ -486,7 +519,7 @@ func (n *P2PNetwork) meshThreadInner() int { for i := range dhtArchivalPeers { replace[i] = &dhtArchivalPeers[i] } - n.pstore.ReplacePeerList(replace, string(n.networkID), phonebook.ArchivalRole) + n.pstore.ReplacePeerList(replace, string(n.genesisInfo.NetworkID), phonebook.ArchivalRole) } } @@ -496,45 +529,9 @@ func (n *P2PNetwork) meshThreadInner() int { replace[i] = &peers[i] } if len(peers) > 0 { - n.pstore.ReplacePeerList(replace, string(n.networkID), phonebook.RelayRole) - } - return len(peers) -} - -func (n *P2PNetwork) meshThread() { - defer n.wg.Done() - - timer := time.NewTicker(1) // start immediately and reset after - - // Add exponential backoff with jitter to the mesh thread to handle new networks startup - // when no DNS or DHT peers are available. - // The parameters produce approximate the following delays (although they are random but the sequence give the idea): - // 2 2.4 4.6 9 20 19.5 28 24 14 14 35 60 60 - ebf := backoff.NewExponentialDecorrelatedJitter(2*time.Second, meshThreadInterval, 3.0, rand.NewSource(rand.Int63())) - eb := ebf() - - defer timer.Stop() - for { - select { - case <-timer.C: - numPeers := n.meshThreadInner() - if numPeers > 0 { - // found something, reset timer to the default value - timer.Reset(meshThreadInterval) - eb.Reset() - } else { - // no peers found, backoff - timer.Reset(eb.Delay()) - } - case <-n.ctx.Done(): - return - } - - // send the currently connected peers information to the - // telemetry server; that would allow the telemetry server - // to construct a cross-node map of all the nodes interconnections. - n.peerStater.sendPeerConnectionsTelemetryStatus(n) + n.pstore.ReplacePeerList(replace, string(n.genesisInfo.NetworkID), phonebook.RelayRole) } + return len(peers) > 0 } func (n *P2PNetwork) httpdThread() { @@ -549,7 +546,7 @@ func (n *P2PNetwork) httpdThread() { // GetGenesisID implements GossipNode func (n *P2PNetwork) GetGenesisID() string { - return n.genesisID + return n.genesisInfo.GenesisID } // Address returns a string and whether that is a 'final' address or guessed. @@ -651,7 +648,21 @@ func (n *P2PNetwork) RegisterHTTPHandlerFunc(path string, handler func(http.Resp // `replace` optionally drops existing connections before making new ones. // `quit` chan allows cancellation. 
func (n *P2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) { - n.meshThreadInner() + request := meshRequest{} + if quit != nil { + request.done = make(chan struct{}) + } + select { + case n.meshUpdateRequests <- request: + case <-quit: + return + } + if request.done != nil { + select { + case <-request.done: + case <-quit: + } + } } func addrInfoToWsPeerCore(n *P2PNetwork, addrInfo *peer.AddrInfo) (wsPeerCore, bool) { @@ -791,17 +802,12 @@ func (n *P2PNetwork) OnNetworkAdvance() { // TelemetryGUID returns the telemetry GUID of this node. func (n *P2PNetwork) TelemetryGUID() string { - return n.log.GetTelemetryGUID() + return "" } // InstanceName returns the instance name of this node. func (n *P2PNetwork) InstanceName() string { - return n.log.GetInstanceName() -} - -// GenesisID returns the genesis ID of this node. -func (n *P2PNetwork) GenesisID() string { - return n.genesisID + return "" } // SupportedProtoVersions returns the supported protocol versions of this node. diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index d3a3821d9f..dd2bad274b 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -73,7 +73,8 @@ func TestP2PSubmitTX(t *testing.T) { cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netA.Start() defer netA.Stop() @@ -85,12 +86,12 @@ func TestP2PSubmitTX(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netB.Start() defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netC.Start() defer netC.Stop() @@ -162,7 +163,8 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netA.Start() defer netA.Stop() @@ -174,7 +176,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netB.Start() defer netB.Stop() @@ -193,7 +195,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg.ForceFetchTransactions = false // Have to unset NetAddress to get IsGossipServer to return false cfg.NetAddress = "" - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, 
genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netC.Start() defer netC.Stop() @@ -255,7 +257,8 @@ func TestP2PSubmitWS(t *testing.T) { cfg := config.GetDefaultLocal() cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netA.Start() @@ -269,13 +272,13 @@ func TestP2PSubmitWS(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netC.Start() require.NoError(t, err) @@ -394,7 +397,7 @@ func TestP2PNetworkAddress(t *testing.T) { cfg := config.GetDefaultLocal() log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) defer netA.Stop() require.NoError(t, err) addrInfo := netA.service.AddrInfo() @@ -511,7 +514,7 @@ func TestP2PBootstrapFunc(t *testing.T) { require.GreaterOrEqual(t, len(addr.Addrs), 1) } -func TestP2PdnsLookupBootstrapPeersFailure(t *testing.T) { +func TestP2PdnsLookupBootstrapPeersErr(t *testing.T) { t.Parallel() partitiontest.PartitionTest(t) @@ -593,6 +596,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { cfg.NetAddress = "127.0.0.1:0" cfg.EnableDHTProviders = true log := logging.TestingLog(t) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} cap := p2p.Archival tests := []struct { @@ -606,7 +610,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - netA, err := NewP2PNetwork(log.With("name", "netA"), cfg, "", nil, genesisID, config.Devtestnet, test.nis[0], nil) + netA, err := NewP2PNetwork(log.With("name", "netA"), cfg, "", nil, genesisInfo, test.nis[0], nil, nil) require.NoError(t, err) err = netA.Start() @@ -620,13 +624,13 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log.With("name", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[1], nil) + netB, err := NewP2PNetwork(log.With("name", "netB"), cfg, "", phoneBookAddresses, genesisInfo, test.nis[1], nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) defer netB.Stop() - netC, err := NewP2PNetwork(log.With("name", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[2], nil) + netC, err := NewP2PNetwork(log.With("name", "netC"), cfg, "", phoneBookAddresses, genesisInfo, test.nis[2], nil, nil) require.NoError(t, err) err = netC.Start() require.NoError(t, err) @@ -755,7 +759,7 @@ func TestP2PHTTPHandler(t *testing.T) { cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err 
:= NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) h := &p2phttpHandler{t, "hello", nil} @@ -795,7 +799,7 @@ func TestP2PHTTPHandler(t *testing.T) { // check rate limiting client: // zero clients allowed, rate limiting window (10s) is greater than queue deadline (1s) - netB, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) pstore, err := peerstore.MakePhonebook(0, 10*time.Second) require.NoError(t, err) @@ -817,7 +821,7 @@ func TestP2PHTTPHandlerAllInterfaces(t *testing.T) { cfg.NetAddress = ":0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) h := &p2phttpHandler{t, "hello", nil} @@ -863,8 +867,9 @@ func TestP2PRelay(t *testing.T) { cfg.BaseLoggerDebugLevel = 5 cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} log.Debugln("Starting netA") - netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netA.Start() @@ -882,7 +887,7 @@ func TestP2PRelay(t *testing.T) { // Explicitly unset NetAddress for netB cfg.NetAddress = "" log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) - netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) @@ -951,7 +956,7 @@ func TestP2PRelay(t *testing.T) { // ensure all messages from netB and netC are received by netA cfg.NetAddress = "127.0.0.1:0" log.Debugf("Starting netC with phonebook addresses %v", phoneBookAddresses) - netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) require.True(t, netC.relayMessages) err = netC.Start() @@ -1209,7 +1214,7 @@ func TestP2PwsStreamHandlerDedup(t *testing.T) { cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}) + netA, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}, nil) require.NoError(t, err) err = netA.Start() require.NoError(t, err) @@ -1222,7 +1227,7 @@ func TestP2PwsStreamHandlerDedup(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, 
genesisID, config.Devtestnet, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}, nil) require.NoError(t, err) // now say netA's identity tracker knows about netB's peerID @@ -1284,7 +1289,7 @@ func TestP2PEnableGossipService_NodeDisable(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { relayCfg := test.relayCfg - netA, err := NewP2PNetwork(log, relayCfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log, relayCfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netA.Start() defer netA.Stop() @@ -1298,7 +1303,7 @@ func TestP2PEnableGossipService_NodeDisable(t *testing.T) { // start netB with gossip service disabled nodeCfg := test.nodeCfg - netB, err := NewP2PNetwork(log, nodeCfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, nodeCfg, "", phoneBookAddresses, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netB.Start() defer netB.Stop() @@ -1377,7 +1382,7 @@ func TestP2PEnableGossipService_BothDisable(t *testing.T) { }, } - netA, err := NewP2PNetwork(log.With("net", "netA"), relayCfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log.With("net", "netA"), relayCfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netA.service.NetworkNotify(notifiee1) defer netA.service.NetworkStopNotify(notifiee1) @@ -1394,7 +1399,7 @@ func TestP2PEnableGossipService_BothDisable(t *testing.T) { nodeCfg := cfg nodeCfg.NetAddress = "" - netB, err := NewP2PNetwork(log.With("net", "netB"), nodeCfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log.With("net", "netB"), nodeCfg, "", phoneBookAddresses, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) netB.service.NetworkNotify(notifiee2) defer netB.service.NetworkStopNotify(notifiee2) @@ -1419,7 +1424,7 @@ func TestP2PTxTopicValidator_NoWsPeer(t *testing.T) { cfg := config.GetDefaultLocal() cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses - net, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + net, err := NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) peerID := peer.ID("12345678") // must be 8+ in size @@ -1448,7 +1453,7 @@ func TestGetPeersFiltersSelf(t *testing.T) { log := logging.TestingLog(t) cfg := config.GetDefaultLocal() - net, err := NewP2PNetwork(log, cfg, t.TempDir(), []string{}, "test-genesis", "test-network", &nopeNodeInfo{}, nil) + net, err := NewP2PNetwork(log, cfg, t.TempDir(), []string{}, GenesisInfo{"test-genesis", "test-network"}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) selfID := net.service.ID() @@ -1496,7 +1501,7 @@ func TestP2PMetainfoExchange(t *testing.T) { log := logging.TestingLog(t) err := log.EnableTelemetryContext(context.Background(), logging.TelemetryConfig{Enable: true, SendToLog: true, GUID: uuid.New()}) require.NoError(t, err) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := 
NewP2PNetwork(log, cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netA.Start() require.NoError(t, err) @@ -1512,7 +1517,7 @@ func TestP2PMetainfoExchange(t *testing.T) { cfg.NetAddress = "" multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg2, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg2, "", phoneBookAddresses, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) @@ -1522,7 +1527,12 @@ func TestP2PMetainfoExchange(t *testing.T) { return len(netA.service.Conns()) > 0 && len(netB.service.Conns()) > 0 }, 2*time.Second, 50*time.Millisecond) - peers := netA.GetPeers(PeersConnectedIn) + var peers []Peer + require.Eventually(t, func() bool { + peers = netA.GetPeers(PeersConnectedIn) + return len(peers) > 0 + }, 2*time.Second, 50*time.Millisecond) + require.Len(t, peers, 1) peer := peers[0].(*wsPeer) require.True(t, peer.features&pfCompressedProposal != 0) @@ -1546,7 +1556,8 @@ func TestP2PMetainfoV1vsV22(t *testing.T) { cfg.NetAddress = "127.0.0.1:0" cfg.EnableVoteCompression = true log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + genesisInfo := GenesisInfo{genesisID, config.Devtestnet} + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netA.Start() require.NoError(t, err) @@ -1566,7 +1577,7 @@ func TestP2PMetainfoV1vsV22(t *testing.T) { defer func() { disableV22Protocol = false }() - netB, err := NewP2PNetwork(log, cfg2, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log, cfg2, "", phoneBookAddresses, genesisInfo, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) @@ -1581,6 +1592,7 @@ func TestP2PMetainfoV1vsV22(t *testing.T) { peers = netA.GetPeers(PeersConnectedIn) return len(peers) > 0 }, 2*time.Second, 50*time.Millisecond) + require.Len(t, peers, 1) peer := peers[0].(*wsPeer) require.False(t, peer.features&pfCompressedProposal != 0) require.False(t, peer.vpackVoteCompressionSupported()) @@ -1615,7 +1627,7 @@ func TestP2PVoteCompression(t *testing.T) { cfg.GossipFanout = 1 cfg.EnableVoteCompression = test.netAEnableCompression log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log.With("name", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netA, err := NewP2PNetwork(log.With("name", "netA"), cfg, "", nil, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netA.Start() require.NoError(t, err) @@ -1631,7 +1643,7 @@ func TestP2PVoteCompression(t *testing.T) { cfgB.NetAddress = "" multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log.With("name", "netB"), cfgB, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + netB, err := NewP2PNetwork(log.With("name", "netB"), cfgB, "", phoneBookAddresses, GenesisInfo{genesisID, config.Devtestnet}, &nopeNodeInfo{}, nil, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) diff --git a/network/phonebook/phonebook_test.go b/network/phonebook/phonebook_test.go index b0805582d0..b07b8f2f7d 100644 --- a/network/phonebook/phonebook_test.go +++ 
b/network/phonebook/phonebook_test.go @@ -17,6 +17,7 @@ package phonebook import ( + "slices" "testing" "time" @@ -27,25 +28,13 @@ import ( func testPhonebookAll(t *testing.T, set []string, ph Phonebook) { actual := ph.GetAddresses(len(set), RelayRole) for _, got := range actual { - ok := false - for _, known := range set { - if got == known { - ok = true - break - } - } + ok := slices.Contains(set, got) if !ok { t.Errorf("get returned junk %#v", got) } } for _, known := range set { - ok := false - for _, got := range actual { - if got == known { - ok = true - break - } - } + ok := slices.Contains(actual, known) if !ok { t.Errorf("get missed %#v; actual=%#v; set=%#v", known, actual, set) } diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index 04e288bca6..35e569c166 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -50,11 +50,13 @@ func TestRequestLogger(t *testing.T) { dl := eventsDetailsLogger{Logger: log, eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.HTTPRequestEvent} log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) netA := &WebsocketNetwork{ - log: dl, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, + log: dl, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: noopIdentityTracker{}, } diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 8ef12ad714..dcc5067667 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -87,11 +87,13 @@ func TestRateLimiting(t *testing.T) { // This test is conducted locally, so we want to treat all hosts the same for counting incoming requests. testConfig.DisableLocalhostConnectionRateLimit = false wn := &WebsocketNetwork{ - log: log, - config: testConfig, - phonebook: phonebook.MakePhonebook(1, 1), - genesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, + log: log, + config: testConfig, + phonebook: phonebook.MakePhonebook(1, 1), + genesisInfo: GenesisInfo{ + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: noopIdentityTracker{}, } diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 38bdad2b8f..184616e998 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -149,6 +149,12 @@ func (nnni *nopeNodeInfo) Capabilities() []p2p.Capability { return nil } +// GenesisInfo contains information about the genesis of the network. +type GenesisInfo struct { + GenesisID string + NetworkID protocol.NetworkID +} + // WebsocketNetwork implements GossipNode type WebsocketNetwork struct { listener net.Listener @@ -176,14 +182,15 @@ type WebsocketNetwork struct { phonebook phonebook.Phonebook - genesisID string - NetworkID protocol.NetworkID - randomID string + genesisInfo GenesisInfo + randomID string ready atomic.Int32 readyChan chan struct{} meshUpdateRequests chan meshRequest + mesher mesher + meshCreator MeshCreator // save parameter to use in setup() // Keep a record of pending outgoing connections so // we don't start duplicates connection attempts. 
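With GenesisInfo in place, both constructors take the genesis ID and network ID as a single value, and the trailing arguments are the identity options and the MeshCreator (nil selects the defaults). A test-style sketch of the new call shape, using a hypothetical helper inside package network and the same test values that appear in this patch:

// newTestNetworks is a hypothetical helper illustrating the new signatures.
func newTestNetworks(log logging.Logger, cfg config.Local) (*P2PNetwork, *WebsocketNetwork, error) {
	gi := GenesisInfo{GenesisID: "go-test-network-genesis", NetworkID: config.Devtestnet}
	p2pNet, err := NewP2PNetwork(log, cfg, "", nil, gi, &nopeNodeInfo{}, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	wsNet, err := NewWebsocketNetwork(log, cfg, nil, gi, &nopeNodeInfo{}, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	return p2pNet, wsNet, nil
}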
@@ -478,7 +485,7 @@ func (wn *WebsocketNetwork) RegisterHTTPHandlerFunc(path string, handler func(ht // RequestConnectOutgoing tries to actually do the connect to new peers. // `replace` drop all connections first and find new peers. func (wn *WebsocketNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) { - request := meshRequest{disconnect: false} + request := meshRequest{} if quit != nil { request.done = make(chan struct{}) } @@ -539,7 +546,7 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer { return outPeers } -func (wn *WebsocketNetwork) setup() { +func (wn *WebsocketNetwork) setup() error { var preferredResolver dnssec.ResolverIf if wn.config.DNSSecurityRelayAddrEnforced() { preferredResolver = dnssec.MakeDefaultDnssecResolver(wn.config.FallbackDNSResolverAddress, wn.log) @@ -590,6 +597,24 @@ func (wn *WebsocketNetwork) setup() { wn.broadcaster.slowWritingPeerMonitorInterval = slowWritingPeerMonitorInterval } wn.meshUpdateRequests = make(chan meshRequest, 5) + meshCreator := wn.meshCreator + if meshCreator == nil { + meshCreator = baseMeshCreator{} + } + var err error + wn.mesher, err = meshCreator.create( + withContext(wn.ctx), + withMeshNetMeshFn(wn.meshThreadInner), + withMeshPeerStatReporter(func() { + wn.peerStater.sendPeerConnectionsTelemetryStatus(wn) + }), + withMeshUpdateRequest(wn.meshUpdateRequests), + withMeshUpdateInterval(meshThreadInterval), + ) + if err != nil { + return fmt.Errorf("failed to create mesh: %w", err) + } + wn.readyChan = make(chan struct{}) wn.tryConnectAddrs = make(map[string]int64) wn.eventualReadyDelay = time.Minute @@ -634,6 +659,7 @@ func (wn *WebsocketNetwork) setup() { if wn.relayMessages { wn.registerMessageInterest(protocol.StateProofSigTag) } + return nil } // Start makes network connections and threads @@ -689,7 +715,7 @@ func (wn *WebsocketNetwork) Start() error { wn.identityScheme = NewIdentityChallengeScheme(NetIdentityDedupNames(wn.config.PublicAddress)) } - wn.meshUpdateRequests <- meshRequest{false, nil} + wn.meshUpdateRequests <- meshRequest{} if wn.prioScheme != nil { wn.RegisterHandlers(prioHandlers) } @@ -697,8 +723,8 @@ func (wn *WebsocketNetwork) Start() error { wn.wg.Add(1) go wn.httpdThread() } - wn.wg.Add(1) - go wn.meshThread() + + wn.mesher.start() // we shouldn't have any ticker here.. but in case we do - just stop it. if wn.peersConnectivityCheckTicker != nil { @@ -719,7 +745,7 @@ func (wn *WebsocketNetwork) Start() error { go wn.postMessagesOfInterestThread() - wn.log.Infof("serving genesisID=%s on %#v with RandomID=%s", wn.genesisID, wn.PublicAddress(), wn.randomID) + wn.log.Infof("serving genesisID=%s on %#v with RandomID=%s", wn.genesisInfo.GenesisID, wn.PublicAddress(), wn.randomID) return nil } @@ -779,6 +805,7 @@ func (wn *WebsocketNetwork) Stop() { if err != nil { wn.log.Warnf("problem shutting down %s: %v", listenAddr, err) } + wn.mesher.stop() wn.wg.Wait() if wn.listener != nil { wn.log.Debugf("closed %s", listenAddr) @@ -816,7 +843,7 @@ func (wn *WebsocketNetwork) ClearValidatorHandlers() { type peerMetadataProvider interface { TelemetryGUID() string InstanceName() string - GenesisID() string + GetGenesisID() string PublicAddress() string RandomID() string SupportedProtoVersions() []string @@ -833,11 +860,6 @@ func (wn *WebsocketNetwork) InstanceName() string { return wn.log.GetInstanceName() } -// GenesisID returns the genesis ID of this node. -func (wn *WebsocketNetwork) GenesisID() string { - return wn.genesisID -} - // RandomID returns the random ID of this node. 
func (wn *WebsocketNetwork) RandomID() string { return wn.randomID @@ -862,7 +884,7 @@ func setHeaders(header http.Header, netProtoVer string, meta peerMetadataProvide if rid := meta.RandomID(); rid != "" { header.Set(NodeRandomHeader, rid) } - header.Set(GenesisHeader, meta.GenesisID()) + header.Set(GenesisHeader, meta.GetGenesisID()) // set the features header (comma-separated list) header.Set(PeerFeaturesHeader, PeerFeatureProposalCompression) @@ -901,11 +923,11 @@ func (wn *WebsocketNetwork) checkServerResponseVariables(otherHeader http.Header return false, "" } otherGenesisID := otherHeader.Get(GenesisHeader) - if wn.genesisID != otherGenesisID { + if wn.genesisInfo.GenesisID != otherGenesisID { if otherGenesisID != "" { - wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.genesisID, otherGenesisID, otherHeader))) + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.genesisInfo.GenesisID, otherGenesisID, otherHeader))) } else { - wn.log.Warnf("new peer %#v did not include genesis header in response. mine=%#v headers %#v", addr, wn.genesisID, otherHeader) + wn.log.Warnf("new peer %#v did not include genesis header in response. mine=%#v headers %#v", addr, wn.genesisInfo.GenesisID, otherHeader) } return false, "" } @@ -985,8 +1007,8 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo return http.StatusNotFound } - if wn.genesisID != otherGenesisID { - wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", remoteAddrForLogging, wn.genesisID, otherGenesisID, request.Header))) + if wn.genesisInfo.GenesisID != otherGenesisID { + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", remoteAddrForLogging, wn.genesisInfo.GenesisID, otherGenesisID, request.Header))) networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "mismatching genesis-id"}) response.WriteHeader(http.StatusPreconditionFailed) n, err := response.Write([]byte("mismatching genesis ID")) @@ -1532,63 +1554,33 @@ func (wn *WebsocketNetwork) connectedForIP(host string) (totalConnections int) { return } -const meshThreadInterval = time.Minute const cliqueResolveInterval = 5 * time.Minute type meshRequest struct { - disconnect bool - done chan struct{} + done chan struct{} } -// meshThread maintains the network, e.g. that we have sufficient connectivity to peers -func (wn *WebsocketNetwork) meshThread() { - defer wn.wg.Done() - timer := time.NewTicker(meshThreadInterval) - defer timer.Stop() - for { - var request meshRequest - select { - case <-timer.C: - request.disconnect = false - request.done = nil - case request = <-wn.meshUpdateRequests: - case <-wn.ctx.Done(): - return - } - - if request.disconnect { - wn.DisconnectPeers() - } - - wn.refreshRelayArchivePhonebookAddresses() +func (wn *WebsocketNetwork) meshThreadInner() bool { + wn.refreshRelayArchivePhonebookAddresses() - // as long as the call to checkExistingConnectionsNeedDisconnecting is deleting existing connections, we want to - // kick off the creation of new connections. - for { - if wn.checkNewConnectionsNeeded() { - // new connections were created. - break - } - if !wn.checkExistingConnectionsNeedDisconnecting() { - // no connection were removed. 
- break - } + // as long as the call to checkExistingConnectionsNeedDisconnecting is deleting existing connections, we want to + // kick off the creation of new connections. + for { + if wn.checkNewConnectionsNeeded() { + // new connections were created. + break } - - if request.done != nil { - close(request.done) + if !wn.checkExistingConnectionsNeedDisconnecting() { + // no connection were removed. + break } - - // send the currently connected peers information to the - // telemetry server; that would allow the telemetry server - // to construct a cross-node map of all the nodes interconnections. - wn.peerStater.sendPeerConnectionsTelemetryStatus(wn) } + return true } func (wn *WebsocketNetwork) refreshRelayArchivePhonebookAddresses() { // TODO: only do DNS fetch every N seconds? Honor DNS TTL? Trust DNS library we're using to handle caching and TTL? - dnsBootstrapArray := wn.config.DNSBootstrapArray(wn.NetworkID) + dnsBootstrapArray := wn.config.DNSBootstrapArray(wn.genesisInfo.NetworkID) for _, dnsBootstrap := range dnsBootstrapArray { primaryRelayAddrs, primaryArchivalAddrs := wn.getDNSAddrs(dnsBootstrap.PrimarySRVBootstrap) @@ -1609,14 +1601,14 @@ func (wn *WebsocketNetwork) refreshRelayArchivePhonebookAddresses() { func (wn *WebsocketNetwork) updatePhonebookAddresses(relayAddrs []string, archiveAddrs []string) { if len(relayAddrs) > 0 { wn.log.Debugf("got %d relay dns addrs, %#v", len(relayAddrs), relayAddrs[:min(5, len(relayAddrs))]) - wn.phonebook.ReplacePeerList(relayAddrs, string(wn.NetworkID), phonebook.RelayRole) + wn.phonebook.ReplacePeerList(relayAddrs, string(wn.genesisInfo.NetworkID), phonebook.RelayRole) } else { - wn.log.Infof("got no relay DNS addrs for network %s", wn.NetworkID) + wn.log.Infof("got no relay DNS addrs for network %s", wn.genesisInfo.NetworkID) } if len(archiveAddrs) > 0 { - wn.phonebook.ReplacePeerList(archiveAddrs, string(wn.NetworkID), phonebook.ArchivalRole) + wn.phonebook.ReplacePeerList(archiveAddrs, string(wn.genesisInfo.NetworkID), phonebook.ArchivalRole) } else { - wn.log.Infof("got no archive DNS addrs for network %s", wn.NetworkID) + wn.log.Infof("got no archive DNS addrs for network %s", wn.genesisInfo.NetworkID) } } @@ -1889,7 +1881,7 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses [] relaysAddresses, err = wn.resolveSRVRecords(wn.ctx, "algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced()) if err != nil { // only log this warning on testnet or devnet - if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet { + if wn.genesisInfo.NetworkID == config.Devnet || wn.genesisInfo.NetworkID == config.Testnet { wn.log.Warnf("Cannot lookup algobootstrap SRV record for %s: %v", dnsBootstrap, err) } relaysAddresses = nil @@ -1898,7 +1890,7 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses [] archivalAddresses, err = wn.resolveSRVRecords(wn.ctx, "archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced()) if err != nil { // only log this warning on testnet or devnet - if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet { + if wn.genesisInfo.NetworkID == config.Devnet || wn.genesisInfo.NetworkID == config.Testnet { wn.log.Warnf("Cannot lookup archive SRV record for %s: %v", dnsBootstrap, err) } archivalAddresses = nil @@ -2267,26 +2259,26 @@ func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{} } // NewWebsocketNetwork constructor for 
websockets based gossip network -func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo, identityOpts *identityOpts) (wn *WebsocketNetwork, err error) { +func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisInfo GenesisInfo, nodeInfo NodeInfo, identityOpts *identityOpts, meshCreator MeshCreator) (wn *WebsocketNetwork, err error) { pb := phonebook.MakePhonebook(config.ConnectionsRateLimitingCount, time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second) addresses := make([]string, 0, len(phonebookAddresses)) for _, a := range phonebookAddresses { - _, err := addr.ParseHostOrURL(a) - if err == nil { + _, err0 := addr.ParseHostOrURL(a) + if err0 == nil { addresses = append(addresses, a) } } - pb.AddPersistentPeers(addresses, string(networkID), phonebook.RelayRole) + pb.AddPersistentPeers(addresses, string(genesisInfo.NetworkID), phonebook.RelayRole) wn = &WebsocketNetwork{ log: log, config: config, phonebook: pb, - genesisID: genesisID, - NetworkID: networkID, + genesisInfo: genesisInfo, nodeInfo: nodeInfo, resolveSRVRecords: tools_network.ReadFromSRV, + meshCreator: meshCreator, peerStater: peerConnectionStater{ log: log, peerConnectionsUpdateInterval: time.Duration(config.PeerConnectionsUpdateInterval) * time.Second, @@ -2303,13 +2295,15 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre wn.identityTracker = NewIdentityTracker() } - wn.setup() + if err = wn.setup(); err != nil { + return nil, err + } return wn, nil } // NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) { - return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil, nil) + return NewWebsocketNetwork(log, config, phonebookAddresses, GenesisInfo{genesisID, networkID}, nil, nil, nil) } // SetPrioScheme specifies the network priority scheme for a network node @@ -2531,4 +2525,4 @@ func (wn *WebsocketNetwork) postMessagesOfInterestThread() { } // GetGenesisID returns the network-specific genesisID. 
-func (wn *WebsocketNetwork) GetGenesisID() string { return wn.genesisID } +func (wn *WebsocketNetwork) GetGenesisID() string { return wn.genesisInfo.GenesisID } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 533b3c9e5d..a2a6bec1ce 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -128,11 +128,13 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...te log := logging.TestingLog(t) log.SetLevel(logging.Warn) wn := &WebsocketNetwork{ - log: log, - config: conf, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: conf, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: genesisID, + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: NewIdentityTracker(), } @@ -868,7 +870,7 @@ func TestAddrToGossipAddr(t *testing.T) { partitiontest.PartitionTest(t) wn := &WebsocketNetwork{} - wn.genesisID = "test genesisID" + wn.genesisInfo.GenesisID = "test genesisID" wn.log = logging.Base() addrtest(t, wn, "ws://r7.algodev.network.:4166/v1/test%20genesisID/gossip", "r7.algodev.network.:4166") addrtest(t, wn, "ws://r7.algodev.network.:4166/v1/test%20genesisID/gossip", "http://r7.algodev.network.:4166") @@ -1138,11 +1140,13 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor dc.OutgoingMessageFilterBucketCount = 3 dc.OutgoingMessageFilterBucketSize = 128 wn := &WebsocketNetwork{ - log: logging.TestingLog(t).With("node", nodename), - config: dc, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: genesisID, - NetworkID: config.Devtestnet, + log: logging.TestingLog(t).With("node", nodename), + config: dc, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: genesisID, + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: logging.TestingLog(t).With("node", nodename)}, identityTracker: noopIdentityTracker{}, } @@ -2548,39 +2552,39 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) { partitiontest.PartitionTest(t) wn := makeTestWebsocketNode(t) - wn.genesisID = "genesis-id1" + wn.genesisInfo.GenesisID = "genesis-id1" wn.randomID = "random-id1" header := http.Header{} header.Set(ProtocolVersionHeader, ProtocolVersion) header.Set(NodeRandomHeader, wn.randomID+"tag") - header.Set(GenesisHeader, wn.genesisID) + header.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, matchingVersion := wn.checkServerResponseVariables(header, "addressX") require.Equal(t, true, responseVariableOk) require.Equal(t, matchingVersion, ProtocolVersion) noVersionHeader := http.Header{} noVersionHeader.Set(NodeRandomHeader, wn.randomID+"tag") - noVersionHeader.Set(GenesisHeader, wn.genesisID) + noVersionHeader.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, _ = wn.checkServerResponseVariables(noVersionHeader, "addressX") require.Equal(t, false, responseVariableOk) noRandomHeader := http.Header{} noRandomHeader.Set(ProtocolVersionHeader, ProtocolVersion) - noRandomHeader.Set(GenesisHeader, wn.genesisID) + noRandomHeader.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, _ = wn.checkServerResponseVariables(noRandomHeader, "addressX") require.Equal(t, false, responseVariableOk) sameRandomHeader := http.Header{} sameRandomHeader.Set(ProtocolVersionHeader, ProtocolVersion) 
sameRandomHeader.Set(NodeRandomHeader, wn.randomID) - sameRandomHeader.Set(GenesisHeader, wn.genesisID) + sameRandomHeader.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, _ = wn.checkServerResponseVariables(sameRandomHeader, "addressX") require.Equal(t, false, responseVariableOk) differentGenesisIDHeader := http.Header{} differentGenesisIDHeader.Set(ProtocolVersionHeader, ProtocolVersion) differentGenesisIDHeader.Set(NodeRandomHeader, wn.randomID+"tag") - differentGenesisIDHeader.Set(GenesisHeader, wn.genesisID+"tag") + differentGenesisIDHeader.Set(GenesisHeader, wn.genesisInfo.GenesisID+"tag") responseVariableOk, _ = wn.checkServerResponseVariables(differentGenesisIDHeader, "addressX") require.Equal(t, false, responseVariableOk) } @@ -2646,11 +2650,13 @@ func TestSlowPeerDisconnection(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Info) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: genesisID, + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: noopIdentityTracker{}, } @@ -2723,11 +2729,13 @@ func TestForceMessageRelaying(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: genesisID, + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: noopIdentityTracker{}, } @@ -2819,11 +2827,13 @@ func TestCheckProtocolVersionMatch(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - genesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + genesisInfo: GenesisInfo{ + GenesisID: genesisID, + NetworkID: config.Devtestnet, + }, peerStater: peerConnectionStater{log: log}, identityTracker: noopIdentityTracker{}, } @@ -3655,7 +3665,7 @@ func TestMaliciousCheckServerResponseVariables(t *testing.T) { partitiontest.PartitionTest(t) wn := makeTestWebsocketNode(t) - wn.genesisID = "genesis-id1" + wn.genesisInfo.GenesisID = "genesis-id1" wn.randomID = "random-id1" wn.log = callbackLogger{ Logger: wn.log, @@ -3680,7 +3690,7 @@ func TestMaliciousCheckServerResponseVariables(t *testing.T) { header1 := http.Header{} header1.Set(ProtocolVersionHeader, ProtocolVersion+"א") header1.Set(NodeRandomHeader, wn.randomID+"tag") - header1.Set(GenesisHeader, wn.genesisID) + header1.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, matchingVersion := wn.checkServerResponseVariables(header1, "addressX") require.Equal(t, false, responseVariableOk) require.Equal(t, "", matchingVersion) @@ -3688,7 +3698,7 @@ func TestMaliciousCheckServerResponseVariables(t *testing.T) { header2 := http.Header{} header2.Set(ProtocolVersionHeader, ProtocolVersion) header2.Set("א", "א") - 
header2.Set(GenesisHeader, wn.genesisID) + header2.Set(GenesisHeader, wn.genesisInfo.GenesisID) responseVariableOk, matchingVersion = wn.checkServerResponseVariables(header2, "addressX") require.Equal(t, false, responseVariableOk) require.Equal(t, "", matchingVersion) @@ -3696,7 +3706,7 @@ func TestMaliciousCheckServerResponseVariables(t *testing.T) { header3 := http.Header{} header3.Set(ProtocolVersionHeader, ProtocolVersion) header3.Set(NodeRandomHeader, wn.randomID+"tag") - header3.Set(GenesisHeader, wn.genesisID+"א") + header3.Set(GenesisHeader, wn.genesisInfo.GenesisID+"א") responseVariableOk, matchingVersion = wn.checkServerResponseVariables(header3, "addressX") require.Equal(t, false, responseVariableOk) require.Equal(t, "", matchingVersion) @@ -4228,10 +4238,10 @@ func TestRefreshRelayArchivePhonebookAddresses(t *testing.T) { rapid.Check(t, func(t1 *rapid.T) { refreshTestConf.DNSBootstrapID = refreshRelayDNSBootstrapID netA = makeTestWebsocketNodeWithConfig(t, refreshTestConf) - netA.NetworkID = nonHardcodedNetworkIDGen().Draw(t1, "network") + netA.genesisInfo.NetworkID = nonHardcodedNetworkIDGen().Draw(t1, "network") - primarySRVBootstrap := strings.Replace(".algorand.network", "", string(netA.NetworkID), -1) - backupSRVBootstrap := strings.Replace(".algorand.net", "", string(netA.NetworkID), -1) + primarySRVBootstrap := strings.Replace(".algorand.network", "", string(netA.genesisInfo.NetworkID), -1) + backupSRVBootstrap := strings.Replace(".algorand.net", "", string(netA.genesisInfo.NetworkID), -1) var primaryRelayResolvedRecords []string var secondaryRelayResolvedRecords []string var primaryArchiveResolvedRecords []string @@ -4239,14 +4249,14 @@ func TestRefreshRelayArchivePhonebookAddresses(t *testing.T) { for _, record := range []string{"r1.algorand-.network", "r2.algorand-.network", "r3.algorand-.network"} { - var recordSub = strings.Replace(record, "", string(netA.NetworkID), -1) + var recordSub = strings.Replace(record, "", string(netA.genesisInfo.NetworkID), -1) primaryRelayResolvedRecords = append(primaryRelayResolvedRecords, recordSub) secondaryRelayResolvedRecords = append(secondaryRelayResolvedRecords, strings.Replace(recordSub, "network", "net", -1)) } for _, record := range []string{"r1archive.algorand-.network", "r2archive.algorand-.network", "r3archive.algorand-.network"} { - var recordSub = strings.Replace(record, "", string(netA.NetworkID), -1) + var recordSub = strings.Replace(record, "", string(netA.genesisInfo.NetworkID), -1) primaryArchiveResolvedRecords = append(primaryArchiveResolvedRecords, recordSub) secondaryArchiveResolvedRecords = append(secondaryArchiveResolvedRecords, strings.Replace(recordSub, "network", "net", -1)) } @@ -4644,8 +4654,11 @@ func TestWsNetworkPhonebookMix(t *testing.T) { logging.TestingLog(t), config.GetDefaultLocal(), []string{"127.0.0.1:1234", "/ip4/127.0.0.1/tcp/1234", "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"}, - "test", - "net", + GenesisInfo{ + "test", + "net", + }, + nil, nil, nil, ) diff --git a/network/wsPeer.go b/network/wsPeer.go index 9c7c5a7700..9b7f768233 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -608,6 +608,11 @@ func (wp *wsPeer) readLoop() { wp.reportReadErr(err) return } + if wp.peerType == peerTypeWs { + networkReceivedUncompressedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + } else { + networkP2PReceivedUncompressedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + } msg.Sender = wp // for outgoing connections, we want to notify the connection monitor 
that we've received diff --git a/node/follower_node.go b/node/follower_node.go index 573b66dfd8..bffce44558 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -93,8 +93,12 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo } node.config = cfg + var genesisInfo = network.GenesisInfo{ + GenesisID: genesis.ID(), + NetworkID: genesis.Network, + } // tie network, block fetcher, and agreement services together - p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil, nil) + p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesisInfo, nil, nil, nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err @@ -133,7 +137,7 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, make(chan catchup.PendingUnmatchedCertificate), node.lowPriorityCryptoVerificationPool) // Initialize sync round to the latest db round + 1 so that nothing falls out of the cache on Start - err = node.SetSyncRound(uint64(node.Ledger().LatestTrackerCommitted() + 1)) + err = node.SetSyncRound(node.Ledger().LatestTrackerCommitted() + 1) if err != nil { log.Errorf("unable to set sync round to Ledger.DBRound %v", err) return nil, err @@ -426,7 +430,7 @@ func (node *AlgorandFollowerNode) SetCatchpointCatchupMode(catchpointCatchupMode defer node.mu.Unlock() // update sync round before starting services - if err := node.SetSyncRound(uint64(node.ledger.LastRound())); err != nil { + if err := node.SetSyncRound(node.ledger.LastRound()); err != nil { node.log.Warnf("unable to set sync round while resuming fast catchup: %v", err) } @@ -446,16 +450,16 @@ func (node *AlgorandFollowerNode) SetCatchpointCatchupMode(catchpointCatchupMode } // SetSyncRound sets the minimum sync round on the catchup service -func (node *AlgorandFollowerNode) SetSyncRound(rnd uint64) error { +func (node *AlgorandFollowerNode) SetSyncRound(rnd basics.Round) error { // Calculate the first round for which we want to disable catchup from the network. // This is based on the size of the cache used in the ledger. 
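// Editor's sketch (not part of the patch): the switch to basics.Round keeps the offset arithmetic
// identical. Assuming MaxAcctLookback = 4 (a value picked purely for illustration):
//
//	_ = node.SetSyncRound(basics.Round(1000)) // catchup service stores disableSyncRound = 1004
//	rnd := node.GetSyncRound()                // SubSaturate(1004, 4) == basics.Round(1000)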
- disableSyncRound := rnd + node.Config().MaxAcctLookback + disableSyncRound := rnd + basics.Round(node.Config().MaxAcctLookback) return node.catchupService.SetDisableSyncRound(disableSyncRound) } // GetSyncRound retrieves the sync round, removes cache offset used during SetSyncRound -func (node *AlgorandFollowerNode) GetSyncRound() uint64 { - return basics.SubSaturate(node.catchupService.GetDisableSyncRound(), node.Config().MaxAcctLookback) +func (node *AlgorandFollowerNode) GetSyncRound() basics.Round { + return basics.SubSaturate(node.catchupService.GetDisableSyncRound(), basics.Round(node.Config().MaxAcctLookback)) } // UnsetSyncRound removes the sync round constraint on the catchup service diff --git a/node/follower_node_test.go b/node/follower_node_test.go index 587fb0505d..6c4e357ea3 100644 --- a/node/follower_node_test.go +++ b/node/follower_node_test.go @@ -93,7 +93,7 @@ func TestSyncRound(t *testing.T) { b.CurrentProtocol = protocol.ConsensusCurrentVersion err := node.Ledger().AddBlock(b, agreement.Certificate{}) require.NoError(t, err) - dbRound := uint64(node.Ledger().LatestTrackerCommitted()) + dbRound := node.Ledger().LatestTrackerCommitted() // Sync Round should be initialized to the ledger's dbRound + 1 require.Equal(t, dbRound+1, node.GetSyncRound()) // Set a new sync round @@ -102,7 +102,7 @@ func TestSyncRound(t *testing.T) { require.Equal(t, dbRound+11, node.GetSyncRound()) // Unset the sync round and make sure get returns 0 node.UnsetSyncRound() - require.Equal(t, uint64(0), node.GetSyncRound()) + require.Zero(t, node.GetSyncRound()) } func TestErrors(t *testing.T) { @@ -156,7 +156,7 @@ func TestFastCatchupResume(t *testing.T) { node.ctx = context.Background() // Initialize sync round to a future round. - syncRound := uint64(10000) + syncRound := basics.Round(10000) node.SetSyncRound(syncRound) require.Equal(t, syncRound, node.GetSyncRound()) @@ -165,7 +165,7 @@ func TestFastCatchupResume(t *testing.T) { <-out // Verify the sync was reset. 
- assert.Equal(t, uint64(0), node.GetSyncRound()) + assert.Zero(t, node.GetSyncRound()) } // TestDefaultResourcePaths confirms that when no extra configuration is provided, all resources are created in the dataDir diff --git a/node/netprio.go b/node/netprio.go index 5c05724b0f..65193673a9 100644 --- a/node/netprio.go +++ b/node/netprio.go @@ -134,7 +134,7 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by return } - ephID := basics.OneTimeIDForRound(rs.Round, data.KeyDilution(proto)) + ephID := basics.OneTimeIDForRound(rs.Round, proto.EffectiveKeyDilution(data.VoteKeyDilution)) if !data.VoteID.Verify(ephID, rs.Response, rs.Sig) { err = fmt.Errorf("signature verification failure") return diff --git a/node/node.go b/node/node.go index 15b2bea5e5..0782f595cf 100644 --- a/node/node.go +++ b/node/node.go @@ -29,6 +29,7 @@ import ( "time" "github.com/algorand/go-deadlock" + "github.com/labstack/gommon/log" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/agreement/gossip" @@ -95,9 +96,9 @@ type StatusReport struct { CatchpointCatchupAcquiredBlocks uint64 UpgradePropose protocol.ConsensusVersion UpgradeApprove bool - UpgradeDelay uint64 + UpgradeDelay basics.Round NextProtocolVoteBefore basics.Round - NextProtocolApprovals uint64 + NextProtocolApprovals basics.Round } // TimeSinceLastRound returns the time since the last block was approved (locally), or 0 if no blocks seen @@ -152,6 +153,9 @@ type AlgorandFullNode struct { oldKeyDeletionNotify chan struct{} monitoringRoutinesWaitGroup sync.WaitGroup + hybridError string // whether the MakeFull switched to non-hybrid mode due to a P2PHybridConfigError and needs to be logged periodically + hybridErrorRoutineWaitGroup sync.WaitGroup + tracer messagetracer.MessageTracer stateProofWorker *stateproof.Worker @@ -202,21 +206,40 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd // tie network, block fetcher, and agreement services together var p2pNode network.GossipNode + var genesisInfo = network.GenesisInfo{ + GenesisID: genesis.ID(), + NetworkID: genesis.Network, + } +recreateNetwork: if cfg.EnableP2PHybridMode { - p2pNode, err = network.NewHybridP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node) + p2pNode, err = network.NewHybridP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesisInfo, node, nil) if err != nil { - log.Errorf("could not create hybrid p2p node: %v", err) - return nil, err + if _, ok := err.(config.P2PHybridConfigError); !ok { + log.Errorf("could not create hybrid p2p node: %v", err) + return nil, err + } + // it was P2PHybridConfigError error so fallback to non-hybrid mode (either P2P or WS) + cfg.EnableP2PHybridMode = false + + // indicate we need to start logging the error into the log periodically + fallbackNetName := "WS" + if cfg.EnableP2P { + fallbackNetName = "P2P" + } + node.hybridError = fmt.Sprintf("could not create hybrid p2p node: %v. 
Falling back to %s network", err, fallbackNetName) + log.Error(node.hybridError) + + goto recreateNetwork } } else if cfg.EnableP2P { - p2pNode, err = network.NewP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node, nil) + p2pNode, err = network.NewP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesisInfo, node, nil, nil) if err != nil { log.Errorf("could not create p2p node: %v", err) return nil, err } } else { var wsNode *network.WebsocketNetwork - wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node, nil) + wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesisInfo, node, nil, nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err @@ -372,6 +395,24 @@ func (node *AlgorandFullNode) Start() error { return nil } + if node.hybridError != "" { + node.hybridErrorRoutineWaitGroup.Add(1) + go func() { + defer node.hybridErrorRoutineWaitGroup.Done() + ticker := time.NewTicker(6 * time.Hour) + defer ticker.Stop() + for { + select { + case <-node.ctx.Done(): + return + case <-ticker.C: + // continue logging the error periodically + log.Errorf(node.hybridError) + } + } + }() + } + if node.catchpointCatchupService != nil { startNetwork() node.catchpointCatchupService.Start(node.ctx) @@ -447,6 +488,7 @@ func (node *AlgorandFullNode) Stop() { defer func() { node.mu.Unlock() node.waitMonitoringRoutines() + node.hybridErrorRoutineWaitGroup.Wait() // oldKeyDeletionThread uses accountManager registry so must be stopped before accountManager is closed node.accountManager.Registry().Close() @@ -788,7 +830,7 @@ func latestBlockStatus(ledger *data.Ledger, catchupService *catchup.Service) (s s.UpgradePropose = b.UpgradeVote.UpgradePropose s.UpgradeApprove = b.UpgradeApprove - s.UpgradeDelay = uint64(b.UpgradeVote.UpgradeDelay) + s.UpgradeDelay = b.UpgradeVote.UpgradeDelay s.NextProtocolVoteBefore = b.NextProtocolVoteBefore s.NextProtocolApprovals = b.UpgradeState.NextProtocolApprovals @@ -1448,12 +1490,12 @@ func (node *AlgorandFullNode) IsParticipating() bool { } // SetSyncRound no-ops -func (node *AlgorandFullNode) SetSyncRound(_ uint64) error { +func (node *AlgorandFullNode) SetSyncRound(_ basics.Round) error { return nil } // GetSyncRound returns 0 (not set) in the base node implementation -func (node *AlgorandFullNode) GetSyncRound() uint64 { +func (node *AlgorandFullNode) GetSyncRound() basics.Round { return 0 } diff --git a/node/node_test.go b/node/node_test.go index b39c114a5d..93e0d10428 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -17,6 +17,7 @@ package node import ( + "bytes" "fmt" "math/rand" "os" @@ -32,6 +33,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/crypto" csp "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/account" @@ -822,7 +824,7 @@ func TestMaxSizesCorrect(t *testing.T) { // the logicsig size is *also* an overestimate, because it thinks that the logicsig and // the logicsig args can both be up to to MaxLogicSigMaxSize, but that's the max for // them combined, so it double counts and we have to subtract one. 
- maxCombinedTxnSize -= uint64(config.MaxLogicSigMaxSize) + maxCombinedTxnSize -= uint64(bounds.MaxLogicSigMaxSize) // maxCombinedTxnSize is still an overestimate because it assumes all txn // type fields can be in the same txn. That's not true, but it provides an @@ -1350,3 +1352,36 @@ func TestNodeP2P_NetProtoVersions(t *testing.T) { } } } + +func TestNodeMakeFullHybrid(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + + var buf bytes.Buffer + log := logging.NewLogger() + log.SetOutput(&buf) + + cfg := config.GetDefaultLocal() + cfg.EnableP2PHybridMode = true + cfg.NetAddress = ":0" + + node, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + err = node.Start() + require.NoError(t, err) + require.IsType(t, &network.WebsocketNetwork{}, node.net) + + node.Stop() + messages := buf.String() + require.Contains(t, messages, "could not create hybrid p2p node: P2PHybridMode requires both NetAddress") + require.Contains(t, messages, "Falling back to WS network") +} diff --git a/nodecontrol/NodeController.go b/nodecontrol/NodeController.go index 504e1714b6..30179a3d39 100644 --- a/nodecontrol/NodeController.go +++ b/nodecontrol/NodeController.go @@ -126,7 +126,7 @@ func killPID(pid int) (killed bool, err error) { // Send null signal - if process still exists, it'll return nil // So when we get an error, assume it's gone. if err = process.Signal(syscall.Signal(0)); err != nil { - return false, nil + return false, nil //nolint:nilerr // intentional } select { case <-waitLong: diff --git a/package-deploy.yaml b/package-deploy.yaml index b4da6c2eec..d5c95b2e25 100644 --- a/package-deploy.yaml +++ b/package-deploy.yaml @@ -45,8 +45,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos9.Dockerfile - image: algorand/go-algorand-ci-linux-centos9 + dockerFilePath: docker/build/cicd.centos10.Dockerfile + image: algorand/go-algorand-ci-linux-centos10 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/package-test.yaml b/package-test.yaml index cd526dda68..8ae35b7f4b 100644 --- a/package-test.yaml +++ b/package-test.yaml @@ -16,8 +16,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos9.Dockerfile - image: algorand/mule-linux-centos9 + dockerFilePath: docker/build/cicd.centos10.Dockerfile + image: algorand/mule-linux-centos10 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/package.yaml b/package.yaml index 0cc1d588eb..41b5a9f371 100644 --- a/package.yaml +++ b/package.yaml @@ -11,8 +11,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos9.Dockerfile - image: algorand/go-algorand-ci-linux-centos9 + dockerFilePath: docker/build/cicd.centos10.Dockerfile + image: algorand/go-algorand-ci-linux-centos10 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/protocol/codec_test.go b/protocol/codec_test.go index f8503b4fb1..1971a79f9d 100644 --- a/protocol/codec_test.go +++ b/protocol/codec_test.go @@ -161,7 +161,7 @@ func TestEncodeJSON(t *testing.T) { type ar []string type mp struct { - Map map[int]ar 
`codec:"ld,allocbound=config.MaxEvalDeltaAccounts"` + Map map[int]ar `codec:"ld,allocbound=bounds.MaxEvalDeltaAccounts"` } var v mp diff --git a/protocol/consensus.go b/protocol/consensus.go index a6d0190f47..e8bcfe1161 100644 --- a/protocol/consensus.go +++ b/protocol/consensus.go @@ -23,13 +23,9 @@ import ( // ConsensusVersion is a string that identifies a version of the // consensus protocol. // -//msgp:allocbound ConsensusVersion maxConsensusVersionLen +//msgp:allocbound ConsensusVersion bounds.MaxConsensusVersionLen type ConsensusVersion string -// maxConsensusVersionLen is used for generating MaxSize functions on types that contain ConsensusVersion -// as it's member. 128 is slightly larger than the existing URL length of consensus version URL+hash=89 -const maxConsensusVersionLen = 128 - // DEPRECATEDConsensusV0 is a baseline version of the Algorand consensus protocol. // at the time versioning was introduced. // It is now deprecated. diff --git a/protocol/msgp_gen.go b/protocol/msgp_gen.go index d2c7c1a2e1..76c2639896 100644 --- a/protocol/msgp_gen.go +++ b/protocol/msgp_gen.go @@ -4,6 +4,8 @@ package protocol import ( "github.com/algorand/msgp/msgp" + + "github.com/algorand/go-algorand/config/bounds" ) // The following msgp objects are implemented in this file: @@ -108,8 +110,8 @@ func (z *ConsensusVersion) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalSt err = msgp.WrapError(err) return } - if zb0002 > maxConsensusVersionLen { - err = msgp.ErrOverflow(uint64(zb0002), uint64(maxConsensusVersionLen)) + if zb0002 > bounds.MaxConsensusVersionLen { + err = msgp.ErrOverflow(uint64(zb0002), uint64(bounds.MaxConsensusVersionLen)) return } zb0001, bts, err = msgp.ReadStringBytes(bts) @@ -144,7 +146,7 @@ func (z ConsensusVersion) MsgIsZero() bool { // MaxSize returns a maximum valid message size for this message type func ConsensusVersionMaxSize() (s int) { - s = msgp.StringPrefixSize + maxConsensusVersionLen + s = msgp.StringPrefixSize + bounds.MaxConsensusVersionLen return } diff --git a/protocol/tags.go b/protocol/tags.go index d187fec1a2..ab905d2acc 100644 --- a/protocol/tags.go +++ b/protocol/tags.go @@ -67,7 +67,7 @@ const NetIDVerificationTagMaxSize = 215 // ProposalPayloadTagMaxSize is the maximum size of a ProposalPayloadTag message // This value is dominated by the MaxTxnBytesPerBlock -const ProposalPayloadTagMaxSize = 5250313 +const ProposalPayloadTagMaxSize = 5250594 // StateProofSigTagMaxSize is the maximum size of a StateProofSigTag message const StateProofSigTagMaxSize = 6378 diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index a51a9fcc3d..60459d9f98 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -225,9 +225,9 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R requestedCompressedResponse := strings.Contains(request.Header.Get("Accept-Encoding"), "gzip") if requestedCompressedResponse { response.Header().Set("Content-Encoding", "gzip") - written, err := io.Copy(response, cs) - if err != nil { - logging.Base().Infof("LedgerService.ServeHTTP : unable to write compressed catchpoint file for round %d, written bytes %d : %v", round, written, err) + written, err1 := io.Copy(response, cs) + if err1 != nil { + logging.Base().Infof("LedgerService.ServeHTTP : unable to write compressed catchpoint file for round %d, written bytes %d : %v", round, written, err1) } elapsed := time.Since(start) logging.Base().Infof("LedgerService.ServeHTTP: served catchpoint round %d in %d sec", round, int(elapsed.Seconds())) diff --git 
a/scripts/build_package.sh b/scripts/build_package.sh index 7b538329ff..d9b3c9b18a 100755 --- a/scripts/build_package.sh +++ b/scripts/build_package.sh @@ -37,8 +37,22 @@ if [[ "${UNAME}" == *"MINGW"* ]]; then GOPATH1=$HOME/go else export GOPATH=$(go env GOPATH) + GOPATH1=${GOPATH%%:*} fi -export GOPATHBIN=${GOPATH%%:*}/bin + +# Setup GOBIN +# If env var GOBIN is set, use it +# Otherwise, use go env GOBIN if set +# Otherwise default to $GOPATH1/bin + +if [[ -n "${GOBIN}" ]]; then + export GOBIN=${GOBIN} +elif [[ -n "$(go env GOBIN)" ]]; then + export GOBIN=$(go env GOBIN) +else + export GOBIN=${GOPATH1}/bin +fi + REPO_DIR=$(pwd) echo "Building package for '${OS} - ${ARCH}'" @@ -63,9 +77,9 @@ DEFAULT_RELEASE_NETWORK=$(./scripts/compute_branch_release_network.sh "${DEFAULT mkdir ${PKG_ROOT}/bin # If you modify this list, also update this list in ./cmd/updater/update.sh backup_binaries() -bin_files=("algocfg" "algod" "algoh" "algokey" "carpenter" "catchupsrv" "ddconfig.sh" "diagcfg" "find-nodes.sh" "goal" "kmd" "msgpacktool" "node_exporter" "tealcut" "tealdbg" "update.sh" "updater" "COPYING") +bin_files=("algocfg" "algotmpl" "algod" "algoh" "algokey" "carpenter" "catchupsrv" "ddconfig.sh" "diagcfg" "find-nodes.sh" "goal" "kmd" "msgpacktool" "node_exporter" "tealcut" "tealdbg" "update.sh" "updater" "COPYING") for bin in "${bin_files[@]}"; do - cp ${GOPATHBIN}/${bin} ${PKG_ROOT}/bin + cp ${GOBIN}/${bin} ${PKG_ROOT}/bin if [ $? -ne 0 ]; then exit 1; fi done @@ -109,7 +123,7 @@ echo "Staging tools package files" bin_files=("algons" "coroner" "dispenser" "netgoal" "nodecfg" "pingpong" "cc_service" "cc_agent" "cc_client" "loadgenerator" "COPYING" "dsign" "catchpointdump" "block-generator") mkdir -p ${TOOLS_ROOT} for bin in "${bin_files[@]}"; do - cp ${GOPATHBIN}/${bin} ${TOOLS_ROOT} + cp ${GOBIN}/${bin} ${TOOLS_ROOT} if [ $? -ne 0 ]; then exit 1; fi done @@ -118,7 +132,7 @@ TEST_UTILS_ROOT=${PKG_ROOT}/test-utils bin_files=("algotmpl" "COPYING") mkdir -p ${TEST_UTILS_ROOT} for bin in "${bin_files[@]}"; do - cp ${GOPATHBIN}/${bin} ${TEST_UTILS_ROOT} + cp ${GOBIN}/${bin} ${TEST_UTILS_ROOT} if [ $? -ne 0 ]; then exit 1; fi done @@ -126,4 +140,4 @@ cp "scripts/sysctl.sh" ${TOOLS_ROOT} if [ $? -ne 0 ]; then exit 1; fi cp "scripts/sysctl-all.sh" ${TOOLS_ROOT} -if [ $? -ne 0 ]; then exit 1; fi +if [ $? 
-ne 0 ]; then exit 1; fi \ No newline at end of file diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh index 7a004851a1..75e74fd8f8 100755 --- a/scripts/buildtools/install_buildtools.sh +++ b/scripts/buildtools/install_buildtools.sh @@ -91,5 +91,4 @@ install_go_module golang.org/x/tools golang.org/x/tools/cmd/stringer install_go_module github.com/go-swagger/go-swagger github.com/go-swagger/go-swagger/cmd/swagger install_go_module github.com/algorand/msgp install_go_module gotest.tools/gotestsum -install_go_module github.com/algorand/oapi-codegen github.com/algorand/oapi-codegen/cmd/oapi-codegen install_go_module github.com/golangci/golangci-lint/cmd/golangci-lint diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions index e34ad22500..330cd8bf90 100644 --- a/scripts/buildtools/versions +++ b/scripts/buildtools/versions @@ -1,7 +1,6 @@ golang.org/x/lint v0.0.0-20241112194109-818c5a804067 golang.org/x/tools v0.27.0 github.com/algorand/msgp v1.1.60 -github.com/algorand/oapi-codegen v1.12.0-algorand.0 github.com/go-swagger/go-swagger v0.31.0 gotest.tools/gotestsum v1.12.0 github.com/golangci/golangci-lint/cmd/golangci-lint v1.62.0 diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh index 4022bb544b..229e4edf4a 100755 --- a/scripts/check_deps.sh +++ b/scripts/check_deps.sh @@ -37,7 +37,6 @@ missing_dep() { GO_DEPS=( "msgp" "golangci-lint" - "oapi-codegen" "swagger" ) diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh index 242c89e3d6..55d9f59198 100755 --- a/scripts/configure_dev.sh +++ b/scripts/configure_dev.sh @@ -73,7 +73,7 @@ function install_windows_shellcheck() { if [ "${OS}" = "linux" ]; then if ! which sudo >/dev/null; then - "$SCRIPTPATH/install_linux_deps.sh" + DEBIAN_FRONTEND="$DEBIAN_FRONTEND" "$SCRIPTPATH/install_linux_deps.sh" else sudo "$SCRIPTPATH/install_linux_deps.sh" fi diff --git a/scripts/export_sdk_types.py b/scripts/export_sdk_types.py index 9a9efc1954..a95d742ada 100755 --- a/scripts/export_sdk_types.py +++ b/scripts/export_sdk_types.py @@ -53,7 +53,7 @@ def replace_between(filename, content, start_pattern, stop_pattern=None): start_idx = original.find(start_pattern) if start_idx == -1: - raise ValueError("Start pattern not found") + raise ValueError(f"Start pattern '{start_pattern}' not found in {filename}") start_idx += len(start_pattern) stop_idx = len(original) @@ -67,17 +67,42 @@ def replace_between(filename, content, start_pattern, stop_pattern=None): with open(filename, 'w', encoding='utf-8') as f: f.write(updated) +def find_line(filename, s): + """ + Returns the line from `filename` that contains `s` + Args: + filename (str): Path to the file to modify. + s (str): Name of the substring to look for + """ + with open(filename, 'r', encoding='utf-8') as f: + original = f.read() + + start_idx = original.find(s) + if start_idx == -1: + return "" + stop_idx = original.find("\n", start_idx) + + return original[start_idx:stop_idx] SDK="../go-algorand-sdk/" def sdkize(input): # allocbounds are not used by the SDK. It's confusing to leave them in. - input = re.sub(",allocbound=.*\"", '"', input) + input = re.sub(",(allocbound|maxtotalbytes)=.*\"", '"', input) + input = re.sub("^\\s*//msgp:(allocbound|sort|ignore).*\n", '', input, flags=re.MULTILINE) + + # protocol.ConsensusVersion and protocolConsensusVxx constants are + # the only things that stays in the protocol package. 
So we "hide" + # them from the replacements below, then switch it back + input = input.replace("protocol.ConsensusV", "protocolConsensusV") + input = input.replace("protocol.ConsensusFuture", "protocolConsensusFuture") # All types are in the same package in the SDK - input = input.replace("basics.", "") - input = input.replace("crypto.", "") - input = input.replace("protocol.", "") + input = re.sub(r'(basics|crypto|committee|transactions|protocol)\.\b', r'', input) + + # and go back... + input = input.replace("protocolConsensusV", "protocol.ConsensusV") + input = input.replace("protocolConsensusFuture", "protocol.ConsensusFuture") # keyreg input = input.replace("OneTimeSignatureVerifier", "VotePK") @@ -89,44 +114,99 @@ def sdkize(input): # transaction - for some reason, ApplicationCallTxnFields is wrapped in this nothing-burger input = input.replace("ApplicationCallTxnFields", "ApplicationFields") + # These are "string" in the SDK, even though we actually have + # `protocol.ConsensusVersion` available. Who knows? + for field in ["UpgradePropose", "CurrentProtocol", "NextProtocol"]: + input = re.sub(field+"\\s+protocol.ConsensusVersion", field+" string", input) + return input -def export(src, dst, start, stop): +def export(src, dst, start, stop=None): x = extract_between(src, start, stop) x = sdkize(x) replace_between(SDK+dst, x, start, stop) subprocess.run(["gofmt", "-w", SDK+dst]) +def export_type(name, src, dst): + export_thing("type {thing} ", name, src, dst) + +def export_var(name, src, dst): + export_thing("var {thing} ", name, src, dst) + +def export_func(name, src, dst): + export_thing("func {thing}(", name, src, dst) + +def export_thing(pattern, name, src, dst): + start = pattern.format(thing=name) + line = find_line(src, start) + if line == "": + raise ValueError(f"Unable to find {name} in {src}") + stop = "\n}\n" if line.endswith("{") else "\n" + x = extract_between(src, start, stop) + x = sdkize(x) + if dst.endswith(".go"): # explicit dst + dst = f"{SDK}{dst}" + else: + dst = f"{SDK}types/{dst}.go" + replace_between(dst, x, start, stop) + subprocess.run(["gofmt", "-w", dst]) if __name__ == "__main__": - # Replace the entire file, starting with "type ConsensusParams" - consensus = extract_between("config/consensus.go", "type ConsensusParams") - replace_between(SDK+"protocol/config/consensus.go", consensus, "type ConsensusParams") - - # Common tranbsaction types - export("data/transactions/transaction.go", "types/transaction.go", - "type Header ", "\n}") - export("data/transactions/transaction.go", "types/transaction.go", - "type Transaction ", "\n}") - export("data/transactions/signedtxn.go", "types/transaction.go", - "type SignedTxn ", "\n}") - - # The transaction types - export("data/transactions/payment.go", "types/transaction.go", - "type PaymentTxnFields ", "\n}") - export("data/transactions/keyreg.go", "types/transaction.go", - "type KeyregTxnFields ", "\n}") - - export("data/transactions/asset.go", "types/transaction.go", - "type AssetConfigTxnFields ", "\n}") - export("data/transactions/asset.go", "types/transaction.go", - "type AssetTransferTxnFields ", "\n}") - export("data/transactions/asset.go", "types/transaction.go", - "type AssetFreezeTxnFields ", "\n}") - - export("data/transactions/application.go", "types/applications.go", - "type ApplicationCallTxnFields ", "\n}") + # Replace the entire file, after "import" (basically just relicense it) + export("protocol/consensus.go", "protocol/consensus.go", "import") + + src = "config/consensus.go" + dst = 
"protocol/config/consensus.go" + export_type("ConsensusParams", src, dst) + export_type("ProposerPayoutRules", src, dst) + export_type("BonusPlan", src, dst) + export_type("PaysetCommitType", src, dst) + export_type("ConsensusProtocols", src, dst) + export_var("Consensus", src, dst) + export_func("initConsensusProtocols", src, dst) + export_type("Global", src, dst) + export_var("Protocol", src, dst) + # do _not_ export init(), since go-algorand sets bounds, SDK does not + + # Common transaction types + export_type("Header", "data/transactions/transaction.go", "transaction") + export_type("Transaction", "data/transactions/transaction.go", "transaction") + export_type("SignedTxn", "data/transactions/signedtxn.go", "transaction") + + # The transaction types themselves + # payment + export_type("PaymentTxnFields", "data/transactions/payment.go", "transaction") + # keyreg + export_type("KeyregTxnFields", "data/transactions/keyreg.go", "transaction") + # assets + export_type("AssetConfigTxnFields", "data/transactions/asset.go", "transaction") + export_type("AssetTransferTxnFields", "data/transactions/asset.go", "transaction") + export_type("AssetFreezeTxnFields", "data/transactions/asset.go", "transaction") + export_type("AssetIndex", "data/basics/userBalance.go", "asset") + export_type("AssetParams", "data/basics/userBalance.go", "asset") + # apps + export_type("ApplicationCallTxnFields", "data/transactions/application.go", "applications") + export_type("AppIndex", "data/basics/userBalance.go", "applications") + + # Block + export_type("BlockHeader", "data/bookkeeping/block.go", "block") + export_type("TxnCommitments", "data/bookkeeping/block.go", "block") + export_type("ParticipationUpdates", "data/bookkeeping/block.go", "block") + export_type("RewardsState", "data/bookkeeping/block.go", "block") + export_type("UpgradeVote", "data/bookkeeping/block.go", "block") + export_type("UpgradeState", "data/bookkeeping/block.go", "block") + export_type("StateProofTrackingData", "data/bookkeeping/block.go", "block") + export_type("Block", "data/bookkeeping/block.go", "block") + export_type("Payset", "data/transactions/payset.go", "block") + export_type("SignedTxnInBlock", "data/transactions/signedtxn.go", "block") + export_type("SignedTxnWithAD", "data/transactions/signedtxn.go", "block") + export_type("ApplyData", "data/transactions/transaction.go", "block") + export_type("EvalDelta", "data/transactions/teal.go", "block") + export_type("StateDelta", "data/basics/teal.go", "block") + export_type("ValueDelta", "data/basics/teal.go", "block") + export_type("DeltaAction", "data/basics/teal.go", "block") # StateDelta. Eventually need to deal with all types from ledgercore.StateDelta down - export("data/basics/userBalance.go", "types/statedelta.go", - "type AppParams ", "\n}") + export_type("AppParams", "data/basics/userBalance.go", "statedelta") + export_type("TealKeyValue", "data/basics/teal.go", "statedelta") + export_type("TealValue", "data/basics/teal.go", "statedelta") diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh index f1d88bbe27..9b2692454d 100755 --- a/scripts/get_golang_version.sh +++ b/scripts/get_golang_version.sh @@ -11,7 +11,7 @@ # Our build task-runner `mule` will refer to this script and will automatically # build a new image whenever the version number has been changed. -BUILD=1.23.3 +BUILD=1.23.9 MIN=$(echo $BUILD | cut -d. 
-f1-2).0 if [ "$1" = all ] diff --git a/scripts/release/README.md b/scripts/release/README.md index 534bc70faa..5395195f8c 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -50,13 +50,13 @@ This section briefly describes the expected outcomes of the current build pipeli 1. build - 1. Build (compile) the binaries in a Centos 9 docker container that will then be used by both `deb` and `rpm` packaging. + 1. Build (compile) the binaries in a Centos 10 docker container that will then be used by both `deb` and `rpm` packaging. - 1. Docker containers will package `deb` and `rpm` artifacts inside of Ubuntu 20.04 and Centos 7 & 8, respectively. + 1. Docker containers will package `deb` and `rpm` artifacts inside of Ubuntu 24.04 and Centos 10, respectively. 1. Jenkins will then pause to wait for [the only manual part of the build/package/test phase], which is to forward the `gpg-agent` that establishes a direct between the local machine that contains the signing keys and the remote ec2 instance. - 1. Once the signatures have been verified, the all the build artificats (tarballs, `rpm` and `deb` packages, signatures) to an `s3` bucket. Included in the bucket are the build logs. + 1. Once the signatures have been verified, all of the build artifacts (tarballs, `rpm` and `deb` packages, signatures) are uploaded to an `s3` bucket. Included in the bucket are the build logs. 1. test @@ -70,10 +70,11 @@ This section briefly describes the expected outcomes of the current build pipeli - The packages are built from the correct branch and channel and are the correct version. This done by running `algod -v`. + This is done for the following docker containers: - quay.io/centos/centos:stream9 - - fedora:39 - - fedora:40 - - ubuntu:20.04 + - quay.io/centos/centos:stream10 + - fedora:41 + - fedora:42 - ubuntu:22.04 + - ubuntu:24.04 - Creates a test network using `goal`. - et. al.
diff --git a/scripts/release/build/stage/build/task.sh b/scripts/release/build/stage/build/task.sh index 944aeb7b34..42a8a94497 100755 --- a/scripts/release/build/stage/build/task.sh +++ b/scripts/release/build/stage/build/task.sh @@ -31,7 +31,7 @@ else fi # Run RPM build in Centos 9 Docker container -sg docker "docker build -t algocentosbuild - < $HOME/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" +sg docker "docker build -t algocentosbuild - < $HOME/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos10.Dockerfile" sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/build/rpm/build.sh" echo diff --git a/scripts/release/common/docker/centos9.Dockerfile b/scripts/release/common/docker/centos10.Dockerfile similarity index 81% rename from scripts/release/common/docker/centos9.Dockerfile rename to scripts/release/common/docker/centos10.Dockerfile index 1151201edb..7c50ec9368 100644 --- a/scripts/release/common/docker/centos9.Dockerfile +++ b/scripts/release/common/docker/centos10.Dockerfile @@ -1,7 +1,7 @@ -FROM quay.io/centos/centos:stream9 +FROM quay.io/centos/centos:stream10 WORKDIR /root -RUN dnf install -y epel-release epel-next-release && dnf config-manager --set-enabled crb && \ +RUN dnf install -y epel-release && dnf config-manager --set-enabled crb && \ dnf update -y && \ dnf install -y autoconf awscli curl git gnupg2 nfs-utils python36 expect jq libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which && \ dnf -y --enablerepo=powertools install libstdc++-static diff --git a/scripts/release/common/docker/setup.Dockerfile b/scripts/release/common/docker/setup.Dockerfile index 7d2988ca9a..05262b186f 100644 --- a/scripts/release/common/docker/setup.Dockerfile +++ b/scripts/release/common/docker/setup.Dockerfile @@ -9,7 +9,7 @@ # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=869194 # https://github.com/boto/s3transfer/pull/102 -FROM ubuntu:20.04 +FROM ubuntu:24.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y jq git python python-pip python3-boto3 ssh && \ pip install awscli diff --git a/scripts/release/common/setup.sh b/scripts/release/common/setup.sh index 6124e0b95f..07dc4ce1ad 100755 --- a/scripts/release/common/setup.sh +++ b/scripts/release/common/setup.sh @@ -104,8 +104,8 @@ else fi sudo usermod -a -G docker ubuntu -sg docker "docker pull quay.io/centos/centos:stream9" -sg docker "docker pull ubuntu:22.04" +sg docker "docker pull quay.io/centos/centos:stream10" +sg docker "docker pull ubuntu:24.04" cat << EOF >> "${HOME}/.bashrc" export EDITOR=vi diff --git a/scripts/release/mule/common/ensure_centos9_image.sh b/scripts/release/mule/common/ensure_centos10_image.sh similarity index 70% rename from scripts/release/mule/common/ensure_centos9_image.sh rename to scripts/release/mule/common/ensure_centos10_image.sh index bb03624c1b..963726f5c6 100755 --- a/scripts/release/mule/common/ensure_centos9_image.sh +++ b/scripts/release/mule/common/ensure_centos10_image.sh @@ -4,7 +4,7 @@ set -exo pipefail # Ensure the centos docker image is built and available -DOCKER_IMAGE="algorand/go-algorand-ci-linux-centos9:amd64-$(sha1sum scripts/configure_dev-deps.sh | cut -f1 -d' ')" +DOCKER_IMAGE="algorand/go-algorand-ci-linux-centos10:amd64-$(sha1sum scripts/configure_dev-deps.sh | cut -f1 -d' ')" MATCH=${DOCKER_IMAGE/:*/} echo "Checking for 
RPM image" @@ -13,5 +13,5 @@ if docker images $DOCKER_IMAGE | grep -qs $MATCH > /dev/null 2>&1; then else echo "RPM image doesn't exist, building" docker build --platform=linux/amd64 --build-arg ARCH=amd64 \ - --build-arg GOLANG_VERSION=$(./scripts/get_golang_version.sh) -t $DOCKER_IMAGE -f docker/build/cicd.centos9.Dockerfile . + --build-arg GOLANG_VERSION=$(./scripts/get_golang_version.sh) -t $DOCKER_IMAGE -f docker/build/cicd.centos10.Dockerfile . fi diff --git a/scripts/release/prod/rpm/run_centos.sh b/scripts/release/prod/rpm/run_centos.sh index abb2b73f9e..f0973c4dd7 100755 --- a/scripts/release/prod/rpm/run_centos.sh +++ b/scripts/release/prod/rpm/run_centos.sh @@ -5,8 +5,8 @@ set -ex . "${HOME}"/build_env -# Run RPM build in Centos 9 Docker container -sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" +# Run RPM build in Centos 10 Docker container +sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos10.Dockerfile" sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=/run/user/1000/gnupg/S.gpg-agent,dst=/root/S.gpg-agent --mount type=bind,src=${HOME}/prodrepo,dst=/root/prodrepo --mount type=bind,src=${HOME}/keys,dst=/root/keys --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/prod/rpm/snapshot.sh" diff --git a/scripts/release/test/rpm/run_centos.sh b/scripts/release/test/rpm/run_centos.sh index d206a085d7..baa7b9421a 100755 --- a/scripts/release/test/rpm/run_centos.sh +++ b/scripts/release/test/rpm/run_centos.sh @@ -15,7 +15,7 @@ if [ "$CHANNEL" = beta ]; then fi # Run RPM build in Centos 9 Docker container -sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" +sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos10.Dockerfile" cat <"${HOME}"/dummyrepo/algodummy.repo [algodummy] diff --git a/scripts/release/test/util/test_package.sh b/scripts/release/test/util/test_package.sh index c8dd206c8c..8052477c78 100755 --- a/scripts/release/test/util/test_package.sh +++ b/scripts/release/test/util/test_package.sh @@ -9,10 +9,11 @@ set -ex OS_LIST=( quay.io/centos/centos:stream9 - fedora:39 - fedora:40 - ubuntu:20.04 + quay.io/centos/centos:stream10 + fedora:41 + fedora:42 ubuntu:22.04 + ubuntu:24.04 ) FAILED=() diff --git a/scripts/travis/configure_dev.sh b/scripts/travis/configure_dev.sh index 82cc9c25da..e06c36917b 100755 --- a/scripts/travis/configure_dev.sh +++ b/scripts/travis/configure_dev.sh @@ -6,7 +6,7 @@ set +e SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" OS=$("${SCRIPTPATH}/../ostype.sh") ARCH=$("${SCRIPTPATH}/../archtype.sh") - +export DEBIAN_FRONTEND="noninteractive" if [[ "${OS}" == "linux" ]]; then if [[ "${ARCH}" == "arm64" ]]; then set -e diff --git a/scripts/travis/deploy_packages.sh b/scripts/travis/deploy_packages.sh index 8eae9307b4..1f188721b7 100755 --- a/scripts/travis/deploy_packages.sh +++ b/scripts/travis/deploy_packages.sh @@ -29,6 +29,8 @@ if [ "${NIGHTLY_BUILD}" == "true" ]; then if [ "${OSARCH}" == "darwin/arm64" ]; then ./scripts/travis/build.sh --make_universal OSARCH="darwin/universal" + else + scripts/travis/build.sh --make_debug fi NO_BUILD=true fi diff --git a/shared/pingpong/accounts.go 
b/shared/pingpong/accounts.go index f011a2679d..8af1f9bff7 100644 --- a/shared/pingpong/accounts.go +++ b/shared/pingpong/accounts.go @@ -161,13 +161,13 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) { } if pps.cinfo.OptIns == nil { - pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp) + pps.cinfo.OptIns = make(map[any][]string, pps.cfg.NumAsset+pps.cfg.NumApp) } if pps.cinfo.AssetParams == nil { - pps.cinfo.AssetParams = make(map[uint64]model.AssetParams, pps.cfg.NumAsset) + pps.cinfo.AssetParams = make(map[basics.AssetIndex]model.AssetParams, pps.cfg.NumAsset) } if pps.cinfo.AppParams == nil { - pps.cinfo.AppParams = make(map[uint64]model.ApplicationParams, pps.cfg.NumApp) + pps.cinfo.AppParams = make(map[basics.AppIndex]model.ApplicationParams, pps.cfg.NumApp) } sources := make([]<-chan *crypto.SignatureSecrets, 0, 2) @@ -262,7 +262,7 @@ func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, assetID := holding.AssetID pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr) if ppa.holdings == nil { - ppa.holdings = make(map[uint64]uint64) + ppa.holdings = make(map[basics.AssetIndex]uint64) } ppa.holdings[assetID] = holding.Amount } @@ -285,7 +285,7 @@ func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, } type assetopti struct { - assetID uint64 + assetID basics.AssetIndex params model.AssetParams optins []string // addr strings } @@ -312,10 +312,10 @@ func (as *assetSet) Swap(a, b int) { func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) { if pps.cinfo.AssetParams == nil { - pps.cinfo.AssetParams = make(map[uint64]model.AssetParams) + pps.cinfo.AssetParams = make(map[basics.AssetIndex]model.AssetParams) } if pps.cinfo.OptIns == nil { - pps.cinfo.OptIns = make(map[uint64][]string) + pps.cinfo.OptIns = make(map[any][]string) } // create new assets as needed @@ -337,7 +337,7 @@ func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) { sort.Sort(&ta) if len(assets) > int(pps.cfg.NumAsset) { assets = assets[:pps.cfg.NumAsset] - nap := make(map[uint64]model.AssetParams, pps.cfg.NumAsset) + nap := make(map[basics.AssetIndex]model.AssetParams, pps.cfg.NumAsset) for _, asset := range assets { nap[asset.assetID] = asset.params } @@ -418,7 +418,7 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) { newAssetAddrs[addr] = acct } // wait for new assets to be created, fetch account data for them - newAssets := make(map[uint64]model.AssetParams, assetsNeeded) + newAssets := make(map[basics.AssetIndex]model.AssetParams, assetsNeeded) timeout := time.Now().Add(10 * time.Second) for len(newAssets) < assetsNeeded { for addr, acct := range newAssetAddrs { @@ -443,7 +443,7 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) { assetID := holding.AssetID pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr) if acct.holdings == nil { - acct.holdings = make(map[uint64]uint64) + acct.holdings = make(map[basics.AssetIndex]uint64) } acct.holdings[assetID] = holding.Amount } @@ -731,11 +731,11 @@ func getProto(client *libgoal.Client) (config.ConsensusParams, error) { // ensure that cfg.NumPartAccounts have cfg.NumAppOptIn opted in selecting from cfg.NumApp func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) { if pps.cinfo.AppParams == nil { - pps.cinfo.AppParams = make(map[uint64]model.ApplicationParams) + pps.cinfo.AppParams = 
make(map[basics.AppIndex]model.ApplicationParams) } if pps.cinfo.OptIns == nil { - pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp) + pps.cinfo.OptIns = make(map[any][]string, pps.cfg.NumAsset+pps.cfg.NumApp) } // generate new apps @@ -890,7 +890,7 @@ func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transact return tx, err } -func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Client) (tx transactions.Transaction, err error) { +func (pps *WorkerState) appOptIn(addr string, appID basics.AppIndex, client *libgoal.Client) (tx transactions.Transaction, err error) { tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil, nil, 0) if err != nil { fmt.Printf("Cannot create app txn\n") @@ -908,7 +908,7 @@ func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Clie return } -func (pps *WorkerState) appFundFromSourceAccount(appID uint64, client *libgoal.Client) (err error) { +func (pps *WorkerState) appFundFromSourceAccount(appID basics.AppIndex, client *libgoal.Client) (err error) { // currently, apps only need to be funded if boxes are used if pps.getNumBoxes() > 0 { var srcFunds uint64 @@ -917,7 +917,7 @@ func (pps *WorkerState) appFundFromSourceAccount(appID uint64, client *libgoal.C return err } - appAddr := basics.AppIndex(appID).Address() + appAddr := appID.Address() mbr := proto.MinBalance + proto.BoxFlatMinBalance*uint64(pps.getNumBoxes()) + proto.BoxByteMinBalance*(proto.MaxBoxSize+uint64(proto.MaxAppKeyLen))*uint64(pps.getNumBoxes()) diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index 5727bc00dd..bfcff07deb 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -49,9 +49,9 @@ import ( // CreatablesInfo has information about created assets, apps and opting in type CreatablesInfo struct { - AssetParams map[uint64]model.AssetParams - AppParams map[uint64]model.ApplicationParams - OptIns map[uint64][]string + AssetParams map[basics.AssetIndex]model.AssetParams + AppParams map[basics.AppIndex]model.ApplicationParams + OptIns map[any][]string } // pingPongAccount represents the account state for each account in the pingpong application @@ -65,7 +65,7 @@ type pingPongAccount struct { pk basics.Address // asset holdings - holdings map[uint64]uint64 + holdings map[basics.AssetIndex]uint64 } func (ppa *pingPongAccount) getBalance() uint64 { @@ -91,18 +91,18 @@ func (ppa *pingPongAccount) addBalance(offset int64) { } } -func (ppa *pingPongAccount) getAsset(aid uint64) (v uint64, ok bool) { +func (ppa *pingPongAccount) getAsset(aid basics.AssetIndex) (v uint64, ok bool) { ppa.Lock() defer ppa.Unlock() v, ok = ppa.holdings[aid] return } -func (ppa *pingPongAccount) setAsset(aid, value uint64) { +func (ppa *pingPongAccount) setAsset(aid basics.AssetIndex, value uint64) { ppa.Lock() defer ppa.Unlock() ppa.holdings[aid] = value } -func (ppa *pingPongAccount) addAsset(aid uint64, dv int64) { +func (ppa *pingPongAccount) addAsset(aid basics.AssetIndex, dv int64) { ppa.Lock() defer ppa.Unlock() v := ppa.holdings[aid] @@ -267,8 +267,8 @@ func computeAccountMinBalance(client *libgoal.Client, cfg PpConfig) (fundingRequ optInCost := uint64(cfg.NumApp) * proto.AppFlatOptInMinBalance * uint64(maxAppsOptedIn) maxGlobalSchema := basics.StateSchema{NumUint: proto.MaxGlobalSchemaEntries, NumByteSlice: proto.MaxGlobalSchemaEntries} maxLocalSchema := basics.StateSchema{NumUint: proto.MaxLocalSchemaEntries, NumByteSlice: proto.MaxLocalSchemaEntries} - schemaCost := 
uint64(cfg.NumApp) * (maxGlobalSchema.MinBalance(&proto).Raw*uint64(maxAppsCreated) + - maxLocalSchema.MinBalance(&proto).Raw*uint64(maxAppsOptedIn)) + schemaCost := uint64(cfg.NumApp) * (maxGlobalSchema.MinBalance(proto.BalanceRequirements()).Raw*uint64(maxAppsCreated) + + maxLocalSchema.MinBalance(proto.BalanceRequirements()).Raw*uint64(maxAppsOptedIn)) fundingRequiredBalance += creationCost + optInCost + schemaCost runningRequiredBalance += creationCost + optInCost + schemaCost } @@ -682,7 +682,7 @@ func NewPingpong(cfg PpConfig) *WorkerState { } } -func (pps *WorkerState) randAssetID() (aidx uint64) { +func (pps *WorkerState) randAssetID() (aidx basics.AssetIndex) { if len(pps.cinfo.AssetParams) == 0 { return 0 } @@ -696,7 +696,7 @@ func (pps *WorkerState) randAssetID() (aidx uint64) { } return } -func (pps *WorkerState) randAppID() (aidx uint64) { +func (pps *WorkerState) randAppID() (aidx basics.AppIndex) { if len(pps.cinfo.AppParams) == 0 { return 0 } @@ -960,14 +960,14 @@ func (pps *WorkerState) constructTxn(from, to string, fee uint64, client *libgoa totalWeight := pps.cfg.WeightPayment + pps.cfg.WeightAsset + pps.cfg.WeightApp target := rand.Float64() * totalWeight if target < pps.cfg.WeightAsset && pps.cfg.NumAsset > 0 { - txn, sender, update, err = pps.constructAssetTxn(from, to, fee, client, noteField, lease) + txn, sender, update, err = pps.constructAssetTxn(fee, client, noteField, lease) if err != errNotOptedIn { goto weightdone } } target -= pps.cfg.WeightAsset if target < pps.cfg.WeightApp && pps.cfg.NumApp > 0 { - txn, sender, update, err = pps.constructAppTxn(from, to, fee, client, noteField, lease) + txn, sender, update, err = pps.constructAppTxn(from, fee, client, noteField, lease) if err != errNotOptedIn { goto weightdone } @@ -1052,7 +1052,7 @@ func pReplace(i int) bool { return rand.Intn(i) == 0 } -func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) { +func (pps *WorkerState) constructAssetTxn(fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) { // select a pair of random opted-in accounts by aidx // use them as from/to addresses amt := uint64(1) @@ -1062,17 +1062,27 @@ func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, cli return } if len(pps.cinfo.OptIns[aidx]) == 0 { - // Opt-in another - // TODO: continue opt-in up to some amount? gradually? - txn, err = pps.appOptIn(from, aidx, client) - if err != nil { - return - } - update = &appOptInUpdate{ - addr: from, - aidx: aidx, - } - return txn, from, update, nil + panic("This probably never happens. If it does, investigate this.") + + /* + This code was here, but it makes no sense. After selecting an + _asset_ id, it performs an _app_ opt-in. Best guess is that this + never runs - enough accounts are opted in during setup that the len=0 + condition above never occurs. The code used to compile because we + conflated asset and app id as `uint64`. + + // Opt-in another + // TODO: continue opt-in up to some amount? gradually? 
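The conflation described above is exactly what the stronger typing now catches at compile time; a minimal standalone sketch (AssetIndex and AppIndex here are local stand-ins for the basics types, which are both defined uint64 types):

package main

// Local stand-ins for basics.AssetIndex and basics.AppIndex, which are both
// defined uint64 types. Distinct defined types never convert implicitly.
type AssetIndex uint64
type AppIndex uint64

func appOptIn(app AppIndex) {}

func main() {
	var aidx AssetIndex = 7
	// appOptIn(aidx)        // does not compile: cannot use aidx (AssetIndex) as AppIndex
	appOptIn(AppIndex(aidx)) // an accidental mix now requires a deliberate conversion
}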
+ txn, err = pps.appOptIn(from, aidx, client) + if err != nil { + return + } + update = &appOptInUpdate{ + addr: from, + aidx: aidx, + } + return txn, from, update, nil + */ } optInsForAsset := pps.cinfo.OptIns[aidx] @@ -1109,7 +1119,7 @@ func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, cli } to := toAcct.pk.String() - from = fromAcct.pk.String() + from := fromAcct.pk.String() sender = from if to != from { if toAcct.holdings[aidx] < 1000 && fromAcct.holdings[aidx] > 11000 { @@ -1137,15 +1147,20 @@ func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, cli return txn, sender, update, err } +/* This was part of the mystery in constructAppTxn, which was conflating app and + asset IDs. Commenting out because it does not compile now that we more + strongly segregate app/asset indexes. + type appOptInUpdate struct { addr string - aidx uint64 + aidx basics.AppIndex } func (au *appOptInUpdate) apply(pps *WorkerState) { pps.accounts[au.addr].holdings[au.aidx] = 0 pps.cinfo.OptIns[au.aidx] = uniqueAppend(pps.cinfo.OptIns[au.aidx], au.addr) } +*/ type nopUpdate struct { } @@ -1158,7 +1173,7 @@ var nopUpdateSingleton = &nopUpdate{} type assetUpdate struct { from string to string - aidx uint64 + aidx basics.AssetIndex amt uint64 fee uint64 } @@ -1168,12 +1183,12 @@ func (au *assetUpdate) apply(pps *WorkerState) { pps.accounts[au.from].holdings[au.aidx] -= au.amt to := pps.accounts[au.to] if to.holdings == nil { - to.holdings = make(map[uint64]uint64) + to.holdings = make(map[basics.AssetIndex]uint64) } to.holdings[au.aidx] += au.amt } -func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) { +func (pps *WorkerState) constructAppTxn(from string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) { // select opted-in accounts for Txn.Accounts field var accounts []string aidx := pps.randAppID() @@ -1184,7 +1199,7 @@ func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *lib // construct box ref array var boxRefs []transactions.BoxRef - for i := uint32(0); i < pps.getNumBoxes(); i++ { + for i := range pps.getNumBoxes() { boxRefs = append(boxRefs, transactions.BoxRef{Index: 0, Name: []byte{fmt.Sprintf("%d", i)[0]}}) } diff --git a/stateproof/stateproofMessageGenerator.go b/stateproof/stateproofMessageGenerator.go index 7cc5cdf442..6f5357f734 100644 --- a/stateproof/stateproofMessageGenerator.go +++ b/stateproof/stateproofMessageGenerator.go @@ -59,7 +59,7 @@ func GenerateStateProofMessage(l BlockHeaderFetcher, round basics.Round) (statep } proto := config.Consensus[latestRoundHeader.CurrentProtocol] - votersRound := uint64(round.SubSaturate(basics.Round(proto.StateProofInterval))) + votersRound := round.SubSaturate(basics.Round(proto.StateProofInterval)) commitment, err := createHeaderCommitment(l, &proto, &latestRoundHeader) if err != nil { return stateproofmsg.Message{}, err @@ -75,7 +75,7 @@ func GenerateStateProofMessage(l BlockHeaderFetcher, round basics.Round) (statep VotersCommitment: latestRoundHeader.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment, LnProvenWeight: lnProvenWeight, FirstAttestedRound: votersRound + 1, - LastAttestedRound: uint64(latestRoundHeader.Round), + LastAttestedRound: latestRoundHeader.Round, }, nil } @@ -98,7 +98,7 @@ func 
calculateLnProvenWeight(latestRoundInInterval *bookkeeping.BlockHeader, pro func createHeaderCommitment(l BlockHeaderFetcher, proto *config.ConsensusParams, latestRoundHeader *bookkeeping.BlockHeader) (crypto.GenericDigest, error) { stateProofInterval := proto.StateProofInterval - if latestRoundHeader.Round < basics.Round(stateProofInterval) { + if uint64(latestRoundHeader.Round) < stateProofInterval { return nil, fmt.Errorf("createHeaderCommitment stateProofRound must be >= than stateproofInterval (%w)", errInvalidParams) } @@ -124,9 +124,8 @@ func FetchLightHeaders(l BlockHeaderFetcher, stateProofInterval uint64, latestRo blkHdrArr := make(lightBlockHeaders, stateProofInterval) firstRound := latestRound - basics.Round(stateProofInterval) + 1 - for i := uint64(0); i < stateProofInterval; i++ { - rnd := firstRound + basics.Round(i) - hdr, err := l.BlockHdr(rnd) + for i := range basics.Round(stateProofInterval) { + hdr, err := l.BlockHdr(firstRound + i) if err != nil { return nil, err } @@ -136,7 +135,7 @@ func FetchLightHeaders(l BlockHeaderFetcher, stateProofInterval uint64, latestRo } // GenerateProofOfLightBlockHeaders sets up a tree over the blkHdrArr and returns merkle proof over one of the blocks. -func GenerateProofOfLightBlockHeaders(stateProofInterval uint64, blkHdrArr lightBlockHeaders, blockIndex uint64) (*merklearray.SingleLeafProof, error) { +func GenerateProofOfLightBlockHeaders(stateProofInterval uint64, blkHdrArr lightBlockHeaders, blockIndex basics.Round) (*merklearray.SingleLeafProof, error) { if blkHdrArr.Length() != stateProofInterval { return nil, fmt.Errorf("received wrong amount of block headers. err: %w - %d != %d", errInvalidParams, blkHdrArr.Length(), stateProofInterval) } @@ -149,5 +148,5 @@ func GenerateProofOfLightBlockHeaders(stateProofInterval uint64, blkHdrArr light return nil, err } - return tree.ProveSingleLeaf(blockIndex) + return tree.ProveSingleLeaf(uint64(blockIndex)) } diff --git a/stateproof/stateproofMessageGenerator_test.go b/stateproof/stateproofMessageGenerator_test.go index 54b9bfe4f3..b5e46b137f 100644 --- a/stateproof/stateproofMessageGenerator_test.go +++ b/stateproof/stateproofMessageGenerator_test.go @@ -17,10 +17,11 @@ package stateproof import ( - "github.com/stretchr/testify/require" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklearray" @@ -81,15 +82,15 @@ func TestStateProofMessage(t *testing.T) { } // since a state proof txn was created, we update the header with the next state proof round // i.e network has accepted the state proof. 
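The rewritten FetchLightHeaders loop relies on Go 1.22's range-over-integer form: ranging over a value of a defined integer type yields iteration values of that same type, so the explicit basics.Round(i) conversion disappears. A minimal sketch with a stand-in Round type:

package main

import "fmt"

// Round is a stand-in for basics.Round, a defined uint64 type.
type Round uint64

func main() {
	firstRound := Round(100)
	interval := uint64(4)
	// Go 1.22+: the iteration values of `range Round(interval)` have type Round,
	// so firstRound + i type-checks without any conversion.
	for i := range Round(interval) {
		fmt.Println(firstRound + i)
	}
}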
- s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval)) + s.addBlock(tx.Txn.Message.LastAttestedRound + basics.Round(proto.StateProofInterval)) lastMessage = tx.Txn.Message } } func verifySha256BlockHeadersCommitments(a *require.Assertions, message stateproofmsg.Message, blocks map[basics.Round]bookkeeping.BlockHeader) { blkHdrArr := make(lightBlockHeaders, message.LastAttestedRound-message.FirstAttestedRound+1) - for i := uint64(0); i < message.LastAttestedRound-message.FirstAttestedRound+1; i++ { - hdr := blocks[basics.Round(message.FirstAttestedRound+i)] + for i := range message.LastAttestedRound - message.FirstAttestedRound + 1 { + hdr := blocks[message.FirstAttestedRound+i] blkHdrArr[i] = hdr.ToLightBlockHeader() } @@ -217,7 +218,7 @@ func TestGenerateBlockProof(t *testing.T) { verifyLightBlockHeaderProof(&tx, &proto, headers, a) - s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval)) + s.addBlock(tx.Txn.Message.LastAttestedRound + basics.Round(proto.StateProofInterval)) lastAttestedRound = basics.Round(tx.Txn.Message.LastAttestedRound) } } @@ -233,7 +234,7 @@ func verifyLightBlockHeaderProof(tx *transactions.SignedTxn, proto *config.Conse lightheader := headers[headerIndex] err = merklearray.VerifyVectorCommitment( tx.Txn.Message.BlockHeadersCommitment, - map[uint64]crypto.Hashable{headerIndex: &lightheader}, + map[uint64]crypto.Hashable{uint64(headerIndex): &lightheader}, proof.ToProof()) a.NoError(err) diff --git a/stateproof/verify/stateproof.go b/stateproof/verify/stateproof.go index b88cef0640..b01cd5c95b 100644 --- a/stateproof/verify/stateproof.go +++ b/stateproof/verify/stateproof.go @@ -171,7 +171,7 @@ func ValidateStateProof(verificationContext *ledgercore.StateProofVerificationCo return err } - err = verifier.Verify(uint64(verificationContext.LastAttestedRound), msg.Hash(), stateProof) + err = verifier.Verify(verificationContext.LastAttestedRound, msg.Hash(), stateProof) if err != nil { return fmt.Errorf("%v: %w", err, errStateProofCrypto) } diff --git a/stateproof/worker_test.go b/stateproof/worker_test.go index fbf4491405..1690e09379 100644 --- a/stateproof/worker_test.go +++ b/stateproof/worker_test.go @@ -616,7 +616,7 @@ func TestWorkerAllSigs(t *testing.T) { verif, err := stateproof.MkVerifier(voters.Tree.Root(), provenWeight, proto.StateProofStrengthTarget) require.NoError(t, err) - err = verif.Verify(uint64(lastAttestedRound), tx.Txn.Message.Hash(), &tx.Txn.StateProof) + err = verif.Verify(lastAttestedRound, tx.Txn.Message.Hash(), &tx.Txn.StateProof) require.NoError(t, err) break } @@ -682,7 +682,7 @@ func TestWorkerPartialSigs(t *testing.T) { verif, err := stateproof.MkVerifier(voters.Tree.Root(), provenWeight, proto.StateProofStrengthTarget) require.NoError(t, err) - err = verif.Verify(uint64(lastAttestedRound), msg.Hash(), &tx.Txn.StateProof) + err = verif.Verify(lastAttestedRound, msg.Hash(), &tx.Txn.StateProof) require.NoError(t, err) } @@ -742,7 +742,7 @@ func TestWorkerRestart(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] s.advanceRoundsWithoutStateProof(t, 1) - lastRound := uint64(0) + lastRound := basics.Round(0) for i := 0; i < expectedStateProofs; i++ { s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2-1) w.Stop() @@ -763,10 +763,10 @@ func TestWorkerRestart(t *testing.T) { // since a state proof txn was created, we update the header with the next state proof round // i.e network has accepted the state proof. 
- s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval)) + s.addBlock(tx.Txn.Message.LastAttestedRound + basics.Round(proto.StateProofInterval)) lastRound = tx.Txn.Message.LastAttestedRound } - a.Equal(uint64(expectedStateProofs+1), lastRound/proto.StateProofInterval) + a.EqualValues(expectedStateProofs+1, lastRound/basics.Round(proto.StateProofInterval)) } func TestWorkerHandleSig(t *testing.T) { @@ -1019,7 +1019,7 @@ func TestWorkersProversCacheAndSignatures(t *testing.T) { a.NoError(err) a.Equal(expectedStateProofs, countDB) - threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) + threshold := onlineProversThreshold(&proto, 512) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) var roundSigs map[basics.Round][]pendingSig err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false) @@ -1755,7 +1755,7 @@ func TestWorkerCacheAndDiskAfterRestart(t *testing.T) { a.NoError(err) a.Equal(expectedStateProofs, countDB) - threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) + threshold := onlineProversThreshold(&proto, 512) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) var roundSigs map[basics.Round][]pendingSig err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false) @@ -1813,7 +1813,7 @@ func TestWorkerInitOnlySignaturesInDatabase(t *testing.T) { a.NoError(err) a.Equal(expectedStateProofs, countDB) - threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) + threshold := onlineProversThreshold(&proto, 512) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256) var roundSigs map[basics.Round][]pendingSig err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false) diff --git a/test/commandandcontrol/cc_client/main.go b/test/commandandcontrol/cc_client/main.go index 7e3e30d5ee..731d51458d 100644 --- a/test/commandandcontrol/cc_client/main.go +++ b/test/commandandcontrol/cc_client/main.go @@ -78,9 +78,9 @@ func main() { } defer func() { log.Infof("closing service connection: %s", serverWs.RemoteAddr()) - err := serverWs.Close() - if err != nil { - log.Fatalf("error closing service websocket %v", err) + err1 := serverWs.Close() + if err1 != nil { + log.Fatalf("error closing service websocket %v", err1) } }() serverWs.Unsafe = true diff --git a/test/e2e-go/cli/goal/account_test.go b/test/e2e-go/cli/goal/account_test.go index 6d27f45bd5..a289d03196 100644 --- a/test/e2e-go/cli/goal/account_test.go +++ b/test/e2e-go/cli/goal/account_test.go @@ -56,7 +56,7 @@ func TestAccountNew(t *testing.T) { a.True(matched, "Account list should contain the account we just created") } -func TestAccountNewDuplicateFails(t *testing.T) { +func TestAccountNewDuplicateErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/cli/goal/clerk_test.go 
b/test/e2e-go/cli/goal/clerk_test.go index cf83ec47b1..427fc6f309 100644 --- a/test/e2e-go/cli/goal/clerk_test.go +++ b/test/e2e-go/cli/goal/clerk_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/test/framework/fixtures" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -66,7 +67,7 @@ func TestClerkSendNoteEncoding(t *testing.T) { var foundTx1, foundTx2 bool const maxRetry = 10 - for i := uint64(0); i < maxRetry && (!foundTx1 || !foundTx2); i++ { + for i := basics.Round(0); i < maxRetry && (!foundTx1 || !foundTx2); i++ { if !foundTx1 { tx1, err := fixture.WaitForConfirmedTxn(status.LastRound+i, txID) if err == nil { diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go index 1315fd041e..ea0e69d455 100644 --- a/test/e2e-go/features/accountPerf/sixMillion_test.go +++ b/test/e2e-go/features/accountPerf/sixMillion_test.go @@ -138,7 +138,7 @@ func getAccountInformation( func getAccountApplicationInformation( fixture *fixtures.RestClientFixture, address string, - appID uint64) (appInfo model.AccountApplicationResponse, err error) { + appID basics.AppIndex) (appInfo model.AccountApplicationResponse, err error) { appInfo, err = fixture.AlgodClient.AccountApplicationInformation(address, appID) return @@ -243,7 +243,7 @@ func test5MAssets(t *testing.T, scenario int) { require.NoError(t, err) var genesisHash crypto.Digest copy(genesisHash[:], suggestedParams.GenesisHash) - tLife := config.Consensus[protocol.ConsensusVersion(suggestedParams.ConsensusVersion)].MaxTxnLife + tLife := basics.Round(config.Consensus[protocol.ConsensusVersion(suggestedParams.ConsensusVersion)].MaxTxnLife) // fund the non-wallet base account ba := generateKeys(1) @@ -339,11 +339,11 @@ func generateKeys(numAccounts int) (keys []psKey) { // prepares a send algo transaction func sendAlgoTransaction( t *testing.T, - round uint64, + round basics.Round, sender basics.Address, receiver basics.Address, amount uint64, - tLife uint64, + tLife basics.Round, genesisHash crypto.Digest) (txn transactions.Transaction) { txn = transactions.Transaction{ @@ -351,8 +351,8 @@ func sendAlgoTransaction( Header: transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + tLife), + FirstValid: round, + LastValid: round + tLife, GenesisHash: genesisHash, }, PaymentTxnFields: transactions.PaymentTxnFields{ @@ -367,9 +367,9 @@ func sendAlgoTransaction( func createAssetTransaction( t *testing.T, counter uint64, - round uint64, + round basics.Round, sender basics.Address, - tLife uint64, + tLife basics.Round, amount uint64, genesisHash crypto.Digest) (assetTx transactions.Transaction) { @@ -380,8 +380,8 @@ func createAssetTransaction( Header: transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + tLife), + FirstValid: round, + LastValid: round + tLife, GenesisHash: genesisHash, Note: note, }, @@ -399,9 +399,9 @@ func createAssetTransaction( // prepares a send asset transaction func sendAssetTransaction( t *testing.T, - round uint64, + round basics.Round, sender basics.Address, - tLife uint64, + tLife basics.Round, genesisHash crypto.Digest, assetID basics.AssetIndex, receiver basics.Address, @@ -442,7 
+442,7 @@ func scenarioA( genesisHash crypto.Digest, txnChan chan<- *txnKey, txnGrpChan chan<- []txnKey, - tLife uint64, + tLife basics.Round, stopChan <-chan struct{}, log logging.Logger) { @@ -460,7 +460,7 @@ func scenarioA( close(txnGrpChan) }() - firstValid := uint64(2) + firstValid := basics.Round(2) counter := uint64(0) txnGroup := make([]txnKey, 0, maxTxGroupSize) var err error @@ -543,7 +543,7 @@ func scenarioA( ownAllAccount.pk, tLife, genesisHash, - basics.AssetIndex(asset.AssetID), + asset.AssetID, ownAllAccount.pk, uint64(0)) @@ -582,7 +582,7 @@ func scenarioA( nacc.pk, tLife, genesisHash, - basics.AssetIndex(asset.AssetID), + asset.AssetID, ownAllAccount.pk, asset.Amount) counter, txnGroup = queueTransaction(nacc.sk, assSend, txnChan, txnGrpChan, counter, txnGroup) @@ -630,11 +630,11 @@ func scenarioB( genesisHash crypto.Digest, txnChan chan<- *txnKey, txnGrpChan chan<- []txnKey, - tLife uint64, + tLife basics.Round, stopChan <-chan struct{}, log logging.Logger) { - numberOfAssets := uint64(targetCreateableCount) // 6M + const numberOfAssets = targetCreateableCount // 6M totalAssetAmount := uint64(0) defer func() { @@ -642,7 +642,7 @@ func scenarioB( close(txnGrpChan) }() - firstValid := uint64(2) + firstValid := basics.Round(2) counter := uint64(0) txnGroup := make([]txnKey, 0, maxTxGroupSize) var err error @@ -676,8 +676,8 @@ func scenarioB( info, err := fixture.AlgodClient.AccountInformation(baseAcct.pk.String(), false) require.NoError(t, err) - require.Equal(t, numberOfAssets, info.TotalAssetsOptedIn) - require.Equal(t, numberOfAssets, info.TotalCreatedAssets) + require.EqualValues(t, numberOfAssets, info.TotalAssetsOptedIn) + require.EqualValues(t, numberOfAssets, info.TotalCreatedAssets) log.Infof("Verifying assets...") // Verify the assets are transferred here @@ -685,7 +685,7 @@ func scenarioB( counter = 0 // this loop iterates over all the range of potential assets, tries to confirm all of them. // many of these are expected to be non-existing. 
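The switch from require.Equal to require.EqualValues follows from the typing change: testify's Equal reports a failure when the two operands have different types, while EqualValues converts before comparing. With numberOfAssets now an untyped constant (default type int) compared against uint64 response fields, EqualValues is the variant that still passes. A small illustrative test, assuming only the testify require package:

package demo

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEqualVsEqualValues(t *testing.T) {
	const expected = 6 // untyped constant, defaults to int
	var actual uint64 = 6

	require.EqualValues(t, expected, actual) // passes: values match after conversion
	// require.Equal(t, expected, actual)    // would fail: int vs uint64
}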
- startIdx := uint64(1000) // tx counter starts from 1000 + startIdx := basics.AssetIndex(1000) // tx counter starts from 1000 for aid := startIdx; counter < numberOfAssets && aid < 2*startIdx*numberOfAssets; aid++ { select { case <-stopChan: @@ -716,7 +716,7 @@ func scenarioC( genesisHash crypto.Digest, txnChan chan<- *txnKey, txnGrpChan chan<- []txnKey, - tLife uint64, + tLife basics.Round, stopChan <-chan struct{}, log logging.Logger) { @@ -732,7 +732,7 @@ func scenarioC( close(txnGrpChan) }() - firstValid := uint64(2) + firstValid := basics.Round(2) counter := uint64(0) txnGroup := make([]txnKey, 0, maxTxGroupSize) var err error @@ -805,7 +805,7 @@ func scenarioC( require.Fail(t, "Test errored") default: } - optInTx := makeOptInAppTransaction(t, client, basics.AppIndex(app.Id), firstValid, ownAllAccount.pk, tLife, genesisHash) + optInTx := makeOptInAppTransaction(t, client, app.Id, firstValid, ownAllAccount.pk, tLife, genesisHash) counter, txnGroup = queueTransaction(ownAllAccount.sk, optInTx, txnChan, txnGrpChan, counter, txnGroup) counter, firstValid, err = checkPoint(counter, firstValid, tLife, false, fixture, log) @@ -854,7 +854,7 @@ func scenarioC( require.Fail(t, "Test errored") default: } - optInTx := callAppTransaction(t, client, basics.AppIndex(app.Id), firstValid, ownAllAccount.pk, tLife, genesisHash) + optInTx := callAppTransaction(t, client, app.Id, firstValid, ownAllAccount.pk, tLife, genesisHash) counter, txnGroup = queueTransaction(ownAllAccount.sk, optInTx, txnChan, txnGrpChan, counter, txnGroup) counter, firstValid, err = checkPoint(counter, firstValid, tLife, false, fixture, log) @@ -896,18 +896,18 @@ func scenarioD( genesisHash crypto.Digest, txnChan chan<- *txnKey, txnGrpChan chan<- []txnKey, - tLife uint64, + tLife basics.Round, stopChan <-chan struct{}, log logging.Logger) { client := fixture.LibGoalClient - numberOfApps := uint64(targetCreateableCount) // 6M + const numberOfApps = targetCreateableCount // 6M defer func() { close(txnChan) close(txnGrpChan) }() - firstValid := uint64(2) + firstValid := basics.Round(2) counter := uint64(0) txnGroup := make([]txnKey, 0, maxTxGroupSize) var err error @@ -940,7 +940,7 @@ func scenarioD( // check the results in parallel parallelCheckers := numberOfGoRoutines - checkAppChan := make(chan uint64, parallelCheckers) + checkAppChan := make(chan basics.AppIndex, parallelCheckers) checkResChan := make(chan uint64, parallelCheckers) var wg sync.WaitGroup var globalStateCheckMu deadlock.Mutex @@ -979,7 +979,7 @@ func scenarioD( checked := uint64(0) passed := uint64(0) lastPrint := uint64(0) - for i := uint64(0); checked < numberOfApps; { + for i := basics.AppIndex(0); checked < numberOfApps; { select { case <-stopChan: require.Fail(t, "Test errored") @@ -999,7 +999,7 @@ func scenarioD( close(checkAppChan) wg.Wait() - require.Equal(t, numberOfApps, passed) + require.EqualValues(t, numberOfApps, passed) for _, x := range globalStateCheck { require.True(t, x) } @@ -1018,9 +1018,9 @@ func handleError(err error, message string, errChan chan<- error) { } // handle the counters to prepare and send transactions in batches of MaxTxnLife transactions -func checkPoint(counter, firstValid, tLife uint64, force bool, fixture *fixtures.RestClientFixture, log logging.Logger) (newCounter, nextFirstValid uint64, err error) { - lastRound := firstValid + counter - 1 - if force || counter == tLife { +func checkPoint(counter uint64, firstValid basics.Round, tLife basics.Round, force bool, fixture *fixtures.RestClientFixture, log logging.Logger) 
(newcounter uint64, nextFirstValid basics.Round, err error) { + lastRound := firstValid + basics.Round(counter) - 1 + if force || basics.Round(counter) == tLife { if verbose { fmt.Printf("Waiting for round %d...", int(lastRound)) } @@ -1035,7 +1035,7 @@ func checkPoint(counter, firstValid, tLife uint64, force bool, fixture *fixtures // signs and broadcasts a single transaction func signAndBroadcastTransaction( - round uint64, + round basics.Round, txn *transactions.Transaction, client libgoal.Client, fixture *fixtures.RestClientFixture) error { @@ -1109,9 +1109,9 @@ func makeAppTransaction( t *testing.T, client libgoal.Client, counter uint64, - round uint64, + round basics.Round, sender basics.Address, - tLife uint64, + tLife basics.Round, setCounterInProg bool, genesisHash crypto.Digest) (appTx transactions.Transaction) { @@ -1163,8 +1163,8 @@ int 1 appTx.Header = transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + tLife), + FirstValid: round, + LastValid: round + tLife, GenesisHash: genesisHash, Note: note, } @@ -1176,19 +1176,19 @@ func makeOptInAppTransaction( t *testing.T, client libgoal.Client, appIdx basics.AppIndex, - round uint64, + round basics.Round, sender basics.Address, - tLife uint64, + tLife basics.Round, genesisHash crypto.Digest) (appTx transactions.Transaction) { - appTx, err := client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + appTx, err := client.MakeUnsignedAppOptInTx(appIdx, nil, nil, nil, nil, nil, 0) require.NoError(t, err) appTx.Header = transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + tLife), + FirstValid: round, + LastValid: round + tLife, GenesisHash: genesisHash, } return @@ -1241,15 +1241,15 @@ func createAccounts( fixture *fixtures.RestClientFixture, numberOfAccounts uint64, baseAcct psKey, - firstValid uint64, + firstValid basics.Round, balance uint64, counter uint64, - tLife uint64, + tLife basics.Round, genesisHash crypto.Digest, txnChan chan<- *txnKey, txnGrpChan chan<- []txnKey, stopChan <-chan struct{}, - log logging.Logger) (newFirstValid uint64, newCounter uint64, keys []psKey) { + log logging.Logger) (newFirstValid basics.Round, newcounter uint64, keys []psKey) { log.Infof("Creating accounts...") @@ -1282,19 +1282,19 @@ func callAppTransaction( t *testing.T, client libgoal.Client, appIdx basics.AppIndex, - round uint64, + round basics.Round, sender basics.Address, - tLife uint64, + tLife basics.Round, genesisHash crypto.Digest) (appTx transactions.Transaction) { - appTx, err := client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + appTx, err := client.MakeUnsignedAppNoOpTx(appIdx, nil, nil, nil, nil, nil, 0) require.NoError(t, err) appTx.Header = transactions.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + tLife), + FirstValid: round, + LastValid: round + tLife, GenesisHash: genesisHash, } return diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 1944f8cfad..b0422a9790 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -56,8 +56,8 @@ func 
TestBasicCatchup(t *testing.T) { a.NoError(err) // Let the network make some progress - waitForRound := uint64(3) - err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(3) + const waitForRound = 3 + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Now spin up third node @@ -153,7 +153,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) // Let the secondary make progress up to round 3, while the primary was never startred ( hence, it's on round = 0) - waitForRound := uint64(3) + const waitForRound = 3 err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) @@ -256,7 +256,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { // Let the network make some progress a.NoError(err) - waitForRound := uint64(3) // UpgradeVoteRounds + DefaultUpgradeWaitRounds + const waitForRound = 3 // UpgradeVoteRounds + DefaultUpgradeWaitRounds err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) @@ -364,7 +364,7 @@ func TestBasicCatchupCompletes(t *testing.T) { // Let the network make some progress. // Make it long enough so the catchup to it is longer than a single round agreement a.NoError(err) - waitForRound := uint64(100) + const waitForRound = 100 // Now prepare a third node cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone") diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 0a4aa1331b..5e7ec6d14b 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -52,7 +52,7 @@ import ( const basicTestCatchpointInterval = 4 func waitForCatchpointGeneration(t *testing.T, fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) string { - err := client.WaitForRoundWithTimeout(uint64(catchpointRound + 1)) + err := client.WaitForRoundWithTimeout(catchpointRound + 1) if err != nil { return "" } @@ -285,7 +285,7 @@ func getFixture(consensusParams *config.ConsensusParams) *fixtures.RestClientFix return &fixture } -func TestCatchpointCatchupFailure(t *testing.T) { +func TestCatchpointCatchupErr(t *testing.T) { // Overview of this test: // Start a two-node network (primary has 100%, using has 0%) // create a web proxy, have the using node use it as a peer, blocking all requests for round #2. 
( and allowing everything else ) @@ -371,11 +371,11 @@ func TestBasicCatchpointCatchup(t *testing.T) { _, err = usingNodeRestClient.Catchup(catchpointLabel, 0) a.NoError(err) - err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(targetCatchpointRound + 1) a.NoError(err) // ensure the raw block can be downloaded (including cert) - _, err = usingNodeRestClient.RawBlock(uint64(targetCatchpointRound)) + _, err = usingNodeRestClient.RawBlock(targetCatchpointRound) a.NoError(err) } @@ -439,8 +439,8 @@ func TestCatchpointLabelGeneration(t *testing.T) { defer primaryNode.StopAlgod() // Let the network make some progress - currentRound := uint64(1) - targetRound := uint64(21) + currentRound := basics.Round(1) + targetRound := basics.Round(21) primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) log.Infof("Building ledger history..") for { @@ -844,7 +844,7 @@ func TestReadyEndpoint(t *testing.T) { status1, err = client1.Status() a.NoError(err) - a.Equal(status1.CatchupTime, uint64(0)) + a.Equal(status1.CatchupTime, int64(0)) a.Empty(status1.Catchpoint) break } diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index 23390413ec..9aad0231dc 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -45,7 +45,7 @@ func applyCatchpointStateProofConsensusChanges(consensusParams *config.Consensus } func getStateProofNextRound(a *require.Assertions, goalClient *libgoal.Client, round basics.Round) basics.Round { - block, err := goalClient.BookkeepingBlock(uint64(round)) + block, err := goalClient.BookkeepingBlock(round) a.NoError(err) return block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound } @@ -119,7 +119,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) { } // wait for fastcatchup to complete and the node is synced - err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(targetCatchpointRound + 1) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -182,7 +182,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { roundAfterSPGeneration := targetCatchpointRound.RoundUpToMultipleOf(basics.Round(consensusParams.StateProofInterval)) + basics.Round(consensusParams.StateProofInterval/2) - err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(roundAfterSPGeneration)) + err = usingNodeRestClient.WaitForRoundWithTimeout(roundAfterSPGeneration) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -279,7 +279,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { a.NotEmpty(chunks) validateCatchpointChunks(t, a, chunks, consensusParams) - err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound) + 1) + err = usingNodeRestClient.WaitForRoundWithTimeout(targetCatchpointRound + 1) a.NoError(err) lastNormalRound, err := fixture.GetLibGoalClientFromNodeController(normalNode).CurrentRound() @@ -292,7 +292,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { lastNormalNodeSignedRound := basics.Round(lastNormalRound).RoundDownToMultipleOf(basics.Round(consensusParams.StateProofInterval)) lastNormalNextStateProofRound := lastNormalNodeSignedRound + basics.Round(consensusParams.StateProofInterval) targetRound := lastNormalNextStateProofRound + 
basics.Round(consensusParams.StateProofInterval*2) - err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetRound)) + err = usingNodeRestClient.WaitForRoundWithTimeout(targetRound) a.NoError(err) primaryClient := fixture.GetLibGoalClientFromNodeController(primaryNode) diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go index d751cbc7e5..9eb3866972 100644 --- a/test/e2e-go/features/devmode/devmode_test.go +++ b/test/e2e-go/features/devmode/devmode_test.go @@ -63,11 +63,11 @@ func testDevMode(t *testing.T, version protocol.ConsensusVersion) { require.Equal(t, blkOffset, resp.Offset) // 2 transactions should be sent within one normal confirmation time. - for i := uint64(0); i < 2; i++ { + for i := basics.Round(0); i < 2; i++ { round := firstRound + i txn = fixture.SendMoneyAndWait(round, 100001, 1000, sender.Address, receiver.String(), "") // SendMoneyAndWait subtracts 1 from firstValid - require.Equal(t, round-1, uint64(txn.Txn.Txn.FirstValid)) + require.Equal(t, round-1, txn.Txn.Txn.FirstValid) newBlk, err := fixture.AlgodClient.Block(round) require.NoError(t, err) newBlkSeconds := newBlk.Block.TimeStamp @@ -103,7 +103,7 @@ func testTxnGroupDeltasDevMode(t *testing.T, version protocol.ConsensusVersion) fundingTx, err := fixture.LibGoalClient.SendPaymentFromWalletWithLease(wh, nil, sender.Address, receiver.String(), 1000, 100000, nil, "", [32]byte{1, 2, 3}, basics.Round(curRound).SubSaturate(1), 0) require.NoError(t, err) - txn, err := fixture.WaitForConfirmedTxn(curRound+uint64(5), fundingTx.ID().String()) + txn, err := fixture.WaitForConfirmedTxn(curRound+5, fundingTx.ID().String()) require.NoError(t, err) require.NotNil(t, txn.ConfirmedRound) _, err = fixture.AlgodClient.Block(*txn.ConfirmedRound) diff --git a/test/e2e-go/features/followernode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go index 5bb4ff2991..0a76b26310 100644 --- a/test/e2e-go/features/followernode/syncDeltas_test.go +++ b/test/e2e-go/features/followernode/syncDeltas_test.go @@ -73,7 +73,7 @@ func TestBasicSyncMode(t *testing.T) { require.LessOrEqual(t, *txn.ConfirmedRound, uint64(5), "Transaction should be confirmed in the first 5 rounds") // Let the network make some progress - waitForRound := uint64(5) + const waitForRound = 5 err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) @@ -82,7 +82,7 @@ func TestBasicSyncMode(t *testing.T) { a.NoError(err) followClient := fixture.GetAlgodClientForController(followControl) // Now, catch up round by round, retrieving state deltas for each - for round := uint64(1); round <= waitForRound; round++ { + for round := basics.Round(1); round <= waitForRound; round++ { // assert sync round set rResp, err := followClient.GetSyncRound() a.NoError(err) diff --git a/test/e2e-go/features/followernode/syncRestart_test.go b/test/e2e-go/features/followernode/syncRestart_test.go index eaf9c3f59e..911e48593e 100644 --- a/test/e2e-go/features/followernode/syncRestart_test.go +++ b/test/e2e-go/features/followernode/syncRestart_test.go @@ -24,6 +24,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/client" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/test/framework/fixtures" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -59,7 +60,7 @@ func TestSyncRestart(t *testing.T) { a.NoError(err) a.Equal(uint64(2), cfg.MaxAcctLookback) - waitTill := func(node string, round 
uint64) { + waitTill := func(node string, round basics.Round) { controller, err := fixture.GetNodeController(node) a.NoError(err) err = fixture.GetAlgodClientForController(controller).WaitForRoundWithTimeout(round) @@ -73,54 +74,54 @@ func TestSyncRestart(t *testing.T) { return algod } - getRound := func(node string) uint64 { + getRound := func(node string) basics.Round { algod := getAlgod(node) status, err := algod.Status() a.NoError(err) return status.LastRound } - getSyncRound := func() uint64 { + getSyncRound := func() basics.Round { followClient := getAlgod("Follower") rResp, err := followClient.GetSyncRound() a.NoError(err) return rResp.Round } - a.Equal(uint64(1), getSyncRound()) + a.EqualValues(1, getSyncRound()) waitTill("Primary", 3) // with a max account lookback of 2, and the sync round at 1, // the follower cannot advance past round 2 = 1 - 1 + 2 waitTill("Follower", 2) a.LessOrEqual(uint64(3), getRound("Primary")) - a.Equal(uint64(2), getRound("Follower")) - a.Equal(uint64(1), getSyncRound()) + a.EqualValues(2, getRound("Follower")) + a.EqualValues(1, getSyncRound()) /** restart the network **/ fixture.ShutdownImpl(true) fixture.Start() a.LessOrEqual(uint64(3), getRound("Primary")) - a.Equal(uint64(1), getSyncRound()) - a.Equal(uint64(2), getRound("Follower")) + a.EqualValues(1, getSyncRound()) + a.EqualValues(2, getRound("Follower")) waitTill("Primary", 6) followerClient := getAlgod("Follower") - err = followerClient.SetSyncRound(uint64(3)) + err = followerClient.SetSyncRound(3) a.NoError(err) - a.Equal(uint64(3), getSyncRound()) + a.EqualValues(3, getSyncRound()) // with a max account lookback of 2, and the sync round at 3, // the follower cannot advance past round 4 = 3 - 1 + 2 waitTill("Follower", 4) - a.LessOrEqual(uint64(6), getRound("Primary")) - a.Equal(uint64(4), getRound("Follower")) - a.Equal(uint64(3), getSyncRound()) + a.LessOrEqual(basics.Round(6), getRound("Primary")) + a.EqualValues(4, getRound("Follower")) + a.EqualValues(3, getSyncRound()) fixture.ShutdownImpl(true) fixture.Start() - a.LessOrEqual(uint64(6), getRound("Primary")) - a.Equal(uint64(4), getRound("Follower")) - a.Equal(uint64(3), getSyncRound()) + a.LessOrEqual(basics.Round(6), getRound("Primary")) + a.EqualValues(4, getRound("Follower")) + a.EqualValues(3, getSyncRound()) } diff --git a/test/e2e-go/features/incentives/payouts_test.go b/test/e2e-go/features/incentives/payouts_test.go index 8f3dde1297..1a437f3555 100644 --- a/test/e2e-go/features/incentives/payouts_test.go +++ b/test/e2e-go/features/incentives/payouts_test.go @@ -89,7 +89,7 @@ func TestBasicPayouts(t *testing.T) { txn, err := c01.SendPaymentFromUnencryptedWallet(account01.Address, basics.Address{}.String(), 1000, 60_000_000_000_000, nil) a.NoError(err) - burn, err := fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + burn, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) a.NoError(err) burnRound := *burn.ConfirmedRound t.Logf("burn round is %d", burnRound) @@ -139,7 +139,7 @@ func TestBasicPayouts(t *testing.T) { a.LessOrEqual(int(status.LastRound), int(next.LastProposed)) switch block.Proposer().String() { case account01.Address: - if uint64(block.Round()) < burnRound+lookback { + if block.Round() < burnRound+lookback { // until the burn is lookback rounds old, account01 can't earn a.Zero(block.ProposerPayout()) a.Equal(data01.MicroAlgos, next.MicroAlgos) @@ -178,7 +178,7 @@ func TestBasicPayouts(t *testing.T) { a.NoError(err) offlineTxID, err := c15.SignAndBroadcastTransaction(wh, nil, 
offline) a.NoError(err) - offTxn, err := fixture.WaitForConfirmedTxn(uint64(offline.LastValid), offlineTxID) + offTxn, err := fixture.WaitForConfirmedTxn(offline.LastValid, offlineTxID) a.NoError(err) t.Logf(" c15 (%s) will be truly offline (not proposing) after round %d\n", @@ -235,7 +235,7 @@ func TestBasicPayouts(t *testing.T) { // put 50 algos back into the feesink, show it pays out again txn, err = c01.SendPaymentFromUnencryptedWallet(account01.Address, feesink.String(), 1000, 50_000_000, nil) a.NoError(err) - refill, err := fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + refill, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) fmt.Printf("refilled fee sink in %d\n", *refill.ConfirmedRound) a.NoError(err) block, err := client.BookkeepingBlock(*refill.ConfirmedRound) @@ -256,7 +256,7 @@ func TestBasicPayouts(t *testing.T) { junk := basics.Address{0x01, 0x01}.String() txn, err = c01.SendPaymentFromWallet(wh, nil, account01.Address, junk, 1000, 0, nil, junk /* close to */, 0, 0) a.NoError(err) - close, err := fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + close, err := fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) a.NoError(err) fmt.Printf("closed c01 in %d\n", *close.ConfirmedRound) block, err = client.BookkeepingBlock(*close.ConfirmedRound) @@ -289,7 +289,7 @@ func TestBasicPayouts(t *testing.T) { // getblock waits for the given block because we use when we might be talking to // a client that is behind the network (since it has low stake) -func getblock(client libgoal.Client, round uint64) (bookkeeping.Block, error) { +func getblock(client libgoal.Client, round basics.Round) (bookkeeping.Block, error) { if _, err := client.WaitForRound(round); err != nil { return bookkeeping.Block{}, err } @@ -329,7 +329,7 @@ func rekeyreg(a *require.Assertions, client libgoal.Client, address string, beco a.NoError(err) onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, reReg) a.NoError(err) - txn, err := client.WaitForConfirmedTxn(uint64(reReg.LastValid), onlineTxID) + txn, err := client.WaitForConfirmedTxn(reReg.LastValid, onlineTxID) a.NoError(err) // sync up with the network _, err = client.WaitForRound(*txn.ConfirmedRound) diff --git a/test/e2e-go/features/incentives/whalejoin_test.go b/test/e2e-go/features/incentives/whalejoin_test.go index 4c0f200942..7c7ae400d9 100644 --- a/test/e2e-go/features/incentives/whalejoin_test.go +++ b/test/e2e-go/features/incentives/whalejoin_test.go @@ -78,7 +78,7 @@ func TestWhaleJoin(t *testing.T) { burn, err := c01.SendPaymentFromUnencryptedWallet(account01.Address, basics.Address{}.String(), 1000, 99_900_000_000_000, nil) a.NoError(err) - receipt, err := fixture.WaitForConfirmedTxn(uint64(burn.LastValid), burn.ID().String()) + receipt, err := fixture.WaitForConfirmedTxn(burn.LastValid, burn.ID().String()) a.NoError(err) // 3. 
Wait lookback rounds @@ -100,7 +100,7 @@ func TestWhaleJoin(t *testing.T) { txn, err := c15.SendPaymentFromUnencryptedWallet(account15.Address, basics.Address{}.String(), 1000, 1, nil) a.NoError(err) - _, err = fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + _, err = fixture.WaitForConfirmedTxn(txn.LastValid, txn.ID().String()) a.NoError(err) data, err = c15.AccountData(account15.Address) a.NoError(err) @@ -244,7 +244,7 @@ func TestBigIncrease(t *testing.T) { a.True(data.IncentiveEligible) } -func wait(f *fixtures.RestClientFixture, a *require.Assertions, count uint64) { +func wait(f *fixtures.RestClientFixture, a *require.Assertions, count basics.Round) { res, err := f.AlgodClient.Status() a.NoError(err) round := res.LastRound + count @@ -255,7 +255,7 @@ func pay(a *require.Assertions, c libgoal.Client, from string, to string, amount uint64) v2.PreEncodedTxInfo { pay, err := c.SendPaymentFromUnencryptedWallet(from, to, 1000, amount, nil) a.NoError(err) - tx, err := c.WaitForConfirmedTxn(uint64(pay.LastValid), pay.ID().String()) + tx, err := c.WaitForConfirmedTxn(pay.LastValid, pay.ID().String()) a.NoError(err) return tx } @@ -284,7 +284,7 @@ func offline(a *require.Assertions, client libgoal.Client, address string) trans a.NoError(err) onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, offTx) a.NoError(err) - txn, err := client.WaitForConfirmedTxn(uint64(offTx.LastValid), onlineTxID) + txn, err := client.WaitForConfirmedTxn(offTx.LastValid, onlineTxID) a.NoError(err) // sync up with the network _, err = client.WaitForRound(*txn.ConfirmedRound) @@ -296,7 +296,7 @@ func offline(a *require.Assertions, client libgoal.Client, address string) trans } // Go online with the supplied key material -func online(a *require.Assertions, client libgoal.Client, address string, keys transactions.KeyregTxnFields) uint64 { +func online(a *require.Assertions, client libgoal.Client, address string, keys transactions.KeyregTxnFields) basics.Round { // sanity check that we start offline data, err := client.AccountData(address) a.NoError(err) @@ -311,7 +311,7 @@ func online(a *require.Assertions, client libgoal.Client, address string, keys t a.NoError(err) onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, onTx) a.NoError(err) - receipt, err := client.WaitForConfirmedTxn(uint64(onTx.LastValid), onlineTxID) + receipt, err := client.WaitForConfirmedTxn(onTx.LastValid, onlineTxID) a.NoError(err) data, err = client.AccountData(address) a.NoError(err) diff --git a/test/e2e-go/features/multisig/multisig_test.go b/test/e2e-go/features/multisig/multisig_test.go index 4e4e83c45a..a04ff487a0 100644 --- a/test/e2e-go/features/multisig/multisig_test.go +++ b/test/e2e-go/features/multisig/multisig_test.go @@ -91,7 +91,7 @@ func TestBasicMultisig(t *testing.T) { txid, err := client.BroadcastTransaction(signedTransactionWithTwo) r.NoError(err, "Trying to broadcast 2-of-3 multisig with 2 sig should not cause error") curStatus, _ = client.Status() - r.True(fixture.WaitForTxnConfirmation(curStatus.LastRound+uint64(5), txid)) + r.True(fixture.WaitForTxnConfirmation(curStatus.LastRound+5, txid)) // Need a new txid to avoid dup detection unsignedTransaction, err = client.ConstructPayment(multisigAddr, addrs[0], minTxnFee, amountToSend, []byte("foobar"), "", [32]byte{}, 0, 0) diff --git a/test/e2e-go/features/p2p/p2p_basic_test.go b/test/e2e-go/features/p2p/p2p_basic_test.go index 605a136e7a..10da4632cd 100644 --- a/test/e2e-go/features/p2p/p2p_basic_test.go +++ 
b/test/e2e-go/features/p2p/p2p_basic_test.go @@ -110,10 +110,10 @@ func TestP2PTwoNodes(t *testing.T) { curRound := curStatus.LastRound fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pongClient.DataDir())) - confirmed := fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses) - require.True(t, confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5)) - confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses) - require.True(t, confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5)) + confirmed := fixture.WaitForAllTxnsToConfirm(curRound+5, pingTxidsToAddresses) + require.True(t, confirmed, "failed to see confirmed ping transaction by round %v", curRound+5) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+5, pongTxidsToAddresses) + require.True(t, confirmed, "failed to see confirmed pong transaction by round %v", curRound+5) pingBalance, err = pongClient.GetBalance(pingAccount) require.NoError(t, err) diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go index fe99b6077a..bc7d920bef 100644 --- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go +++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go @@ -32,6 +32,7 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/libgoal" "github.com/algorand/go-algorand/libgoal/participation" "github.com/algorand/go-algorand/test/framework/fixtures" @@ -39,7 +40,7 @@ import ( ) // installParticipationKey generates a new key for a given account and installs it with the client. -func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp model.PostParticipationResponse, part account.Participation, err error) { +func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid basics.Round) (resp model.PostParticipationResponse, part account.Participation, err error) { // Install overlapping participation keys... 
installFunc := func(keyPath string) error { return errors.New("the install directory is provided, so keys should not be installed") @@ -79,14 +80,14 @@ func TestKeyRegistration(t *testing.T) { t.Skip() } - checkKey := func(key model.ParticipationKey, firstValid, lastValid, lastProposal uint64, msg string) { + checkKey := func(key model.ParticipationKey, firstValid, lastValid, lastProposal basics.Round, msg string) { require.NotNil(t, key.EffectiveFirstValid, fmt.Sprintf("%s.EffectiveFirstValid", msg)) require.NotNil(t, key.EffectiveLastValid, fmt.Sprintf("%s.EffectiveLastValid", msg)) require.NotNil(t, key.LastBlockProposal, fmt.Sprintf("%s.LastBlockProposal", msg)) - assert.Equal(t, int(*(key.EffectiveFirstValid)), int(firstValid), fmt.Sprintf("%s.EffectiveFirstValid", msg)) - assert.Equal(t, int(*(key.EffectiveLastValid)), int(lastValid), fmt.Sprintf("%s.EffectiveLastValid", msg)) - assert.Equal(t, int(*(key.LastBlockProposal)), int(lastProposal), fmt.Sprintf("%s.LastBlockProposal", msg)) + assert.Equal(t, *(key.EffectiveFirstValid), firstValid, fmt.Sprintf("%s.EffectiveFirstValid", msg)) + assert.Equal(t, *(key.EffectiveLastValid), lastValid, fmt.Sprintf("%s.EffectiveLastValid", msg)) + assert.Equal(t, *(key.LastBlockProposal), lastProposal, fmt.Sprintf("%s.LastBlockProposal", msg)) } // Start devmode network and initialize things for the test. @@ -102,10 +103,10 @@ func TestKeyRegistration(t *testing.T) { sAccount := accountResponse.Address // Add an overlapping participation keys for the account on round 1 and 2 - last := uint64(3_000) + last := basics.Round(3_000) numNew := 2 - for i := 0; i < numNew; i++ { - response, part, err := installParticipationKey(t, sClient, sAccount, 0, last+uint64(i)) + for i := range basics.Round(numNew) { + response, part, err := installParticipationKey(t, sClient, sAccount, 0, last+i) require.NoError(t, err) require.NotNil(t, response) registerParticipationAndWait(t, sClient, part) @@ -119,8 +120,8 @@ func TestKeyRegistration(t *testing.T) { // Zip ahead MaxBalLookback. 
params, err := fixture.CurrentConsensusParams() require.NoError(t, err) - lookback := params.MaxBalLookback - for i := uint64(1); i < lookback; i++ { + lookback := basics.Round(params.MaxBalLookback) + for i := basics.Round(1); i < lookback; i++ { fixture.SendMoneyAndWait(2+i, 0, minTxnFee, sAccount, sAccount, "") } diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go index 7bd3a1fd3f..21193d120f 100644 --- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go +++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go @@ -174,11 +174,11 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { a.Equal(amountToSendInitial, amt, "new account should be funded with the amount the rich account sent") // account adds part key - partKeyFirstValid := uint64(0) - partKeyValidityPeriod := uint64(10000) + const partKeyFirstValid basics.Round = 0 + const partKeyValidityPeriod = 10000 partKeyLastValid := partKeyFirstValid + partKeyValidityPeriod - maxTxnLife := consensus[protocol.ConsensusVersion("shortpartkeysprotocol")].MaxTxnLife + maxTxnLife := basics.Round(consensus[protocol.ConsensusVersion("shortpartkeysprotocol")].MaxTxnLife) if partKeyLastValid > maxTxnLife { partKeyLastValid = maxTxnLife @@ -195,7 +195,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { a.NoError(err, "new account with new partkey should be able to go online") fixture.AssertValidTxid(onlineTxID) - maxRoundsToWaitForTxnConfirm := uint64(5) + const maxRoundsToWaitForTxnConfirm = 5 fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, onlineTxID) nodeStatus, _ = client.Status() onlineRound := nodeStatus.LastRound @@ -212,24 +212,24 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { nodeStatus, _ = client.Status() params, err := client.ConsensusParams(nodeStatus.LastRound) a.NoError(err) - accountProposesStarting := balanceRoundOf(basics.Round(fundedRound), params) + accountProposesStarting := balanceRoundOf(fundedRound, params) // Need to wait for funding to take effect on selection, then we can see if we're participating // Stop before the account should become eligible for selection so we can ensure it wasn't - err = fixture.WaitForRound(uint64(accountProposesStarting-1), + err = fixture.WaitForRound(accountProposesStarting-1, time.Duration(uint64(globals.MaxTimePerRound)*uint64(accountProposesStarting-1))) a.NoError(err) // Check if the account did not propose any blocks up to this round - blockWasProposed := fixture.VerifyBlockProposedRange(newAccount, int(accountProposesStarting)-1, - int(accountProposesStarting)-1) + blockWasProposed := fixture.VerifyBlockProposedRange(newAccount, accountProposesStarting-1, + accountProposesStarting-1) a.False(blockWasProposed, "account should not be selected until BalLookback (round %d) passes", int(accountProposesStarting-1)) // Now wait until the round where the funded account will be used. 
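Many of the replacements of uint64(...) locals with plain constants lean on Go's untyped constants: an untyped constant takes on whatever type its context requires, so the same literal can be added to a basics.Round without a conversion, which a uint64 variable cannot. A minimal sketch with a stand-in Round type:

package main

import "fmt"

// Round stands in for basics.Round (a defined uint64 type).
type Round uint64

const waitForRound = 5 // untyped constant: adapts to the type demanded by its context

func main() {
	current := Round(12)
	fmt.Println(current + waitForRound) // fine: the constant becomes a Round here

	asVar := uint64(5)
	_ = asVar
	// fmt.Println(current + asVar) // does not compile: mismatched types Round and uint64
}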
- err = fixture.WaitForRound(uint64(accountProposesStarting), 10*globals.MaxTimePerRound) + err = fixture.WaitForRound(accountProposesStarting, 10*globals.MaxTimePerRound) a.NoError(err) - blockWasProposedByNewAccountRecently := fixture.VerifyBlockProposedRange(newAccount, int(accountProposesStarting), 1) + blockWasProposedByNewAccountRecently := fixture.VerifyBlockProposedRange(newAccount, accountProposesStarting, 1) a.True(blockWasProposedByNewAccountRecently, "newly online account should be proposing blocks") } @@ -287,9 +287,9 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) { a.Equal(amountToSendInitial, amt, "new account should be funded with the amount the rich account sent") // we try to register online with a period in which we don't have stateproof keys - partKeyFirstValid := uint64(1) + const partKeyFirstValid = 1 // TODO: Change consensus version when state proofs are deployed - partKeyLastValid := config.Consensus[protocol.ConsensusFuture].StateProofInterval - 1 + partKeyLastValid := basics.Round(config.Consensus[protocol.ConsensusFuture].StateProofInterval) - 1 partkeyResponse, _, err := client.GenParticipationKeys(newAccount, partKeyFirstValid, partKeyLastValid, 1000) a.NoError(err, "rest client should be able to add participation key to new account") a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account") @@ -303,7 +303,7 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) { a.NoError(err, "new account with new partkey should be able to go online") fixture.AssertValidTxid(onlineTxID) - maxRoundsToWaitForTxnConfirm := uint64(5) + const maxRoundsToWaitForTxnConfirm = 5 nodeStatus, err := client.Status() a.NoError(err) seededRound := nodeStatus.LastRound diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go index 122a5a3858..6e63e1b198 100644 --- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go +++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go @@ -87,7 +87,7 @@ func TestOverlappingParticipationKeys(t *testing.T) { genesisHash := genesis.Hash() rootKeys := make(map[int]*account.Root) regTransactions := make(map[int]transactions.SignedTxn) - lastRound := uint64(39) // check 3 rounds of keys rotations + const lastRound = 39 // check 3 rounds of keys rotations // prepare the participation keys ahead of time. 
for round := uint64(1); round < lastRound; round++ { @@ -117,7 +117,7 @@ func TestOverlappingParticipationKeys(t *testing.T) { } fixture.Start() - currentRound := uint64(0) + currentRound := basics.Round(0) fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC) // ******** IMPORTANT ******** @@ -139,8 +139,8 @@ func TestOverlappingParticipationKeys(t *testing.T) { a.Equal(sts.LastRound, currentRound+1) currentRound++ - if (currentRound-1)%10 < uint64(accountsNum) { - acctIdx := (currentRound - 1) % 10 + if (currentRound-1)%10 < basics.Round(accountsNum) { + acctIdx := int((currentRound - 1) % 10) // We do a plus two because the filenames were stored with a plus 2 startRound := currentRound + 2 // +2 and -2 below to balance, start/end must match in part key file name @@ -166,11 +166,11 @@ func TestOverlappingParticipationKeys(t *testing.T) { } -func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) (crypto.OneTimeSignatureVerifier, error) { +func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum int, startRound, endRound basics.Round, regTransactions map[int]transactions.SignedTxn) (crypto.OneTimeSignatureVerifier, error) { dataDir := fixture.NodeDataDirs()[acctNum] nc := fixture.GetNodeControllerForDataDir(dataDir) - partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound)) + partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", uint64(startRound), uint64(endRound))) // This function can take more than a couple seconds, we can't have this function block so // we wrap it in a go routine diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go index ab1e24b8cd..0f7af6cb71 100644 --- a/test/e2e-go/features/participation/participationExpiration_test.go +++ b/test/e2e-go/features/participation/participationExpiration_test.go @@ -69,15 +69,15 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f a.Equal(basics.Offline.String(), newAccountStatus.Status) var onlineTxID string - var partKeyLastValid uint64 + var partKeyLastValid basics.Round startTime := time.Now() for time.Since(startTime) < 2*time.Minute { currentRound := fetchLatestRound(fixture, a) // account adds part key - partKeyFirstValid := uint64(0) - partKeyValidityPeriod := uint64(10) + const partKeyFirstValid = 0 + const partKeyValidityPeriod = 10 partKeyLastValid = currentRound + partKeyValidityPeriod partkeyResponse, _, err := sClient.GenParticipationKeys(sAccount, partKeyFirstValid, partKeyLastValid, 0) a.NoError(err) @@ -105,7 +105,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f } fixture.AssertValidTxid(onlineTxID) - maxRoundsToWaitForTxnConfirm := uint64(3) + const maxRoundsToWaitForTxnConfirm = 3 sNodeStatus, err := sClient.Status() a.NoError(err) @@ -131,10 +131,10 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f a.NoError(err) lastValidRound := sAccountData.VoteLastValid - a.Equal(basics.Round(partKeyLastValid), lastValidRound) + a.Equal(partKeyLastValid, lastValidRound) // We want to wait until we get to one round past the last valid round - err = fixture.WaitForRoundWithTimeout(uint64(lastValidRound) + 1) + err = fixture.WaitForRoundWithTimeout(lastValidRound + 1) newAccountStatus, err = 
pClient.AccountInformation(sAccount, false) a.NoError(err) @@ -147,7 +147,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f latestRound = fetchLatestRound(fixture, a) // making certain sClient has the same blocks as pClient. - _, err = sClient.WaitForRound(uint64(lastValidRound + 1)) + _, err = sClient.WaitForRound(lastValidRound + 1) a.NoError(err) blk, err := sClient.BookkeepingBlock(latestRound) @@ -166,7 +166,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f a.Equal(finalStatus.String(), newAccountStatus.Status) } -func fetchLatestRound(fixture *fixtures.RestClientFixture, a *require.Assertions) uint64 { +func fetchLatestRound(fixture *fixtures.RestClientFixture, a *require.Assertions) basics.Round { status, err := fixture.LibGoalClient.Status() a.NoError(err) return status.LastRound diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go index c95553bb8f..d65214f39f 100644 --- a/test/e2e-go/features/participation/participationRewards_test.go +++ b/test/e2e-go/features/participation/participationRewards_test.go @@ -41,7 +41,7 @@ func getFirstAccountFromNamedNode(fixture *fixtures.RestClientFixture, r *requir return } -func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round uint64) (uint64, error) { +func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round basics.Round) (basics.Round, error) { a := require.New(fixtures.SynchronizedTest(t)) block, err := fixture.LibGoalClient.BookkeepingBlock(round) @@ -66,14 +66,14 @@ func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round u } } -func spendToNonParticipating(t *testing.T, fixture *fixtures.RestClientFixture, lastRound uint64, account string, balance uint64, minFee uint64) uint64 { +func spendToNonParticipating(t *testing.T, fixture *fixtures.RestClientFixture, lastRound basics.Round, account string, balance uint64, minFee uint64) uint64 { a := require.New(fixtures.SynchronizedTest(t)) // move a lot of Algos to a non participating account -- the incentive pool poolAddr := basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} // hardcoded; change if the pool address changes pd := poolAddr drainTx, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(account, pd.String(), minFee, balance-balance/100-minFee, nil) a.NoError(err) - fixture.WaitForAllTxnsToConfirm(lastRound+uint64(10), map[string]string{drainTx.ID().String(): account}) + fixture.WaitForAllTxnsToConfirm(lastRound+10, map[string]string{drainTx.ID().String(): account}) return balance / 100 } @@ -95,7 +95,7 @@ func TestOnlineOfflineRewards(t *testing.T) { offlineClient := fixture.GetLibGoalClientForNamedNode("Offline") // learn initial balances - initialRound := uint64(11) + const initialRound = 11 r.NoError(fixture.WaitForRoundWithTimeout(initialRound)) initialOnlineBalance, _ := onlineClient.GetBalance(onlineAccount) initialOfflineBalance, _ := offlineClient.GetBalance(offlineAccount) @@ -118,7 +118,7 @@ func TestOnlineOfflineRewards(t *testing.T) { tx2, err := offlineClient.SendPaymentFromUnencryptedWallet(offlineAccount, onlineAccount, minFee, pokeAmount, nil) txidsAndAddresses[tx2.ID().String()] = offlineAccount r.NoError(err) - fixture.WaitForAllTxnsToConfirm(rewardRound+uint64(10), 
txidsAndAddresses) + fixture.WaitForAllTxnsToConfirm(rewardRound+10, txidsAndAddresses) // make sure the nodes agree on current round status, err := onlineClient.Status() r.NoError(err) @@ -167,7 +167,7 @@ func TestPartkeyOnlyRewards(t *testing.T) { initialBalance, err := client.GetBalance(account.String()) r.NoError(err) // accrue rewards by letting time pass - arbitraryPostGenesisRound := uint64(316) + const arbitraryPostGenesisRound = 316 r.NoError(fixture.WaitForRoundWithTimeout(arbitraryPostGenesisRound)) // move a lot of Algos to a non participating account so we accrue rewards faster @@ -181,7 +181,7 @@ func TestPartkeyOnlyRewards(t *testing.T) { // do a balance poke by moving funds b/w accounts. this will cause balances to reflect received rewards tx, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(richAccount.Address, account.String(), minFee, minBalance, nil) r.NoError(err) - fixture.WaitForTxnConfirmation(arbitraryPostGenesisRound+uint64(10), tx.ID().String()) + fixture.WaitForTxnConfirmation(arbitraryPostGenesisRound+10, tx.ID().String()) finalBalance, err := client.GetBalance(account.String()) r.NoError(err) delta := finalBalance - initialBalance @@ -236,7 +236,7 @@ func TestRewardUnitThreshold(t *testing.T) { tx, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(richAccount.Address, newAccount, txnFee, lessThanRewardUnit, nil) r.NoError(err) - fixture.WaitForAllTxnsToConfirm(initialRound+uint64(10), map[string]string{tx.ID().String(): richAccount.Address}) + fixture.WaitForAllTxnsToConfirm(initialRound+10, map[string]string{tx.ID().String(): richAccount.Address}) initialBalanceNewAccount = lessThanRewardUnit // wait for the client node to catch up to the same round as the fixture node @@ -280,7 +280,7 @@ func TestRewardUnitThreshold(t *testing.T) { tx2, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(richAccount.Address, newAccount, txnFee, amountRichAccountPokesWith, nil) r.NoError(err) txidsAndAddresses[tx2.ID().String()] = richAccount.Address - fixture.WaitForAllTxnsToConfirm(rewardRound+uint64(10), txidsAndAddresses) + fixture.WaitForAllTxnsToConfirm(rewardRound+10, txidsAndAddresses) // Now the new account should have enough stake to get rewards. 
curStatus, _ = fixture.AlgodClient.Status() @@ -338,7 +338,7 @@ func TestRewardRateRecalculation(t *testing.T) { r.NoError(err) client := fixture.LibGoalClient - r.NoError(fixture.WaitForRoundWithTimeout(uint64(5))) + r.NoError(fixture.WaitForRoundWithTimeout(5)) richAccount, err := fixture.GetRichestAccount() r.NoError(err) rewardsAccount := defaultPoolAddr.String() @@ -354,7 +354,7 @@ func TestRewardRateRecalculation(t *testing.T) { r.NoError(err) r.Equal(protocol.ConsensusVersion(blk.CurrentProtocol), consensusTestRapidRewardRecalculation) consensusParams := consensus[protocol.ConsensusVersion(blk.CurrentProtocol)] - rewardRecalcRound := consensusParams.RewardsRateRefreshInterval + rewardRecalcRound := basics.Round(consensusParams.RewardsRateRefreshInterval) r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1)) balanceOfRewardsPool, roundQueried := fixture.GetBalanceAndRound(rewardsAccount) if roundQueried != rewardRecalcRound-1 { @@ -375,7 +375,7 @@ func TestRewardRateRecalculation(t *testing.T) { r.NoError(err) fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "") - rewardRecalcRound = rewardRecalcRound + consensusParams.RewardsRateRefreshInterval + rewardRecalcRound += basics.Round(consensusParams.RewardsRateRefreshInterval) r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1)) balanceOfRewardsPool, roundQueried = fixture.GetBalanceAndRound(rewardsAccount) @@ -394,6 +394,5 @@ func TestRewardRateRecalculation(t *testing.T) { r.Equal((balanceOfRewardsPool-minBal-lastRoundBeforeRewardRecals.RewardsResidue)/consensusParams.RewardsRateRefreshInterval, blk.RewardsRate) // if the network keeps progressing without error, // this shows the network is healthy and that we didn't panic - finalRound := rewardRecalcRound + uint64(5) - r.NoError(fixture.WaitForRoundWithTimeout(finalRound)) + r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound + 5)) } diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go index 1d398de3bc..f2994b5a73 100644 --- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go +++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go @@ -56,7 +56,7 @@ func TestBasicPartitionRecovery(t *testing.T) { a.NoError(err) // Let the network make some progress - waitForRound := uint64(3) + const waitForRound = 3 err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) @@ -132,7 +132,7 @@ func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFix a.NoError(err) // Let the network make some progress - waitForRound := uint64(3) + const waitForRound = 3 err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) @@ -195,7 +195,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) { a.NoError(err) // Let the network make some progress - waitForRound := uint64(3) + const waitForRound = 3 err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go index 8c58412c96..db770285c9 100644 --- a/test/e2e-go/features/stateproofs/stateproofs_test.go +++ b/test/e2e-go/features/stateproofs/stateproofs_test.go @@ -68,7 +68,7 @@ func (a accountFetcher) getBalance(r *require.Assertions, f *fixtures.RestClient return balance } -func (a 
accountFetcher) goOffline(r *require.Assertions, f *fixtures.RestClientFixture, round uint64) { +func (a accountFetcher) goOffline(r *require.Assertions, f *fixtures.RestClientFixture, round basics.Round) { account0 := a.getAccount(r, f) minTxnFee, _, err := f.CurrentMinFeeAndBalance() @@ -89,7 +89,7 @@ type paymentSender struct { amount uint64 } -func (p paymentSender) sendPayment(a *require.Assertions, f *fixtures.RestClientFixture, round uint64) { +func (p paymentSender) sendPayment(a *require.Assertions, f *fixtures.RestClientFixture, round basics.Round) { account0 := p.from.getAccount(a, f) account1 := p.to.getAccount(a, f) @@ -162,9 +162,9 @@ func verifyStateProofsCreation(t *testing.T, fixture *fixtures.RestClientFixture var lastStateProofMessage stateproofmsg.Message libgoal := fixture.LibGoalClient - expectedNumberOfStateProofs := uint64(4) + const expectedNumberOfStateProofs = 4 // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs - for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { + for rnd := basics.Round(1); rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+1); rnd++ { // send a dummy payment transaction to create non-empty blocks. paymentSender{ from: accountFetcher{nodeName: "Node0", accountNumber: 0}, @@ -178,7 +178,7 @@ func verifyStateProofsCreation(t *testing.T, fixture *fixtures.RestClientFixture blk, err := libgoal.BookkeepingBlock(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0) r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{}) @@ -195,7 +195,7 @@ func verifyStateProofsCreation(t *testing.T, fixture *fixtures.RestClientFixture for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -257,9 +257,9 @@ func TestStateProofOverlappingKeys(t *testing.T) { var lastStateProofMessage stateproofmsg.Message libgoalClient := fixture.LibGoalClient - expectedNumberOfStateProofs := uint64(8) - for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { - if rnd == consensusParams.StateProofInterval*(5) { // allow some buffer period before the voting keys are expired (for the keyreg to take effect) + const expectedNumberOfStateProofs = 8 + for rnd := basics.Round(1); rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+1); rnd++ { + if rnd == basics.Round(consensusParams.StateProofInterval)*5 { // allow some buffer period before the voting keys are expired (for the keyreg to take effect) fmt.Println("at round.. 
installing", rnd) // Generate participation keys (for the same accounts) for i := 0; i < pNodes; i++ { @@ -287,7 +287,7 @@ func TestStateProofOverlappingKeys(t *testing.T) { blk, err := libgoalClient.BookkeepingBlock(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0) r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{}) @@ -300,7 +300,7 @@ func TestStateProofOverlappingKeys(t *testing.T) { } for lastStateProofBlock.Round() != 0 && lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -337,9 +337,9 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) { libgoalClient := fixture.LibGoalClient - var startRound = uint64(1) - var nextStateProofRound = uint64(0) - var firstStateProofRound = 2 * consensusParams.StateProofInterval + var startRound = basics.Round(1) + var nextStateProofRound = basics.Round(0) + var firstStateProofRound = basics.Round(2 * consensusParams.StateProofInterval) for rnd := startRound; nextStateProofRound <= firstStateProofRound; rnd++ { paymentSender{ @@ -354,7 +354,7 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) { blk, err := libgoalClient.BookkeepingBlock(rnd) r.NoError(err) - nextStateProofRound = uint64(blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound) + nextStateProofRound = blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound } _, stateProofMessage := getStateProofByLastRound(r, &fixture, firstStateProofRound) @@ -397,7 +397,7 @@ func getDefaultStateProofConsensusParams() config.ConsensusParams { return consensusParams } -func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClientFixture, stateProofLatestRound uint64) (sp.StateProof, stateproofmsg.Message) { +func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClientFixture, stateProofLatestRound basics.Round) (sp.StateProof, stateproofmsg.Message) { restClient, err := fixture.NC.AlgodClient() r.NoError(err) @@ -419,7 +419,7 @@ func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClien return stateProof, msg } -func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClientFixture, nextStateProofRound uint64, prevStateProofMessage stateproofmsg.Message, lastStateProofBlock bookkeeping.Block, consensusParams config.ConsensusParams) (stateproofmsg.Message, bookkeeping.Block) { +func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClientFixture, nextStateProofRound basics.Round, prevStateProofMessage stateproofmsg.Message, lastStateProofBlock bookkeeping.Block, consensusParams config.ConsensusParams) (stateproofmsg.Message, bookkeeping.Block) { stateProof, stateProofMessage := getStateProofByLastRound(r, fixture, nextStateProofRound) nextStateProofBlock, err := 
fixture.LibGoalClient.BookkeepingBlock(nextStateProofRound) @@ -429,7 +429,7 @@ func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClien if !prevStateProofMessage.MsgIsZero() { //if we have a previous stateproof message we can verify the current stateproof using data from it verifier := sp.MkVerifierWithLnProvenWeight(prevStateProofMessage.VotersCommitment, prevStateProofMessage.LnProvenWeight, consensusParams.StateProofStrengthTarget) - err = verifier.Verify(uint64(nextStateProofBlock.Round()), stateProofMessage.Hash(), &stateProof) + err = verifier.Verify(nextStateProofBlock.Round(), stateProofMessage.Hash(), &stateProof) r.NoError(err) } var votersRoot = make([]byte, sp.HashSize) @@ -441,7 +441,7 @@ func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClien verifier, err := sp.MkVerifier(votersRoot, provenWeight, consensusParams.StateProofStrengthTarget) r.NoError(err) - err = verifier.Verify(uint64(nextStateProofBlock.Round()), stateProofMessage.Hash(), &stateProof) + err = verifier.Verify(nextStateProofBlock.Round(), stateProofMessage.Hash(), &stateProof) r.NoError(err) return stateProofMessage, nextStateProofBlock } @@ -495,9 +495,9 @@ func TestStateProofRecoveryDuringRecoveryPeriod(t *testing.T) { expectedNumberOfStateProofs := uint64(4) // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs - for rnd := uint64(2); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { + for rnd := basics.Round(2); rnd <= basics.Round(consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1)); rnd++ { // Start the node in the last interval after which the SP will be abandoned if SPs are not generated. - if rnd == (consensusParams.StateProofMaxRecoveryIntervals)*consensusParams.StateProofInterval { + if rnd == basics.Round((consensusParams.StateProofMaxRecoveryIntervals)*consensusParams.StateProofInterval) { t.Logf("at round %d starting node\n", rnd) dir, err = fixture.GetNodeDir("Node4") r.NoError(err) @@ -517,7 +517,7 @@ func TestStateProofRecoveryDuringRecoveryPeriod(t *testing.T) { blk, err := libgoal.BookkeepingBlock(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0) r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{}) @@ -533,7 +533,7 @@ func TestStateProofRecoveryDuringRecoveryPeriod(t *testing.T) { // since the stateproof chain is catching up there would be several proofs to check for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -594,11 +594,11 @@ func TestStateProofRecovery(t *testing.T) { var lastStateProofMessage stateproofmsg.Message - expectedNumberOfStateProofs := uint64(7) - numberOfGraceIntervals := uint64(3) - rnd := uint64(2) - for ; rnd <= 
consensusParams.StateProofInterval*(expectedNumberOfStateProofs); rnd++ { - if rnd == (consensusParams.StateProofMaxRecoveryIntervals+4)*consensusParams.StateProofInterval { + const expectedNumberOfStateProofs = 7 + const numberOfGraceIntervals = 3 + rnd := basics.Round(2) + for ; rnd <= basics.Round(consensusParams.StateProofInterval)*expectedNumberOfStateProofs; rnd++ { + if rnd == basics.Round((consensusParams.StateProofMaxRecoveryIntervals+4)*consensusParams.StateProofInterval) { t.Logf("at round %d starting node\n", rnd) dir, err = fixture.GetNodeDir("Node4") r.NoError(err) @@ -618,7 +618,7 @@ func TestStateProofRecovery(t *testing.T) { blk, err := libgoal.BookkeepingBlock(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0) r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{}) @@ -632,7 +632,7 @@ func TestStateProofRecovery(t *testing.T) { if lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -644,7 +644,7 @@ func TestStateProofRecovery(t *testing.T) { // at this point we expect the state proof chain to be completely caught up. However, In order to avoid flakiness on // heavily loaded machines, we would wait some extra round for the state proofs to catch up - for ; rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+numberOfGraceIntervals); rnd++ { + for ; rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+numberOfGraceIntervals); rnd++ { err = fixture.WaitForRound(rnd, timeoutUntilNextRound) r.NoError(err) @@ -658,7 +658,7 @@ func TestStateProofRecovery(t *testing.T) { if lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -675,7 +675,7 @@ func TestStateProofRecovery(t *testing.T) { } // installParticipationKey generates a new key for a given account and installs it with the client. 
-func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp model.PostParticipationResponse, part account.Participation, err error) { +func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid basics.Round) (resp model.PostParticipationResponse, part account.Participation, err error) { dir, err := os.MkdirTemp("", "temporary_partkey_dir") require.NoError(t, err) defer os.RemoveAll(dir) @@ -699,7 +699,7 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco sAccount := part.Address().String() sWH, err := client.GetUnencryptedWalletHandle() require.NoError(t, err) - goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, 1000, currentRnd, uint64(part.LastValid), [32]byte{}, true) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, 1000, currentRnd, part.LastValid, [32]byte{}, true) assert.NoError(t, err) require.Equal(t, sAccount, goOnlineTx.Src().String()) onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx) @@ -745,7 +745,7 @@ func TestAttestorsChange(t *testing.T) { var lastStateProofMessage stateproofmsg.Message libgoal := fixture.LibGoalClient - expectedNumberOfStateProofs := uint64(4) + const expectedNumberOfStateProofs = 4 // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs paymentMaker := paymentSender{ @@ -753,9 +753,9 @@ func TestAttestorsChange(t *testing.T) { to: accountFetcher{nodeName: "poorNode", accountNumber: 0}, } - for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { + for rnd := basics.Round(1); rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+1); rnd++ { // Changing the amount to pay. This should transfer most of the money from the rich node to the poor node. - if consensusParams.StateProofInterval*2 == rnd { + if basics.Round(consensusParams.StateProofInterval)*2 == rnd { balance := paymentMaker.from.getBalance(a, &fixture) // ensuring that before the test, the rich node (from) has a significantly larger balance. a.True(balance/2 > paymentMaker.to.getBalance(a, &fixture)) @@ -765,7 +765,7 @@ func TestAttestorsChange(t *testing.T) { } // verifies that rich account transferred most of its money to the account that sits on poorNode. 
- if consensusParams.StateProofInterval*3 == rnd { + if basics.Round(consensusParams.StateProofInterval)*3 == rnd { a.True(paymentMaker.to.getBalance(a, &fixture) > paymentMaker.from.getBalance(a, &fixture)) } @@ -774,7 +774,7 @@ func TestAttestorsChange(t *testing.T) { blk, err := libgoal.BookkeepingBlock(rnd) a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0) a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{}) @@ -790,7 +790,7 @@ func TestAttestorsChange(t *testing.T) { for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -834,12 +834,12 @@ func TestTotalWeightChanges(t *testing.T) { richNode := accountFetcher{nodeName: "richNode", accountNumber: 0} - expectedNumberOfStateProofs := uint64(4) + const expectedNumberOfStateProofs = 4 // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs - for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { + for rnd := basics.Round(1); rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+1); rnd++ { // Rich node goes offline - if consensusParams.StateProofInterval*2-(consensusParams.StateProofInterval/2) == rnd { + if basics.Round(consensusParams.StateProofInterval*2-(consensusParams.StateProofInterval/2)) == rnd { // subtract 8 rounds since the total online stake is calculated prior to the actual state proof round (lookback) richNode.goOffline(a, &fixture, rnd) } @@ -852,13 +852,13 @@ func TestTotalWeightChanges(t *testing.T) { blk, err := libgoal.BookkeepingBlock(rnd) a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { // Must have a merkle commitment for participants a.Greater(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment), 0) totalStake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64() a.NotEqual(basics.MicroAlgos{}, totalStake) - if rnd <= consensusParams.StateProofInterval { + if rnd <= basics.Round(consensusParams.StateProofInterval) { a.Equal(uint64(10000000000000000), totalStake) } else { // richNode should be offline by now a.Greater(uint64(10000000000000000), totalStake) @@ -875,7 +875,7 @@ func TestTotalWeightChanges(t *testing.T) { for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound && lastStateProofBlock.Round() != 0 { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + 
basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) // Find the state proof transaction @@ -927,8 +927,8 @@ func TestSPWithTXPoolFull(t *testing.T) { var genesisHash crypto.Digest copy(genesisHash[:], params.GenesisHash) - round := uint64(0) - for round < uint64(20) { + round := basics.Round(0) + for round = range 20 { params, err = relay.SuggestedParams() require.NoError(t, err) @@ -942,10 +942,10 @@ func TestSPWithTXPoolFull(t *testing.T) { continue } require.Equal(t, protocol.StateProofTx, b.Payset[0].Txn.Type) - require.Equal(t, uint64(8), b.Payset[0].Txn.StateProofTxnFields.Message.LastAttestedRound) + require.EqualValues(t, 8, b.Payset[0].Txn.StateProofTxnFields.Message.LastAttestedRound) break } - require.Less(t, round, uint64(20)) + require.Less(t, round, basics.Round(20)) } // TestAtMostOneSPFullPool tests that there is at most one SP txn is admitted to the pool per roound @@ -984,9 +984,9 @@ func TestAtMostOneSPFullPool(t *testing.T) { require.NoError(t, err) // Check that the first 2 stateproofs are added to the blockchain in different rounds - round := uint64(0) - expectedSPRound := consensusParams.StateProofInterval * 2 - for round < consensusParams.StateProofInterval*10 { + round := basics.Round(0) + expectedSPRound := basics.Round(consensusParams.StateProofInterval) * 2 + for round < basics.Round(consensusParams.StateProofInterval)*10 { round = params.LastRound err := fixture.WaitForRound(round+1, 6*time.Second) @@ -1011,11 +1011,11 @@ func TestAtMostOneSPFullPool(t *testing.T) { require.Equal(t, int(expectedSPRound), int(b.Payset[tid].Txn.StateProofTxnFields.Message.LastAttestedRound)) - expectedSPRound = expectedSPRound + consensusParams.StateProofInterval + expectedSPRound = expectedSPRound + basics.Round(consensusParams.StateProofInterval) break } } - if expectedSPRound == consensusParams.StateProofInterval*4 { + if expectedSPRound == basics.Round(consensusParams.StateProofInterval*4) { break } } @@ -1121,9 +1121,9 @@ func TestAtMostOneSPFullPoolWithLoad(t *testing.T) { } // Check that the first 2 stateproofs are added to the blockchain - round := uint64(0) + round := basics.Round(0) expectedSPRound := consensusParams.StateProofInterval * 2 - for round < consensusParams.StateProofInterval*10 { + for round < basics.Round(consensusParams.StateProofInterval)*10 { round = params.LastRound err := fixture.WaitForRound(round+1, 6*time.Second) @@ -1160,7 +1160,7 @@ func TestAtMostOneSPFullPoolWithLoad(t *testing.T) { // In some environments (ARM) the high load may prevent it. 
} -func getWellformedSPTransaction(round uint64, genesisHash crypto.Digest, consensusParams config.ConsensusParams, t *testing.T) (stxn transactions.SignedTxn) { +func getWellformedSPTransaction(round basics.Round, genesisHash crypto.Digest, consensusParams config.ConsensusParams, t *testing.T) (stxn transactions.SignedTxn) { msg := stateproofmsg.Message{} proof := &sp.StateProof{} @@ -1206,7 +1206,7 @@ func TestStateProofCheckTotalStake(t *testing.T) { var fixture fixtures.RestClientFixture pNodes := 5 - expectedNumberOfStateProofs := uint64(4) + const expectedNumberOfStateProofs = 4 fixture.SetConsensus(configurableConsensus) fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json")) @@ -1230,9 +1230,9 @@ func TestStateProofCheckTotalStake(t *testing.T) { var totalSupplyAtRound [1000]model.SupplyResponse var accountSnapshotAtRound [1000][]model.Account - for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ { - if rnd == consensusParams.StateProofInterval+consensusParams.StateProofVotersLookback { // here we register the keys of address 0 so it won't be able the sign a state proof (its stake would be removed for the total) - _, part, err := installParticipationKey(t, libgoalNodeClients[0], accountsAddresses[0], 0, consensusParams.StateProofInterval*2-1) + for rnd := basics.Round(1); rnd <= basics.Round(consensusParams.StateProofInterval)*(expectedNumberOfStateProofs+1); rnd++ { + if rnd == basics.Round(consensusParams.StateProofInterval+consensusParams.StateProofVotersLookback) { // here we register the keys of address 0 so it won't be able the sign a state proof (its stake would be removed for the total) + _, part, err := installParticipationKey(t, libgoalNodeClients[0], accountsAddresses[0], 0, basics.Round(consensusParams.StateProofInterval*2-1)) r.NoError(err) participations[0] = part registerParticipationAndWait(t, libgoalNodeClients[0], participations[0]) @@ -1251,7 +1251,7 @@ func TestStateProofCheckTotalStake(t *testing.T) { // this is the round in we take a snapshot of the account balances. 
// We would use this snapshot later on to compare the weights on the state proof, and to make sure that // the totalWeight commitment is correct - if ((rnd + 2) % consensusParams.StateProofInterval) == 0 { + if ((rnd + 2) % basics.Round(consensusParams.StateProofInterval)) == 0 { totalSupply, err := libgoalClient.LedgerSupply() r.NoError(err) @@ -1270,15 +1270,15 @@ func TestStateProofCheckTotalStake(t *testing.T) { blk, err := libgoalClient.BookkeepingBlock(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) - if (rnd % consensusParams.StateProofInterval) == 0 { - if rnd >= consensusParams.StateProofInterval*2 { + if (rnd % basics.Round(consensusParams.StateProofInterval)) == 0 { + if rnd >= basics.Round(consensusParams.StateProofInterval)*2 { // since account 0 would no longer be able to sign the state proof, its stake should // be removed from the total stake in the commitment - total := totalSupplyAtRound[rnd-consensusParams.StateProofVotersLookback].OnlineMoney - total = total - accountSnapshotAtRound[rnd-consensusParams.StateProofVotersLookback][0].Amount + total := totalSupplyAtRound[rnd-basics.Round(consensusParams.StateProofVotersLookback)].OnlineMoney + total = total - accountSnapshotAtRound[rnd-basics.Round(consensusParams.StateProofVotersLookback)][0].Amount r.Equal(total, blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw) } else { - r.Equal(totalSupplyAtRound[rnd-consensusParams.StateProofVotersLookback].OnlineMoney, blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw) + r.Equal(totalSupplyAtRound[rnd-basics.Round(consensusParams.StateProofVotersLookback)].OnlineMoney, blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw) } // Special case: bootstrap validation with the first block @@ -1289,13 +1289,13 @@ func TestStateProofCheckTotalStake(t *testing.T) { } for lastStateProofBlock.Round() != 0 && lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound { - nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval + nextStateProofRound := lastStateProofBlock.Round() + basics.Round(consensusParams.StateProofInterval) t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round()) stateProof, stateProofMsg := getStateProofByLastRound(r, &fixture, nextStateProofRound) - accountSnapshot := accountSnapshotAtRound[stateProofMsg.LastAttestedRound-consensusParams.StateProofInterval-consensusParams.StateProofVotersLookback] + accountSnapshot := accountSnapshotAtRound[stateProofMsg.LastAttestedRound-basics.Round(consensusParams.StateProofInterval-consensusParams.StateProofVotersLookback)] // once the state proof is accepted we want to make sure that the weight for _, v := range stateProof.Reveals { diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go index b833b55951..08bf3335d5 100644 --- a/test/e2e-go/features/transactions/accountv2_test.go +++ b/test/e2e-go/features/transactions/accountv2_test.go @@ -35,7 +35,7 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd uint64, gval uint64, lval uint64) { +func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd basics.Round, gval uint64, lval uint64) { a := require.New(fixtures.SynchronizedTest(t)) 
foundGlobal := false @@ -215,7 +215,7 @@ int 1 checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1) // call the app - tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppOptInTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) a.NoError(err) @@ -232,7 +232,7 @@ int 1 // Ensure the txn committed resp, err := client.GetPendingTransactions(2) a.NoError(err) - a.Equal(uint64(0), resp.TotalTransactions) + a.Zero(resp.TotalTransactions) txinfo, err := client.PendingTransactionInformation(txid) a.NoError(err) a.NotNil(txinfo.ConfirmedRound) @@ -289,13 +289,13 @@ int 1 a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) - app, err := client.ApplicationInformation(uint64(appIdx)) + app, err := client.ApplicationInformation(appIdx) a.NoError(err) - a.Equal(uint64(appIdx), app.Id) + a.Equal(appIdx, app.Id) a.Equal(creator, app.Params.Creator) // call the app - tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppNoOpTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) a.NoError(err) @@ -315,7 +315,7 @@ int 1 a.Equal(resp.TopTransactions[0].Txn.ID().String(), txid) continue } - a.Equal(uint64(0), resp.TotalTransactions) + a.Zero(resp.TotalTransactions) break } @@ -520,7 +520,7 @@ int 1 checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1) // call the app - tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppOptInTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) if foreignAssets != nil { tx.ForeignAssets = foreignAssets @@ -547,7 +547,7 @@ int 1 // Ensure the txn committed resp, err := client.GetPendingTransactions(2) a.NoError(err) - a.Equal(uint64(0), resp.TotalTransactions) + a.Zero(resp.TotalTransactions) txinfo, err := client.PendingTransactionInformation(txid) a.NoError(err) a.NotNil(txinfo.ConfirmedRound) @@ -604,13 +604,13 @@ int 1 a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) - app, err := client.ApplicationInformation(uint64(appIdx)) + app, err := client.ApplicationInformation(appIdx) a.NoError(err) - a.Equal(uint64(appIdx), app.Id) + a.Equal(appIdx, app.Id) a.Equal(creator, app.Params.Creator) // call the app - tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppNoOpTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) a.NoError(err) @@ -631,7 +631,7 @@ int 1 a.Equal(pendingTxn.Txn.ID().String(), txid) continue } - a.Equal(uint64(0), resp.TotalTransactions) + a.Zero(resp.TotalTransactions) break } diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go index b83860bdd1..16f7787113 100644 --- a/test/e2e-go/features/transactions/asset_test.go +++ b/test/e2e-go/features/transactions/asset_test.go @@ -36,7 +36,7 @@ import ( ) type assetIDParams struct { - idx uint64 + idx basics.AssetIndex params model.AssetParams } @@ -68,27 +68,26 @@ func TestAssetValidRounds(t *testing.T) { client := fixture.LibGoalClient // First, test valid rounds to last valid conversion - var firstValid, lastValid, lastRound, validRounds uint64 + var firstValid, lastValid, lastRound, validRounds basics.Round params, err := client.SuggestedParams() a.NoError(err) cparams, ok := 
config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)] a.True(ok) - + maxTxnLife := basics.Round(cparams.MaxTxnLife) firstValid = 0 lastValid = 0 - validRounds = cparams.MaxTxnLife + 1 + validRounds = maxTxnLife + 1 firstValid, lastValid, lastRound, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds) a.NoError(err) a.True(firstValid == 1 || firstValid == lastRound) - a.Equal(firstValid+cparams.MaxTxnLife, lastValid) + a.Equal(firstValid+maxTxnLife, lastValid) firstValid = 0 lastValid = 0 - validRounds = cparams.MaxTxnLife + 2 + validRounds = maxTxnLife + 2 _, _, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds) - a.Error(err) - a.True(strings.Contains(err.Error(), "cannot construct transaction: txn validity period")) + a.ErrorContains(err, "cannot construct transaction: txn validity period") firstValid = 0 lastValid = 0 @@ -102,24 +101,24 @@ func TestAssetValidRounds(t *testing.T) { validRounds = 1 firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds) a.NoError(err) - a.Equal(uint64(1), firstValid) + a.EqualValues(1, firstValid) a.Equal(firstValid, lastValid) firstValid = 1 lastValid = 0 - validRounds = cparams.MaxTxnLife + validRounds = maxTxnLife firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds) a.NoError(err) - a.Equal(uint64(1), firstValid) - a.Equal(cparams.MaxTxnLife, lastValid) + a.EqualValues(1, firstValid) + a.Equal(maxTxnLife, lastValid) firstValid = 100 lastValid = 0 - validRounds = cparams.MaxTxnLife + validRounds = maxTxnLife firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds) a.NoError(err) - a.Equal(uint64(100), firstValid) - a.Equal(firstValid+cparams.MaxTxnLife-1, lastValid) + a.EqualValues(100, firstValid) + a.Equal(firstValid+maxTxnLife-1, lastValid) // Second, test transaction creation accountList, err := fixture.GetWalletsSortedByBalance() @@ -154,7 +153,7 @@ func TestAssetValidRounds(t *testing.T) { a.NoError(err) // zeros are special cases // first valid never should be zero - a.NotEqual(basics.Round(0), tx.FirstValid) + a.NotZero(tx.FirstValid) params, err = client.SuggestedParams() a.NoError(err) @@ -162,13 +161,13 @@ func TestAssetValidRounds(t *testing.T) { // ledger may advance between SuggestedParams and FillUnsignedTxTemplate calls // expect validity sequence - var firstValidRange, lastValidRange []uint64 + var firstValidRange, lastValidRange []basics.Round for i := lastRoundBefore; i <= lastRoundAfter+1; i++ { firstValidRange = append(firstValidRange, i) - lastValidRange = append(lastValidRange, i+cparams.MaxTxnLife) + lastValidRange = append(lastValidRange, i+maxTxnLife) } - a.Contains(firstValidRange, uint64(tx.FirstValid)) - a.Contains(lastValidRange, uint64(tx.LastValid)) + a.Contains(firstValidRange, tx.FirstValid) + a.Contains(lastValidRange, tx.LastValid) firstValid = 1 lastValid = 1 @@ -182,7 +181,7 @@ func TestAssetValidRounds(t *testing.T) { tx, err = client.FillUnsignedTxTemplate(account0, firstValid, lastValid, fee, tx) a.NoError(err) a.Equal(basics.Round(1), tx.FirstValid) - a.Equal(basics.Round(cparams.MaxTxnLife+1), tx.LastValid) + a.Equal(maxTxnLife+1, tx.LastValid) } func TestAssetConfig(t *testing.T) { @@ -568,7 +567,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) { txCreate1, err = client0.FillUnsignedTxTemplate(account0, 0, 0, fee, txCreate1) a.NoError(err) - assetID1 := txCount + 1 + assetID1 := basics.AssetIndex(txCount + 1) txSend, err := 
client1.MakeUnsignedAssetSendTx(assetID1, 0, account1, "", "") a.NoError(err) txSend, err = client1.FillUnsignedTxTemplate(account1, 0, 0, fee, txSend) @@ -605,7 +604,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) { txCreate2, err = client0.FillUnsignedTxTemplate(account0, 0, 0, fee, txCreate2) a.NoError(err) - assetID3 := txCount + 1 + assetID3 := basics.AssetIndex(txCount + 1) txDestroy, err := client0.MakeUnsignedAssetDestroyTx(assetID3) a.NoError(err) txDestroy, err = client0.FillUnsignedTxTemplate(account0, 0, 0, fee, txDestroy) @@ -746,7 +745,7 @@ func TestAssetSend(t *testing.T) { a.NoError(err) a.NotNil(info.CreatedAssets) a.Equal(len(*info.CreatedAssets), 2) - var frozenIdx, nonFrozenIdx uint64 + var frozenIdx, nonFrozenIdx basics.AssetIndex for _, asset := range *info.CreatedAssets { idx := asset.Index cp := asset.Params @@ -977,7 +976,7 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) { a.NotNil(info.CreatedAssets) a.Equal(len(*info.CreatedAssets), 1) var asset model.AssetParams - var assetIndex uint64 + var assetIndex basics.AssetIndex for _, cp := range *info.CreatedAssets { asset = cp.Params assetIndex = cp.Index @@ -1078,7 +1077,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) { a.NotNil(info.CreatedAssets) a.Equal(len(*info.CreatedAssets), 1) var asset model.AssetParams - var assetIndex uint64 + var assetIndex basics.AssetIndex for _, cp := range *info.CreatedAssets { asset = cp.Params assetIndex = cp.Index @@ -1095,7 +1094,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) { nodeStatus, _ := client.Status() consParams, err := client.ConsensusParams(nodeStatus.LastRound) a.NoError(err) - err = fixture.WaitForRoundWithTimeout(curRound + consParams.MaxBalLookback + 1) + err = fixture.WaitForRoundWithTimeout(curRound + basics.Round(consParams.MaxBalLookback) + 1) a.NoError(err) // Check again that asset is visible diff --git a/test/e2e-go/features/transactions/lease_test.go b/test/e2e-go/features/transactions/lease_test.go index 6e5b68e0c8..135dea6073 100644 --- a/test/e2e-go/features/transactions/lease_test.go +++ b/test/e2e-go/features/transactions/lease_test.go @@ -410,10 +410,10 @@ func TestOverlappingLeases(t *testing.T) { // construct transactions for sending money to account1 and account2 // from same sender with identical lease, but different, overlapping ranges - tx1, err := client.ConstructPayment(account0, account1, 0, 1000000, nil, "", lease, basics.Round(leaseStart), basics.Round(leaseStart+firstTxLeaseLife)) + tx1, err := client.ConstructPayment(account0, account1, 0, 1000000, nil, "", lease, leaseStart, leaseStart+firstTxLeaseLife) a.NoError(err) - tx2, err := client.ConstructPayment(account0, account2, 0, 2000000, nil, "", lease, basics.Round(leaseStart), basics.Round(leaseStart+secondTxLeaseLife)) + tx2, err := client.ConstructPayment(account0, account2, 0, 2000000, nil, "", lease, leaseStart, leaseStart+secondTxLeaseLife) a.NoError(err) stx1, err := client.SignTransactionWithWallet(wh, nil, tx1) diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go index 5a9d1c213a..98b1f76708 100644 --- a/test/e2e-go/features/transactions/onlineStatusChange_test.go +++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go @@ -30,7 +30,7 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -const transactionValidityPeriod = uint64(100) // rounds +const transactionValidityPeriod = basics.Round(100) const transactionFee = uint64(0) func 
TestAccountsCanChangeOnlineState(t *testing.T) { @@ -126,12 +126,12 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) { if doNonparticipationTest { txidsForStatusChange[nonparticipatingTxID] = becomesNonparticipating } - txnConfirmationDeadline := curRound + uint64(5) + txnConfirmationDeadline := curRound + 5 confirmed := fixture.WaitForAllTxnsToConfirm(txnConfirmationDeadline, txidsForStatusChange) a.True(confirmed, "Transactions failed to confirm.") _, curRound = fixture.GetBalanceAndRound(initiallyOnline) - fixture.WaitForRoundWithTimeout(curRound + uint64(1)) + fixture.WaitForRoundWithTimeout(curRound + 1) // assert that initiallyOffline is now online initiallyOfflineAccountStatus, err = client.AccountInformation(initiallyOffline, false) diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go index 316f0cf224..067691a6a0 100644 --- a/test/e2e-go/features/transactions/sendReceive_test.go +++ b/test/e2e-go/features/transactions/sendReceive_test.go @@ -135,7 +135,7 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i if waitForTransaction { curStatus, _ := pongClient.Status() curRound := curStatus.LastRound - err = fixture.WaitForRoundWithTimeout(curRound + uint64(1)) + err = fixture.WaitForRoundWithTimeout(curRound + 1) a.NoError(err) } } @@ -145,10 +145,10 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i confirmed := true fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pongClient.DataDir())) - confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses) - a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5)) - confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses) - a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5)) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+5, pingTxidsToAddresses) + a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+5) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+5, pongTxidsToAddresses) + a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+5) pingBalance, err = pongClient.GetBalance(pingAccount) a.NoError(err) @@ -158,10 +158,10 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i a.True(expectedPongBalance <= pongBalance, "pong balance is different than expected.") fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pingClient.DataDir())) - confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses) - a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5)) - confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses) - a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5)) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+5, pingTxidsToAddresses) + a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+5) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+5, pongTxidsToAddresses) + a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+5) pingBalance, err = pingClient.GetBalance(pingAccount) a.NoError(err) diff --git a/test/e2e-go/kmd/e2e_kmd_server_client_test.go 
b/test/e2e-go/kmd/e2e_kmd_server_client_test.go index 7fcc8ec1f0..fe326f3ea9 100644 --- a/test/e2e-go/kmd/e2e_kmd_server_client_test.go +++ b/test/e2e-go/kmd/e2e_kmd_server_client_test.go @@ -45,7 +45,7 @@ func TestServerStartsStopsSuccessfully(t *testing.T) { a.NoError(err) } -func TestBadAuthFails(t *testing.T) { +func TestBadAuthErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go index 00d4127c4c..52dbe88283 100644 --- a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go +++ b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go @@ -26,7 +26,7 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func TestNonAbsSQLiteWalletConfigFails(t *testing.T) { +func TestNonAbsSQLiteWalletConfigErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/restAPI/other/appsRestAPI_test.go b/test/e2e-go/restAPI/other/appsRestAPI_test.go index a9894d0435..b0c2accd5f 100644 --- a/test/e2e-go/restAPI/other/appsRestAPI_test.go +++ b/test/e2e-go/restAPI/other/appsRestAPI_test.go @@ -119,7 +119,7 @@ return a.NoError(err) // call app, which will issue an ASA create inner txn - appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(uint64(createdAppID), nil, nil, nil, nil, nil, 0) + appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(createdAppID, nil, nil, nil, nil, nil, 0) a.NoError(err) appCallTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCallTxn) a.NoError(err) @@ -280,7 +280,7 @@ end: } txns[i], err = testClient.MakeUnsignedAppNoOpTx( - uint64(createdAppID), appArgs, + createdAppID, appArgs, nil, nil, nil, []transactions.BoxRef{boxRef}, 0, ) @@ -330,21 +330,21 @@ end: // `assertBoxCount` sanity checks that the REST API respects `expectedCount` through different queries against app ID = `createdAppID`. assertBoxCount := func(expectedCount uint64) { // Query without client-side limit. - resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0) + resp, err := testClient.ApplicationBoxes(createdAppID, 0) a.NoError(err) a.Len(resp.Boxes, int(expectedCount)) // Query with requested max < expected expectedCount. - _, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount-1) + _, err = testClient.ApplicationBoxes(createdAppID, expectedCount-1) assertErrorResponse(err, expectedCount, expectedCount-1) // Query with requested max == expected expectedCount. - resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount) + resp, err = testClient.ApplicationBoxes(createdAppID, expectedCount) a.NoError(err) a.Len(resp.Boxes, int(expectedCount)) // Query with requested max > expected expectedCount. - resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount+1) + resp, err = testClient.ApplicationBoxes(createdAppID, expectedCount+1) a.NoError(err) a.Len(resp.Boxes, int(expectedCount)) } @@ -385,7 +385,7 @@ end: } var resp model.BoxesResponse - resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0) + resp, err = testClient.ApplicationBoxes(createdAppID, 0) a.NoError(err) expectedCreatedBoxes := make([]string, 0, createdBoxCount) @@ -444,7 +444,7 @@ end: } // Happy Vanilla paths: - resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0) + resp, err := testClient.ApplicationBoxes(createdAppID, 0) a.NoError(err) a.Empty(resp.Boxes) @@ -454,7 +454,7 @@ end: // querying it for boxes _DOES NOT ERROR_. 
There is no easy way to tell // the difference between non-existing boxes for an app that once existed // vs. an app the NEVER existed. - nonexistantAppIndex := uint64(1337) + nonexistantAppIndex := basics.AppIndex(1337) _, err = testClient.ApplicationInformation(nonexistantAppIndex) a.ErrorContains(err, "application does not exist") resp, err = testClient.ApplicationBoxes(nonexistantAppIndex, 0) @@ -487,7 +487,7 @@ end: operateAndMatchRes("delete", strSliceTest) } - resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0) + resp, err = testClient.ApplicationBoxes(createdAppID, 0) a.NoError(err) a.Empty(resp.Boxes) @@ -515,7 +515,7 @@ end: currentRoundBeforeBoxes, err := testClient.CurrentRound() a.NoError(err) - boxResponse, err := testClient.GetApplicationBoxByName(uint64(createdAppID), boxTest.encodedName) + boxResponse, err := testClient.GetApplicationBoxByName(createdAppID, boxTest.encodedName) a.NoError(err) currentRoundAfterBoxes, err := testClient.CurrentRound() a.NoError(err) @@ -536,7 +536,7 @@ end: a.Equal(uint64(30), appAccountData.TotalBoxBytes) // delete the app - appDeleteTxn, err := testClient.MakeUnsignedAppDeleteTx(uint64(createdAppID), nil, nil, nil, nil, nil, 0) + appDeleteTxn, err := testClient.MakeUnsignedAppDeleteTx(createdAppID, nil, nil, nil, nil, nil, 0) a.NoError(err) appDeleteTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appDeleteTxn) a.NoError(err) @@ -545,7 +545,7 @@ end: _, err = helper.WaitForTransaction(t, testClient, appDeleteTxID, 30*time.Second) a.NoError(err) - _, err = testClient.ApplicationInformation(uint64(createdAppID)) + _, err = testClient.ApplicationInformation(createdAppID) a.ErrorContains(err, "application does not exist") assertBoxCount(numberOfBoxesRemaining) @@ -648,7 +648,7 @@ func TestBlockLogs(t *testing.T) { // call app twice appCallTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(createdAppID), nil, nil, nil, + createdAppID, nil, nil, nil, nil, nil, 0, ) a.NoError(err) @@ -692,22 +692,22 @@ func TestBlockLogs(t *testing.T) { expected = model.BlockLogsResponse{ Logs: []model.AppCallLogs{ { - ApplicationIndex: uint64(createdAppID), + ApplicationIndex: createdAppID, TxId: stxn0.ID().String(), Logs: [][]byte{dd0000dd, {}, deadDood}, }, { - ApplicationIndex: uint64(createdAppID + 3), + ApplicationIndex: createdAppID + 3, TxId: stxn0.ID().String(), Logs: [][]byte{deadBeef}, }, { - ApplicationIndex: uint64(createdAppID), + ApplicationIndex: createdAppID, TxId: stxn1.ID().String(), Logs: [][]byte{dd0000dd, {}, deadDood}, }, { - ApplicationIndex: uint64(createdAppID + 5), + ApplicationIndex: createdAppID + 5, TxId: stxn1.ID().String(), Logs: [][]byte{deadBeef}, }, diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go index 20602fda1f..e9a07f9f72 100644 --- a/test/e2e-go/restAPI/other/misc_test.go +++ b/test/e2e-go/restAPI/other/misc_test.go @@ -81,7 +81,7 @@ func TestDisabledAPIConfig(t *testing.T) { assert.Contains(t, err.Error(), "Invalid API Token") } -func TestSendingNotClosingAccountFails(t *testing.T) { +func TestSendingNotClosingAccountErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go index 1561ac6b40..d7608d259b 100644 --- a/test/e2e-go/restAPI/restClient_test.go +++ b/test/e2e-go/restAPI/restClient_test.go @@ -410,8 +410,8 @@ func TestAccountParticipationInfo(t *testing.T) { a.NoError(err) a.Equal(randomVotePKStr, 
string(account.Participation.VoteParticipationKey), "API must print correct root voting key") a.Equal(randomSelPKStr, string(account.Participation.SelectionParticipationKey), "API must print correct vrf key") - a.Equal(uint64(firstRound), account.Participation.VoteFirstValid, "API must print correct first participation round") - a.Equal(uint64(lastRound), account.Participation.VoteLastValid, "API must print correct last participation round") + a.Equal(firstRound, account.Participation.VoteFirstValid, "API must print correct first participation round") + a.Equal(lastRound, account.Participation.VoteLastValid, "API must print correct last participation round") a.Equal(dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution") // TODO: should we update the v1 API to support state proof? Currently it does not return this field. } @@ -445,7 +445,7 @@ func TestClientCanGetGoRoutines(t *testing.T) { a.True(strings.Contains(goRoutines, "goroutine profile:")) } -func TestSendingTooMuchFails(t *testing.T) { +func TestSendingTooMuchErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -487,7 +487,7 @@ func TestSendingTooMuchFails(t *testing.T) { a.Error(err) } -func TestSendingFromEmptyAccountFails(t *testing.T) { +func TestSendingFromEmptyAccountErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -527,7 +527,7 @@ func TestSendingFromEmptyAccountFails(t *testing.T) { a.Error(err) } -func TestSendingTooLittleToEmptyAccountFails(t *testing.T) { +func TestSendingTooLittleToEmptyAccountErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -560,7 +560,7 @@ func TestSendingTooLittleToEmptyAccountFails(t *testing.T) { a.Error(err) } -func TestSendingLowFeeFails(t *testing.T) { +func TestSendingLowFeeErrs(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) diff --git a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go index 90e59773e8..472b6f8ecb 100644 --- a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go +++ b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go @@ -279,7 +279,7 @@ int 1` followClient := fixture.GetAlgodClientForController(followControl) // Set sync round on follower - followerSyncRound := uint64(4) + const followerSyncRound basics.Round = 4 err = followClient.SetSyncRound(followerSyncRound) a.NoError(err) @@ -288,11 +288,11 @@ int 1` // Let the primary node make some progress primaryClient := fixture.GetAlgodClientForController(nc) - err = primaryClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback)) + err = primaryClient.WaitForRoundWithTimeout(followerSyncRound + basics.Round(cfg.MaxAcctLookback)) a.NoError(err) // Let follower node progress as far as it can - err = followClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback) - 1) + err = followClient.WaitForRoundWithTimeout(followerSyncRound + basics.Round(cfg.MaxAcctLookback) - 1) a.NoError(err) simulateRequest := v2.PreEncodedSimulateRequest{ @@ -323,11 +323,11 @@ int 1` a.Len(result.TxnGroups[0].Txns, 1) a.NotNil(result.TxnGroups[0].Txns[0].Txn.Logs) a.Len(*result.TxnGroups[0].Txns[0].Txn.Logs, 1) - a.Equal(followerSyncRound+uint64(cfg.MaxAcctLookback), binary.BigEndian.Uint64((*result.TxnGroups[0].Txns[0].Txn.Logs)[0])) + a.EqualValues(followerSyncRound+basics.Round(cfg.MaxAcctLookback), 
binary.BigEndian.Uint64((*result.TxnGroups[0].Txns[0].Txn.Logs)[0])) // Test with previous rounds - for i := uint64(0); i < cfg.MaxAcctLookback; i++ { - simulateRequest.Round = basics.Round(followerSyncRound + i) + for i := range basics.Round(cfg.MaxAcctLookback) { + simulateRequest.Round = followerSyncRound + i result, err = simulateTransactions(simulateRequest) a.NoError(err) a.Len(result.TxnGroups, 1) @@ -339,7 +339,7 @@ int 1` } // If the round is too far back, we should get an error saying so. - simulateRequest.Round = basics.Round(followerSyncRound - 3) + simulateRequest.Round = followerSyncRound - 3 endTime := time.Now().Add(6 * time.Second) for { result, err = simulateTransactions(simulateRequest) @@ -501,7 +501,7 @@ int 1` // construct app call appCallTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(createdAppID), [][]byte{[]byte("first-arg")}, + createdAppID, [][]byte{[]byte("first-arg")}, nil, nil, nil, nil, 0, ) a.NoError(err) @@ -525,8 +525,8 @@ int 1` logs = append(logs, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")) } - budgetAdded, budgetUsed := uint64(700), uint64(40) - maxLogSize, maxLogCalls := uint64(65536), uint64(2048) + budgetAdded, budgetUsed := 700, 40 + maxLogSize, maxLogCalls := 65536, 2048 expectedResult := v2.PreEncodedSimulateResponse{ Version: 2, @@ -629,7 +629,7 @@ int 1` // construct app call appCallTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(createdAppID), nil, nil, nil, nil, nil, 0, + createdAppID, nil, nil, nil, nil, nil, 0, ) a.NoError(err) appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCallTxn) @@ -637,7 +637,7 @@ int 1` appCallTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appCallTxn) a.NoError(err) - extraBudget := uint64(704) + extraBudget := 704 resp, err := testClient.SimulateTransactions(v2.PreEncodedSimulateRequest{ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{ { @@ -648,7 +648,7 @@ int 1` }) a.NoError(err) - budgetAdded, budgetUsed := uint64(1404), uint64(1404) + budgetAdded, budgetUsed := 1404, 1404 expectedResult := v2.PreEncodedSimulateResponse{ Version: 2, @@ -894,7 +894,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // construct app calls appCallTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{uint64ToBytes(uint64(MaxDepth))}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{uint64ToBytes(uint64(MaxDepth))}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee*uint64(3*MaxDepth+2), appCallTxn) @@ -960,13 +960,13 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // == { Pc: 9, - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), StackAdditions: goValuesToAvmValues(1), }, // bnz main_l6 { Pc: 10, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 1 { @@ -977,7 +977,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 150, StackAdditions: goValuesToAvmValues(1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, } @@ -1002,12 +1002,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 9, StackAdditions: goValuesToAvmValues(false), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // bnz main_l6 { Pc: 10, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // txn NumAppArgs { @@ -1022,13 +1022,13 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // == { Pc: 16, - StackPopCount: toPtr[uint64](2), + StackPopCount: 
toPtr(2), StackAdditions: goValuesToAvmValues(true), }, // bnz main_l3 { Pc: 17, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationID { @@ -1039,17 +1039,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 23, StackAdditions: goValuesToAvmValues(approval, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 1 { Pc: 25, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 0 { Pc: 27, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationID { @@ -1060,17 +1060,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 31, StackAdditions: goValuesToAvmValues(clearState, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 3 { Pc: 33, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 2 { Pc: 35, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationAddress { @@ -1081,17 +1081,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 39, StackAdditions: goValuesToAvmValues(uint64(3-layer)*MinBalance, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 5 { Pc: 41, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 4 { Pc: 43, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 1 { @@ -1101,7 +1101,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 47, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 3 { @@ -1111,7 +1111,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 50, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 5 { @@ -1121,7 +1121,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 53, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 2 { @@ -1137,24 +1137,24 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 59, StackAdditions: goValuesToAvmValues(uint64(MaxDepth - layer)), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // exp { Pc: 60, StackAdditions: goValuesToAvmValues(1 << (MaxDepth - layer)), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // itob { Pc: 61, StackAdditions: goValuesToAvmValues(uint64ToBytes(1 << uint64(MaxDepth-layer))), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // log { Pc: 62, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // txna ApplicationArgs 0 { @@ -1165,7 +1165,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 66, StackAdditions: goValuesToAvmValues(MaxDepth - layer), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 0 { @@ -1176,12 +1176,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 68, StackAdditions: goValuesToAvmValues(MaxDepth-layer > 0), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // bnz main_l5 { Pc: 69, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // itxn_begin { @@ -1195,7 +1195,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field TypeEnum { Pc: 76, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 0 { @@ -1205,7 +1205,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field Fee { Pc: 79, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 0 { @@ -1215,7 +1215,7 @@ func TestMaxDepthAppWithPCandStackTrace(t 
*testing.T) { // itxn_field ApprovalProgram { Pc: 83, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 2 { @@ -1225,12 +1225,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field ClearStateProgram { Pc: 87, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // itxn_submit { Pc: 89, - SpawnedInners: &[]uint64{0}, + SpawnedInners: &[]int{0}, }, // itxn_begin { @@ -1244,7 +1244,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field TypeEnum { Pc: 92, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 0 { @@ -1254,7 +1254,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field Fee { Pc: 95, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 4 { @@ -1269,13 +1269,13 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // - { Pc: 103, - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), StackAdditions: goValuesToAvmValues(uint64(2-layer) * MinBalance), }, // itxn_field Amount { Pc: 104, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // byte "appID" { @@ -1291,24 +1291,24 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 116, StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(appID) + 3)), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // concat { Pc: 117, StackAdditions: goValuesToAvmValues([]byte("appID" + string(uint64ToBytes(uint64(appID)+3)))), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // sha512_256 { Pc: 118, - StackAdditions: goValuesToAvmValues(crypto.Digest(basics.AppIndex(uint64(appID) + 3).Address()).ToSlice()), - StackPopCount: toPtr[uint64](1), + StackAdditions: goValuesToAvmValues(crypto.Digest((appID + 3).Address()).ToSlice()), + StackPopCount: toPtr(1), }, // itxn_field Receiver { Pc: 119, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, { Pc: 121, @@ -1321,7 +1321,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field TypeEnum { Pc: 123, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // txna ApplicationArgs 0 { @@ -1332,7 +1332,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 128, StackAdditions: goValuesToAvmValues(MaxDepth - layer), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 1 { @@ -1343,18 +1343,18 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 130, StackAdditions: goValuesToAvmValues(MaxDepth - layer - 1), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // itob { Pc: 131, StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer - 1))), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // itxn_field ApplicationArgs { Pc: 132, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // itxn CreatedApplicationID { @@ -1364,7 +1364,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field ApplicationID { Pc: 136, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 0 { @@ -1374,7 +1374,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field Fee { Pc: 139, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int DeleteApplication { @@ -1384,12 +1384,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // itxn_field OnCompletion { Pc: 143, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // itxn_submit { Pc: 145, - SpawnedInners: &[]uint64{1, 2}, + SpawnedInners: &[]int{1, 2}, }, // 
b main_l4 { @@ -1404,7 +1404,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 73, StackAdditions: goValuesToAvmValues(1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, } } @@ -1428,12 +1428,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 9, StackAdditions: goValuesToAvmValues(false), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // bnz main_l6 { Pc: 10, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // txn NumAppArgs { @@ -1448,13 +1448,13 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // == { Pc: 16, - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), StackAdditions: goValuesToAvmValues(true), }, // bnz main_l3 { Pc: 17, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationID { @@ -1465,17 +1465,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 23, StackAdditions: goValuesToAvmValues(approval, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 1 { Pc: 25, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 0 { Pc: 27, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationID { @@ -1486,17 +1486,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 31, StackAdditions: goValuesToAvmValues(clearState, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 3 { Pc: 33, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 2 { Pc: 35, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // global CurrentApplicationAddress { @@ -1507,17 +1507,17 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 39, StackAdditions: goValuesToAvmValues(uint64(3-layer)*MinBalance, 1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 5 { Pc: 41, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // store 4 { Pc: 43, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 1 { @@ -1527,7 +1527,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 47, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 3 { @@ -1537,7 +1537,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 50, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // load 5 { @@ -1547,7 +1547,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { // assert { Pc: 53, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 2 { @@ -1563,24 +1563,24 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 59, StackAdditions: goValuesToAvmValues(uint64(MaxDepth - layer)), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // exp { Pc: 60, StackAdditions: goValuesToAvmValues(1 << (MaxDepth - layer)), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // itob { Pc: 61, StackAdditions: goValuesToAvmValues(uint64ToBytes(1 << uint64(MaxDepth-layer))), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // log { Pc: 62, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // txna ApplicationArgs 0 { @@ -1591,7 +1591,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 66, StackAdditions: goValuesToAvmValues(MaxDepth - layer), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 0 { @@ -1602,12 +1602,12 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 68, 
StackAdditions: goValuesToAvmValues(MaxDepth-layer > 0), - StackPopCount: toPtr[uint64](2), + StackPopCount: toPtr(2), }, // bnz main_l5 { Pc: 69, - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, // int 1 { @@ -1618,7 +1618,7 @@ func TestMaxDepthAppWithPCandStackTrace(t *testing.T) { { Pc: 73, StackAdditions: goValuesToAvmValues(1), - StackPopCount: toPtr[uint64](1), + StackPopCount: toPtr(1), }, } } @@ -1736,7 +1736,7 @@ func TestSimulateScratchSlotChange(t *testing.T) { // construct app calls appCallTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallTxn) @@ -1930,18 +1930,18 @@ end: // construct app call "global" appCallGlobalTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{[]byte("global")}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{[]byte("global")}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallGlobalTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallGlobalTxn) a.NoError(err) // construct app optin - appOptInTxn, err := testClient.MakeUnsignedAppOptInTx(uint64(futureAppID), nil, nil, nil, nil, nil, 0) + appOptInTxn, err := testClient.MakeUnsignedAppOptInTx(futureAppID, nil, nil, nil, nil, nil, 0) a.NoError(err) appOptInTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appOptInTxn) // construct app call "global" appCallLocalTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{[]byte("local")}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{[]byte("local")}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallLocalTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallLocalTxn) @@ -2212,18 +2212,18 @@ end: // construct app call "global" appCallGlobalTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{[]byte("global")}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{[]byte("global")}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallGlobalTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallGlobalTxn) a.NoError(err) // construct app optin - appOptInTxn, err := testClient.MakeUnsignedAppOptInTx(uint64(futureAppID), nil, nil, nil, nil, nil, 0) + appOptInTxn, err := testClient.MakeUnsignedAppOptInTx(futureAppID, nil, nil, nil, nil, nil, 0) a.NoError(err) appOptInTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appOptInTxn) // construct app call "local" appCallLocalTxn, err := testClient.MakeUnsignedAppNoOpTx( - uint64(futureAppID), [][]byte{[]byte("local")}, nil, nil, nil, nil, 0, + futureAppID, [][]byte{[]byte("local")}, nil, nil, nil, nil, 0, ) a.NoError(err) appCallLocalTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallLocalTxn) @@ -2624,7 +2624,7 @@ int 1 // construct app call txn, err = testClient.MakeUnsignedAppNoOpTx( - uint64(testAppID), nil, nil, nil, nil, nil, 0, + testAppID, nil, nil, nil, nil, nil, 0, ) a.NoError(err) txn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, txn) @@ -2643,7 +2643,7 @@ int 1 }) a.NoError(err) a.Contains(*resp.TxnGroups[0].FailureMessage, "logic eval error: invalid Account reference "+otherAddress) - a.Equal([]uint64{0}, *resp.TxnGroups[0].FailedAt) + a.Equal([]int{0}, *resp.TxnGroups[0].FailedAt) // It should work with AllowUnnamedResources=true resp, err = 
testClient.SimulateTransactions(v2.PreEncodedSimulateRequest{ @@ -2658,19 +2658,19 @@ int 1 expectedUnnamedGroupResources := model.SimulateUnnamedResourcesAccessed{ Accounts: &[]string{otherAddress}, - Assets: &[]uint64{assetID}, - Apps: &[]uint64{uint64(otherAppID)}, - Boxes: &[]model.BoxReference{{App: uint64(testAppID), Name: []byte("A")}}, - ExtraBoxRefs: toPtr[uint64](1), + Assets: &[]basics.AssetIndex{assetID}, + Apps: &[]basics.AppIndex{otherAppID}, + Boxes: &[]model.BoxReference{{App: testAppID, Name: []byte("A")}}, + ExtraBoxRefs: toPtr(1), AssetHoldings: &[]model.AssetHoldingReference{ {Account: otherAddress, Asset: assetID}, }, AppLocals: &[]model.ApplicationLocalReference{ - {Account: otherAddress, App: uint64(otherAppID)}, + {Account: otherAddress, App: otherAppID}, }, } - budgetAdded, budgetUsed := uint64(700), uint64(40) + budgetAdded, budgetUsed := 700, 40 allowUnnamedResources := true expectedResult := v2.PreEncodedSimulateResponse{ diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go index afb18e7be4..380690df65 100644 --- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go +++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go @@ -65,7 +65,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { txidsToAccountsWaveOne := make(map[string]string) const transactionFee = uint64(1) - fundingTimeoutRound := uint64(400) + const fundingTimeoutRound basics.Round = 400 // cascade-create and fund 1000 accounts amountToSend := uint64(560000) // ends up leaving each acct with ~4300 algos, which is more than absolutely necessary to go online txidsToAccountsWaveOnePartOne := cascadeCreateAndFundAccounts(amountToSend, transactionFee, fundingAccount, client, a) @@ -120,7 +120,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { a.True(allConfirmed, "Not all transactions confirmed. Failing test and aborting early.") // make funded accounts go online - const transactionValidityPeriod = uint64(100) // rounds + const transactionValidityPeriod = 100 // rounds _, curRound := fixture.GetBalanceAndRound(fundingAccount) i := 0 // for assert debug messages txidsToAccountsGoOnline := make(map[string]string) @@ -138,7 +138,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { txidsToAccountsGoOnline[onlineTxID] = account } // wait for txns to clear - goOnlineTimeoutRound := fundingTimeoutRound + uint64(100) + goOnlineTimeoutRound := fundingTimeoutRound + 100 allConfirmed = fixture.WaitForAllTxnsToConfirm(goOnlineTimeoutRound, txidsToAccountsGoOnline) a.True(allConfirmed, "Not all transactions confirmed. Failing test and aborting early.") @@ -160,7 +160,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { // use debug counter to wait for batches of transactions to clear before adding more to the pool if i%20 == 0 { - goOnlineTimeoutRound = fundingTimeoutRound + uint64(100) + goOnlineTimeoutRound = fundingTimeoutRound + 100 allConfirmed = fixture.WaitForAllTxnsToConfirm(goOnlineTimeoutRound, txidsToAccountsGoOnline) a.True(allConfirmed, "Not all transactions confirmed. 
Failing test and aborting early.") } diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go index 2570a76046..08c2705733 100644 --- a/test/e2e-go/upgrades/application_support_test.go +++ b/test/e2e-go/upgrades/application_support_test.go @@ -201,7 +201,7 @@ int 1 client.WaitForRound(round + 2) pendingTx, err := client.GetPendingTransactions(1) a.NoError(err) - a.Equal(uint64(0), pendingTx.TotalTransactions) + a.Zero(pendingTx.TotalTransactions) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) @@ -233,7 +233,7 @@ int 1 a.Equal(uint64(1), value.Uint) // call the app - tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppOptInTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) a.NoError(err) @@ -288,9 +288,9 @@ int 1 a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) - app, err := client.ApplicationInformation(uint64(appIdx)) + app, err := client.ApplicationInformation(appIdx) a.NoError(err) - a.Equal(uint64(appIdx), app.Id) + a.Equal(appIdx, app.Id) a.Equal(creator, app.Params.Creator) } @@ -394,7 +394,7 @@ int 1 tx, err := client.MakeUnsignedAppCreateTx( transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0) a.NoError(err) - tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx) + tx, err = client.FillUnsignedTxTemplate(creator, round, round+basics.Round(primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds), fee, tx) a.NoError(err) signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx) a.NoError(err) @@ -413,16 +413,16 @@ int 1 round, err = client.CurrentRound() a.NoError(err) - if round > round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds { + if round > round+basics.Round(primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds) { t.Skip("Test platform is too slow for this test") } - a.Equal(uint64(1), pendingTx.TotalTransactions) + a.Equal(1, pendingTx.TotalTransactions) // check that the secondary node doesn't have that transaction in it's transaction pool. 
pendingTx, err = secondary.GetPendingTransactions(1) a.NoError(err) - a.Equal(uint64(0), pendingTx.TotalTransactions) + a.Zero(pendingTx.TotalTransactions) curStatus, err := client.Status() a.NoError(err) @@ -484,7 +484,7 @@ int 1 a.Equal(uint64(1), value.Uint) // call the app - tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil, 0) + tx, err = client.MakeUnsignedAppOptInTx(appIdx, nil, nil, nil, nil, nil, 0) a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) a.NoError(err) @@ -539,8 +539,8 @@ int 1 a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) - app, err := client.ApplicationInformation(uint64(appIdx)) + app, err := client.ApplicationInformation(appIdx) a.NoError(err) - a.Equal(uint64(appIdx), app.Id) + a.Equal(appIdx, app.Id) a.Equal(creator, app.Params.Creator) } diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go index 72a3e627dc..1f0a62ef8a 100644 --- a/test/e2e-go/upgrades/send_receive_upgrade_test.go +++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/framework/fixtures" "github.com/algorand/go-algorand/test/partitiontest" @@ -184,7 +185,7 @@ func runUntilProtocolUpgrades(a *require.Assertions, fixture *fixtures.RestClien pingWalletHandle, err := pingClient.GetUnencryptedWalletHandle() a.NoError(err) startTime := time.Now() - var lastTxnSendRound uint64 + var lastTxnSendRound basics.Round for curStatus.LastVersion == initialStatus.LastVersion { iterationStartTime := time.Now() if lastTxnSendRound != curStatus.LastRound { diff --git a/test/e2e-go/upgrades/stateproof_participation_test.go b/test/e2e-go/upgrades/stateproof_participation_test.go index ca94c067da..8935421b37 100644 --- a/test/e2e-go/upgrades/stateproof_participation_test.go +++ b/test/e2e-go/upgrades/stateproof_participation_test.go @@ -70,7 +70,7 @@ func TestKeysWithoutStateProofKeyCannotRegister(t *testing.T) { fixture.SetConsensus(consensus) fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesWithoutStateProofPartkeys.json")) defer fixture.Shutdown() - lastValid := uint64(1000 * 5) + const lastValid = 5000 nodeClient := fixture.GetLibGoalClientForNamedNode("Node") @@ -89,7 +89,7 @@ func TestKeysWithoutStateProofKeyCanRegister(t *testing.T) { var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV30.json")) defer fixture.Shutdown() - lastValid := uint64(1000 * 5) + const lastValid = 5000 nodeClient := fixture.GetLibGoalClientForNamedNode("Node") @@ -97,7 +97,7 @@ func TestKeysWithoutStateProofKeyCanRegister(t *testing.T) { a.Error(registerKeyInto(&nodeClient, a, lastValid+1, protocol.ConsensusV31)) } -func registerKeyInto(client *libgoal.Client, a *require.Assertions, lastValid uint64, ver protocol.ConsensusVersion) error { +func registerKeyInto(client *libgoal.Client, a *require.Assertions, lastValid basics.Round, ver protocol.ConsensusVersion) error { wh, err := client.GetUnencryptedWalletHandle() a.NoError(err) diff --git a/test/framework/fixtures/goalFixture.go b/test/framework/fixtures/goalFixture.go index 9c217e3e35..5c5c3fa6ce 100644 --- a/test/framework/fixtures/goalFixture.go +++ b/test/framework/fixtures/goalFixture.go @@ -82,7 +82,7 @@ func (f *GoalFixture) executeCommand(args ...string) 
(retStdout string, retStder // combine the error and the output so that we could return it as a single error object. func combineExecuteError(retStdout string, retStderr string, err error) error { if err == nil { - return err + return nil } return fmt.Errorf("%v\nStdout:\n%s\nStderr:\n%s", err, retStdout, retStderr) } diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index 9af69b3c8a..bd91a0d8e6 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -207,18 +207,18 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) { } // Fetch an account.Root from the database - root, err := account.RestoreRoot(handle) - if err != nil { + root, err1 := account.RestoreRoot(handle) + if err1 != nil { // Couldn't read it, skip it continue } secretKey := root.Secrets().SK - wh, err := lg.GetUnencryptedWalletHandle() - f.failOnError(err, "couldn't get default wallet handle: %v") - _, err = lg.ImportKey(wh, secretKey[:]) - if err != nil && !strings.Contains(err.Error(), "key already exists") { - f.failOnError(err, "couldn't import secret: %v") + wh, err1 := lg.GetUnencryptedWalletHandle() + f.failOnError(err1, "couldn't get default wallet handle: %v") + _, err1 = lg.ImportKey(wh, secretKey[:]) + if err1 != nil && !strings.Contains(err1.Error(), "key already exists") { + f.failOnError(err1, "couldn't import secret: %v") } accountsWithRootKeys[root.Address().String()] = true handle.Close() @@ -476,7 +476,7 @@ func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusPar } // ConsensusParams returns the consensus parameters for the protocol from the specified round -func (f *LibGoalFixture) ConsensusParams(round uint64) (config.ConsensusParams, error) { +func (f *LibGoalFixture) ConsensusParams(round basics.Round) (config.ConsensusParams, error) { block, err := f.LibGoalClient.BookkeepingBlock(round) if err != nil { return config.ConsensusParams{}, err @@ -511,7 +511,7 @@ func (f *LibGoalFixture) CurrentMinFeeAndBalance() (minFee, minBalance uint64, e // MinFeeAndBalance returns the MinTxnFee and MinBalance for the protocol from the specified round // If MinBalance is 0, we provide a resonable default of 1000 to ensure accounts have funds when // MinBalance is used to fund new accounts -func (f *LibGoalFixture) MinFeeAndBalance(round uint64) (minFee, minBalance uint64, err error) { +func (f *LibGoalFixture) MinFeeAndBalance(round basics.Round) (minFee, minBalance uint64, err error) { params, err := f.ConsensusParams(round) if err != nil { return @@ -524,7 +524,7 @@ func (f *LibGoalFixture) MinFeeAndBalance(round uint64) (minFee, minBalance uint } // TransactionProof returns a proof for usage in merkle array verification for the provided transaction. -func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType crypto.HashType) (model.TransactionProofResponse, merklearray.SingleLeafProof, error) { +func (f *LibGoalFixture) TransactionProof(txid string, round basics.Round, hashType crypto.HashType) (model.TransactionProofResponse, merklearray.SingleLeafProof, error) { proofResp, err := f.LibGoalClient.TransactionProof(txid, round, hashType) if err != nil { return model.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err @@ -539,7 +539,7 @@ func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType cr } // LightBlockHeaderProof returns a proof for usage in merkle array verification for the provided block's light block header. 
-func (f *LibGoalFixture) LightBlockHeaderProof(round uint64) (model.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) { +func (f *LibGoalFixture) LightBlockHeaderProof(round basics.Round) (model.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) { proofResp, err := f.LibGoalClient.LightBlockHeaderProof(round) if err != nil { diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index f4131b5422..9d05276933 100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -78,13 +78,13 @@ func (f *RestClientFixture) GetAlgodClientForController(nc nodecontrol.NodeContr // WaitForRound waits up to the specified amount of time for // the network to reach or pass the specified round -func (f *RestClientFixture) WaitForRound(round uint64, waitTime time.Duration) error { +func (f *RestClientFixture) WaitForRound(round basics.Round, waitTime time.Duration) error { _, err := f.AlgodClient.WaitForRound(round, waitTime) return err } // WithEveryBlock calls the provided function for every block from first to last. -func (f *RestClientFixture) WithEveryBlock(first, last uint64, visit func(bookkeeping.Block)) { +func (f *RestClientFixture) WithEveryBlock(first, last basics.Round, visit func(bookkeeping.Block)) { for round := first; round <= last; round++ { err := f.WaitForRoundWithTimeout(round) require.NoError(f.t, err) @@ -96,12 +96,12 @@ func (f *RestClientFixture) WithEveryBlock(first, last uint64, visit func(bookke // WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the // globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach. -func (f *RestClientFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error { +func (f *RestClientFixture) WaitForRoundWithTimeout(roundToWaitFor basics.Round) error { return f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor) } // WaitForBlockWithTimeout waits for a given round and returns its block. 
-func (f *RestClientFixture) WaitForBlockWithTimeout(roundToWaitFor uint64) (bookkeeping.Block, error) { +func (f *RestClientFixture) WaitForBlockWithTimeout(roundToWaitFor basics.Round) (bookkeeping.Block, error) { if err := f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor); err != nil { return bookkeeping.Block{}, err } @@ -137,7 +137,7 @@ func (f *RestClientFixture) GetRichestAccount() (richest model.Account, err erro } // GetBalanceAndRound returns the current balance of an account and the current round for that balance -func (f *RestClientFixture) GetBalanceAndRound(account string) (balance uint64, round uint64) { +func (f *RestClientFixture) GetBalanceAndRound(account string) (balance uint64, round basics.Round) { client := f.LibGoalClient status, err := client.Status() require.NoError(f.t, err, "client should be able to get status") @@ -182,7 +182,7 @@ func (f *RestClientFixture) GetNodeWalletsSortedByBalance(client libgoal.Client) // WaitForTxnConfirmation waits until either the passed txid is confirmed // or until the passed roundTimeout passes // or until waiting for a round to pass times out -func (f *RestClientFixture) WaitForTxnConfirmation(roundTimeout uint64, txid string) bool { +func (f *RestClientFixture) WaitForTxnConfirmation(roundTimeout basics.Round, txid string) bool { _, err := f.WaitForConfirmedTxn(roundTimeout, txid) return err == nil } @@ -190,13 +190,13 @@ func (f *RestClientFixture) WaitForTxnConfirmation(roundTimeout uint64, txid str // WaitForConfirmedTxn waits until either the passed txid is confirmed // or until the passed roundTimeout passes // or until waiting for a round to pass times out -func (f *RestClientFixture) WaitForConfirmedTxn(roundTimeout uint64, txid string) (txn v2.PreEncodedTxInfo, err error) { +func (f *RestClientFixture) WaitForConfirmedTxn(roundTimeout basics.Round, txid string) (txn v2.PreEncodedTxInfo, err error) { return f.AlgodClient.WaitForConfirmedTxn(roundTimeout, txid) } // WaitForAllTxnsToConfirm is as WaitForTxnConfirmation, // but accepting a whole map of txids to their issuing address -func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAndAddresses map[string]string) bool { +func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout basics.Round, txidsAndAddresses map[string]string) bool { if len(txidsAndAddresses) == 0 { return true } @@ -247,7 +247,7 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn // WaitForAccountFunded waits until either the passed account gets non-empty balance // or until the passed roundTimeout passes // or until waiting for a round to pass times out -func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAddress string) (err error) { +func (f *RestClientFixture) WaitForAccountFunded(roundTimeout basics.Round, accountAddress string) (err error) { client := f.AlgodClient for { // Get current round information @@ -274,7 +274,7 @@ func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAdd // SendMoneyAndWait uses the rest client to send money and WaitForTxnConfirmation to wait for the send to confirm // it adds some extra error checking as well -func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v2.PreEncodedTxInfo) { +func (f *RestClientFixture) SendMoneyAndWait(curRound basics.Round, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn 
v2.PreEncodedTxInfo) { client := f.LibGoalClient wh, err := client.GetUnencryptedWalletHandle() require.NoError(f.t, err, "client should be able to get unencrypted wallet handle") @@ -283,13 +283,13 @@ func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transaction } // SendMoneyAndWaitFromWallet is as above, but for a specific wallet -func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v2.PreEncodedTxInfo) { +func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound basics.Round, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v2.PreEncodedTxInfo) { client := f.LibGoalClient // use one curRound - 1 in case other nodes are behind fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, basics.Round(curRound).SubSaturate(1), 0) require.NoError(f.t, err, "client should be able to send money from rich to poor account") require.NotEmpty(f.t, fundingTx.ID().String(), "transaction ID should not be empty") - waitingDeadline := curRound + uint64(5) + waitingDeadline := curRound + 5 txn, err = client.WaitForConfirmedTxn(waitingDeadline, fundingTx.ID().String()) require.NoError(f.t, err) return @@ -297,9 +297,9 @@ func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassw // VerifyBlockProposedRange checks the rounds starting at fromRounds and moving backwards checking countDownNumRounds rounds if any // blocks were proposed by address -func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) bool { - for i := 0; i < countDownNumRounds; i++ { - cert, err := f.AlgodClient.EncodedBlockCert(uint64(fromRound - i)) +func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds basics.Round) bool { + for i := range countDownNumRounds { + cert, err := f.AlgodClient.EncodedBlockCert(fromRound - i) require.NoError(f.t, err, "client failed to get block %d", fromRound-i) if cert.Certificate.Proposal.OriginalProposer.GetUserAddress() == account { return true @@ -309,23 +309,23 @@ func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, } // VerifyBlockProposed checks the last searchRange blocks to see if any blocks were proposed by address -func (f *RestClientFixture) VerifyBlockProposed(account string, searchRange int) (blockWasProposed bool) { +func (f *RestClientFixture) VerifyBlockProposed(account string, searchRange basics.Round) (blockWasProposed bool) { c := f.LibGoalClient currentRound, err := c.CurrentRound() if err != nil { require.NoError(f.t, err, "client failed to get the last round") } - return f.VerifyBlockProposedRange(account, int(currentRound), int(searchRange)) + return f.VerifyBlockProposedRange(account, currentRound, searchRange) } // GetBalancesOnSameRound gets the balances for the passed addresses, and keeps trying until the balances are all the same round // if it can't get the balances for the same round within maxRetries retries, it will return the last balance seen for each acct // it also returns whether it got balances all for the same round, and what the last queried round was -func (f *RestClientFixture) GetBalancesOnSameRound(maxRetries int, accounts ...string) (balances map[string]uint64, allSameRound bool, 
lastRound uint64) { +func (f *RestClientFixture) GetBalancesOnSameRound(maxRetries int, accounts ...string) (balances map[string]uint64, allSameRound bool, lastRound basics.Round) { retries := 0 balances = make(map[string]uint64) for { - lastRound = uint64(0) + lastRound = 0 allSameRound = true for _, account := range accounts { balance, thisRound := f.GetBalanceAndRound(account) diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml index e1ce86b9f8..f514087629 100644 --- a/test/muleCI/mule.yaml +++ b/test/muleCI/mule.yaml @@ -15,9 +15,9 @@ agents: - GOLANG_VERSION=`./scripts/get_golang_version.sh` - ARCH=amd64 - GOARCH=amd64 - - name: cicd.centos9.amd64 - dockerFilePath: docker/build/cicd.centos9.Dockerfile - image: algorand/go-algorand-ci-linux-centos9 + - name: cicd.centos10.amd64 + dockerFilePath: docker/build/cicd.centos10.Dockerfile + image: algorand/go-algorand-ci-linux-centos10 version: scripts/configure_dev-deps.sh arch: amd64 env: @@ -92,12 +92,12 @@ tasks: - task: docker.Make name: archive - agent: cicd.centos9.amd64 + agent: cicd.centos10.amd64 target: archive - task: docker.Make name: rpm.amd64 - agent: cicd.centos9.amd64 + agent: cicd.centos10.amd64 target: mule-package-rpm - task: docker.Make diff --git a/test/netperf-go/puppeteer/puppeteer.go b/test/netperf-go/puppeteer/puppeteer.go index a2ebbd812c..a2a6b385c4 100644 --- a/test/netperf-go/puppeteer/puppeteer.go +++ b/test/netperf-go/puppeteer/puppeteer.go @@ -201,9 +201,9 @@ func (p *puppet) exec(wg *sync.WaitGroup, errs chan error) { fmt.Printf("%s: Disabled step '%s' skipped.\n", p.recipeName, recipeStep.StepName) continue } - err := p.runStep(recipeStep, time.Hour) - if err != nil { - errs <- fmt.Errorf("Failed running recipe step '%s' : %v", recipeStep.StepName, err) + err1 := p.runStep(recipeStep, time.Hour) + if err1 != nil { + errs <- fmt.Errorf("Failed running recipe step '%s' : %v", recipeStep.StepName, err1) return } } @@ -218,9 +218,9 @@ func (p *puppet) exec(wg *sync.WaitGroup, errs chan error) { fmt.Printf("%s: Disabled step '%s' skipped.\n", p.recipeName, recipeStep.StepName) continue } - err := p.runStep(recipeStep, time.Hour) - if err != nil { - errs <- fmt.Errorf("Failed running teardown step '%s' : %v", recipeStep.StepName, err) + err1 := p.runStep(recipeStep, time.Hour) + if err1 != nil { + errs <- fmt.Errorf("Failed running teardown step '%s' : %v", recipeStep.StepName, err1) return } } @@ -440,8 +440,8 @@ func (p *puppet) collectMetrics() { } else { metricFetcher := makePromMetricFetcher(string(hostNameBytes)) var err error - if results, err := metricFetcher.getMetric(metric.Query); err == nil { - if result, err := metricFetcher.getSingleValue(results); err == nil { + if results, err1 := metricFetcher.getMetric(metric.Query); err1 == nil { + if result, err1 := metricFetcher.getSingleValue(results); err1 == nil { p.metrics[metric.Name] = result } } diff --git a/test/packages/test_release.sh b/test/packages/test_release.sh index e92f52e459..722465e53f 100755 --- a/test/packages/test_release.sh +++ b/test/packages/test_release.sh @@ -16,9 +16,9 @@ fi OS_LIST=( quay.io/centos/centos:stream9 - fedora:39 - fedora:40 - ubuntu:20.04 + quay.io/centos/centos:stream10 + fedora:41 + fedora:42 ubuntu:22.04 ubuntu:24.04 ) diff --git a/test/platform/test_linux_amd64_compatibility.sh b/test/platform/test_linux_amd64_compatibility.sh index 2ab8d4c990..f6b4e2fcd4 100755 --- a/test/platform/test_linux_amd64_compatibility.sh +++ b/test/platform/test_linux_amd64_compatibility.sh @@ -8,10 +8,11 @@ END_FG_COLOR=$(tput 
sgr0 2>/dev/null) OS_LIST=( quay.io/centos/centos:stream9 - fedora:39 - fedora:40 - ubuntu:20.04 + quay.io/centos/centos:stream10 + fedora:41 + fedora:42 ubuntu:22.04 + ubuntu:24.04 ) FAILED=() diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index 6f6e4a1f03..c1ecc2a289 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -7,6 +7,8 @@ set -e # Suppress telemetry reporting for tests export ALGOTEST=1 +S3_TESTDATA=${S3_TESTDATA:-algorand-testdata} + SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" SRCROOT="$(pwd -P)" @@ -170,8 +172,10 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then KEEP_TEMPS_CMD_STR="" - # If the platform is arm64, we want to pass "--keep-temps" into e2e_client_runner.py + # For one platform, we want to pass "--keep-temps" into e2e_client_runner.py # so that we can keep the temporary test artifact for use in the indexer e2e tests. + # This is done in the CI environment, where the CI_KEEP_TEMP_PLATFORM variable is set to the platform + # that should keep the temporary test artifact. # The file is located at ${TEMPDIR}/net_done.tar.bz2 if [ -n "$CI_KEEP_TEMP_PLATFORM" ] && [ "$CI_KEEP_TEMP_PLATFORM" == "$CI_PLATFORM" ]; then echo "Setting --keep-temps so that an e2e artifact can be saved." @@ -203,8 +207,8 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then tar -j -c -f "${CI_E2E_FILENAME}.tar.bz2" --exclude node.log --exclude agreement.cdv net rm -rf "${TEMPDIR}/net" RSTAMP=$(TZ=UTC python -c 'import time; print("{:08x}".format(0xffffffff - int(time.time() - time.mktime((2020,1,1,0,0,0,-1,-1,-1)))))') - echo aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://algorand-testdata/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2" - aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://algorand-testdata/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2" + echo aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://${S3_TESTDATA}/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2" + aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://${S3_TESTDATA}/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2" popd fi diff --git a/test/scripts/e2e_subs/app-accounts.sh b/test/scripts/e2e_subs/app-accounts.sh index fe0078597d..d79f30192b 100755 --- a/test/scripts/e2e_subs/app-accounts.sh +++ b/test/scripts/e2e_subs/app-accounts.sh @@ -26,18 +26,18 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') # Create a smaller account so rewards won't change balances. 
SMALL=$(${gcmd} account new | awk '{ print $6 }') # Under one algo receives no rewards -${gcmd} clerk send -a 1000000 -f "$ACCOUNT" -t "$SMALL" +${gcmd} clerk send -a 900000 -f "$ACCOUNT" -t "$SMALL" function balance { acct=$1; shift goal account balance -a "$acct" | awk '{print $1}' } -[ "$(balance "$ACCOUNT")" = 999998999000 ] -[ "$(balance "$SMALL")" = 1000000 ] +[ "$(balance "$ACCOUNT")" = 999999099000 ] +[ "$(balance "$SMALL")" = 900000 ] APPID=$(${gcmd} app create --creator "${SMALL}" --approval-prog=${TEAL}/app-escrow.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }') -[ "$(balance "$SMALL")" = 999000 ] # 1000 fee +[ "$(balance "$SMALL")" = 899000 ] # 1000 fee function appl { method=$1; shift @@ -66,14 +66,14 @@ function sign { TXID=$(${gcmd} app optin --app-id "$APPID" --from "${SMALL}" | app-txid) # Rest succeeds, no stray inner-txn array [ "$(rest "/v2/transactions/pending/$TXID" | jq '.["inner-txn"]')" == null ] -[ "$(balance "$SMALL")" = 998000 ] # 1000 fee +[ "$(balance "$SMALL")" = 898000 ] # 1000 fee appl "deposit():void" -o "$T/deposit.tx" payin 150000 -o "$T/pay1.tx" cat "$T/deposit.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx" sign group ${gcmd} clerk rawsend -f "$T/group.stx" -[ "$(balance "$SMALL")" = 846000 ] # 2 fees, 150,000 deposited +[ "$(balance "$SMALL")" = 746000 ] # 2 fees, 150,000 deposited [ "$(balance "$APPACCT")" = 150000 ] # Withdraw 20,000 in app. Confirm that inner txn is visible to transaction API. @@ -86,32 +86,32 @@ TXID=$(appl "withdraw(uint64):void" --app-arg="int:20000" | app-txid) ROUND=$(rest "/v2/transactions/pending/$TXID" | jq '.["confirmed-round"]') rest "/v2/blocks/$ROUND" | jq .block.txns[0].dt.itx -[ "$(balance "$SMALL")" = 865000 ] # 1 fee, 20,000 withdrawn +[ "$(balance "$SMALL")" = 765000 ] # 1 fee, 20,000 withdrawn [ "$(balance "$APPACCT")" = 129000 ] # 20k withdraw, fee paid by app account appl "withdraw(uint64):void" --app-arg="int:10000" --fee 2000 -[ "$(balance "$SMALL")" = 873000 ] # 2000 fee, 10k withdrawn +[ "$(balance "$SMALL")" = 773000 ] # 2000 fee, 10k withdrawn [ "$(balance "$APPACCT")" = 119000 ] # 10k withdraw, fee credit used # Try to get app account below zero # (By app logic, it's OK - 150k was deposited, but fees have cut in) appl "withdraw(uint64):void" --app-arg="int:120000" && exit 1 -[ "$(balance "$SMALL")" = 873000 ] # no change +[ "$(balance "$SMALL")" = 773000 ] # no change [ "$(balance "$APPACCT")" = 119000 ] # no change # Try to get app account below min balance by withdrawing too much appl "withdraw(uint64):void" --app-arg="int:20000" && exit 1 -[ "$(balance "$SMALL")" = 873000 ] # no change +[ "$(balance "$SMALL")" = 773000 ] # no change [ "$(balance "$APPACCT")" = 119000 ] # no change # Try to get app account below min balance b/c of fee appl "withdraw(uint64):void" --app-arg="int:18001" && exit 1 -[ "$(balance "$SMALL")" = 873000 ] # no change +[ "$(balance "$SMALL")" = 773000 ] # no change [ "$(balance "$APPACCT")" = 119000 ] # no change # Show that it works AT exactly min balance appl "withdraw(uint64):void" --app-arg="int:18000" -[ "$(balance "$SMALL")" = 890000 ] # +17k (18k - fee) +[ "$(balance "$SMALL")" = 790000 ] # +17k (18k - fee) [ "$(balance "$APPACCT")" = 100000 ] # -19k (18k + fee) diff --git a/tools/block-generator/generator/generate.go b/tools/block-generator/generator/generate.go index fd8f457ca0..5efe9d016d 100644 --- a/tools/block-generator/generator/generate.go +++ 
b/tools/block-generator/generator/generate.go @@ -48,7 +48,7 @@ const ( // ---- constructors ---- // MakeGenerator initializes the Generator object. -func MakeGenerator(log logging.Logger, dbround uint64, bkGenesis bookkeeping.Genesis, config GenerationConfig, verbose bool) (Generator, error) { +func MakeGenerator(log logging.Logger, dbround basics.Round, bkGenesis bookkeeping.Genesis, config GenerationConfig, verbose bool) (Generator, error) { if err := config.validateWithDefaults(false); err != nil { return nil, fmt.Errorf("invalid generator configuration: %w", err) } @@ -96,13 +96,13 @@ func MakeGenerator(log logging.Logger, dbround uint64, bkGenesis bookkeeping.Gen appKindBoxes: make([]*appData, 0), appKindSwap: make([]*appData, 0), } - gen.appMap = map[appKind]map[uint64]*appData{ - appKindBoxes: make(map[uint64]*appData), - appKindSwap: make(map[uint64]*appData), + gen.appMap = map[appKind]map[basics.AppIndex]*appData{ + appKindBoxes: make(map[basics.AppIndex]*appData), + appKindSwap: make(map[basics.AppIndex]*appData), } - gen.accountAppOptins = map[appKind]map[uint64][]uint64{ - appKindBoxes: make(map[uint64][]uint64), - appKindSwap: make(map[uint64][]uint64), + gen.accountAppOptins = map[appKind]map[uint64][]basics.AppIndex{ + appKindBoxes: make(map[uint64][]basics.AppIndex), + appKindSwap: make(map[uint64][]basics.AppIndex), } gen.initializeAccounting() @@ -268,7 +268,7 @@ func (g *generator) WriteGenesis(output io.Writer) error { // - requested round < generator's round + offset - 1 ---> error // // NOTE: nextRound represents the generator's expectations about the next database round. -func (g *generator) WriteBlock(output io.Writer, round uint64) error { +func (g *generator) WriteBlock(output io.Writer, round basics.Round) error { if round < g.roundOffset { return fmt.Errorf("cannot generate block for round %d, already in database", round) } @@ -314,7 +314,7 @@ func (g *generator) WriteBlock(output io.Writer, round uint64) error { var cert rpcs.EncodedBlockCert if g.round == 0 { // we'll write genesis block / offset round for non-empty database - cert.Block, _, _ = g.ledger.BlockCert(basics.Round(round - g.roundOffset)) + cert.Block, _, _ = g.ledger.BlockCert(round - g.roundOffset) } else { start := time.Now() var generated, evaluated, validated time.Time @@ -453,7 +453,7 @@ func (g *generator) WriteAccount(output io.Writer, accountString string) error { } // WriteDeltas generates returns the deltas for payset. -func (g *generator) WriteDeltas(output io.Writer, round uint64) error { +func (g *generator) WriteDeltas(output io.Writer, round basics.Round) error { // the first generated round has no statedelta. 
if round-g.roundOffset == 0 { data, _ := encode(protocol.CodecHandle, ledgercore.StateDelta{}) @@ -463,7 +463,7 @@ func (g *generator) WriteDeltas(output io.Writer, round uint64) error { } return nil } - delta, err := g.ledger.GetStateDeltaForRound(basics.Round(round - g.roundOffset)) + delta, err := g.ledger.GetStateDeltaForRound(round - g.roundOffset) if err != nil { return fmt.Errorf("err getting state delta for round %d: %w", round, err) } @@ -514,7 +514,7 @@ func getAppTxOptions() []interface{} { // ---- Transaction Generation (Pay/Asset/Apps) ---- -func (g *generator) generateTxGroup(round uint64, intra uint64) ([]txn.SignedTxnWithAD, uint64 /* numTxns */, error) { +func (g *generator) generateTxGroup(round basics.Round, intra uint64) ([]txn.SignedTxnWithAD, uint64 /* numTxns */, error) { selection, err := weightedSelection(g.transactionWeights, getTransactionOptions(), paymentTx) if err != nil { return nil, 0, err @@ -530,10 +530,14 @@ func (g *generator) generateTxGroup(round uint64, intra uint64) ([]txn.SignedTxn signedTxns = []txn.SignedTxn{signedTxn} case assetTx: var signedTxn txn.SignedTxn - signedTxn, numTxns, expectedID, err = g.generateAssetTxn(round, intra) + var assetID basics.AssetIndex + signedTxn, numTxns, assetID, err = g.generateAssetTxn(round, intra) + expectedID = uint64(assetID) signedTxns = []txn.SignedTxn{signedTxn} case applicationTx: - signedTxns, numTxns, expectedID, err = g.generateAppTxn(round, intra) + var appID basics.AppIndex + signedTxns, numTxns, appID, err = g.generateAppTxn(round, intra) + expectedID = uint64(appID) default: return nil, 0, fmt.Errorf("no generator available for %s", selection) } @@ -568,7 +572,7 @@ func (g *generator) generateTxGroup(round uint64, intra uint64) ([]txn.SignedTxn // generatePaymentTxn creates a new payment transaction. The sender is always a genesis account, the receiver is random, // or a new account. -func (g *generator) generatePaymentTxn(round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) { +func (g *generator) generatePaymentTxn(round basics.Round, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) { selection, err := weightedSelection(g.payTxWeights, getPaymentTxOptions(), paymentPayTx) if err != nil { return txn.SignedTxn{}, 0, err @@ -576,7 +580,7 @@ func (g *generator) generatePaymentTxn(round uint64, intra uint64) (txn.SignedTx return g.generatePaymentTxnInternal(selection.(TxTypeID), round, intra) } -func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) { +func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round basics.Round, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) { defer g.recordData(track(selection)) minBal := g.params.MinBalance @@ -618,7 +622,7 @@ func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64, // ---- 2. 
Asset Transactions ---- -func (g *generator) generateAssetTxn(round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, uint64 /* assetID */, error) { +func (g *generator) generateAssetTxn(round basics.Round, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, basics.AssetIndex, error) { start := time.Now() selection, err := weightedSelection(g.assetTxWeights, getAssetTxOptions(), assetXfer) if err != nil { @@ -636,11 +640,11 @@ func (g *generator) generateAssetTxn(round uint64, intra uint64) (txn.SignedTxn, return signTxn(transaction), 1, assetID, nil } -func (g *generator) generateAssetTxnInternal(txType TxTypeID, round uint64, intra uint64) (actual TxTypeID, txn txn.Transaction, assetID uint64) { +func (g *generator) generateAssetTxnInternal(txType TxTypeID, round basics.Round, intra uint64) (actual TxTypeID, txn txn.Transaction, assetID basics.AssetIndex) { return g.generateAssetTxnInternalHint(txType, round, intra, 0, nil) } -func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64, intra uint64, hintIndex uint64, hint *assetData) (actual TxTypeID, txn txn.Transaction, assetID uint64) { +func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round basics.Round, intra uint64, hintIndex uint64, hint *assetData) (actual TxTypeID, txn txn.Transaction, assetID basics.AssetIndex) { actual = txType // If there are no assets the next operation needs to be a create. numAssets := uint64(len(g.assets)) @@ -655,7 +659,7 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64, senderAcct := indexToAccount(senderIndex) total := assetTotal - assetID = g.txnCounter + intra + 1 + assetID = basics.AssetIndex(g.txnCounter + intra + 1) assetName := fmt.Sprintf("asset #%d", assetID) txn = g.makeAssetCreateTxn(g.makeTxnHeader(senderAcct, round, intra), total, false, assetName) // Compute asset ID and initialize holdings diff --git a/tools/block-generator/generator/generate_apps.go b/tools/block-generator/generator/generate_apps.go index 7a868a87e3..25665fb7ad 100644 --- a/tools/block-generator/generator/generate_apps.go +++ b/tools/block-generator/generator/generate_apps.go @@ -22,6 +22,7 @@ import ( "math/rand" "time" + "github.com/algorand/go-algorand/data/basics" txn "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/tools/block-generator/util" @@ -71,9 +72,9 @@ func (g *generator) resetPendingApps() { appKindBoxes: make([]*appData, 0), appKindSwap: make([]*appData, 0), } - g.pendingAppMap = map[appKind]map[uint64]*appData{ - appKindBoxes: make(map[uint64]*appData), - appKindSwap: make(map[uint64]*appData), + g.pendingAppMap = map[appKind]map[basics.AppIndex]*appData{ + appKindBoxes: make(map[basics.AppIndex]*appData), + appKindSwap: make(map[basics.AppIndex]*appData), } } @@ -123,7 +124,7 @@ func CumulativeEffects(report Report) EffectsReport { // ---- 3. App Transactions ---- -func (g *generator) generateAppTxn(round uint64, intra uint64) ([]txn.SignedTxn, uint64 /* numTxns */, uint64 /* appID */, error) { +func (g *generator) generateAppTxn(round basics.Round, intra uint64) ([]txn.SignedTxn, uint64 /* numTxns */, basics.AppIndex, error) { start := time.Now() selection, err := weightedSelection(g.appTxWeights, getAppTxOptions(), appSwapCall) if err != nil { @@ -142,7 +143,7 @@ func (g *generator) generateAppTxn(round uint64, intra uint64) ([]txn.SignedTxn, // generateAppCallInternal is the main workhorse for generating app transactions. 
// Senders are always genesis accounts to avoid running out of funds. -func (g *generator) generateAppCallInternal(txType TxTypeID, round, intra uint64, hintApp *appData) (TxTypeID, []txn.SignedTxn, uint64 /* appID */, error) { +func (g *generator) generateAppCallInternal(txType TxTypeID, round basics.Round, intra uint64, hintApp *appData) (TxTypeID, []txn.SignedTxn, basics.AppIndex, error) { var senderIndex uint64 if hintApp != nil { senderIndex = hintApp.sender @@ -164,7 +165,7 @@ func (g *generator) generateAppCallInternal(txType TxTypeID, round, intra uint64 var signedTxns []txn.SignedTxn switch appCallType { case appTxTypeCreate: - appID = g.txnCounter + intra + 1 + appID = basics.AppIndex(g.txnCounter + intra + 1) signedTxns = g.makeAppCreateTxn(kind, senderAcct, round, intra, appID) reSignTxns(signedTxns) @@ -214,8 +215,8 @@ func (g *generator) generateAppCallInternal(txType TxTypeID, round, intra uint64 return actual, signedTxns, appID, nil } -func (g *generator) getAppData(existing bool, kind appKind, senderIndex, appID uint64) (*appData, bool /* appInMap */, bool /* senderOptedin */) { - var appMapOrPendingAppMap map[appKind]map[uint64]*appData +func (g *generator) getAppData(existing bool, kind appKind, senderIndex uint64, appID basics.AppIndex) (*appData, bool /* appInMap */, bool /* senderOptedin */) { + var appMapOrPendingAppMap map[appKind]map[basics.AppIndex]*appData if existing { appMapOrPendingAppMap = g.appMap } else { @@ -239,7 +240,7 @@ func (g *generator) getAppData(existing bool, kind appKind, senderIndex, appID u // * it switches to create instead of optin when only opted into pending apps // * it switches to optin when noopoc if not opted in and follows the logic of the optins above // * the appID is 0 for creates, and otherwise a random appID from the existing apps for the kind -func (g *generator) getActualAppCall(txType TxTypeID, senderIndex uint64) (TxTypeID, appKind, appTxType, uint64 /* appID */, error) { +func (g *generator) getActualAppCall(txType TxTypeID, senderIndex uint64) (TxTypeID, appKind, appTxType, basics.AppIndex, error) { isApp, kind, appTxType, err := parseAppTxType(txType) if err != nil { return "", 0, 0, 0, err diff --git a/tools/block-generator/generator/generate_test.go b/tools/block-generator/generator/generate_test.go index 00dc480d77..4040c0724d 100644 --- a/tools/block-generator/generator/generate_test.go +++ b/tools/block-generator/generator/generate_test.go @@ -37,7 +37,7 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func makePrivateGenerator(t *testing.T, round uint64, genesis bookkeeping.Genesis) *generator { +func makePrivateGenerator(t *testing.T, round basics.Round, genesis bookkeeping.Genesis) *generator { cfg := GenerationConfig{ Name: "test", NumGenesisAccounts: 10, @@ -125,17 +125,14 @@ func TestAssetOptinEveryAccountOverride(t *testing.T) { g.finishRound() // Opt all the accounts in, this also verifies that no account is opted in twice - var txn transactions.Transaction - var actual TxTypeID - var assetID uint64 - for i := 2; uint64(i) <= g.numAccounts; i++ { - actual, txn, assetID = g.generateAssetTxnInternal(assetOptin, 2, uint64(1+i)) + for i := uint64(2); i <= g.numAccounts; i++ { + actual, txn, assetID := g.generateAssetTxnInternal(assetOptin, 2, 1+i) require.NotEqual(t, 0, assetID) require.Equal(t, assetOptin, actual) require.Equal(t, protocol.AssetTransferTx, txn.Type) require.Len(t, g.assets, 1) - require.Len(t, g.assets[0].holdings, i) - require.Len(t, g.assets[0].holders, i) + require.Len(t, 
g.assets[0].holdings, int(i)) + require.Len(t, g.assets[0].holders, int(i)) } g.finishRound() @@ -143,7 +140,7 @@ func TestAssetOptinEveryAccountOverride(t *testing.T) { require.Equal(t, g.numAccounts, uint64(len(g.assets[0].holdings))) // The next optin closes instead - actual, txn, assetID = g.generateAssetTxnInternal(assetOptin, 3, 0) + actual, txn, assetID := g.generateAssetTxnInternal(assetOptin, 3, 0) require.Greater(t, assetID, uint64(0)) g.finishRound() require.Equal(t, assetClose, actual) @@ -240,7 +237,7 @@ func TestAppCreate(t *testing.T) { g := makePrivateGenerator(t, 0, bookkeeping.Genesis{}) assembled := assembleApps(t) - round, intra := uint64(1337), uint64(0) + round, intra := basics.Round(1337), uint64(0) hint := appData{sender: 7} // app call transaction creating appBoxes @@ -317,7 +314,7 @@ func TestAppBoxesOptin(t *testing.T) { g := makePrivateGenerator(t, 0, bookkeeping.Genesis{}) assembled := assembleApps(t) - round, intra := uint64(1337), uint64(0) + round, intra := basics.Round(1337), uint64(0) hint := appData{sender: 7} @@ -443,8 +440,8 @@ func TestWriteRoundZero(t *testing.T) { partitiontest.PartitionTest(t) var testcases = []struct { name string - dbround uint64 - round uint64 + dbround basics.Round + round basics.Round genesis bookkeeping.Genesis }{ { @@ -486,45 +483,45 @@ func TestWriteRound(t *testing.T) { } // Initial conditions of g from makePrivateGenerator: - require.Equal(t, uint64(0), g.round) + require.Zero(t, g.round) // Round 0: blockBuff, block0_1 := prepBuffer() err := g.WriteBlock(blockBuff, 0) require.NoError(t, err) - require.Equal(t, uint64(1), g.round) + require.Equal(t, basics.Round(1), g.round) protocol.Decode(blockBuff.Bytes(), &block0_1) require.Equal(t, "blockgen-test", block0_1.Block.BlockHeader.GenesisID) - require.Equal(t, basics.Round(0), block0_1.Block.BlockHeader.Round) + require.Zero(t, block0_1.Block.BlockHeader.Round) require.NotNil(t, g.ledger) - require.Equal(t, basics.Round(0), g.ledger.Latest()) + require.Zero(t, g.ledger.Latest()) // WriteBlocks only advances the _internal_ round // the first time called for a particular _given_ round blockBuff, block0_2 := prepBuffer() err = g.WriteBlock(blockBuff, 0) require.NoError(t, err) - require.Equal(t, uint64(1), g.round) + require.Equal(t, basics.Round(1), g.round) protocol.Decode(blockBuff.Bytes(), &block0_2) require.Equal(t, block0_1, block0_2) require.NotNil(t, g.ledger) - require.Equal(t, basics.Round(0), g.ledger.Latest()) + require.Zero(t, g.ledger.Latest()) blockBuff, block0_3 := prepBuffer() err = g.WriteBlock(blockBuff, 0) require.NoError(t, err) - require.Equal(t, uint64(1), g.round) + require.Equal(t, basics.Round(1), g.round) protocol.Decode(blockBuff.Bytes(), &block0_3) require.Equal(t, block0_1, block0_3) require.NotNil(t, g.ledger) - require.Equal(t, basics.Round(0), g.ledger.Latest()) + require.Zero(t, g.ledger.Latest()) // Round 1: blockBuff, block1_1 := prepBuffer() err = g.WriteBlock(blockBuff, 1) require.NoError(t, err) - require.Equal(t, uint64(2), g.round) + require.Equal(t, basics.Round(2), g.round) protocol.Decode(blockBuff.Bytes(), &block1_1) require.Equal(t, "blockgen-test", block1_1.Block.BlockHeader.GenesisID) require.Equal(t, basics.Round(1), block1_1.Block.BlockHeader.Round) @@ -537,7 +534,7 @@ func TestWriteRound(t *testing.T) { blockBuff, block1_2 := prepBuffer() err = g.WriteBlock(blockBuff, 1) require.NoError(t, err) - require.Equal(t, uint64(2), g.round) + require.Equal(t, basics.Round(2), g.round) protocol.Decode(blockBuff.Bytes(), &block1_2) 
require.Equal(t, block1_1, block1_2) require.NotNil(t, g.ledger) @@ -555,8 +552,8 @@ func TestWriteRoundWithPreloadedDB(t *testing.T) { partitiontest.PartitionTest(t) var testcases = []struct { name string - dbround uint64 - round uint64 + dbround basics.Round + round basics.Round genesis bookkeeping.Genesis err error }{ diff --git a/tools/block-generator/generator/generator_ledger.go b/tools/block-generator/generator/generator_ledger.go index b3d5677652..71f95aa9d7 100644 --- a/tools/block-generator/generator/generator_ledger.go +++ b/tools/block-generator/generator/generator_ledger.go @@ -98,7 +98,7 @@ func (g *generator) initializeLedger() { g.ledger = l } -func (g *generator) minTxnsForBlock(round uint64) uint64 { +func (g *generator) minTxnsForBlock(round basics.Round) uint64 { // There are no transactions in the 0th round if round == 0 { return 0 @@ -206,12 +206,11 @@ func countInners(ad txn.ApplyData) int { } // introspectLedgerVsGenerator is only called when the --verbose command line argument is specified. -func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs []error) { +func (g *generator) introspectLedgerVsGenerator(round basics.Round, intra uint64) (errs []error) { if !g.verbose { errs = append(errs, fmt.Errorf("introspectLedgerVsGenerator called when verbose=false")) } - round := basics.Round(roundNumber) block, err := g.ledger.Block(round) if err != nil { round = err.(ledgercore.ErrNoEntry).Committed @@ -247,7 +246,6 @@ func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs sum += cnt } fmt.Print("--------------------\n") - fmt.Printf("roundNumber (generator): %d\n", roundNumber) fmt.Printf("round (ledger): %d\n", round) fmt.Printf("g.txnCounter + intra: %d\n", g.txnCounter+intra) fmt.Printf("block.BlockHeader.TxnCounter: %d\n", block.BlockHeader.TxnCounter) @@ -263,20 +261,21 @@ func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs // ---- FROM THE LEDGER: box and createable evidence ---- // ledgerBoxEvidenceCount := 0 - ledgerBoxEvidence := make(map[uint64][]uint64) + ledgerBoxEvidence := make(map[basics.AppIndex][]uint64) boxes := ledgerStateDeltas.KvMods for k := range boxes { - appID, nameIEsender, _ := apps.SplitBoxKey(k) + appNum, nameIEsender, _ := apps.SplitBoxKey(k) + appID := basics.AppIndex(appNum) ledgerBoxEvidence[appID] = append(ledgerBoxEvidence[appID], binary.LittleEndian.Uint64([]byte(nameIEsender))-1) ledgerBoxEvidenceCount++ } // TODO: can get richer info about app-Creatables from: // updates.Accts.AppResources - ledgerCreatableAppsEvidence := make(map[uint64]uint64) + ledgerCreatableAppsEvidence := make(map[basics.AppIndex]uint64) for creatableID, creatable := range ledgerStateDeltas.Creatables { if creatable.Ctype == basics.AppCreatable { - ledgerCreatableAppsEvidence[uint64(creatableID)] = accountToIndex(creatable.Creator) + ledgerCreatableAppsEvidence[basics.AppIndex(creatableID)] = accountToIndex(creatable.Creator) } } fmt.Printf("ledgerBoxEvidenceCount: %d\n", ledgerBoxEvidenceCount) @@ -284,13 +283,13 @@ func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs // ---- FROM THE GENERATOR: expected created and optins ---- // - expectedCreated := map[appKind]map[uint64]uint64{ - appKindBoxes: make(map[uint64]uint64), - appKindSwap: make(map[uint64]uint64), + expectedCreated := map[appKind]map[basics.AppIndex]uint64{ + appKindBoxes: make(map[basics.AppIndex]uint64), + appKindSwap: make(map[basics.AppIndex]uint64), } - expectedOptins := 
map[appKind]map[uint64]map[uint64]bool{ - appKindBoxes: make(map[uint64]map[uint64]bool), - appKindSwap: make(map[uint64]map[uint64]bool), + expectedOptins := map[appKind]map[basics.AppIndex]map[uint64]bool{ + appKindBoxes: make(map[basics.AppIndex]map[uint64]bool), + appKindSwap: make(map[basics.AppIndex]map[uint64]bool), } expectedOptinsCount := 0 @@ -309,20 +308,20 @@ func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs // ---- COMPARE LEDGER AND GENERATOR EVIDENCE ---- // - ledgerCreatablesUnexpected := map[uint64]uint64{} + ledgerCreatablesUnexpected := map[basics.AppIndex]uint64{} for creatableID, creator := range ledgerCreatableAppsEvidence { if expectedCreated[appKindSwap][creatableID] != creator && expectedCreated[appKindBoxes][creatableID] != creator { ledgerCreatablesUnexpected[creatableID] = creator } } - generatorExpectedCreatablesNotFound := map[uint64]uint64{} + generatorExpectedCreatablesNotFound := map[basics.AppIndex]uint64{} for creatableID, creator := range expectedCreated[appKindBoxes] { if ledgerCreatableAppsEvidence[creatableID] != creator { generatorExpectedCreatablesNotFound[creatableID] = creator } } - ledgerBoxOptinsUnexpected := map[uint64][]uint64{} + ledgerBoxOptinsUnexpected := map[basics.AppIndex][]uint64{} for appId, boxOptins := range ledgerBoxEvidence { for _, optin := range boxOptins { if _, ok := expectedOptins[appKindBoxes][appId][optin]; !ok { @@ -331,7 +330,7 @@ func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs } } - generatorExpectedOptinsNotFound := map[uint64][]uint64{} + generatorExpectedOptinsNotFound := map[basics.AppIndex][]uint64{} for appId, appOptins := range expectedOptins[appKindBoxes] { for optin := range appOptins { if !slices.Contains(ledgerBoxEvidence[appId], optin) { diff --git a/tools/block-generator/generator/generator_types.go b/tools/block-generator/generator/generator_types.go index 293e9aaa85..a13d652f8f 100644 --- a/tools/block-generator/generator/generator_types.go +++ b/tools/block-generator/generator/generator_types.go @@ -34,9 +34,9 @@ import ( type Generator interface { WriteReport(output io.Writer) error WriteGenesis(output io.Writer) error - WriteBlock(output io.Writer, round uint64) error + WriteBlock(output io.Writer, round basics.Round) error WriteAccount(output io.Writer, accountString string) error - WriteDeltas(output io.Writer, round uint64) error + WriteDeltas(output io.Writer, round basics.Round) error WriteStatus(output io.Writer) error Stop() } @@ -54,7 +54,7 @@ type generator struct { numAccounts uint64 // Block stuff - round uint64 + round basics.Round txnCounter uint64 prevBlockHash string timestamp int64 @@ -85,7 +85,7 @@ type generator struct { // pendingAppMap provides a live mapping from appID to appData for each appKind // for the current round - pendingAppMap map[appKind]map[uint64]*appData + pendingAppMap map[appKind]map[basics.AppIndex]*appData // pendingAppSlice provides a live slice of appData for each appKind. The reason // for maintaining both appMap and pendingAppSlice is to enable @@ -95,12 +95,12 @@ type generator struct { // appMap and appSlice store the information from their corresponding pending* // data structures at the end of each round and for the rest of the experiment - appMap map[appKind]map[uint64]*appData + appMap map[appKind]map[basics.AppIndex]*appData appSlice map[appKind][]*appData // accountAppOptins is used to keep track of which accounts have opted into - // and app and enable random selection. 
- accountAppOptins map[appKind]map[uint64][]uint64 + // an app and enable random selection. + accountAppOptins map[appKind]map[uint64][]basics.AppIndex transactionWeights []float32 @@ -120,14 +120,14 @@ type generator struct { // latestBlockMsgp caches the latest written block latestBlockMsgp []byte - // latestPaysetWithExpectedID provides the ordered payest transactions + // latestPaysetWithExpectedID provides the ordered payset transactions // together the expected asset/app IDs (or 0 if not applicable) latestPaysetWithExpectedID []txnWithExpectedID - roundOffset uint64 + roundOffset basics.Round } type assetData struct { - assetID uint64 + assetID basics.AssetIndex creator uint64 name string // Holding at index 0 is the creator. @@ -137,7 +137,7 @@ type assetData struct { } type appData struct { - appID uint64 + appID basics.AppIndex sender uint64 kind appKind optins map[uint64]bool @@ -150,7 +150,7 @@ type assetHolding struct { // Report is the generation report. type Report struct { - InitialRound uint64 `json:"initial_round"` + InitialRound basics.Round `json:"initial_round"` Counters map[string]uint64 `json:"counters"` Transactions map[TxTypeID]TxData `json:"transactions"` } diff --git a/tools/block-generator/generator/make_transactions.go b/tools/block-generator/generator/make_transactions.go index 00d0f9b127..76c5590555 100644 --- a/tools/block-generator/generator/make_transactions.go +++ b/tools/block-generator/generator/make_transactions.go @@ -28,15 +28,15 @@ import ( // ---- header / boilerplate ---- -func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) txn.Header { +func (g *generator) makeTxnHeader(sender basics.Address, round basics.Round, intra uint64) txn.Header { note := make([]byte, 8) binary.LittleEndian.PutUint64(note, g.txnCounter+intra) return txn.Header{ Sender: sender, Fee: basics.MicroAlgos{Raw: g.params.MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisID: g.genesisID, GenesisHash: g.genesisHash, Note: note, @@ -44,15 +44,15 @@ func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) tx } // makeTestTxn creates and populates the flat txntest.Txn structure with the given values. 
-func (g *generator) makeTestTxn(sender basics.Address, round, intra uint64) txntest.Txn { +func (g *generator) makeTestTxn(sender basics.Address, round basics.Round, intra uint64) txntest.Txn { note := make([]byte, 8) binary.LittleEndian.PutUint64(note, g.txnCounter+intra) return txntest.Txn{ Sender: sender, Fee: basics.MicroAlgos{Raw: g.params.MinTxnFee}, - FirstValid: basics.Round(round), - LastValid: basics.Round(round + 1000), + FirstValid: round, + LastValid: round + 1000, GenesisID: g.genesisID, GenesisHash: g.genesisHash, Note: note, @@ -93,22 +93,22 @@ func (g *generator) makeAssetCreateTxn(header txn.Header, total uint64, defaultF } } -func (g *generator) makeAssetDestroyTxn(header txn.Header, index uint64) txn.Transaction { +func (g *generator) makeAssetDestroyTxn(header txn.Header, index basics.AssetIndex) txn.Transaction { return txn.Transaction{ Type: protocol.AssetConfigTx, Header: header, AssetConfigTxnFields: txn.AssetConfigTxnFields{ - ConfigAsset: basics.AssetIndex(index), + ConfigAsset: index, }, } } -func (g *generator) makeAssetTransferTxn(header txn.Header, receiver basics.Address, amount uint64, closeAssetsTo basics.Address, index uint64) txn.Transaction { +func (g *generator) makeAssetTransferTxn(header txn.Header, receiver basics.Address, amount uint64, closeAssetsTo basics.Address, index basics.AssetIndex) txn.Transaction { return txn.Transaction{ Type: protocol.AssetTransferTx, Header: header, AssetTransferTxnFields: txn.AssetTransferTxnFields{ - XferAsset: basics.AssetIndex(index), + XferAsset: index, AssetAmount: amount, AssetReceiver: receiver, AssetCloseTo: closeAssetsTo, @@ -116,13 +116,13 @@ func (g *generator) makeAssetTransferTxn(header txn.Header, receiver basics.Addr } } -func (g *generator) makeAssetAcceptanceTxn(header txn.Header, index uint64) txn.Transaction { +func (g *generator) makeAssetAcceptanceTxn(header txn.Header, index basics.AssetIndex) txn.Transaction { return g.makeAssetTransferTxn(header, header.Sender, 0, basics.Address{}, index) } // ---- application transactions ---- -func (g *generator) makeAppCreateTxn(kind appKind, sender basics.Address, round, intra uint64, futureAppId uint64) []txn.SignedTxn { +func (g *generator) makeAppCreateTxn(kind appKind, sender basics.Address, round basics.Round, intra uint64, futureAppId basics.AppIndex) []txn.SignedTxn { var approval, clear interface{} if kind == appKindSwap { approval, clear = approvalSwapBytes, clearSwapBytes @@ -162,7 +162,7 @@ func (g *generator) makeAppCreateTxn(kind appKind, sender basics.Address, round, paySibTxn := g.makeTestTxn(sender, round, intra) paySibTxn.Type = protocol.PaymentTx - paySibTxn.Receiver = basics.AppIndex(futureAppId).Address() + paySibTxn.Receiver = futureAppId.Address() paySibTxn.Fee = basics.MicroAlgos{Raw: pstFee} paySibTxn.Amount = uint64(pstAmt) @@ -173,7 +173,7 @@ func (g *generator) makeAppCreateTxn(kind appKind, sender basics.Address, round, } // makeAppOptinTxn currently only works for the boxes app -func (g *generator) makeAppOptinTxn(sender basics.Address, round, intra uint64, kind appKind, appIndex uint64) []txn.SignedTxn { +func (g *generator) makeAppOptinTxn(sender basics.Address, round basics.Round, intra uint64, kind appKind, appIndex basics.AppIndex) []txn.SignedTxn { if kind != appKindBoxes { panic("makeAppOptinTxn only works for the boxes app currently") } @@ -187,7 +187,7 @@ func (g *generator) makeAppOptinTxn(sender basics.Address, round, intra uint64, */ optInTxn.Type = protocol.ApplicationCallTx - optInTxn.ApplicationID = 
basics.AppIndex(appIndex) + optInTxn.ApplicationID = appIndex optInTxn.OnCompletion = txn.OptInOC // the first inner sends some algo to the creator: optInTxn.Accounts = []basics.Address{indexToAccount(g.appMap[kind][appIndex].sender)} @@ -202,7 +202,7 @@ func (g *generator) makeAppOptinTxn(sender basics.Address, round, intra uint64, paySibTxn := g.makeTestTxn(sender, round, intra) paySibTxn.Type = protocol.PaymentTx - paySibTxn.Receiver = basics.AppIndex(appIndex).Address() + paySibTxn.Receiver = appIndex.Address() paySibTxn.Fee = basics.MicroAlgos{Raw: pstFee} paySibTxn.Amount = uint64(pstAmt) @@ -215,10 +215,10 @@ func (g *generator) makeAppOptinTxn(sender basics.Address, round, intra uint64, } // makeAppCallTxn currently only works for the boxes app -func (g *generator) makeAppCallTxn(sender basics.Address, round, intra, appIndex uint64) txn.Transaction { +func (g *generator) makeAppCallTxn(sender basics.Address, round basics.Round, intra uint64, appIndex basics.AppIndex) txn.Transaction { callTxn := g.makeTestTxn(sender, round, intra) callTxn.Type = protocol.ApplicationCallTx - callTxn.ApplicationID = basics.AppIndex(appIndex) + callTxn.ApplicationID = appIndex callTxn.OnCompletion = txn.NoOpOC // redundant for clarity callTxn.ApplicationArgs = [][]byte{ {0xe1, 0xf9, 0x3f, 0x1d}, // the method selector for getting a box diff --git a/tools/block-generator/generator/server.go b/tools/block-generator/generator/server.go index 8fdd8d213e..3694f95df6 100644 --- a/tools/block-generator/generator/server.go +++ b/tools/block-generator/generator/server.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/tools/block-generator/util" @@ -42,7 +43,7 @@ type BlocksMiddleware func(next http.Handler) http.Handler // MakeServerWithMiddleware allows injecting a middleware for the blocks handler. // This is needed to simplify tests by stopping block production while validation // is done on the data. -func MakeServerWithMiddleware(log logging.Logger, dbround uint64, genesisFile string, configFile string, verbose bool, addr string, blocksMiddleware BlocksMiddleware) (*http.Server, Generator) { +func MakeServerWithMiddleware(log logging.Logger, dbround basics.Round, genesisFile string, configFile string, verbose bool, addr string, blocksMiddleware BlocksMiddleware) (*http.Server, Generator) { cfg, err := initializeConfigFile(configFile) util.MaybeFail(err, "problem loading config file. Use '--config' or create a config file.") var bkGenesis bookkeeping.Genesis @@ -115,7 +116,7 @@ func getBlockHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) http.Error(w, err.Error(), http.StatusBadRequest) return } - maybeWriteError("block", w, gen.WriteBlock(w, round)) + maybeWriteError("block", w, gen.WriteBlock(w, basics.Round(round))) } } @@ -143,7 +144,7 @@ func getDeltasHandler(gen Generator) func(w http.ResponseWriter, r *http.Request http.Error(w, err.Error(), http.StatusBadRequest) return } - maybeWriteError("deltas", w, gen.WriteDeltas(w, round)) + maybeWriteError("deltas", w, gen.WriteDeltas(w, basics.Round(round))) } } diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod index a391443cbd..59961ea55a 100644 --- a/tools/block-generator/go.mod +++ b/tools/block-generator/go.mod @@ -4,7 +4,7 @@ replace github.com/algorand/go-algorand => ../.. 
go 1.23.0 -toolchain go1.23.3 +toolchain go1.23.9 require ( github.com/algorand/avm-abi v0.2.0 @@ -13,7 +13,7 @@ require ( github.com/algorand/go-deadlock v0.2.4 github.com/lib/pq v1.10.9 github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -22,7 +22,6 @@ require ( github.com/algorand/falcon v0.1.0 // indirect github.com/algorand/go-sumhash v0.1.0 // indirect github.com/algorand/msgp v1.1.60 // indirect - github.com/algorand/oapi-codegen v1.12.0-algorand.0 // indirect github.com/algorand/sortition v1.0.0 // indirect github.com/algorand/websocket v1.4.6 // indirect github.com/aws/aws-sdk-go v1.34.0 // indirect @@ -122,6 +121,7 @@ require ( github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oapi-codegen/runtime v1.1.1 // indirect github.com/olivere/elastic v6.2.14+incompatible // indirect github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect @@ -130,12 +130,12 @@ require ( github.com/pion/datachannel v1.5.9 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/ice/v2 v2.3.36 // indirect - github.com/pion/interceptor v0.1.37 // indirect - github.com/pion/logging v0.2.2 // indirect + github.com/pion/interceptor v0.1.39 // indirect + github.com/pion/logging v0.2.3 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.9 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.18 // indirect github.com/pion/sctp v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect github.com/pion/srtp/v2 v2.0.20 // indirect @@ -154,7 +154,7 @@ require ( github.com/quic-go/quic-go v0.48.2 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -169,13 +169,13 @@ require ( go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.35.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.36.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/tools v0.27.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/protobuf v1.35.1 // indirect diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum index 67095905ea..d7caa73f45 100644 --- a/tools/block-generator/go.sum +++ b/tools/block-generator/go.sum @@ -29,8 +29,6 @@ github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dU github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc= github.com/algorand/msgp v1.1.60 h1:+IVUC34+tSj1P2M1mkYtl4GLyfzdzXfBLSw6TDT19M8= 
github.com/algorand/msgp v1.1.60/go.mod h1:RqZQBzAFDWpwh5TlabzZkWy+6kwL9cvXfLbU0gD99EA= -github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk= -github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48= github.com/algorand/sortition v1.0.0 h1:PJiZtdSTBm4nArQrZXBnhlljHXhuyAXRJBqVWowQu3E= github.com/algorand/sortition v1.0.0/go.mod h1:23CZwAbTWPv0bBsq+Php/2J6Y/iXDyzlfcZyepeY5Fo= github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc= @@ -436,6 +434,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8= github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -467,20 +467,21 @@ github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= github.com/pion/ice/v2 v2.3.36 h1:SopeXiVbbcooUg2EIR8sq4b13RQ8gzrkkldOVg+bBsc= github.com/pion/ice/v2 v2.3.36/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= -github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= -github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/interceptor v0.1.39 h1:Y6k0bN9Y3Lg/Wb21JBWp480tohtns8ybJ037AGr9UuA= +github.com/pion/interceptor v0.1.39/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= -github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= -github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.18 h1:yEAb4+4a8nkPCecWzQB6V/uEU18X1lQCGAQCjP+pyvU= 
+github.com/pion/rtp v1.8.18/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= @@ -532,8 +533,8 @@ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -602,8 +603,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -677,8 +679,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -727,8 +729,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= 
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -744,8 +746,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -782,8 +784,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -801,12 +803,12 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/tools/block-generator/runner/run.go b/tools/block-generator/runner/run.go index 864b516e79..28216d868f 100644 --- a/tools/block-generator/runner/run.go +++ b/tools/block-generator/runner/run.go @@ -38,6 +38,7 @@ import ( "github.com/algorand/go-deadlock" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/tools/block-generator/generator" "github.com/algorand/go-algorand/tools/block-generator/util" @@ -57,7 +58,7 @@ type Args struct { Path string ConduitBinary string MetricsPort uint64 - Template string + Template string PostgresConnectionString string CPUProfilePath string RunDuration time.Duration @@ -155,7 +156,7 @@ func (r *Args) run(reportDirectory string) error { }) } // get next db round - var nextRound uint64 + var nextRound basics.Round var err error switch r.Template { case "file-exporter": @@ -517,7 +518,7 @@ func (r *Args) runTest(w io.Writer, metricsURL string, generatorURL string) erro } // startGenerator starts the generator server. -func startGenerator(ledgerLogFile, configFile string, dbround uint64, genesisFile string, verbose bool, addr string, blockMiddleware func(http.Handler) http.Handler) (func() error, generator.Generator) { +func startGenerator(ledgerLogFile, configFile string, dbround basics.Round, genesisFile string, verbose bool, addr string, blockMiddleware func(http.Handler) http.Handler) (func() error, generator.Generator) { f, err := os.OpenFile(ledgerLogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) util.MaybeFail(err, "unable to open ledger log file '%s'", ledgerLogFile) log := logging.NewLogger() @@ -548,13 +549,13 @@ func startGenerator(ledgerLogFile, configFile string, dbround uint64, genesisFil } // startConduit starts the conduit binary. -func startConduit(dataDir string, conduitBinary string, round uint64) (func() error, error) { +func startConduit(dataDir string, conduitBinary string, round basics.Round) (func() error, error) { fmt.Printf("%sConduit starting with data directory: %s\n", pad, dataDir) ctx, cf := context.WithCancel(context.Background()) cmd := exec.CommandContext( ctx, conduitBinary, - "-r", strconv.FormatUint(round, 10), + "-r", strconv.FormatUint(uint64(round), 10), "-d", dataDir, ) cmd.WaitDelay = 5 * time.Second diff --git a/tools/block-generator/util/util.go b/tools/block-generator/util/util.go index 5db381563f..e1c66df7e0 100644 --- a/tools/block-generator/util/util.go +++ b/tools/block-generator/util/util.go @@ -24,6 +24,8 @@ import ( "os" "strings" + "github.com/algorand/go-algorand/data/basics" + // import postgres driver _ "github.com/lib/pq" ) @@ -42,7 +44,7 @@ func MaybeFail(err error, errfmt string, params ...interface{}) { } // GetNextRound returns the next account round from the metastate table. 
-func GetNextRound(postgresConnectionString string) (uint64, error) { +func GetNextRound(postgresConnectionString string) (basics.Round, error) { conn, err := sql.Open("postgres", postgresConnectionString) if err != nil { return 0, fmt.Errorf("postgres connection string did not work: %w", err) @@ -56,7 +58,7 @@ func GetNextRound(postgresConnectionString string) (uint64, error) { } return 0, fmt.Errorf("unable to get next db round: %w", err) } - kv := make(map[string]uint64) + kv := make(map[string]basics.Round) err = json.Unmarshal(state, &kv) if err != nil { return 0, fmt.Errorf("unable to get next account round: %w", err) diff --git a/tools/debug/carpenter/main.go b/tools/debug/carpenter/main.go index e1a5647d66..b13bbb9b2b 100644 --- a/tools/debug/carpenter/main.go +++ b/tools/debug/carpenter/main.go @@ -305,10 +305,7 @@ func outputTableFormat(out string, event logspec.Event, columns []string, colPos maxLen := len(out) for i := 0; i < rowCount; i++ { start := i * columnWidth - end := start + columnWidth - if end > maxLen { - end = maxLen - } + end := min(start+columnWidth, maxLen) if start < len(out) { row := strings.TrimSpace(out[start:end]) output := "" diff --git a/tools/debug/transplanter/main.go b/tools/debug/transplanter/main.go index bef6fe3ab5..2433fecbef 100644 --- a/tools/debug/transplanter/main.go +++ b/tools/debug/transplanter/main.go @@ -35,6 +35,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/config/bounds" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/pools" @@ -83,7 +84,7 @@ func decodeTxGroup(data []byte) ([]transactions.SignedTxn, error) { return nil, fmt.Errorf("received a non-decodable txn: %v", err) } ntx++ - if ntx >= config.MaxTxGroupSize { + if ntx >= bounds.MaxTxGroupSize { // max ever possible group size reached, done reading input. if dec.Remaining() > 0 { // if something else left in the buffer - this is an error, drop @@ -387,7 +388,7 @@ func main() { os.Exit(1) } syncRound := uint64(*roundStart) - cfg.MaxAcctLookback + 1 - err = followerNode.SetSyncRound(syncRound) + err = followerNode.SetSyncRound(basics.Round(syncRound)) if err != nil { fmt.Fprintf(os.Stderr, "Cannot configure catchup: %v", err) os.Exit(1) diff --git a/tools/network/dnssec/trustedchain_test.go b/tools/network/dnssec/trustedchain_test.go index 5977ce192f..8b459669b7 100644 --- a/tools/network/dnssec/trustedchain_test.go +++ b/tools/network/dnssec/trustedchain_test.go @@ -274,7 +274,7 @@ func TestEnsureTrustChain(t *testing.T) { a.Contains(err.Error(), "failed to verify test. KSK against digest in parent DS") } -func TestEnsureTrustChainFailures(t *testing.T) { +func TestEnsureTrustChainError(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) diff --git a/tools/x-repo-types/go.mod b/tools/x-repo-types/go.mod index d6c97063c9..bd3a1ab9ed 100644 --- a/tools/x-repo-types/go.mod +++ b/tools/x-repo-types/go.mod @@ -2,14 +2,14 @@ module github.com/algorand/go-algorand/tools/x-repo-types go 1.23.0 -toolchain go1.23.3 +toolchain go1.23.9 replace github.com/algorand/go-algorand => ../.. 
require ( github.com/algorand/go-algorand v0.0.0 github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 ) require ( diff --git a/tools/x-repo-types/go.sum b/tools/x-repo-types/go.sum index 206a6f52af..d012d353c8 100644 --- a/tools/x-repo-types/go.sum +++ b/tools/x-repo-types/go.sum @@ -10,8 +10,8 @@ github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/tools/x-repo-types/xrt_test.go b/tools/x-repo-types/xrt_test.go index 624e17d683..0e4b895585 100644 --- a/tools/x-repo-types/xrt_test.go +++ b/tools/x-repo-types/xrt_test.go @@ -63,7 +63,7 @@ func TestCrossRepoTypes(t *testing.T) { yBranch: "main", yType: "Block", skip: true, - skipReason: `Several issues. For example: LEVEL 5 of goal bookkeeping.Block is EvalDelta with field [SharedAccts](codec:"sa,allocbound=config.MaxEvalDeltaAccounts") VS SDK types.EvalDelta is missing SharedAccts field`, + skipReason: `Several issues. 
For example: LEVEL 5 of goal bookkeeping.Block is EvalDelta with field [SharedAccts](codec:"sa,allocbound=bounds.MaxEvalDeltaAccounts") VS SDK types.EvalDelta is missing SharedAccts field`, }, { name: "goal-v-sdk-eval-delta", diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 483cabc6f4..1bd470aa84 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -50,10 +50,7 @@ func Optimal(numElements int, falsePositiveRate float64) (sizeBits int, numHashe m := -(n+0.5)*math.Log(p)/math.Pow(math.Log(2), 2) + 1 k := -math.Log(p) / math.Log(2) - numHashes = uint32(math.Ceil(k)) - if numHashes > maxHashes { - numHashes = maxHashes - } + numHashes = min(uint32(math.Ceil(k)), maxHashes) return int(math.Ceil(m)), numHashes } diff --git a/util/io.go b/util/io.go index f01b58be31..1244498267 100644 --- a/util/io.go +++ b/util/io.go @@ -190,8 +190,8 @@ func CopyFolder(source, dest string) error { } func copyFolder(source string, dest string, info os.FileInfo, includeFilter IncludeFilter) (err error) { - if err := os.MkdirAll(dest, info.Mode()); err != nil { - return fmt.Errorf("error creating destination folder: %v", err) + if err1 := os.MkdirAll(dest, info.Mode()); err1 != nil { + return fmt.Errorf("error creating destination folder: %v", err1) } contents, err := os.ReadDir(source) diff --git a/util/tar/tar.go b/util/tar/tar.go index e97c03b793..012ead433f 100644 --- a/util/tar/tar.go +++ b/util/tar/tar.go @@ -72,8 +72,8 @@ func Compress(src string, writers ...io.Writer) error { header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator)) // write the header - if err := tw.WriteHeader(header); err != nil { - return err + if err1 := tw.WriteHeader(header); err1 != nil { + return err1 } // return on non-regular files (thanks to [kumo](https://medium.com/@komuw/just-like-you-did-fbdd7df829d3) for this suggested update) diff --git a/util/watchdogStreamReader.go b/util/watchdogStreamReader.go index 9d05b511ee..0d60a95580 100644 --- a/util/watchdogStreamReader.go +++ b/util/watchdogStreamReader.go @@ -110,10 +110,7 @@ func (r *watchdogStreamReader) Read(p []byte) (n int, err error) { } if len(r.stageBuffer) > 0 { // copy the data to the buffer p - n = len(p) - if n > len(r.stageBuffer) { - n = len(r.stageBuffer) - } + n = min(len(p), len(r.stageBuffer)) copy(p, r.stageBuffer) r.stageBuffer = r.stageBuffer[n:] }
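Note on the e2e escrow script changes near the top of this hunk set: the initial clerk send drops from 1000000 to 900000, so every subsequent SMALL balance assertion shifts down by exactly 100000. The new expected values can be re-derived from the fees and transfer amounts already in the script. A minimal sketch of that arithmetic, assuming the default 1000 microalgo fee everywhere the script does not pass an explicit --fee:

package main

import "fmt"

// Re-derivation of the expected SMALL balances in the updated e2e script.
func main() {
	bal := uint64(900_000) // funded by the initial clerk send
	bal -= 1_000           // app create fee
	fmt.Println(bal) // 899000
	bal -= 1_000 // optin fee
	fmt.Println(bal) // 898000
	bal -= 2_000 + 150_000 // deposit group: 2 fees plus 150k deposited
	fmt.Println(bal) // 746000
	bal += 20_000 - 1_000 // withdraw 20k, pay 1 fee
	fmt.Println(bal) // 765000
	bal += 10_000 - 2_000 // withdraw 10k with an explicit 2000 fee
	fmt.Println(bal) // 773000
	bal += 18_000 - 1_000 // final withdraw at exactly min balance
	fmt.Println(bal) // 790000
}

Running this prints 899000, 898000, 746000, 765000, 773000, 790000, matching the updated assertions (the three failed over-withdrawals in between leave the balance unchanged).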
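The block-generator changes replace bare uint64 round, app, and asset identifiers with the defined types basics.Round, basics.AppIndex, and basics.AssetIndex. The payoff is that mixing up a round number with an app or asset ID becomes a compile-time error rather than a silent bug, at the cost of explicit conversions such as basics.AssetIndex(g.txnCounter + intra + 1). A standalone sketch of why defined integer types help here; the Round and AppIndex declarations below are illustrative stand-ins, not the actual basics package source:

package main

import "fmt"

// Round and AppIndex mirror the kind of defined types used in data/basics:
// the same underlying representation as uint64, but distinct types to the compiler.
type Round uint64
type AppIndex uint64

// writeBlock only accepts a Round, so callers must say what the number means;
// passing an AppIndex or a bare uint64 variable fails to compile.
func writeBlock(round Round) {
	fmt.Printf("writing block for round %d\n", round)
}

func main() {
	var dbRound uint64 = 1337

	// writeBlock(dbRound)     // compile error: cannot use uint64 as Round
	writeBlock(Round(dbRound)) // explicit conversion documents the intent

	appID := AppIndex(42)
	_ = appID
	// writeBlock(appID)       // compile error: AppIndex is not a Round
}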
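The manual clamping blocks removed from tools/debug/carpenter/main.go, util/bloom/bloom.go, and util/watchdogStreamReader.go are replaced by the generic min built-in, available since Go 1.21 and therefore well within the go 1.23.0 directive used by these modules, so behavior is unchanged. A small sketch of the equivalence:

package main

import "fmt"

func main() {
	p := make([]byte, 16)
	stage := []byte("hello")

	// Old pattern: clamp by hand.
	n := len(p)
	if n > len(stage) {
		n = len(stage)
	}

	// New pattern: the Go 1.21+ built-in expresses the same clamp in one line.
	m := min(len(p), len(stage))

	fmt.Println(n == m) // true
}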
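The err-to-err1 renames in util/io.go and util/tar.go are behavior-preserving; they stop the inner short variable declaration from shadowing an error variable that already exists in the enclosing scope (copyFolder even names its return value err), which keeps shadow linters quiet and makes it unambiguous which error is wrapped. A hedged sketch of the two patterns, using a made-up step() helper:

package main

import (
	"errors"
	"fmt"
)

var errBoom = errors.New("boom")

func step() error { return errBoom }

// shadowed mimics the old pattern: the short variable declaration inside the
// if introduces a new err that shadows the function's named return value.
func shadowed() (err error) {
	if err := step(); err != nil { // shadow warning from vet/linters; easy to misread later
		return fmt.Errorf("step failed: %w", err)
	}
	return nil
}

// renamed mimics the new pattern from the diff: a distinct name (err1) keeps
// the named return untouched and makes the wrapped error unambiguous.
func renamed() (err error) {
	if err1 := step(); err1 != nil {
		return fmt.Errorf("step failed: %w", err1)
	}
	return nil
}

func main() {
	fmt.Println(shadowed()) // step failed: boom
	fmt.Println(renamed())  // step failed: boom
}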