diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000000000..e01f5318e40257 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @anza-xyz/backport-reviewers diff --git a/.github/scripts/downstream-project-spl-common.sh b/.github/scripts/downstream-project-spl-common.sh index 0bfff86dcca2df..779af8f2568110 100644 --- a/.github/scripts/downstream-project-spl-common.sh +++ b/.github/scripts/downstream-project-spl-common.sh @@ -19,6 +19,7 @@ project_used_solana_version=$(sed -nE 's/solana-sdk = \"[>=<~]*(.*)\"/\1/p' <"to echo "used solana version: $project_used_solana_version" if semverGT "$project_used_solana_version" "$SOLANA_VER"; then echo "skip" + export SKIP_SPL_DOWNSTREAM_PROJECT_TEST=1 return fi diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index 848d10f85089e2..1a33d2ae59493c 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -47,7 +47,7 @@ jobs: strategy: matrix: os: - - macos-11 + - macos-12 target: - aarch64-apple-ios - x86_64-apple-ios diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index d875afbd4a9c56..8d3baf25949e99 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -55,6 +55,10 @@ jobs: run: | source .github/scripts/downstream-project-spl-install-deps.sh source .github/scripts/downstream-project-spl-common.sh + if [ -n "$SKIP_SPL_DOWNSTREAM_PROJECT_TEST" ]; then + exit 0 + fi + cargo check test: @@ -103,6 +107,9 @@ jobs: run: | source .github/scripts/downstream-project-spl-install-deps.sh source .github/scripts/downstream-project-spl-common.sh + if [ -n "$SKIP_SPL_DOWNSTREAM_PROJECT_TEST" ]; then + exit 0 + fi programStr="${{ tojson(matrix.arrays.required_programs) }}" IFS=', ' read -ra programs <<<"${programStr//[\[\]$'\n'$'\r' ]/}" @@ -154,6 +161,9 @@ jobs: run: | source 
.github/scripts/downstream-project-spl-install-deps.sh source .github/scripts/downstream-project-spl-common.sh + if [ -n "$SKIP_SPL_DOWNSTREAM_PROJECT_TEST" ]; then + exit 0 + fi programStr="${{ tojson(matrix.programs) }}" IFS=', ' read -ra programs <<<"${programStr//[\[\]$'\n'$'\r' ]/}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 102aa724f7e32d..c8d5c088ca3382 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,14 +8,28 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm and follows a [Backwards Compatibility Policy](https://docs.solanalabs.com/backwards-compatibility) Release channels have their own copy of this changelog: -* [edge - v2.0](#edge-channel) -* [beta - v1.18](https://github.com/solana-labs/solana/blob/v1.18/CHANGELOG.md) -* [stable - v1.17](https://github.com/solana-labs/solana/blob/v1.17/CHANGELOG.md) +* [edge - v2.1](#edge-channel) +* [beta - v2.0](https://github.com/solana-labs/solana/blob/v2.0/CHANGELOG.md) +* [stable - v1.18](https://github.com/solana-labs/solana/blob/v1.18/CHANGELOG.md) -## [2.0.0] - Unreleased +## [2.1.0] - Unreleased + +## [2.0.0] * Breaking - * SDK: Support for Borsh v0.9 removed, please use v1 or v0.10 (#1440) + * SDK: + * Support for Borsh v0.9 removed, please use v1 or v0.10 (#1440) + * `Copy` is no longer derived on `Rent` and `EpochSchedule`, please switch to using `clone()` (solana-labs#32767) + * `solana-sdk`: deprecated symbols removed + * `solana-program`: deprecated symbols removed + * RPC: obsolete and deprecated v1 endpoints are removed. 
These endpoints are: + confirmTransaction, getSignatureStatus, getSignatureConfirmation, getTotalSupply, + getConfirmedSignaturesForAddress, getConfirmedBlock, getConfirmedBlocks, getConfirmedBlocksWithLimit, + getConfirmedTransaction, getConfirmedSignaturesForAddress2, getRecentBlockhash, getFees, + getFeeCalculatorForBlockhash, getFeeRateGovernor, getSnapshotSlot, getStakeActivation + * `--enable-rpc-obsolete_v1_7` flag removed + Deprecated methods are removed from `RpcClient` and `RpcClient::nonblocking` + `solana-client`: deprecated re-exports removed; please import `solana-connection-cache`, `solana-quic-client`, or `solana-udp-client` directly * Changes * `central-scheduler` as default option for `--block-production-method` (#34891) * `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22 @@ -25,6 +39,16 @@ Release channels have their own copy of this changelog: when the `replaceRecentBlockhash` config param is `true` (#380) * SDK: `cargo test-sbf` accepts `--tools-version`, just like `build-sbf` (#1359) * CLI: Can specify `--full-snapshot-archive-path` (#1631) + * transaction-status: The SPL Token `amountToUiAmount` instruction parses the amount into a string instead of a number (#1737) + * Implemented partitioned epoch rewards as per [SIMD-0118](https://github.com/solana-foundation/solana-improvement-documents/blob/fae25d5a950f43bd787f1f5d75897ef1fdd425a7/proposals/0118-partitioned-epoch-reward-distribution.md). Feature gate: #426. 
Specific changes include: + * EpochRewards sysvar expanded and made persistent (#428, #572) + * Stake Program credits now allowed during distribution (#631) + * Updated type in Bank::epoch_rewards_status (#1277) + * Partitions are recalculated on boot from snapshot (#1159) + * `epoch_rewards_status` removed from snapshot (#1274) + * Added `unified-scheduler` option for `--block-verification-method` (#1668) + * Deprecate the `fifo` option for `--rocksdb-shred-compaction` (#1882) + * `fifo` will remain supported in v2.0 with plans to fully remove in v2.1 ## [1.18.0] * Changes diff --git a/Cargo.lock b/Cargo.lock index e1c869720b7747..af027b270249ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -65,7 +65,7 @@ dependencies = [ [[package]] name = "agave-accounts-hash-cache-tool" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "clap 2.34.0", @@ -75,7 +75,7 @@ dependencies = [ [[package]] name = "agave-cargo-registry" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "flate2", @@ -105,7 +105,7 @@ dependencies = [ [[package]] name = "agave-geyser-plugin-interface" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-sdk", @@ -115,7 +115,7 @@ dependencies = [ [[package]] name = "agave-install" -version = "2.0.0" +version = "2.0.2" dependencies = [ "atty", "bincode", @@ -151,7 +151,7 @@ dependencies = [ [[package]] name = "agave-ledger-tool" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_cmd", "bs58", @@ -194,6 +194,7 @@ dependencies = [ "solana-storage-bigtable", "solana-streamer", "solana-transaction-status", + "solana-type-overrides", "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", @@ -205,7 +206,7 @@ dependencies = [ [[package]] name = "agave-store-tool" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "solana-accounts-db", @@ -215,7 +216,7 @@ dependencies = [ [[package]] name = "agave-validator" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"agave-geyser-plugin-interface", "chrono", @@ -283,7 +284,7 @@ dependencies = [ [[package]] name = "agave-watchtower" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "humantime", @@ -626,6 +627,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "assoc" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfdc70193dadb9d7287fa4b633f15f90c876915b31f6af17da307fc59c9859a8" + [[package]] name = "async-channel" version = "1.9.0" @@ -679,18 +686,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -843,7 +850,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -885,6 +892,18 @@ dependencies = [ "typenum", ] +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "blake3" version = "1.5.1" @@ -967,7 +986,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "syn_derive", ] @@ -1083,7 +1102,7 @@ checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1094,9 +1113,9 
@@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" [[package]] name = "bytesize" @@ -1184,13 +1203,12 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.99" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "324c74f2155653c90b04f25b2a47a8a631360cb908f92a772695f430c7e31052" dependencies = [ "jobserver", "libc", - "once_cell", ] [[package]] @@ -1238,7 +1256,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1332,18 +1350,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.7" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.7" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" dependencies = [ "anstyle", "clap_lex 0.7.1", @@ -1519,7 +1537,7 @@ dependencies = [ "anes", "cast 0.3.0", "ciborium", - "clap 4.5.7", + "clap 4.5.9", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1675,9 +1693,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -1685,27 +1703,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1776,7 +1794,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1789,7 +1807,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1868,7 +1886,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1891,7 +1909,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1997,7 +2015,7 @@ checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2010,7 +2028,7 @@ 
dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2192,6 +2210,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.1.31" @@ -2255,7 +2279,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2315,7 +2339,7 @@ dependencies = [ [[package]] name = "gen-headers" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "regex", @@ -2323,11 +2347,25 @@ dependencies = [ [[package]] name = "gen-syscall-list" -version = "2.0.0" +version = "2.0.2" dependencies = [ "regex", ] +[[package]] +name = "generator" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "windows", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -2647,9 +2685,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -2737,7 +2775,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -2819,9 +2857,9 @@ dependencies = [ [[package]] name = 
"index_list" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70891286cb8e844fdfcf1178b47569699f9e20b5ecc4b45a6240a64771444638" +checksum = "2cb725b6505e51229de32027e0cfcd9db29da4d89156f9747b0a5195643fa3e1" [[package]] name = "indexmap" @@ -3128,7 +3166,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if 1.0.0", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3342,9 +3380,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -3552,7 +3590,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3625,7 +3663,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3636,9 +3674,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] @@ -3660,9 +3698,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" @@ -3693,7 +3731,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3749,6 +3787,12 @@ version = "6.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -3806,9 +3850,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.2", + "redox_syscall 0.5.3", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3867,9 +3911,9 @@ dependencies = [ [[package]] name = "pest" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", @@ -3878,9 +3922,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", @@ -3888,22 +3932,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.10" +version = "2.7.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "pest_meta" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", @@ -3947,7 +3991,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -4222,7 +4266,7 @@ dependencies = [ [[package]] name = "proto" -version = "2.0.0" +version = "2.0.2" dependencies = [ "protobuf-src", "tonic-build", @@ -4254,7 +4298,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -4320,6 +4364,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.4.6" @@ -4419,6 +4469,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_pcg" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xorshift" version = "0.3.0" @@ -4459,7 +4518,7 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "2.0.0" +version = "2.0.2" [[package]] name = "rdrand" @@ -4490,9 +4549,9 @@ dependencies = [ 
[[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags 2.6.0", ] @@ -4812,6 +4871,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -4835,7 +4900,7 @@ checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -4850,9 +4915,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -4863,9 +4928,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -4891,9 +4956,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ 
-4909,20 +4974,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "serde_json" -version = "1.0.118" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d947f6b3163d8857ea16c4fa0dd4840d52f3041039a85decd46867eb1abef2e4" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", @@ -4969,7 +5034,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -5019,7 +5084,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -5113,6 +5178,25 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "shuttle" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d9a8db61a44e2b663f169a08206a789bcbd22ba32011e14951562848e7b9c98" +dependencies = [ + "assoc", + "bitvec", + "generator", + "hex", + "owo-colors", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_pcg", + "scoped-tls", + "smallvec", + "tracing", +] + [[package]] name = "signal-hook" version = "0.3.17" @@ -5228,7 +5312,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "assert_matches", @@ -5252,7 +5336,7 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "log", @@ -5266,7 
+5350,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "log", @@ -5297,13 +5381,14 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", "blake3", "bv", "bytemuck", + "bytemuck_derive", "criterion", "crossbeam-channel", "dashmap", @@ -5331,6 +5416,7 @@ dependencies = [ "smallvec", "solana-accounts-db", "solana-bucket-map", + "solana-compute-budget", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-inline-spl", @@ -5352,7 +5438,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "bytemuck", @@ -5368,7 +5454,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program-tests" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -5379,7 +5465,7 @@ dependencies = [ [[package]] name = "solana-banking-bench" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 3.2.25", "crossbeam-channel", @@ -5403,7 +5489,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "borsh 1.5.1", "futures 0.3.30", @@ -5420,7 +5506,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "2.0.0" +version = "2.0.2" dependencies = [ "serde", "serde_derive", @@ -5430,7 +5516,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "crossbeam-channel", @@ -5448,7 +5534,7 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 3.2.25", "crossbeam-channel", @@ -5459,7 +5545,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "2.0.0" +version = "2.0.2" dependencies = [ "chrono", "clap 2.34.0", @@ -5505,7 +5591,7 @@ dependencies 
= [ [[package]] name = "solana-bloom" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bv", "fnv", @@ -5522,7 +5608,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -5533,12 +5619,13 @@ dependencies = [ "rand 0.8.5", "scopeguard", "solana-compute-budget", + "solana-curve25519", "solana-measure", "solana-poseidon", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", "solana-vote", - "solana-zk-token-sdk", "solana_rbpf", "test-case", "thiserror", @@ -5546,7 +5633,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program-tests" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -5557,10 +5644,11 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bv", "bytemuck", + "bytemuck_derive", "fs_extra", "log", "memmap2", @@ -5576,7 +5664,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-logger", @@ -5584,7 +5672,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-sbf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_cmd", "bzip2", @@ -5605,11 +5693,11 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "2.0.0" +version = "2.0.2" [[package]] name = "solana-cargo-test-sbf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "cargo_metadata", "clap 3.2.25", @@ -5621,7 +5709,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "chrono", @@ -5638,7 +5726,7 @@ dependencies = [ [[package]] name = "solana-clap-v3-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "chrono", @@ -5656,7 +5744,7 @@ dependencies = [ [[package]] name = "solana-cli" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"assert_matches", "bincode", @@ -5715,7 +5803,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "dirs-next", @@ -5730,7 +5818,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -5756,7 +5844,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -5788,7 +5876,7 @@ dependencies = [ [[package]] name = "solana-client-test" -version = "2.0.0" +version = "2.0.2" dependencies = [ "futures-util", "rand 0.8.5", @@ -5818,7 +5906,7 @@ dependencies = [ [[package]] name = "solana-compute-budget" -version = "2.0.0" +version = "2.0.2" dependencies = [ "rustc_version", "solana-frozen-abi", @@ -5827,7 +5915,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -5835,7 +5923,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "chrono", @@ -5848,7 +5936,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -5871,7 +5959,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.11", "assert_matches", @@ -5910,6 +5998,7 @@ dependencies = [ "solana-bloom", "solana-client", "solana-compute-budget", + "solana-connection-cache", "solana-core", "solana-cost-model", "solana-entry", @@ -5958,7 +6047,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.11", "itertools 0.12.1", @@ -5982,9 +6071,20 @@ dependencies = [ "test-case", ] +[[package]] +name = "solana-curve25519" +version = "2.0.2" +dependencies = [ + 
"bytemuck", + "bytemuck_derive", + "curve25519-dalek", + "solana-program", + "thiserror", +] + [[package]] name = "solana-dos" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "clap 3.2.25", @@ -5995,6 +6095,7 @@ dependencies = [ "serde", "solana-bench-tps", "solana-client", + "solana-connection-cache", "solana-core", "solana-faucet", "solana-gossip", @@ -6016,7 +6117,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "console", "indicatif", @@ -6028,7 +6129,7 @@ dependencies = [ [[package]] name = "solana-ed25519-program-tests" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "ed25519-dalek", @@ -6039,7 +6140,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -6061,7 +6162,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "byteorder", @@ -6083,7 +6184,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bitflags 2.6.0", "bs58", @@ -6104,17 +6205,17 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "2.0.0" +version = "2.0.2" dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "solana-genesis" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -6139,7 +6240,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-accounts-db", @@ -6150,7 +6251,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "2.0.0" +version = "2.0.2" dependencies = [ "agave-geyser-plugin-interface", "bs58", @@ -6175,7 +6276,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"assert_matches", "bincode", @@ -6226,7 +6327,7 @@ dependencies = [ [[package]] name = "solana-inline-spl" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "rustc_version", @@ -6235,7 +6336,7 @@ dependencies = [ [[package]] name = "solana-keygen" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bs58", "clap 3.2.25", @@ -6252,7 +6353,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -6324,7 +6425,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -6332,12 +6433,13 @@ dependencies = [ "solana-measure", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", "solana_rbpf", ] [[package]] name = "solana-local-cluster" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "crossbeam-channel", @@ -6356,6 +6458,7 @@ dependencies = [ "solana-entry", "solana-gossip", "solana-ledger", + "solana-local-cluster", "solana-logger", "solana-pubsub-client", "solana-quic-client", @@ -6377,7 +6480,7 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "2.0.0" +version = "2.0.2" dependencies = [ "byte-unit", "clap 3.2.25", @@ -6389,7 +6492,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "2.0.0" +version = "2.0.2" dependencies = [ "env_logger", "lazy_static", @@ -6398,7 +6501,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-sdk", @@ -6406,11 +6509,11 @@ dependencies = [ [[package]] name = "solana-memory-management" -version = "2.0.0" +version = "2.0.2" [[package]] name = "solana-merkle-root-bench" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "log", @@ -6423,7 +6526,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "2.0.0" +version = "2.0.2" dependencies = [ "fast-math", "hex", @@ 
-6432,7 +6535,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "2.0.0" +version = "2.0.2" dependencies = [ "crossbeam-channel", "env_logger", @@ -6448,7 +6551,7 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 3.2.25", "rand 0.8.5", @@ -6460,7 +6563,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "clap 3.2.25", @@ -6487,7 +6590,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-notifier" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "reqwest", @@ -6497,17 +6600,17 @@ dependencies = [ [[package]] name = "solana-package-metadata-macro" -version = "2.0.0" +version = "2.0.2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "toml 0.8.14", ] [[package]] name = "solana-perf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.11", "assert_matches", @@ -6537,7 +6640,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -6559,7 +6662,7 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 3.2.25", "log", @@ -6575,7 +6678,7 @@ dependencies = [ [[package]] name = "solana-poseidon" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ark-bn254", "light-poseidon", @@ -6584,7 +6687,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "arbitrary", @@ -6603,7 +6706,7 @@ dependencies = [ "bs58", "bv", "bytemuck", - "cc", + "bytemuck_derive", "console_error_panic_hook", "console_log", "curve25519-dalek", @@ -6618,6 +6721,7 @@ dependencies = [ "num-derive", "num-traits", "parking_lot 0.12.3", + "qualifier_attr", "rand 0.8.5", "rustc_version", "rustversion", @@ -6638,7 +6742,7 @@ 
dependencies = [ [[package]] name = "solana-program-runtime" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "base64 0.22.1", @@ -6660,6 +6764,7 @@ dependencies = [ "solana-logger", "solana-measure", "solana-sdk", + "solana-type-overrides", "solana-vote", "solana_rbpf", "test-case", @@ -6668,7 +6773,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "async-trait", @@ -6700,7 +6805,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "crossbeam-channel", @@ -6724,7 +6829,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-mutex", "async-trait", @@ -6751,7 +6856,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "2.0.0" +version = "2.0.2" dependencies = [ "lazy_static", "num_cpus", @@ -6759,7 +6864,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "console", @@ -6778,7 +6883,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -6839,7 +6944,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "async-trait", @@ -6869,11 +6974,12 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "base64 0.22.1", "bs58", + "const_format", "jsonrpc-core", "reqwest", "reqwest-middleware", @@ -6891,7 +6997,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "clap 2.34.0", @@ -6908,7 +7014,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "2.0.0" +version = "2.0.2" dependencies = 
[ "bincode", "bs58", @@ -6935,7 +7041,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "2.0.0" +version = "2.0.2" dependencies = [ "aquamarine", "arrayref", @@ -7002,6 +7108,8 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "solana-zk-elgamal-proof-program", + "solana-zk-sdk", "solana-zk-token-proof-program", "solana-zk-token-sdk", "static_assertions", @@ -7014,7 +7122,7 @@ dependencies = [ [[package]] name = "solana-runtime-transaction" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -7028,7 +7136,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "assert_matches", @@ -7037,6 +7145,7 @@ dependencies = [ "borsh 1.5.1", "bs58", "bytemuck", + "bytemuck_derive", "byteorder", "chrono", "curve25519-dalek", @@ -7085,13 +7194,13 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bs58", "proc-macro2", "quote", "rustversion", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -7102,11 +7211,12 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "2.0.0" +version = "2.0.2" dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-connection-cache", "solana-logger", "solana-measure", "solana-metrics", @@ -7117,7 +7227,7 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.34.0", "solana-clap-utils", @@ -7133,7 +7243,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -7145,13 +7255,14 @@ dependencies = [ "solana-logger", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", "solana-vote-program", "test-case", ] [[package]] name = "solana-storage-bigtable" -version = "2.0.0" +version = 
"2.0.2" dependencies = [ "backoff", "bincode", @@ -7183,7 +7294,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "bs58", @@ -7199,7 +7310,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "async-channel", @@ -7233,7 +7344,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "itertools 0.12.1", @@ -7261,12 +7372,13 @@ dependencies = [ "solana-sdk", "solana-svm", "solana-system-program", + "solana-type-overrides", "solana-vote", ] [[package]] name = "solana-system-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -7277,11 +7389,12 @@ dependencies = [ "solana-logger", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", ] [[package]] name = "solana-test-validator" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -7311,7 +7424,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -7325,7 +7438,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -7359,7 +7472,7 @@ dependencies = [ [[package]] name = "solana-tps-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "serial_test", @@ -7380,7 +7493,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -7402,7 +7515,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "clap 2.34.0", @@ -7429,7 +7542,7 @@ dependencies = [ [[package]] name = "solana-transaction-metrics-tracker" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", 
"base64 0.22.1", @@ -7443,7 +7556,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -7468,7 +7581,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -7503,9 +7616,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-type-overrides" +version = "2.0.2" +dependencies = [ + "futures 0.3.30", + "lazy_static", + "rand 0.8.5", + "shuttle", +] + [[package]] name = "solana-udp-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "solana-connection-cache", @@ -7518,7 +7641,7 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "solana-sdk", @@ -7527,7 +7650,7 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-pool" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "crossbeam-channel", @@ -7548,7 +7671,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "serde_json", "solana-metrics", @@ -7556,7 +7679,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "rustc_version", @@ -7570,7 +7693,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "itertools 0.12.1", @@ -7587,7 +7710,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -7609,7 +7732,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "assert_matches", @@ -7638,7 +7761,7 @@ dependencies = [ [[package]] name = "solana-zk-elgamal-proof-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"bytemuck", "num-derive", @@ -7650,7 +7773,7 @@ dependencies = [ [[package]] name = "solana-zk-keygen" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bs58", "clap 3.2.25", @@ -7669,12 +7792,13 @@ dependencies = [ [[package]] name = "solana-zk-sdk" -version = "2.0.0" +version = "2.0.2" dependencies = [ "aes-gcm-siv", "base64 0.22.1", "bincode", "bytemuck", + "bytemuck_derive", "curve25519-dalek", "itertools 0.12.1", "lazy_static", @@ -7696,7 +7820,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "criterion", @@ -7710,7 +7834,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program-tests" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "curve25519-dalek", @@ -7722,15 +7846,15 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "2.0.0" +version = "2.0.2" dependencies = [ "aes-gcm-siv", "base64 0.22.1", "bincode", "bytemuck", + "bytemuck_derive", "byteorder", "curve25519-dalek", - "getrandom 0.1.16", "itertools 0.12.1", "lazy_static", "merlin", @@ -7741,6 +7865,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3 0.9.1", + "solana-curve25519", "solana-program", "solana-sdk", "subtle", @@ -7783,9 +7908,9 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spl-associated-token-account" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2e688554bac5838217ffd1fab7845c573ff106b6336bf7d290db7c98d5a8efd" +checksum = "68034596cf4804880d265f834af1ff2f821ad5293e41fa0f8f59086c181fc38e" dependencies = [ "assert_matches", "borsh 1.5.1", @@ -7799,9 +7924,9 @@ dependencies = [ [[package]] name = "spl-discriminator" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d1814406e98b08c5cd02c1126f83fd407ad084adce0b05fda5730677822eac" +checksum = 
"a38ea8b6dedb7065887f12d62ed62c1743aa70749e8558f963609793f6fb12bc" dependencies = [ "bytemuck", "solana-program", @@ -7816,7 +7941,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -7828,15 +7953,15 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.68", + "syn 2.0.71", "thiserror", ] [[package]] name = "spl-instruction-padding" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be3f0c53b6eb2dfccb77b5710bddb04548da338a3f56bed214177f6a577d1ca6" +checksum = "8cdbcd2652240c5b04befd4807c2b0f9412c66b18db398ca955f236a8ff1c378" dependencies = [ "num_enum", "solana-program", @@ -7844,21 +7969,22 @@ dependencies = [ [[package]] name = "spl-memo" -version = "4.0.1" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e9bae02de3405079a057fe244c867a08f92d48327d231fc60da831f94caf0a" +checksum = "a0dba2f2bb6419523405d21c301a32c9f9568354d4742552e7972af801f4bdb3" dependencies = [ "solana-program", ] [[package]] name = "spl-pod" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ce669f48cf2eca1ec518916d8725596bfb655beb1c74374cf71dc6cb773c9" +checksum = "e6166a591d93af33afd75bbd8573c5fd95fb1213f1bf254f0508c89fdb5ee156" dependencies = [ "borsh 1.5.1", "bytemuck", + "bytemuck_derive", "solana-program", "solana-zk-token-sdk", "spl-program-error", @@ -7866,9 +7992,9 @@ dependencies = [ [[package]] name = "spl-program-error" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49065093ea91f57b9b2bd81493ff705e2ad4e64507a07dbc02b085778e02770e" +checksum = "d7b28bed65356558133751cc32b48a7a5ddfc59ac4e941314630bbed1ac10532" dependencies = [ "num-derive", "num-traits", @@ -7886,14 +8012,14 @@ dependencies = 
[ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "spl-tlv-account-resolution" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cace91ba08984a41556efe49cbf2edca4db2f577b649da7827d3621161784bf8" +checksum = "37a75a5f0fcc58126693ed78a17042e9dc53f07e357d6be91789f7d62aff61a4" dependencies = [ "bytemuck", "solana-program", @@ -7905,9 +8031,9 @@ dependencies = [ [[package]] name = "spl-token" -version = "4.0.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ae123223633a389f95d1da9d49c2d0a50d499e7060b9624626a69e536ad2a4" +checksum = "70a0f06ac7f23dc0984931b1fe309468f14ea58e32660439c1cef19456f5d0e3" dependencies = [ "arrayref", "bytemuck", @@ -7920,9 +8046,9 @@ dependencies = [ [[package]] name = "spl-token-2022" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5412f99ae7ee6e0afde00defaa354e6228e47e30c0e3adf553e2e01e6abb584" +checksum = "d9c10f3483e48679619c76598d4e4aebb955bc49b0a5cc63323afbf44135c9bf" dependencies = [ "arrayref", "bytemuck", @@ -7944,9 +8070,9 @@ dependencies = [ [[package]] name = "spl-token-group-interface" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419b5cfa3ee8e0f2386fd7e02a33b3ec8a7db4a9c7064a2ea24849dc4a273b6" +checksum = "df8752b85a5ecc1d9f3a43bce3dd9a6a053673aacf5deb513d1cbb88d3534ffd" dependencies = [ "bytemuck", "solana-program", @@ -7957,9 +8083,9 @@ dependencies = [ [[package]] name = "spl-token-metadata-interface" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30179c47e93625680dabb620c6e7931bd12d62af390f447bc7beb4a3a9b5feee" +checksum = "c6c2318ddff97e006ed9b1291ebec0750a78547f870f62a69c56fe3b46a5d8fc" dependencies = [ "borsh 1.5.1", "solana-program", @@ -7971,9 +8097,9 @@ 
dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a98359769cd988f7b35c02558daa56d496a7e3bd8626e61f90a7c757eedb9b" +checksum = "a110f33d941275d9f868b96daaa993f1e73b6806cc8836e43075b4d3ad8338a7" dependencies = [ "arrayref", "bytemuck", @@ -7987,9 +8113,9 @@ dependencies = [ [[package]] name = "spl-type-length-value" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ce13429dbd41d2cee8a73931c05fda0b0c8ca156a8b0c19445642550bb61a" +checksum = "bdcd73ec187bc409464c60759232e309f83b52a18a9c5610bf281c9c6432918c" dependencies = [ "bytemuck", "solana-program", @@ -8080,9 +8206,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" dependencies = [ "proc-macro2", "quote", @@ -8098,7 +8224,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -8177,6 +8303,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tar" version = "0.4.41" @@ -8277,7 +8409,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -8288,7 +8420,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "test-case-core", ] @@ -8309,22 +8441,22 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = 
"thiserror" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -8426,9 +8558,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -8475,7 +8607,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -8587,7 +8719,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit 0.22.15", ] [[package]] @@ -8612,9 +8744,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.14" +version = "0.22.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" dependencies = [ "indexmap 2.2.6", "serde", @@ -8719,7 +8851,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -9027,7 +9159,7 @@ dependencies = 
[ "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "wasm-bindgen-shared", ] @@ -9061,7 +9193,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9152,13 +9284,42 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" +dependencies = [ + "windows-core 0.54.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -9176,7 +9337,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -9196,18 +9357,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -9218,9 +9379,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -9230,9 +9391,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -9242,15 +9403,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -9260,9 +9421,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -9272,9 +9433,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -9284,9 +9445,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -9296,9 +9457,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -9328,6 +9489,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "x509-parser" version = "0.14.0" @@ -9368,22 +9538,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -9403,7 +9573,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -9427,9 +9597,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = "2.0.12+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index cdea68a673eff1..b1169976cf2434 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,7 @@ 
members = [ "connection-cache", "core", "cost-model", + "curves/*", "dos", "download-utils", "entry", @@ -116,6 +117,7 @@ members = [ "transaction-metrics-tracker", "transaction-status", "turbine", + "type-overrides", "udp-client", "unified-scheduler-logic", "unified-scheduler-pool", @@ -135,7 +137,7 @@ exclude = ["programs/sbf", "svm/tests/example-programs"] resolver = "2" [workspace.package] -version = "2.0.0" +version = "2.0.2" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" homepage = "https://anza.xyz/" @@ -171,13 +173,13 @@ bs58 = "0.5.1" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.16.0" +bytemuck = "1.16.1" +bytemuck_derive = "1.7.0" byteorder = "1.5.0" bytes = "1.6" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" -cc = "1.0.94" chrono = { version = "0.4.38", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" @@ -310,110 +312,113 @@ serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.8" sha3 = "0.10.8" +shuttle = "0.7.1" signal-hook = "0.3.17" siphasher = "0.3.11" smallvec = "1.13.2" smpl_jwt = "0.7.1" socket2 = "0.5.7" soketto = "0.7" -solana-account-decoder = { path = "account-decoder", version = "=2.0.0" } -solana-accounts-db = { path = "accounts-db", version = "=2.0.0" } -solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.0.0" } -solana-banks-client = { path = "banks-client", version = "=2.0.0" } -solana-banks-interface = { path = "banks-interface", version = "=2.0.0" } -solana-banks-server = { path = "banks-server", version = "=2.0.0" } -solana-bench-tps = { path = "bench-tps", version = "=2.0.0" } -solana-bloom = { path = "bloom", version = "=2.0.0" } -solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=2.0.0" } -solana-bucket-map = { path = "bucket_map", version = "=2.0.0" } -agave-cargo-registry = { path = "cargo-registry", version = "=2.0.0" } -solana-clap-utils = { path = "clap-utils", version = 
"=2.0.0" } -solana-clap-v3-utils = { path = "clap-v3-utils", version = "=2.0.0" } -solana-cli = { path = "cli", version = "=2.0.0" } -solana-cli-config = { path = "cli-config", version = "=2.0.0" } -solana-cli-output = { path = "cli-output", version = "=2.0.0" } -solana-client = { path = "client", version = "=2.0.0" } -solana-compute-budget = { path = "compute-budget", version = "=2.0.0" } -solana-compute-budget-program = { path = "programs/compute-budget", version = "=2.0.0" } -solana-config-program = { path = "programs/config", version = "=2.0.0" } -solana-connection-cache = { path = "connection-cache", version = "=2.0.0", default-features = false } -solana-core = { path = "core", version = "=2.0.0" } -solana-cost-model = { path = "cost-model", version = "=2.0.0" } -solana-download-utils = { path = "download-utils", version = "=2.0.0" } -solana-entry = { path = "entry", version = "=2.0.0" } -solana-faucet = { path = "faucet", version = "=2.0.0" } -solana-frozen-abi = { path = "frozen-abi", version = "=2.0.0" } -solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=2.0.0" } -solana-tps-client = { path = "tps-client", version = "=2.0.0" } -solana-genesis = { path = "genesis", version = "=2.0.0" } -solana-genesis-utils = { path = "genesis-utils", version = "=2.0.0" } -agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.0.0" } -solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.0.0" } -solana-gossip = { path = "gossip", version = "=2.0.0" } -solana-inline-spl = { path = "inline-spl", version = "=2.0.0" } -solana-ledger = { path = "ledger", version = "=2.0.0" } -solana-loader-v4-program = { path = "programs/loader-v4", version = "=2.0.0" } -solana-local-cluster = { path = "local-cluster", version = "=2.0.0" } -solana-logger = { path = "logger", version = "=2.0.0" } -solana-measure = { path = "measure", version = "=2.0.0" } -solana-merkle-tree = { path = "merkle-tree", version = "=2.0.0" } 
-solana-metrics = { path = "metrics", version = "=2.0.0" } -solana-net-utils = { path = "net-utils", version = "=2.0.0" } +solana-account-decoder = { path = "account-decoder", version = "=2.0.2" } +solana-accounts-db = { path = "accounts-db", version = "=2.0.2" } +solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.0.2" } +solana-banks-client = { path = "banks-client", version = "=2.0.2" } +solana-banks-interface = { path = "banks-interface", version = "=2.0.2" } +solana-banks-server = { path = "banks-server", version = "=2.0.2" } +solana-bench-tps = { path = "bench-tps", version = "=2.0.2" } +solana-bloom = { path = "bloom", version = "=2.0.2" } +solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=2.0.2" } +solana-bucket-map = { path = "bucket_map", version = "=2.0.2" } +agave-cargo-registry = { path = "cargo-registry", version = "=2.0.2" } +solana-clap-utils = { path = "clap-utils", version = "=2.0.2" } +solana-clap-v3-utils = { path = "clap-v3-utils", version = "=2.0.2" } +solana-cli = { path = "cli", version = "=2.0.2" } +solana-cli-config = { path = "cli-config", version = "=2.0.2" } +solana-cli-output = { path = "cli-output", version = "=2.0.2" } +solana-client = { path = "client", version = "=2.0.2" } +solana-compute-budget = { path = "compute-budget", version = "=2.0.2" } +solana-compute-budget-program = { path = "programs/compute-budget", version = "=2.0.2" } +solana-config-program = { path = "programs/config", version = "=2.0.2" } +solana-connection-cache = { path = "connection-cache", version = "=2.0.2", default-features = false } +solana-core = { path = "core", version = "=2.0.2" } +solana-cost-model = { path = "cost-model", version = "=2.0.2" } +solana-curve25519 = { path = "curves/curve25519", version = "=2.0.2" } +solana-download-utils = { path = "download-utils", version = "=2.0.2" } +solana-entry = { path = "entry", version = "=2.0.2" } +solana-faucet = { path = "faucet", version = 
"=2.0.2" } +solana-frozen-abi = { path = "frozen-abi", version = "=2.0.2" } +solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=2.0.2" } +solana-tps-client = { path = "tps-client", version = "=2.0.2" } +solana-genesis = { path = "genesis", version = "=2.0.2" } +solana-genesis-utils = { path = "genesis-utils", version = "=2.0.2" } +agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.0.2" } +solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.0.2" } +solana-gossip = { path = "gossip", version = "=2.0.2" } +solana-inline-spl = { path = "inline-spl", version = "=2.0.2" } +solana-ledger = { path = "ledger", version = "=2.0.2" } +solana-loader-v4-program = { path = "programs/loader-v4", version = "=2.0.2" } +solana-local-cluster = { path = "local-cluster", version = "=2.0.2" } +solana-logger = { path = "logger", version = "=2.0.2" } +solana-measure = { path = "measure", version = "=2.0.2" } +solana-merkle-tree = { path = "merkle-tree", version = "=2.0.2" } +solana-metrics = { path = "metrics", version = "=2.0.2" } +solana-net-utils = { path = "net-utils", version = "=2.0.2" } solana-nohash-hasher = "0.2.1" -solana-notifier = { path = "notifier", version = "=2.0.0" } -solana-package-metadata-macro = { path = "sdk/package-metadata-macro", version = "=2.0.0" } -solana-perf = { path = "perf", version = "=2.0.0" } -solana-poh = { path = "poh", version = "=2.0.0" } -solana-poseidon = { path = "poseidon", version = "=2.0.0" } -solana-program = { path = "sdk/program", version = "=2.0.0" } -solana-program-runtime = { path = "program-runtime", version = "=2.0.0" } -solana-program-test = { path = "program-test", version = "=2.0.0" } -solana-pubsub-client = { path = "pubsub-client", version = "=2.0.0" } -solana-quic-client = { path = "quic-client", version = "=2.0.0" } -solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.0.0" } -solana-remote-wallet = { path = "remote-wallet", version = 
"=2.0.0", default-features = false } -solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.0.0" } -solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.0.0" } -solana-rpc = { path = "rpc", version = "=2.0.0" } -solana-rpc-client = { path = "rpc-client", version = "=2.0.0", default-features = false } -solana-rpc-client-api = { path = "rpc-client-api", version = "=2.0.0" } -solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=2.0.0" } -solana-runtime = { path = "runtime", version = "=2.0.0" } -solana-runtime-transaction = { path = "runtime-transaction", version = "=2.0.0" } -solana-sdk = { path = "sdk", version = "=2.0.0" } -solana-sdk-macro = { path = "sdk/macro", version = "=2.0.0" } -solana-send-transaction-service = { path = "send-transaction-service", version = "=2.0.0" } -solana-stake-program = { path = "programs/stake", version = "=2.0.0" } -solana-storage-bigtable = { path = "storage-bigtable", version = "=2.0.0" } -solana-storage-proto = { path = "storage-proto", version = "=2.0.0" } -solana-streamer = { path = "streamer", version = "=2.0.0" } -solana-svm = { path = "svm", version = "=2.0.0" } -solana-system-program = { path = "programs/system", version = "=2.0.0" } -solana-test-validator = { path = "test-validator", version = "=2.0.0" } -solana-thin-client = { path = "thin-client", version = "=2.0.0" } -solana-tpu-client = { path = "tpu-client", version = "=2.0.0", default-features = false } -solana-transaction-status = { path = "transaction-status", version = "=2.0.0" } -solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=2.0.0" } -solana-turbine = { path = "turbine", version = "=2.0.0" } -solana-udp-client = { path = "udp-client", version = "=2.0.0" } -solana-version = { path = "version", version = "=2.0.0" } -solana-vote = { path = "vote", version = "=2.0.0" } -solana-vote-program = { path = "programs/vote", version = "=2.0.0" } 
-solana-wen-restart = { path = "wen-restart", version = "=2.0.0" } -solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=2.0.0" } -solana-zk-keygen = { path = "zk-keygen", version = "=2.0.0" } -solana-zk-sdk = { path = "zk-sdk", version = "=2.0.0" } -solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.0.0" } -solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.0.0" } +solana-notifier = { path = "notifier", version = "=2.0.2" } +solana-package-metadata-macro = { path = "sdk/package-metadata-macro", version = "=2.0.2" } +solana-perf = { path = "perf", version = "=2.0.2" } +solana-poh = { path = "poh", version = "=2.0.2" } +solana-poseidon = { path = "poseidon", version = "=2.0.2" } +solana-program = { path = "sdk/program", version = "=2.0.2" } +solana-program-runtime = { path = "program-runtime", version = "=2.0.2" } +solana-program-test = { path = "program-test", version = "=2.0.2" } +solana-pubsub-client = { path = "pubsub-client", version = "=2.0.2" } +solana-quic-client = { path = "quic-client", version = "=2.0.2" } +solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.0.2" } +solana-remote-wallet = { path = "remote-wallet", version = "=2.0.2", default-features = false } +solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.0.2" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.0.2" } +solana-rpc = { path = "rpc", version = "=2.0.2" } +solana-rpc-client = { path = "rpc-client", version = "=2.0.2", default-features = false } +solana-rpc-client-api = { path = "rpc-client-api", version = "=2.0.2" } +solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=2.0.2" } +solana-runtime = { path = "runtime", version = "=2.0.2" } +solana-runtime-transaction = { path = "runtime-transaction", version = "=2.0.2" } +solana-sdk = { path = "sdk", version = "=2.0.2" } +solana-sdk-macro = { path = "sdk/macro", 
version = "=2.0.2" } +solana-send-transaction-service = { path = "send-transaction-service", version = "=2.0.2" } +solana-stake-program = { path = "programs/stake", version = "=2.0.2" } +solana-storage-bigtable = { path = "storage-bigtable", version = "=2.0.2" } +solana-storage-proto = { path = "storage-proto", version = "=2.0.2" } +solana-streamer = { path = "streamer", version = "=2.0.2" } +solana-svm = { path = "svm", version = "=2.0.2" } +solana-system-program = { path = "programs/system", version = "=2.0.2" } +solana-test-validator = { path = "test-validator", version = "=2.0.2" } +solana-thin-client = { path = "thin-client", version = "=2.0.2" } +solana-tpu-client = { path = "tpu-client", version = "=2.0.2", default-features = false } +solana-transaction-status = { path = "transaction-status", version = "=2.0.2" } +solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=2.0.2" } +solana-turbine = { path = "turbine", version = "=2.0.2" } +solana-type-overrides = { path = "type-overrides", version = "=2.0.2" } +solana-udp-client = { path = "udp-client", version = "=2.0.2" } +solana-version = { path = "version", version = "=2.0.2" } +solana-vote = { path = "vote", version = "=2.0.2" } +solana-vote-program = { path = "programs/vote", version = "=2.0.2" } +solana-wen-restart = { path = "wen-restart", version = "=2.0.2" } +solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=2.0.2" } +solana-zk-keygen = { path = "zk-keygen", version = "=2.0.2" } +solana-zk-sdk = { path = "zk-sdk", version = "=2.0.2" } +solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.0.2" } +solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.0.2" } solana_rbpf = "=0.8.1" -spl-associated-token-account = "=3.0.2" -spl-instruction-padding = "0.1" -spl-memo = "=4.0.1" -spl-pod = "=0.2.2" -spl-token = "=4.0.1" -spl-token-2022 = "=3.0.2" -spl-token-group-interface = "=0.2.3" 
-spl-token-metadata-interface = "=0.3.3" +spl-associated-token-account = "=4.0.0" +spl-instruction-padding = "0.2" +spl-memo = "=5.0.0" +spl-pod = "=0.3.0" +spl-token = "=6.0.0" +spl-token-2022 = "=4.0.0" +spl-token-group-interface = "=0.3.0" +spl-token-metadata-interface = "=0.4.0" static_assertions = "1.1.0" stream-cancel = "0.8.2" strum = "0.24" @@ -442,7 +447,7 @@ tonic-build = "0.9.2" trees = "0.4.2" tungstenite = "0.20.1" uriparse = "0.6.4" -url = "2.5.1" +url = "2.5.2" vec_extract_if_polyfill = "0.1.0" wasm-bindgen = "0.2" winapi = "0.3.8" @@ -488,6 +493,7 @@ crossbeam-epoch = { git = "https://github.com/anza-xyz/crossbeam", rev = "fd279d # # There is a similar override in `programs/sbf/Cargo.toml`. Please keep both # comments and the overrides in sync. +solana-curve25519 = { path = "curves/curve25519" } solana-program = { path = "sdk/program" } solana-zk-sdk = { path = "zk-sdk" } solana-zk-token-sdk = { path = "zk-token-sdk" } diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs index 41a7eb44f9e14f..878d738fe03367 100644 --- a/account-decoder/src/parse_token.rs +++ b/account-decoder/src/parse_token.rs @@ -26,37 +26,6 @@ pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool { *program_id == spl_token::id() || *program_id == spl_token_2022::id() } -// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to -// solana_sdk::pubkey::Pubkey -#[deprecated( - since = "1.16.0", - note = "Pubkey conversions no longer needed. Please use spl_token::native_mint::id() directly" -)] -pub fn spl_token_native_mint() -> Pubkey { - Pubkey::new_from_array(spl_token::native_mint::id().to_bytes()) -} - -// The program id of the `spl_token_native_mint` account -#[deprecated( - since = "1.16.0", - note = "Pubkey conversions no longer needed. 
Please use spl_token::id() directly" -)] -pub fn spl_token_native_mint_program_id() -> Pubkey { - spl_token::id() -} - -// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey -#[deprecated(since = "1.16.0", note = "Pubkey conversions no longer needed")] -pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey { - SplTokenPubkey::new_from_array(pubkey.to_bytes()) -} - -// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey -#[deprecated(since = "1.16.0", note = "Pubkey conversions no longer needed")] -pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey { - Pubkey::new_from_array(pubkey.to_bytes()) -} - #[deprecated(since = "2.0.0", note = "Use `parse_token_v2` instead")] pub fn parse_token( data: &[u8], diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 62632c24de696f..b7c1327194f2dd 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -14,6 +14,7 @@ bincode = { workspace = true } blake3 = { workspace = true } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } # bzip2 = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } @@ -65,8 +66,10 @@ rand_chacha = { workspace = true } serde_bytes = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] } +solana-compute-budget = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +solana-svm = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } diff --git a/accounts-db/accounts-hash-cache-tool/Cargo.toml b/accounts-db/accounts-hash-cache-tool/Cargo.toml 
index 501e0dfdb8b71d..e4803261ef6995 100644 --- a/accounts-db/accounts-hash-cache-tool/Cargo.toml +++ b/accounts-db/accounts-hash-cache-tool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "agave-accounts-hash-cache-tool" -description = "Tool to inspect accounts hash cache files" +description = "Tool for accounts hash cache files" publish = false version = { workspace = true } authors = { workspace = true } diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 98778049504216..c56369391c8077 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -1,12 +1,17 @@ use { bytemuck::Zeroable as _, - clap::{crate_description, crate_name, value_t_or_exit, App, Arg}, + clap::{ + crate_description, crate_name, value_t_or_exit, App, AppSettings, Arg, ArgMatches, + SubCommand, + }, solana_accounts_db::{CacheHashDataFileEntry, CacheHashDataFileHeader}, std::{ + collections::HashMap, fs::File, io::{self, BufReader, Read as _}, mem::size_of, num::Saturating, + path::Path, }, }; @@ -14,62 +19,73 @@ fn main() { let matches = App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) - .arg( - Arg::with_name("path") - .index(1) - .takes_value(true) - .value_name("PATH") - .help("Accounts hash cache file to inspect"), + .global_setting(AppSettings::ArgRequiredElseHelp) + .global_setting(AppSettings::ColoredHelp) + .global_setting(AppSettings::InferSubcommands) + .global_setting(AppSettings::UnifiedHelpMessage) + .global_setting(AppSettings::VersionlessSubcommands) + .subcommand( + SubCommand::with_name("inspect") + .about( + "Inspect an accounts hash cache file and display \ + each account's address, hash, and balance", + ) + .arg( + Arg::with_name("force") + .long("force") + .takes_value(false) + .help("Continue even if sanity checks fail"), + ) + .arg( + Arg::with_name("path") + .index(1) + .takes_value(true) + 
.value_name("PATH") + .help("Accounts hash cache file to inspect"), + ), ) - .arg( - Arg::with_name("force") - .long("force") - .takes_value(false) - .help("Continue even if sanity checks fail"), + .subcommand( + SubCommand::with_name("diff") + .about("Diff two accounts hash cache files") + .arg( + Arg::with_name("path1") + .index(1) + .takes_value(true) + .value_name("PATH1") + .help("Accounts hash cache file 1 to diff"), + ) + .arg( + Arg::with_name("path2") + .index(2) + .takes_value(true) + .value_name("PATH2") + .help("Accounts hash cache file 2 to diff"), + ), ) .get_matches(); - let force = matches.is_present("force"); - let path = value_t_or_exit!(matches, "path", String); - - let file = File::open(&path).unwrap_or_else(|err| { - eprintln!("Failed to open accounts hash cache file '{path}': {err}"); - std::process::exit(1); - }); - let actual_file_size = file - .metadata() - .unwrap_or_else(|err| { - eprintln!("Failed to query file metadata: {err}"); - std::process::exit(1); - }) - .len(); - let mut reader = BufReader::new(file); - - let header = { - let mut header = CacheHashDataFileHeader::zeroed(); - reader - .read_exact(bytemuck::bytes_of_mut(&mut header)) - .unwrap_or_else(|err| { - eprintln!("Failed to read header: {err}"); - std::process::exit(1); - }); - header - }; - - // Sanity checks -- ensure the actual file size matches the expected file size - let expected_file_size = size_of::() - .saturating_add(size_of::().saturating_mul(header.count)); - if actual_file_size != expected_file_size as u64 { - eprintln!( - "Failed sanitization: actual file size does not match expected file size! 
\ - actual: {actual_file_size}, expected: {expected_file_size}", - ); - if !force { - std::process::exit(1); + match matches.subcommand() { + ("inspect", Some(subcommand_matches)) => do_inspect(&matches, subcommand_matches) + .map_err(|err| format!("inspection failed: {err}")), + ("diff", Some(subcommand_matches)) => { + do_diff(&matches, subcommand_matches).map_err(|err| format!("diff failed: {err}")) } - eprintln!("Forced. Continuing... Results may be incorrect."); + _ => unreachable!(), } + .unwrap_or_else(|err| { + eprintln!("Error: {err}"); + std::process::exit(1); + }); +} +fn do_inspect( + _app_matches: &ArgMatches<'_>, + subcommand_matches: &ArgMatches<'_>, +) -> Result<(), String> { + let force = subcommand_matches.is_present("force"); + let path = value_t_or_exit!(subcommand_matches, "path", String); + let (mut reader, header) = open_file(&path, force) + .map_err(|err| format!("failed to open accounts hash cache file '{path}': {err}"))?; let count_width = (header.count as f64).log10().ceil() as usize; let mut count = Saturating(0usize); loop { @@ -80,10 +96,13 @@ fn main() { Err(err) => { if err.kind() == io::ErrorKind::UnexpectedEof && count.0 == header.count { // we've hit the expected end of the file + break; } else { - eprintln!("Failed to read entry {count}: {err}"); + return Err(format!( + "failed to read entry {count}, expected {}: {err}", + header.count, + )); } - break; } }; println!( @@ -96,4 +115,173 @@ fn main() { } println!("actual entries: {count}, expected: {}", header.count); + Ok(()) +} + +fn do_diff( + _app_matches: &ArgMatches<'_>, + subcommand_matches: &ArgMatches<'_>, +) -> Result<(), String> { + let force = false; // skipping sanity checks is not supported when diffing + let path1 = value_t_or_exit!(subcommand_matches, "path1", String); + let path2 = value_t_or_exit!(subcommand_matches, "path2", String); + let (mut reader1, header1) = open_file(&path1, force) + .map_err(|err| format!("failed to open accounts hash cache file 1 
'{path1}': {err}"))?; + let (mut reader2, header2) = open_file(&path2, force) + .map_err(|err| format!("failed to open accounts hash cache file 2 '{path2}': {err}"))?; + // Note: Purposely open both files before reading either one. This way, if there's an error + // opening file 2, we can bail early without having to wait for file 1 to be read completely. + + // extract the entries from both files + let do_extract = |num, reader: &mut BufReader<_>, header: &CacheHashDataFileHeader| { + let mut entries = HashMap::<_, _>::default(); + loop { + let mut entry = CacheHashDataFileEntry::zeroed(); + let result = reader.read_exact(bytemuck::bytes_of_mut(&mut entry)); + match result { + Ok(()) => {} + Err(err) => { + if err.kind() == io::ErrorKind::UnexpectedEof && entries.len() == header.count { + // we've hit the expected end of the file + break; + } else { + return Err(format!( + "failed to read entry {}, expected {}: {err}", + entries.len(), + header.count, + )); + } + } + }; + let CacheHashDataFileEntry { + hash, + lamports, + pubkey, + } = entry; + let old_value = entries.insert(pubkey, (hash, lamports)); + if let Some(old_value) = old_value { + let new_value = entries.get(&pubkey); + return Err(format!("found duplicate pubkey in file {num}: {pubkey}, old value: {old_value:?}, new value: {new_value:?}")); + } + } + Ok(entries) + }; + let entries1 = do_extract(1, &mut reader1, &header1)?; + let entries2 = do_extract(2, &mut reader2, &header2)?; + + // compute the differences between the files + let do_compute = |lhs: &HashMap<_, (_, _)>, rhs: &HashMap<_, (_, _)>| { + let mut unique_entries = Vec::new(); + let mut mismatch_entries = Vec::new(); + for (lhs_key, lhs_value) in lhs.iter() { + if let Some(rhs_value) = rhs.get(lhs_key) { + if lhs_value != rhs_value { + mismatch_entries.push(( + CacheHashDataFileEntry { + hash: lhs_value.0, + lamports: lhs_value.1, + pubkey: *lhs_key, + }, + CacheHashDataFileEntry { + hash: rhs_value.0, + lamports: rhs_value.1, + pubkey: 
*lhs_key, + }, + )); + } + } else { + unique_entries.push(CacheHashDataFileEntry { + hash: lhs_value.0, + lamports: lhs_value.1, + pubkey: *lhs_key, + }); + } + } + unique_entries.sort_unstable_by(|a, b| a.pubkey.cmp(&b.pubkey)); + mismatch_entries.sort_unstable_by(|a, b| a.0.pubkey.cmp(&b.0.pubkey)); + (unique_entries, mismatch_entries) + }; + let (unique_entries1, mismatch_entries) = do_compute(&entries1, &entries2); + let (unique_entries2, _) = do_compute(&entries2, &entries1); + + // display the unique entries in each file + let do_print = |entries: &[CacheHashDataFileEntry]| { + let count_width = (entries.len() as f64).log10().ceil() as usize; + if entries.is_empty() { + println!("(none)"); + } else { + for (i, entry) in entries.iter().enumerate() { + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {}", + entry.pubkey.to_string(), + entry.hash.0.to_string(), + entry.lamports, + ); + } + } + }; + println!("Unique entries in file 1:"); + do_print(&unique_entries1); + println!("Unique entries in file 2:"); + do_print(&unique_entries2); + + println!("Mismatch values:"); + let count_width = (mismatch_entries.len() as f64).log10().ceil() as usize; + if mismatch_entries.is_empty() { + println!("(none)"); + } else { + for (i, (lhs, rhs)) in mismatch_entries.iter().enumerate() { + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {}", + lhs.pubkey.to_string(), + lhs.hash.0.to_string(), + lhs.lamports, + ); + println!( + "{i:count_width$}: file 2: {:44}, hash: {:44}, lamports: {}", + "(same)".to_string(), + rhs.hash.0.to_string(), + rhs.lamports, + ); + } + } + + Ok(()) +} + +fn open_file( + path: impl AsRef, + force: bool, +) -> Result<(BufReader, CacheHashDataFileHeader), String> { + let file = File::open(path).map_err(|err| format!("{err}"))?; + let actual_file_size = file + .metadata() + .map_err(|err| format!("failed to query file metadata: {err}"))? 
+ .len(); + let mut reader = BufReader::new(file); + + let header = { + let mut header = CacheHashDataFileHeader::zeroed(); + reader + .read_exact(bytemuck::bytes_of_mut(&mut header)) + .map_err(|err| format!("failed to read header: {err}"))?; + header + }; + + // Sanity checks -- ensure the actual file size matches the expected file size + let expected_file_size = size_of::() + .saturating_add(size_of::().saturating_mul(header.count)); + if actual_file_size != expected_file_size as u64 { + let err_msg = format!( + "failed sanitization: actual file size does not match expected file size! \ + actual: {actual_file_size}, expected: {expected_file_size}", + ); + if force { + eprintln!("Warning: {err_msg}\nForced. Continuing... Results may be incorrect."); + } else { + return Err(err_msg); + } + } + + Ok((reader, header)) } diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 6c3f26ebfd3b41..202a220e10e425 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -340,6 +340,7 @@ fn bench_load_largest_accounts(b: &mut Bencher) { 20, &HashSet::new(), AccountAddressFilter::Exclude, + false, ) }); } diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 67fde4ce30394b..1f87be1ae86e44 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -29,9 +29,8 @@ use { transaction_context::TransactionAccount, }, solana_svm::{ - account_loader::TransactionLoadResult, - nonce_info::{NonceFull, NonceInfo}, - transaction_results::TransactionExecutionResult, + account_loader::TransactionLoadResult, nonce_info::NonceInfo, + rollback_accounts::RollbackAccounts, transaction_results::TransactionExecutionResult, }, std::{ cmp::Reverse, @@ -254,6 +253,7 @@ impl Accounts { num: usize, filter_by_address: &HashSet, filter: AccountAddressFilter, + sort_results: bool, ) -> ScanResult> { if num == 0 { return Ok(vec![]); @@ -287,7 +287,7 @@ impl Accounts { 
account_balances.push(Reverse((account.lamports(), *pubkey))); } }, - &ScanConfig::default(), + &ScanConfig::new(!sort_results), )?; Ok(account_balances .into_sorted_vec() @@ -480,6 +480,7 @@ impl Accounts { &self, ancestors: &Ancestors, bank_id: BankId, + sort_results: bool, ) -> ScanResult> { let mut collector = Vec::new(); self.accounts_db @@ -493,7 +494,7 @@ impl Accounts { collector.push((*pubkey, account, slot)) } }, - &ScanConfig::default(), + &ScanConfig::new(!sort_results), ) .map(|_| collector) } @@ -503,12 +504,17 @@ impl Accounts { ancestors: &Ancestors, bank_id: BankId, scan_func: F, + sort_results: bool, ) -> ScanResult<()> where F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>), { - self.accounts_db - .scan_accounts(ancestors, bank_id, scan_func, &ScanConfig::default()) + self.accounts_db.scan_accounts( + ancestors, + bank_id, + scan_func, + &ScanConfig::new(!sort_results), + ) } pub fn hold_range_in_memory( @@ -534,7 +540,7 @@ impl Accounts { "", // disable logging of this. 
We now parallelize it and this results in multiple parallel logs ancestors, range, - &ScanConfig::new(true), + &ScanConfig::default(), |option| Self::load_with_slot(&mut collector, option), ); collector @@ -716,21 +722,6 @@ impl Accounts { TransactionExecutionResult::NotExecuted(_) => continue, }; - enum AccountCollectionMode<'a> { - Normal, - FailedWithNonce { nonce: &'a NonceFull }, - } - - let collection_mode = match (execution_status, &loaded_transaction.nonce) { - (Ok(_), _) => AccountCollectionMode::Normal, - (Err(_), Some(nonce)) => AccountCollectionMode::FailedWithNonce { nonce }, - (Err(_), None) => { - // Fees for failed transactions which don't use durable nonces are - // deducted in Bank::filter_program_errors_and_collect_fee - continue; - } - }; - // Accounts that are invoked and also not passed as an instruction // account to a program don't need to be stored because it's assumed // to be impossible for a committable transaction to modify an @@ -748,21 +739,24 @@ impl Accounts { }; let message = tx.message(); + let rollback_accounts = &loaded_transaction.rollback_accounts; + let maybe_nonce_address = rollback_accounts.nonce().map(|account| account.address()); + for (i, (address, account)) in (0..message.account_keys().len()) .zip(loaded_transaction.accounts.iter_mut()) .filter(|(i, _)| is_storable_account(message, *i)) { if message.is_writable(i) { - let should_collect_account = match collection_mode { - AccountCollectionMode::Normal => true, - AccountCollectionMode::FailedWithNonce { nonce } => { + let should_collect_account = match execution_status { + Ok(()) => true, + Err(_) => { let is_fee_payer = i == 0; - let is_nonce_account = address == nonce.address(); - post_process_failed_nonce( + let is_nonce_account = Some(&*address) == maybe_nonce_address; + post_process_failed_tx( account, is_fee_payer, is_nonce_account, - nonce, + rollback_accounts, durable_nonce, lamports_per_signature, ); @@ -783,41 +777,43 @@ impl Accounts { } } -fn 
post_process_failed_nonce( +fn post_process_failed_tx( account: &mut AccountSharedData, is_fee_payer: bool, is_nonce_account: bool, - nonce: &NonceFull, + rollback_accounts: &RollbackAccounts, &durable_nonce: &DurableNonce, lamports_per_signature: u64, ) { + // For the case of RollbackAccounts::SameNonceAndFeePayer, it's crucial + // for `is_nonce_account` to be checked earlier than `is_fee_payer`. if is_nonce_account { - // The transaction failed which would normally drop the account - // processing changes, since this account is now being included - // in the accounts written back to the db, roll it back to - // pre-processing state. - *account = nonce.account().clone(); - - // Advance the stored blockhash to prevent fee theft by someone - // replaying nonce transactions that have failed with an - // `InstructionError`. - // - // Since we know we are dealing with a valid nonce account, - // unwrap is safe here - let nonce_versions = StateMut::::state(account).unwrap(); - if let NonceState::Initialized(ref data) = nonce_versions.state() { - let nonce_state = - NonceState::new_initialized(&data.authority, durable_nonce, lamports_per_signature); - let nonce_versions = NonceVersions::new(nonce_state); - account.set_state(&nonce_versions).unwrap(); + if let Some(nonce) = rollback_accounts.nonce() { + // The transaction failed which would normally drop the account + // processing changes, since this account is now being included + // in the accounts written back to the db, roll it back to + // pre-processing state. + *account = nonce.account().clone(); + + // Advance the stored blockhash to prevent fee theft by someone + // replaying nonce transactions that have failed with an + // `InstructionError`. 
+ // + // Since we know we are dealing with a valid nonce account, + // unwrap is safe here + let nonce_versions = StateMut::::state(account).unwrap(); + if let NonceState::Initialized(ref data) = nonce_versions.state() { + let nonce_state = NonceState::new_initialized( + &data.authority, + durable_nonce, + lamports_per_signature, + ); + let nonce_versions = NonceVersions::new(nonce_state); + account.set_state(&nonce_versions).unwrap(); + } } } else if is_fee_payer { - if let Some(fee_payer_account) = nonce.fee_payer_account() { - // Instruction error and fee-payer for this nonce tx is not - // the nonce account itself, rollback the fee payer to the - // fee-paid original state. - *account = fee_payer_account.clone(); - } + *account = rollback_accounts.fee_payer_account().clone(); } } @@ -826,6 +822,7 @@ mod tests { use { super::*, assert_matches::assert_matches, + solana_compute_budget::compute_budget_processor::ComputeBudgetLimits, solana_sdk::{ account::{AccountSharedData, WritableAccount}, address_lookup_table::state::LookupTableMeta, @@ -833,14 +830,17 @@ mod tests { hash::Hash, instruction::{CompiledInstruction, InstructionError}, message::{Message, MessageHeader}, - native_loader, nonce, nonce_account, + native_loader, + nonce::state::Data as NonceData, + nonce_account, rent_debits::RentDebits, signature::{keypair_from_seed, signers::Signers, Keypair, Signer}, system_instruction, system_program, transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS}, }, solana_svm::{ - account_loader::LoadedTransaction, transaction_results::TransactionExecutionDetails, + account_loader::LoadedTransaction, nonce_info::NoncePartial, + transaction_results::TransactionExecutionDetails, }, std::{ borrow::Cow, @@ -862,17 +862,13 @@ mod tests { )) } - fn new_execution_result( - status: Result<()>, - nonce: Option<&NonceFull>, - ) -> TransactionExecutionResult { + fn new_execution_result(status: Result<()>) -> TransactionExecutionResult { TransactionExecutionResult::Executed { details: 
TransactionExecutionDetails { status, log_messages: None, inner_instructions: None, fee_details: FeeDetails::default(), - is_nonce: nonce.is_some(), return_data: None, executed_units: 0, accounts_data_len_delta: 0, @@ -1569,8 +1565,9 @@ mod tests { let loaded0 = Ok(LoadedTransaction { accounts: transaction_accounts0, program_indices: vec![], - nonce: None, fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1579,8 +1576,9 @@ mod tests { let loaded1 = Ok(LoadedTransaction { accounts: transaction_accounts1, program_indices: vec![], - nonce: None, fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1598,7 +1596,7 @@ mod tests { .insert_new_readonly(&pubkey); } let txs = vec![tx0.clone(), tx1.clone()]; - let execution_results = vec![new_execution_result(Ok(()), None); 2]; + let execution_results = vec![new_execution_result(Ok(())); 2]; let (collected_accounts, transactions) = accounts.collect_accounts_to_store( &txs, &execution_results, @@ -1655,34 +1653,26 @@ mod tests { accounts.accounts_db.clean_accounts_for_tests(); } - fn create_accounts_post_process_failed_nonce() -> ( + fn create_accounts_post_process_failed_tx() -> ( Pubkey, AccountSharedData, AccountSharedData, DurableNonce, u64, - Option, ) { - let data = NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())); + let data = NonceVersions::new(NonceState::Initialized(NonceData::default())); let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap(); let mut pre_account = account.clone(); pre_account.set_lamports(43); let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[1u8; 32])); - ( - Pubkey::default(), - pre_account, - 
account, - durable_nonce, - 1234, - None, - ) + (Pubkey::default(), pre_account, account, durable_nonce, 1234) } - fn run_post_process_failed_nonce_test( + fn run_post_process_failed_tx_test( account: &mut AccountSharedData, is_fee_payer: bool, is_nonce_account: bool, - nonce: &NonceFull, + rollback_accounts: &RollbackAccounts, durable_nonce: &DurableNonce, lamports_per_signature: u64, expect_account: &AccountSharedData, @@ -1690,17 +1680,17 @@ mod tests { // Verify expect_account's relationship if !is_fee_payer { if is_nonce_account { - assert_ne!(expect_account, nonce.account()); + assert_ne!(expect_account, rollback_accounts.nonce().unwrap().account()); } else { assert_eq!(expect_account, account); } } - post_process_failed_nonce( + post_process_failed_tx( account, is_fee_payer, is_nonce_account, - nonce, + rollback_accounts, durable_nonce, lamports_per_signature, ); @@ -1709,33 +1699,25 @@ mod tests { } #[test] - fn test_post_process_failed_nonce_expected() { - let ( - pre_account_address, - pre_account, - mut post_account, - blockhash, - lamports_per_signature, - maybe_fee_payer_account, - ) = create_accounts_post_process_failed_nonce(); - let nonce = NonceFull::new( - pre_account_address, - pre_account.clone(), - maybe_fee_payer_account, - ); + fn test_post_process_failed_tx_expected() { + let (pre_account_address, pre_account, mut post_account, blockhash, lamports_per_signature) = + create_accounts_post_process_failed_tx(); + let rollback_accounts = RollbackAccounts::SameNonceAndFeePayer { + nonce: NoncePartial::new(pre_account_address, pre_account.clone()), + }; let mut expect_account = pre_account; expect_account .set_state(&NonceVersions::new(NonceState::Initialized( - nonce::state::Data::new(Pubkey::default(), blockhash, lamports_per_signature), + NonceData::new(Pubkey::default(), blockhash, lamports_per_signature), ))) .unwrap(); - assert!(run_post_process_failed_nonce_test( + assert!(run_post_process_failed_tx_test( &mut post_account, false, // 
is_fee_payer true, // is_nonce_account - &nonce, + &rollback_accounts, &blockhash, lamports_per_signature, &expect_account, @@ -1743,24 +1725,20 @@ mod tests { } #[test] - fn test_post_process_failed_nonce_not_nonce_address() { - let ( - pre_account_address, - pre_account, - mut post_account, - blockhash, - lamports_per_signature, - maybe_fee_payer_account, - ) = create_accounts_post_process_failed_nonce(); + fn test_post_process_failed_tx_not_nonce_address() { + let (pre_account_address, pre_account, mut post_account, blockhash, lamports_per_signature) = + create_accounts_post_process_failed_tx(); - let nonce = NonceFull::new(pre_account_address, pre_account, maybe_fee_payer_account); + let rollback_accounts = RollbackAccounts::SameNonceAndFeePayer { + nonce: NoncePartial::new(pre_account_address, pre_account.clone()), + }; let expect_account = post_account.clone(); - assert!(run_post_process_failed_nonce_test( + assert!(run_post_process_failed_tx_test( &mut post_account, false, // is_fee_payer false, // is_nonce_account - &nonce, + &rollback_accounts, &blockhash, lamports_per_signature, &expect_account, @@ -1774,27 +1752,26 @@ mod tests { AccountSharedData::new_data(42, &(), &system_program::id()).unwrap(); let post_fee_payer_account = AccountSharedData::new_data(84, &[1, 2, 3, 4], &system_program::id()).unwrap(); - let nonce = NonceFull::new( - Pubkey::new_unique(), - nonce_account, - Some(pre_fee_payer_account.clone()), - ); + let rollback_accounts = RollbackAccounts::SeparateNonceAndFeePayer { + nonce: NoncePartial::new(Pubkey::new_unique(), nonce_account), + fee_payer_account: pre_fee_payer_account.clone(), + }; - assert!(run_post_process_failed_nonce_test( + assert!(run_post_process_failed_tx_test( &mut post_fee_payer_account.clone(), false, // is_fee_payer false, // is_nonce_account - &nonce, + &rollback_accounts, &DurableNonce::default(), 1, &post_fee_payer_account, )); - assert!(run_post_process_failed_nonce_test( + 
assert!(run_post_process_failed_tx_test( &mut post_fee_payer_account.clone(), true, // is_fee_payer false, // is_nonce_account - &nonce, + &rollback_accounts, &DurableNonce::default(), 1, &pre_fee_payer_account, @@ -1809,7 +1786,7 @@ mod tests { let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( nonce_authority.pubkey(), durable_nonce, 0, @@ -1837,7 +1814,7 @@ mod tests { let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( nonce_authority.pubkey(), durable_nonce, 0, @@ -1846,17 +1823,16 @@ mod tests { AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); - let nonce = Some(NonceFull::new( - nonce_address, - nonce_account_pre.clone(), - Some(from_account_pre.clone()), - )); - + let nonce = NoncePartial::new(nonce_address, nonce_account_pre.clone()); let loaded = Ok(LoadedTransaction { accounts: transaction_accounts, program_indices: vec![], - nonce: nonce.clone(), fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::SeparateNonceAndFeePayer { + nonce: nonce.clone(), + fee_payer_account: from_account_pre.clone(), + }, + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1868,13 +1844,9 @@ mod tests { let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let txs = vec![tx]; - let execution_results = 
vec![new_execution_result( - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidArgument, - )), - nonce.as_ref(), - )]; + let execution_results = vec![new_execution_result(Err( + TransactionError::InstructionError(1, InstructionError::InvalidArgument), + ))]; let (collected_accounts, _) = accounts.collect_accounts_to_store( &txs, &execution_results, @@ -1916,7 +1888,7 @@ mod tests { let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( nonce_authority.pubkey(), durable_nonce, 0, @@ -1944,7 +1916,7 @@ mod tests { let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( nonce_authority.pubkey(), durable_nonce, 0, @@ -1952,17 +1924,15 @@ mod tests { let nonce_account_pre = AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); - let nonce = Some(NonceFull::new( - nonce_address, - nonce_account_pre.clone(), - None, - )); - + let nonce = NoncePartial::new(nonce_address, nonce_account_pre.clone()); let loaded = Ok(LoadedTransaction { accounts: transaction_accounts, program_indices: vec![], - nonce: nonce.clone(), fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::SameNonceAndFeePayer { + nonce: nonce.clone(), + }, + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1974,13 +1944,9 @@ mod tests { let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let txs = 
vec![tx]; - let execution_results = vec![new_execution_result( - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidArgument, - )), - nonce.as_ref(), - )]; + let execution_results = vec![new_execution_result(Err( + TransactionError::InstructionError(1, InstructionError::InvalidArgument), + ))]; let (collected_accounts, _) = accounts.collect_accounts_to_store( &txs, &execution_results, @@ -2046,7 +2012,8 @@ mod tests { bank_id, 0, &HashSet::new(), - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![] @@ -2058,7 +2025,8 @@ mod tests { bank_id, 0, &all_pubkeys, - AccountAddressFilter::Include + AccountAddressFilter::Include, + false ) .unwrap(), vec![] @@ -2073,7 +2041,8 @@ mod tests { bank_id, 1, &HashSet::new(), - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey1, 42)] @@ -2085,7 +2054,8 @@ mod tests { bank_id, 2, &HashSet::new(), - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey1, 42), (pubkey0, 42)] @@ -2097,7 +2067,8 @@ mod tests { bank_id, 3, &HashSet::new(), - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)] @@ -2111,7 +2082,8 @@ mod tests { bank_id, 6, &HashSet::new(), - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)] @@ -2126,7 +2098,8 @@ mod tests { bank_id, 1, &exclude1, - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey0, 42)] @@ -2138,7 +2111,8 @@ mod tests { bank_id, 2, &exclude1, - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey0, 42), (pubkey2, 41)] @@ -2150,7 +2124,8 @@ mod tests { bank_id, 3, &exclude1, - AccountAddressFilter::Exclude + AccountAddressFilter::Exclude, + false ) .unwrap(), vec![(pubkey0, 42), (pubkey2, 41)] @@ -2165,7 
+2140,8 @@ mod tests { bank_id, 1, &include1_2, - AccountAddressFilter::Include + AccountAddressFilter::Include, + false ) .unwrap(), vec![(pubkey1, 42)] @@ -2177,7 +2153,8 @@ mod tests { bank_id, 2, &include1_2, - AccountAddressFilter::Include + AccountAddressFilter::Include, + false ) .unwrap(), vec![(pubkey1, 42), (pubkey2, 41)] @@ -2189,7 +2166,8 @@ mod tests { bank_id, 3, &include1_2, - AccountAddressFilter::Include + AccountAddressFilter::Include, + false ) .unwrap(), vec![(pubkey1, 42), (pubkey2, 41)] @@ -2217,7 +2195,10 @@ mod tests { #[test] fn test_maybe_abort_scan() { assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &ScanConfig::default()).is_ok()); - let config = ScanConfig::default().recreate_with_abort(); + assert!( + Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &ScanConfig::new(false)).is_ok() + ); + let config = ScanConfig::new(false).recreate_with_abort(); assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &config).is_ok()); config.abort(); assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &config).is_err()); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c21f000975b235..5f312704f540b5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1701,7 +1701,7 @@ impl SplitAncientStorages { i += 1; if treat_as_ancient(storage) { // even though the slot is in range of being an ancient append vec, if it isn't actually a large append vec, - // then we are better off treating all these slots as normally cachable to reduce work in dedup. + // then we are better off treating all these slots as normally cacheable to reduce work in dedup. // Since this one is large, for the moment, this one becomes the highest slot where we want to individually cache files. 
len_truncate = i; } @@ -1957,6 +1957,9 @@ pub(crate) struct ShrinkAncientStats { pub(crate) slots_considered: AtomicU64, pub(crate) ancient_scanned: AtomicU64, pub(crate) bytes_ancient_created: AtomicU64, + pub(crate) bytes_from_must_shrink: AtomicU64, + pub(crate) bytes_from_smallest_storages: AtomicU64, + pub(crate) bytes_from_newest_storages: AtomicU64, pub(crate) many_ref_slots_skipped: AtomicU64, pub(crate) slots_cannot_move_count: AtomicU64, pub(crate) many_refs_old_alive: AtomicU64, @@ -2250,6 +2253,21 @@ impl ShrinkAncientStats { // i64 // ), // ( + // "bytes_from_must_shrink", + // self.bytes_from_must_shrink.swap(0, Ordering::Relaxed) as i64, + // i64 + // ), + // ( + // "bytes_from_smallest_storages", + // self.bytes_from_smallest_storages.swap(0, Ordering::Relaxed) as i64, + // i64 + // ), + // ( + // "bytes_from_newest_storages", + // self.bytes_from_newest_storages.swap(0, Ordering::Relaxed) as i64, + // i64 + // ), + // ( // "many_ref_slots_skipped", // self.many_ref_slots_skipped.swap(0, Ordering::Relaxed), // i64 @@ -5790,7 +5808,7 @@ impl AccountsDb { /// This should only be called after the `Bank::drop()` runs in bank.rs, See BANK_DROP_SAFETY /// comment below for more explanation. - /// * `is_serialized_with_abs` - indicates whehter this call runs sequentially with all other + /// * `is_serialized_with_abs` - indicates whether this call runs sequentially with all other /// accounts_db relevant calls, such as shrinking, purging etc., in account background /// service. 
pub fn purge_slot(&self, slot: Slot, bank_id: BankId, is_serialized_with_abs: bool) { @@ -6174,7 +6192,7 @@ impl AccountsDb { // allocate a buffer on the stack that's big enough // to hold a token account or a stake account const META_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */; - const DATA_SIZE: usize = 200; // stake acounts are 200 B and token accounts are 165-182ish B + const DATA_SIZE: usize = 200; // stake accounts are 200 B and token accounts are 165-182ish B const BUFFER_SIZE: usize = META_SIZE + DATA_SIZE; let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new(); @@ -8942,7 +8960,7 @@ impl AccountsDb { // these write directly to disk, so the more threads, the better num_cpus::get() } else { - // seems to be a good hueristic given varying # cpus for in-mem disk index + // seems to be a good heuristic given varying # cpus for in-mem disk index 8 }; let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index efd765e0c722b7..b4f11664ab7955 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -121,7 +121,7 @@ impl AccountsDb { // later entries in the same slot are more recent and override earlier accounts for the same pubkey // We can pass an incrementing number here for write_version in the future, if the storage does not have a write_version. - // As long as all accounts for this slot are in 1 append vec that can be itereated olest to newest. + // As long as all accounts for this slot are in 1 append vec that can be iterated oldest to newest. 
self.notify_filtered_accounts( slot, notified_accounts, diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 2d673b13d2d08d..b2ffff49eea56b 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -5,7 +5,7 @@ use { ancestors::Ancestors, pubkey_bins::PubkeyBinCalculator24, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, log::*, memmap2::MmapMut, rayon::prelude::*, diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index db7ddf6ea2e44e..6df2051556c809 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -90,7 +90,7 @@ pub enum UpsertReclaim { IgnoreReclaims, } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct ScanConfig { /// checked by the scan. When true, abort scan. pub abort: Option>, @@ -100,11 +100,20 @@ pub struct ScanConfig { pub collect_all_unsorted: bool, } +impl Default for ScanConfig { + fn default() -> Self { + Self { + abort: None, + collect_all_unsorted: true, + } + } +} + impl ScanConfig { pub fn new(collect_all_unsorted: bool) -> Self { Self { collect_all_unsorted, - ..ScanConfig::default() + ..Default::default() } } @@ -4210,10 +4219,14 @@ pub mod tests { assert!(!config.is_aborted()); } - let config = ScanConfig::default(); + let config = ScanConfig::new(false); assert!(!config.collect_all_unsorted); assert!(config.abort.is_none()); + let config = ScanConfig::default(); + assert!(config.collect_all_unsorted); + assert!(config.abort.is_none()); + let config = config.recreate_with_abort(); assert!(config.abort.is_some()); assert!(!config.is_aborted()); diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 5b73aab489d6ac..07660e33efaf31 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -8,8 +8,8 @@ use { account_storage::ShrinkInProgress, accounts_db::{ AccountFromStorage, AccountStorageEntry, 
AccountsDb, AliveAccounts, - GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs, - ShrinkStatsSub, + GetUniqueAccountsResult, ShrinkAncientStats, ShrinkCollect, + ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub, }, accounts_file::AccountsFile, accounts_index::AccountsIndexScanResult, @@ -27,6 +27,10 @@ use { }, }; +/// this many # of highest slot values should be treated as desirable to pack. +/// This gives us high slots to move packed accounts into. +const HIGH_SLOT_OFFSET: u64 = 100; + /// ancient packing algorithm tuning per pass #[derive(Debug)] struct PackedAncientStorageTuning { @@ -57,6 +61,9 @@ struct SlotInfo { alive_bytes: u64, /// true if this should be shrunk due to ratio should_shrink: bool, + /// this slot is a high slot # + /// It is important to include some high slot #s so that we have new slots to try each time pack runs. + is_high_slot: bool, } /// info for all storages in ancient slots @@ -83,6 +90,7 @@ impl AncientSlotInfos { storage: Arc, can_randomly_shrink: bool, ideal_size: NonZeroU64, + is_high_slot: bool, ) -> bool { let mut was_randomly_shrunk = false; let alive_bytes = storage.alive_bytes() as u64; @@ -122,6 +130,7 @@ impl AncientSlotInfos { storage, alive_bytes, should_shrink, + is_high_slot, }); self.total_alive_bytes += alive_bytes; } @@ -130,12 +139,16 @@ impl AncientSlotInfos { /// modify 'self' to contain only the slot infos for the slots that should be combined /// (and in this process effectively shrunk) - fn filter_ancient_slots(&mut self, tuning: &PackedAncientStorageTuning) { + fn filter_ancient_slots( + &mut self, + tuning: &PackedAncientStorageTuning, + stats: &ShrinkAncientStats, + ) { // figure out which slots to combine // 1. should_shrink: largest bytes saved above some cutoff of ratio self.choose_storages_to_shrink(tuning); // 2. 
smallest files so we get the largest number of files to remove - self.filter_by_smallest_capacity(tuning); + self.filter_by_smallest_capacity(tuning, stats); } // sort 'shrink_indexes' by most bytes saved, highest to lowest @@ -185,42 +198,73 @@ impl AncientSlotInfos { /// 'all_infos' are combined, the total number of storages <= 'max_storages' /// The idea is that 'all_infos' is sorted from smallest capacity to largest, /// but that isn't required for this function to be 'correct'. - fn truncate_to_max_storages(&mut self, tuning: &PackedAncientStorageTuning) { + fn truncate_to_max_storages( + &mut self, + tuning: &PackedAncientStorageTuning, + stats: &ShrinkAncientStats, + ) { // these indexes into 'all_infos' are useless once we truncate 'all_infos', so make sure they're cleared out to avoid any issues self.shrink_indexes.clear(); let total_storages = self.all_infos.len(); let mut cumulative_bytes = Saturating(0u64); let low_threshold = tuning.max_ancient_slots * 50 / 100; + let mut bytes_from_must_shrink = 0; + let mut bytes_from_smallest_storages = 0; + let mut bytes_from_newest_storages = 0; for (i, info) in self.all_infos.iter().enumerate() { cumulative_bytes += info.alive_bytes; let ancient_storages_required = - (cumulative_bytes.0 / tuning.ideal_storage_size + 1) as usize; + div_ceil(cumulative_bytes.0, tuning.ideal_storage_size) as usize; let storages_remaining = total_storages - i - 1; // if the remaining uncombined storages and the # of resulting - // combined ancient storages is less than the threshold, then + // combined ancient storages are less than the threshold, then // we've gone too far, so get rid of this entry and all after it. - // Every storage after this one is larger. + // Every storage after this one is larger than the ones we've chosen. // if we ever get to more than `max_resulting_storages` required ancient storages, that is enough to stop for now. - // It will take a while to create that many. 
This should be a limit that only affects - // extreme testing environments. - if storages_remaining + ancient_storages_required < low_threshold - || ancient_storages_required as u64 > u64::from(tuning.max_resulting_storages) + // It will take a lot of time for the pack algorithm to create that many, and that is bad for system performance. + // This should be a limit that only affects extreme testing environments. + // We do not stop including entries until we have dealt with all the high slot #s. This allows the algorithm to continue + // to make progress each time it is called. There are exceptions that can cause the pack to fail, such as accounts with multiple + // refs. + if !info.is_high_slot + && (storages_remaining + ancient_storages_required < low_threshold + || ancient_storages_required as u64 > u64::from(tuning.max_resulting_storages)) { self.all_infos.truncate(i); break; } + if info.should_shrink { + bytes_from_must_shrink += info.alive_bytes; + } else if info.is_high_slot { + bytes_from_newest_storages += info.alive_bytes; + } else { + bytes_from_smallest_storages += info.alive_bytes; + } } + stats + .bytes_from_must_shrink + .fetch_add(bytes_from_must_shrink, Ordering::Relaxed); + stats + .bytes_from_smallest_storages + .fetch_add(bytes_from_smallest_storages, Ordering::Relaxed); + stats + .bytes_from_newest_storages + .fetch_add(bytes_from_newest_storages, Ordering::Relaxed); } /// remove entries from 'all_infos' such that combining /// the remaining entries into storages of 'ideal_storage_size' /// will get us below 'max_storages' - /// The entires that are removed will be reconsidered the next time around. + /// The entries that are removed will be reconsidered the next time around. /// Combining too many storages costs i/o and cpu so the goal is to find the sweet spot so /// that we make progress in cleaning/shrinking/combining but that we don't cause unnecessary /// churn. 
- fn filter_by_smallest_capacity(&mut self, tuning: &PackedAncientStorageTuning) { + fn filter_by_smallest_capacity( + &mut self, + tuning: &PackedAncientStorageTuning, + stats: &ShrinkAncientStats, + ) { let total_storages = self.all_infos.len(); if total_storages <= tuning.max_ancient_slots { // currently fewer storages than max, so nothing to shrink @@ -229,16 +273,21 @@ impl AncientSlotInfos { return; } - // sort by 'should_shrink' then smallest capacity to largest + // sort by: + // 1. `high_slot`: we want to include new, high slots each time so that we try new slots + // each time alg runs and have several high target slots for packed storages. + // 2. 'should_shrink' so we make progress on shrinking ancient storages + // 3. smallest capacity to largest so that we remove the most slots possible self.all_infos.sort_unstable_by(|l, r| { - r.should_shrink - .cmp(&l.should_shrink) + r.is_high_slot + .cmp(&l.is_high_slot) + .then_with(|| r.should_shrink.cmp(&l.should_shrink)) .then_with(|| l.capacity.cmp(&r.capacity)) }); // remove any storages we don't need to combine this pass to achieve // # resulting storages <= 'max_storages' - self.truncate_to_max_storages(tuning); + self.truncate_to_max_storages(tuning, stats); } } @@ -452,7 +501,7 @@ impl AccountsDb { tuning.ideal_storage_size, ); - ancient_slot_infos.filter_ancient_slots(tuning); + ancient_slot_infos.filter_ancient_slots(tuning, &self.shrink_ancient_stats); ancient_slot_infos } @@ -498,10 +547,20 @@ impl AccountsDb { ..AncientSlotInfos::default() }; let mut randoms = 0; + let max_slot = slots.iter().max().cloned().unwrap_or_default(); + // heuristic to include some # of newly eligible ancient slots so that the pack algorithm always makes progress + let high_slot_boundary = max_slot.saturating_sub(HIGH_SLOT_OFFSET); + let is_high_slot = |slot| slot >= high_slot_boundary; for slot in &slots { if let Some(storage) = self.storage.get_slot_storage_entry(*slot) { - if infos.add(*slot, storage, 
can_randomly_shrink, ideal_size) { + if infos.add( + *slot, + storage, + can_randomly_shrink, + ideal_size, + is_high_slot(*slot), + ) { randoms += 1; } } @@ -548,9 +607,6 @@ impl AccountsDb { self.thread_pool_clean.install(|| { packer.par_iter().for_each(|(target_slot, pack)| { let mut write_ancient_accounts_local = WriteAncientAccounts::default(); - self.shrink_ancient_stats - .bytes_ancient_created - .fetch_add(pack.bytes, Ordering::Relaxed); self.write_one_packed_storage( pack, **target_slot, @@ -1059,6 +1115,25 @@ pub fn is_ancient(storage: &AccountsFile) -> bool { storage.capacity() >= get_ancient_append_vec_capacity() } +/// Divides `x` by `y` and rounds up +/// +/// # Notes +/// +/// It is undefined behavior if `x + y` overflows a u64. +/// Debug builds check this invariant, and will panic if broken. +fn div_ceil(x: u64, y: NonZeroU64) -> u64 { + let y = y.get(); + debug_assert!( + x.checked_add(y).is_some(), + "x + y must not overflow! x: {x}, y: {y}", + ); + // SAFETY: The caller guaranteed `x + y` does not overflow + // SAFETY: Since `y` is NonZero: + // - we know the denominator is > 0, and thus safe (cannot have divide-by-zero) + // - we know `x + y` is non-zero, and thus the numerator is safe (cannot underflow) + (x + y - 1) / y +} + #[cfg(test)] pub mod tests { use { @@ -1078,17 +1153,22 @@ pub mod tests { }, accounts_hash::AccountHash, accounts_index::UpsertReclaim, - append_vec::{aligned_stored_size, AppendVec, AppendVecStoredAccountMeta}, + append_vec::{ + aligned_stored_size, AppendVec, AppendVecStoredAccountMeta, + MAXIMUM_APPEND_VEC_FILE_SIZE, + }, storable_accounts::{tests::build_accounts_from_storage, StorableAccountsBySlot}, }, + rand::seq::SliceRandom as _, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, hash::Hash, pubkey::Pubkey, }, - std::ops::Range, + std::{collections::HashSet, ops::Range}, strum::IntoEnumIterator, strum_macros::EnumIter, + test_case::test_case, }; fn get_sample_storages( @@ -1105,6 
+1185,7 @@ pub mod tests { let original_stores = (0..slots) .filter_map(|slot| db.storage.get_slot_storage_entry((slot as Slot) + slot1)) .collect::>(); + let is_high_slot = false; let slot_infos = original_stores .iter() .map(|storage| SlotInfo { @@ -1113,6 +1194,7 @@ pub mod tests { capacity: 0, alive_bytes: 0, should_shrink: false, + is_high_slot, }) .collect(); ( @@ -2379,6 +2461,7 @@ pub mod tests { let mut infos = AncientSlotInfos::default(); let storage = db.storage.get_slot_storage_entry(slot1).unwrap(); let alive_bytes_expected = storage.alive_bytes(); + let high_slot = false; match method { TestCollectInfo::Add => { // test lower level 'add' @@ -2387,6 +2470,7 @@ pub mod tests { Arc::clone(&storage), can_randomly_shrink, NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + high_slot, ); } TestCollectInfo::CalcAncientSlotInfo => { @@ -2445,12 +2529,14 @@ pub mod tests { let (db, slot1) = create_db_with_storages_and_index(alive, slots, None); let mut infos = AncientSlotInfos::default(); let storage = db.storage.get_slot_storage_entry(slot1).unwrap(); + let high_slot = false; if call_add { infos.add( slot1, Arc::clone(&storage), can_randomly_shrink, NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + high_slot, ); } else { infos = db.calc_ancient_slot_info( @@ -2628,6 +2714,7 @@ pub mod tests { capacity: 1, alive_bytes: 1, should_shrink: false, + is_high_slot: false, }) .collect(), shrink_indexes: (0..count).collect(), @@ -2659,10 +2746,10 @@ pub mod tests { match method { TestSmallestCapacity::FilterAncientSlots => { infos.shrink_indexes.clear(); - infos.filter_ancient_slots(&tuning); + infos.filter_ancient_slots(&tuning, &ShrinkAncientStats::default()); } TestSmallestCapacity::FilterBySmallestCapacity => { - infos.filter_by_smallest_capacity(&tuning); + infos.filter_by_smallest_capacity(&tuning, &ShrinkAncientStats::default()); } } assert!(infos.all_infos.is_empty()); @@ -2671,7 +2758,7 @@ pub mod tests { } #[test] - fn 
test_filter_by_smaller_capacity_sort() { + fn test_filter_by_smallest_capacity_sort() { // max is 6 // 7 storages // storage[last] is big enough to cause us to need another storage @@ -2705,11 +2792,11 @@ pub mod tests { }; match method { TestSmallestCapacity::FilterBySmallestCapacity => { - infos.filter_by_smallest_capacity(&tuning); + infos.filter_by_smallest_capacity(&tuning, &ShrinkAncientStats::default()); } TestSmallestCapacity::FilterAncientSlots => { infos.shrink_indexes.clear(); - infos.filter_ancient_slots(&tuning); + infos.filter_ancient_slots(&tuning, &ShrinkAncientStats::default()); } } assert_eq!( @@ -2729,11 +2816,106 @@ pub mod tests { } } + /// Test that we always include the high slots when filtering which ancient infos to pack + /// + /// If we have *more* high slots than max resulting storages set in the tuning parameters, + /// we should still have all the high slots after calling `filter_by_smallest_capacity(). + #[test] + fn test_filter_by_smallest_capacity_high_slot_more() { + let tuning = default_tuning(); + + // Ensure we have more storages with high slots than the 'max resulting storages'. 
+ let num_high_slots = tuning.max_resulting_storages.get() * 2; + let num_ancient_storages = num_high_slots * 3; + let mut infos = create_test_infos(num_ancient_storages as usize); + infos + .all_infos + .sort_unstable_by_key(|slot_info| slot_info.slot); + infos + .all_infos + .iter_mut() + .rev() + .take(num_high_slots as usize) + .for_each(|slot_info| { + slot_info.is_high_slot = true; + }); + let slots_expected: Vec<_> = infos + .all_infos + .iter() + .filter_map(|slot_info| slot_info.is_high_slot.then_some(slot_info.slot)) + .collect(); + + // shuffle the infos so they actually need to be sorted + infos.all_infos.shuffle(&mut thread_rng()); + infos.filter_by_smallest_capacity(&tuning, &ShrinkAncientStats::default()); + + infos + .all_infos + .sort_unstable_by_key(|slot_info| slot_info.slot); + let slots_actual: Vec<_> = infos + .all_infos + .iter() + .map(|slot_info| slot_info.slot) + .collect(); + assert_eq!(infos.all_infos.len() as u64, num_high_slots); + assert_eq!(slots_actual, slots_expected); + } + + /// Test that we always include the high slots when filtering which ancient infos to pack + /// + /// If we have *less* high slots than max resulting storages set in the tuning parameters, + /// we should still have all the high slots after calling `filter_by_smallest_capacity(). + #[test] + fn test_filter_by_smallest_capacity_high_slot_less() { + let tuning = default_tuning(); + + // Ensure we have less storages with high slots than the 'max resulting storages'. 
+ let num_high_slots = tuning.max_resulting_storages.get() / 2; + let num_ancient_storages = num_high_slots * 5; + let mut infos = create_test_infos(num_ancient_storages as usize); + infos + .all_infos + .sort_unstable_by_key(|slot_info| slot_info.slot); + infos + .all_infos + .iter_mut() + .rev() + .take(num_high_slots as usize) + .for_each(|slot_info| { + slot_info.is_high_slot = true; + }); + let high_slots: Vec<_> = infos + .all_infos + .iter() + .filter_map(|slot_info| slot_info.is_high_slot.then_some(slot_info.slot)) + .collect(); + + // shuffle the infos so they actually need to be sorted + infos.all_infos.shuffle(&mut thread_rng()); + infos.filter_by_smallest_capacity(&tuning, &ShrinkAncientStats::default()); + + infos + .all_infos + .sort_unstable_by_key(|slot_info| slot_info.slot); + let slots_actual: HashSet<_> = infos + .all_infos + .iter() + .map(|slot_info| slot_info.slot) + .collect(); + assert_eq!( + infos.all_infos.len() as u64, + tuning.max_resulting_storages.get(), + ); + assert!(high_slots + .iter() + .all(|high_slot| slots_actual.contains(high_slot))); + } + fn test(filter: bool, infos: &mut AncientSlotInfos, tuning: &PackedAncientStorageTuning) { if filter { - infos.filter_by_smallest_capacity(tuning); + infos.filter_by_smallest_capacity(tuning, &ShrinkAncientStats::default()); } else { - infos.truncate_to_max_storages(tuning); + infos.truncate_to_max_storages(tuning, &ShrinkAncientStats::default()); } } @@ -3157,7 +3339,7 @@ pub mod tests { }; match method { TestShouldShrink::FilterAncientSlots => { - infos.filter_ancient_slots(&tuning); + infos.filter_ancient_slots(&tuning, &ShrinkAncientStats::default()); } TestShouldShrink::ClearShouldShrink => { infos.clear_should_shrink_after_cutoff(&tuning); @@ -3212,6 +3394,7 @@ pub mod tests { capacity: info1_capacity, alive_bytes: 0, should_shrink: false, + is_high_slot: false, }; let info2 = SlotInfo { storage: storage.clone(), @@ -3219,6 +3402,7 @@ pub mod tests { capacity: 2, alive_bytes: 1, 
should_shrink: false, + is_high_slot: false, }; let mut infos = AncientSlotInfos { all_infos: vec![info1, info2], @@ -3653,4 +3837,29 @@ pub mod tests { assert!(expected_ref_counts.is_empty()); } } + + #[test_case(0, 1 => 0)] + #[test_case(1, 1 => 1)] + #[test_case(2, 1 => 2)] + #[test_case(2, 2 => 1)] + #[test_case(2, 3 => 1)] + #[test_case(2, 4 => 1)] + #[test_case(3, 4 => 1)] + #[test_case(4, 4 => 1)] + #[test_case(5, 4 => 2)] + #[test_case(0, u64::MAX => 0)] + #[test_case(MAXIMUM_APPEND_VEC_FILE_SIZE - 1, MAXIMUM_APPEND_VEC_FILE_SIZE => 1)] + #[test_case(MAXIMUM_APPEND_VEC_FILE_SIZE + 1, MAXIMUM_APPEND_VEC_FILE_SIZE => 2)] + fn test_div_ceil(x: u64, y: u64) -> u64 { + div_ceil(x, NonZeroU64::new(y).unwrap()) + } + + #[should_panic(expected = "x + y must not overflow")] + #[test_case(1, u64::MAX)] + #[test_case(u64::MAX, 1)] + #[test_case(u64::MAX/2 + 2, u64::MAX/2)] + #[test_case(u64::MAX/2, u64::MAX/2 + 2)] + fn test_div_ceil_overflow(x: u64, y: u64) { + div_ceil(x, NonZeroU64::new(y).unwrap()); + } } diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index bc7e19112e516f..fb90b68d124702 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -533,7 +533,7 @@ pub mod tests { (0..threads).into_par_iter().for_each(|_| { // This test used to be more strict with time, but in a parallel, multi test environment, // sometimes threads starve and this test intermittently fails. So, give it more time than it should require. - // This may be aggrevated by the strategy of only allowing thread 0 to advance the age. + // This may be aggravated by the strategy of only allowing thread 0 to advance the age. 
while now.elapsed().as_millis() < (time as u128) * 100 { if test.maybe_advance_age() { test.bucket_flushed_at_current_age(true); diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs index e6efd07f7d138c..5298b386793d2a 100644 --- a/accounts-db/src/buffered_reader.rs +++ b/accounts-db/src/buffered_reader.rs @@ -5,7 +5,7 @@ //! calling read(), advance_offset() and set_required_data_len(account_data_len) once the next account //! data length is known. //! -//! Unlike BufRead/BufReader, this type guarnatees that on the next read() after calling +//! Unlike BufRead/BufReader, this type guarantees that on the next read() after calling //! set_required_data_len(len), the whole account data is buffered _linearly_ in memory and available to //! be returned. use { @@ -119,147 +119,209 @@ mod tests { #[test] fn test_buffered_reader() { // Setup a sample file with 32 bytes of data + let file_size = 32; let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // First read 16 bytes to fill buffer - let mut reader = BufferedReader::new(16, 32, &sample_file, 8); + let buffer_size = 16; + let file_len_valid = 32; + let default_min_read = 8; + let mut reader = + BufferedReader::new(buffer_size, file_len_valid, &sample_file, default_min_read); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 0); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[0..16]); + let mut expected_offset = 0; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), buffer_size); + assert_eq!(slice.slice(), &bytes[0..buffer_size]); // Consume the data and attempt to read next 32 bytes, expect to hit EOF and only read 16 bytes - reader.advance_offset(16); - reader.set_required_data_len(32); + let advance = 16; + let mut required_len 
= 32; + reader.advance_offset(advance); + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 16); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[16..32]); + expected_offset += advance; + let expected_slice_len = 16; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), expected_slice_len); + assert_eq!(slice.slice(), &bytes[offset..file_size]); // Continue reading should yield EOF and empty slice. - reader.advance_offset(16); - reader.set_required_data_len(32); + reader.advance_offset(advance); + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 32); - assert_eq!(slice.len(), 0); + expected_offset += advance; + assert_eq!(offset, expected_offset); + let expected_slice_len = 0; + assert_eq!(slice.len(), expected_slice_len); // set_required_data to zero and offset should not change, and slice should be empty. - reader.set_required_data_len(0); + required_len = 0; + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 32); - assert_eq!(slice.len(), 0); + let expected_offset = file_len_valid; + assert_eq!(offset, expected_offset); + let expected_slice_len = 0; + assert_eq!(slice.len(), expected_slice_len); } #[test] fn test_buffered_reader_with_extra_data_in_file() { // Setup a sample file with 32 bytes of data let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let file_size = 32; + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // Set file valid_len to 30 (i.e. 
2 garbage bytes at the end of the file) let valid_len = 30; // First read 16 bytes to fill buffer - let mut reader = BufferedReader::new(16, valid_len, &sample_file, 8); + let buffer_size = 16; + let default_min_read_size = 8; + let mut reader = + BufferedReader::new(buffer_size, valid_len, &sample_file, default_min_read_size); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 0); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[0..16]); + let mut expected_offset = 0; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), buffer_size); + assert_eq!(slice.slice(), &bytes[0..buffer_size]); // Consume the data and attempt read next 32 bytes, expect to hit `valid_len`, and only read 14 bytes - reader.advance_offset(16); - reader.set_required_data_len(32); + let mut advance = 16; + let mut required_data_len = 32; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 16); - assert_eq!(slice.len(), 14); - assert_eq!(slice.slice(), &bytes[16..30]); + expected_offset += advance; + assert_eq!(offset, expected_offset); + let expected_slice_len = valid_len - offset; + assert_eq!(slice.len(), expected_slice_len); + let expected_slice_range = 16..30; + assert_eq!(slice.slice(), &bytes[expected_slice_range]); // Continue reading should yield EOF and empty slice. 
- reader.advance_offset(14); - reader.set_required_data_len(32); + advance = 14; + required_data_len = 32; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 30); - assert_eq!(slice.len(), 0); + expected_offset += advance; + assert_eq!(offset, expected_offset); + let expected_slice_len = 0; + assert_eq!(slice.len(), expected_slice_len); // Move the offset passed `valid_len`, expect to hit EOF and return empty slice. - reader.advance_offset(1); - reader.set_required_data_len(8); + advance = 1; + required_data_len = 8; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 31); - assert_eq!(slice.len(), 0); + expected_offset += advance; + assert_eq!(offset, expected_offset); + let expected_slice_len = 0; + assert_eq!(slice.len(), expected_slice_len); // Move the offset passed file_len, expect to hit EOF and return empty slice. 
- reader.advance_offset(3); - reader.set_required_data_len(8); + advance = 3; + required_data_len = 8; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 34); - assert_eq!(slice.len(), 0); + expected_offset += advance; + assert_eq!(offset, expected_offset); + let expected_slice_len = 0; + assert_eq!(slice.len(), expected_slice_len); } #[test] fn test_buffered_reader_partial_consume() { // Setup a sample file with 32 bytes of data let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let file_size = 32; + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // First read 16 bytes to fill buffer - let mut reader = BufferedReader::new(16, 32, &sample_file, 8); + let buffer_size = 16; + let file_len_valid = 32; + let default_min_read_size = 8; + let mut reader = BufferedReader::new( + buffer_size, + file_len_valid, + &sample_file, + default_min_read_size, + ); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 0); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[0..16]); + let mut expected_offset = 0; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), buffer_size); + assert_eq!(slice.slice(), &bytes[0..buffer_size]); // Consume the partial data (8 byte) and attempt to read next 8 bytes - reader.advance_offset(8); - reader.set_required_data_len(8); + let mut advance = 8; + let mut required_len = 8; + reader.advance_offset(advance); + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 8); - assert_eq!(slice.len(), 8); - 
assert_eq!(slice.slice(), &bytes[8..16]); // no need to read more + expected_offset += advance; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), required_len); + assert_eq!( + slice.slice(), + &bytes[expected_offset..expected_offset + required_len] + ); // no need to read more // Continue reading should succeed and read the rest 16 bytes. - reader.advance_offset(8); - reader.set_required_data_len(16); + advance = 8; + required_len = 16; + reader.advance_offset(advance); + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 16); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[16..32]); + expected_offset += advance; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), required_len); + assert_eq!( + slice.slice(), + &bytes[expected_offset..expected_offset + required_len] + ); // Continue reading should yield EOF and empty slice. 
- reader.advance_offset(16); - reader.set_required_data_len(32); + advance = 16; + required_len = 32; + reader.advance_offset(advance); + reader.set_required_data_len(required_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Eof); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 32); + expected_offset += advance; + assert_eq!(offset, expected_offset); assert_eq!(slice.len(), 0); } @@ -267,37 +329,55 @@ mod tests { fn test_buffered_reader_partial_consume_with_move() { // Setup a sample file with 32 bytes of data let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let file_size = 32; + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // First read 16 bytes to fill buffer - let mut reader = BufferedReader::new(16, 32, &sample_file, 8); + let buffer_size = 16; + let valid_len = 32; + let default_min_read = 8; + let mut reader = + BufferedReader::new(buffer_size, valid_len, &sample_file, default_min_read); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 0); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[0..16]); + let mut expected_offset = 0; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), buffer_size); + assert_eq!(slice.slice(), &bytes[0..buffer_size]); // Consume the partial data (8 bytes) and attempt to read next 16 bytes // This will move the leftover 8bytes and read next 8 bytes. 
- reader.advance_offset(8); - reader.set_required_data_len(16); + let mut advance = 8; + let mut required_data_len = 16; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 8); - assert_eq!(slice.len(), 16); - assert_eq!(slice.slice(), &bytes[8..24]); + expected_offset += advance; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), required_data_len); + assert_eq!( + slice.slice(), + &bytes[expected_offset..expected_offset + required_data_len] + ); // Continue reading should succeed and read the rest 8 bytes. - reader.advance_offset(16); - reader.set_required_data_len(8); + advance = 16; + required_data_len = 8; + reader.advance_offset(advance); + reader.set_required_data_len(required_data_len); let result = reader.read().unwrap(); assert_eq!(result, BufferedReaderStatus::Success); let (offset, slice) = reader.get_offset_and_data(); - assert_eq!(offset, 24); - assert_eq!(slice.len(), 8); - assert_eq!(slice.slice(), &bytes[24..32]); + expected_offset += advance; + assert_eq!(offset, expected_offset); + assert_eq!(slice.len(), required_data_len); + assert_eq!( + slice.slice(), + &bytes[expected_offset..expected_offset + required_data_len] + ); } } diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index f5e7c0129563fc..b69ee39185bda5 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -3,7 +3,7 @@ use crate::pubkey_bins::PubkeyBinCalculator24; use { crate::{accounts_hash::CalculateHashIntermediate, cache_hash_data_stats::CacheHashDataStats}, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, memmap2::MmapMut, solana_measure::measure::Measure, solana_sdk::clock::Slot, diff --git a/accounts-db/src/file_io.rs b/accounts-db/src/file_io.rs index 517c2834f1dbe6..a217aa94002ed6 100644 
--- a/accounts-db/src/file_io.rs +++ b/accounts-db/src/file_io.rs @@ -92,65 +92,105 @@ mod tests { fn test_read_into_buffer() { // Setup a sample file with 32 bytes of data let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let file_size = 32; + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // Read all 32 bytes into buffer let mut buffer = [0; 32]; - let num_bytes_read = read_into_buffer(&sample_file, 32, 0, &mut buffer).unwrap(); - assert_eq!(num_bytes_read, 32); + let mut buffer_len = buffer.len(); + let mut valid_len = 32; + let mut start_offset = 0; + let num_bytes_read = + read_into_buffer(&sample_file, valid_len, start_offset, &mut buffer).unwrap(); + assert_eq!(num_bytes_read, buffer_len); assert_eq!(bytes, buffer); // Given a 64-byte buffer, it should only read 32 bytes into the buffer let mut buffer = [0; 64]; - let num_bytes_read = read_into_buffer(&sample_file, 32, 0, &mut buffer).unwrap(); - assert_eq!(num_bytes_read, 32); - assert_eq!(bytes, buffer[0..32]); - assert_eq!(buffer[32..64], [0; 32]); + buffer_len = buffer.len(); + let num_bytes_read = + read_into_buffer(&sample_file, valid_len, start_offset, &mut buffer).unwrap(); + assert_eq!(num_bytes_read, valid_len); + assert_eq!(bytes, buffer[0..valid_len]); + assert_eq!(buffer[valid_len..buffer_len], [0; 32]); // Given the `valid_file_len` is 16, it should only read 16 bytes into the buffer let mut buffer = [0; 32]; - let num_bytes_read = read_into_buffer(&sample_file, 16, 0, &mut buffer).unwrap(); - assert_eq!(num_bytes_read, 16); - assert_eq!(bytes[0..16], buffer[0..16]); + buffer_len = buffer.len(); + valid_len = 16; + let num_bytes_read = + read_into_buffer(&sample_file, valid_len, start_offset, &mut buffer).unwrap(); + assert_eq!(num_bytes_read, valid_len); + assert_eq!(bytes[0..valid_len], buffer[0..valid_len]); // As a side effect of the `read_into_buffer` the data passed `valid_file_len` was // read and put into the 
buffer, though these data should not be // consumed. - assert_eq!(buffer[16..32], bytes[16..32]); + assert_eq!(buffer[valid_len..buffer_len], bytes[valid_len..buffer_len]); // Given the start offset 8, it should only read 24 bytes into buffer let mut buffer = [0; 32]; - let num_bytes_read = read_into_buffer(&sample_file, 32, 8, &mut buffer).unwrap(); - assert_eq!(num_bytes_read, 24); - assert_eq!(buffer[0..24], bytes[8..32]); - assert_eq!(buffer[24..32], [0; 8]) + buffer_len = buffer.len(); + valid_len = 32; + start_offset = 8; + let num_bytes_read = + read_into_buffer(&sample_file, valid_len, start_offset, &mut buffer).unwrap(); + assert_eq!(num_bytes_read, valid_len - start_offset); + assert_eq!(buffer[0..num_bytes_read], bytes[start_offset..buffer_len]); + assert_eq!(buffer[num_bytes_read..buffer_len], [0; 8]) } #[test] fn test_read_more_buffer() { // Setup a sample file with 32 bytes of data let mut sample_file = tempfile().unwrap(); - let bytes: Vec = (0..32).collect(); + let file_size = 32; + let bytes: Vec = (0..file_size as u8).collect(); sample_file.write_all(&bytes).unwrap(); // Should move left-over 8 bytes to and read 24 bytes from file let mut buffer = [0xFFu8; 32]; + let buffer_len = buffer.len(); let mut offset = 0; let mut valid_bytes = 24..32; - read_more_buffer(&sample_file, 32, &mut offset, &mut buffer, &mut valid_bytes).unwrap(); - assert_eq!(offset, 24); - assert_eq!(valid_bytes, 0..32); - assert_eq!(buffer[0..8], [0xFFu8; 8]); - assert_eq!(buffer[8..32], bytes[0..24]); + let mut valid_bytes_len = valid_bytes.len(); + let valid_len = 32; + read_more_buffer( + &sample_file, + valid_len, + &mut offset, + &mut buffer, + &mut valid_bytes, + ) + .unwrap(); + assert_eq!(offset, buffer_len - valid_bytes_len); + assert_eq!(valid_bytes, 0..buffer_len); + assert_eq!(buffer[0..valid_bytes_len], [0xFFu8; 8]); + assert_eq!( + buffer[valid_bytes_len..buffer_len], + bytes[0..buffer_len - valid_bytes_len] + ); // Should move left-over 8 bytes to and read 16 
bytes from file due to EOF let mut buffer = [0xFFu8; 32]; + let start_offset = 16; let mut offset = 16; let mut valid_bytes = 24..32; - read_more_buffer(&sample_file, 32, &mut offset, &mut buffer, &mut valid_bytes).unwrap(); - assert_eq!(offset, 32); + valid_bytes_len = valid_bytes.len(); + read_more_buffer( + &sample_file, + valid_len, + &mut offset, + &mut buffer, + &mut valid_bytes, + ) + .unwrap(); + assert_eq!(offset, file_size); assert_eq!(valid_bytes, 0..24); - assert_eq!(buffer[0..8], [0xFFu8; 8]); - assert_eq!(buffer[8..24], bytes[16..32]); + assert_eq!(buffer[0..valid_bytes_len], [0xFFu8; 8]); + assert_eq!( + buffer[valid_bytes_len..valid_bytes.end], + bytes[start_offset..file_size] + ); } } diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index e14f75673ca3f2..4728134a4f990c 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -1,6 +1,6 @@ use { super::{error::TieredStorageError, TieredStorageResult}, - bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable}, + bytemuck::{AnyBitPattern, NoUninit, Zeroable}, std::{ fs::{File, OpenOptions}, io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, @@ -13,7 +13,7 @@ use { /// The ending 8 bytes of a valid tiered account storage file. 
pub const FILE_MAGIC_NUMBER: u64 = u64::from_le_bytes(*b"AnzaTech"); -#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct TieredStorageMagicNumber(pub u64); diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 4d4a14e9cf28c7..74656584dee99b 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -19,7 +19,7 @@ use { StorableAccounts, TieredStorageError, TieredStorageFormat, TieredStorageResult, }, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, solana_sdk::{ diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 5caf0687be5d2b..326ab3df66ea1b 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -24,7 +24,7 @@ pub trait AccountOffset: Clone + Copy + Pod + Zeroable {} /// This can be used to obtain the AccountOffset and address by looking through /// the accounts index block. 
#[repr(C)] -#[derive(Clone, Copy, Debug, Eq, PartialEq, Pod, Zeroable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] pub struct IndexOffset(pub u32); // Ensure there are no implicit padding bytes diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index a174bbc0e5299e..16d50f43086738 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -2,7 +2,7 @@ use { crate::tiered_storage::owners::OwnerOffset, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, modular_bitfield::prelude::*, solana_sdk::{pubkey::Pubkey, stake_history::Epoch}, }; diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 61105575e2ca2e..aae81192d3a07b 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -17,8 +17,7 @@ use { BanksTransactionResultWithSimulation, }, solana_program::{ - clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey, - rent::Rent, sysvar::Sysvar, + clock::Slot, hash::Hash, program_pack::Pack, pubkey::Pubkey, rent::Rent, sysvar::Sysvar, }, solana_sdk::{ account::{from_account, Account}, @@ -69,21 +68,6 @@ impl BanksClient { .map_err(Into::into) } - #[deprecated( - since = "1.9.0", - note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead" - )] - pub fn get_fees_with_commitment_and_context( - &mut self, - ctx: Context, - commitment: CommitmentLevel, - ) -> impl Future> + '_ { - #[allow(deprecated)] - self.inner - .get_fees_with_commitment_and_context(ctx, commitment) - .map_err(Into::into) - } - pub fn get_transaction_status_with_context( &mut self, ctx: Context, @@ -185,20 +169,6 @@ impl BanksClient { self.send_transaction_with_context(context::current(), transaction.into()) } - /// Return the fee parameters associated with a recent, rooted blockhash. 
The cluster - /// will use the transaction's blockhash to look up these same fee parameters and - /// use them to calculate the transaction fee. - #[deprecated( - since = "1.9.0", - note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead" - )] - pub fn get_fees( - &mut self, - ) -> impl Future> + '_ { - #[allow(deprecated)] - self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default()) - } - /// Return the cluster Sysvar pub fn get_sysvar( &mut self, @@ -216,17 +186,6 @@ impl BanksClient { self.get_sysvar::() } - /// Return a recent, rooted blockhash from the server. The cluster will only accept - /// transactions with a blockhash that has not yet expired. Use the `get_fees` - /// method to get both a blockhash and the blockhash's last valid slot. - #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] - pub fn get_recent_blockhash( - &mut self, - ) -> impl Future> + '_ { - #[allow(deprecated)] - self.get_fees().map(|result| Ok(result?.1)) - } - /// Send a transaction and return after the transaction has been rejected or /// reached the given level of commitment. 
pub fn process_transaction_with_commitment( diff --git a/banks-interface/src/lib.rs b/banks-interface/src/lib.rs index d1cd7b867b514d..8f005be62f9749 100644 --- a/banks-interface/src/lib.rs +++ b/banks-interface/src/lib.rs @@ -6,7 +6,6 @@ use { account::Account, clock::Slot, commitment_config::CommitmentLevel, - fee_calculator::FeeCalculator, hash::Hash, inner_instruction::InnerInstructions, message::Message, @@ -64,13 +63,6 @@ pub struct BanksTransactionResultWithMetadata { #[tarpc::service] pub trait Banks { async fn send_transaction_with_context(transaction: VersionedTransaction); - #[deprecated( - since = "1.9.0", - note = "Please use `get_fee_for_message_with_commitment_and_context` instead" - )] - async fn get_fees_with_commitment_and_context( - commitment: CommitmentLevel, - ) -> (FeeCalculator, Hash, Slot); async fn get_transaction_status_with_context(signature: Signature) -> Option; async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot; diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 92add43452dceb..c08a41c5d91a6b 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -18,7 +18,6 @@ use { clock::Slot, commitment_config::CommitmentLevel, feature_set::FeatureSet, - fee_calculator::FeeCalculator, hash::Hash, message::{Message, SanitizedMessage}, pubkey::Pubkey, @@ -232,24 +231,6 @@ impl Banks for BanksServer { self.transaction_sender.send(info).unwrap(); } - async fn get_fees_with_commitment_and_context( - self, - _: Context, - commitment: CommitmentLevel, - ) -> (FeeCalculator, Hash, u64) { - let bank = self.bank(commitment); - let blockhash = bank.last_blockhash(); - let lamports_per_signature = bank.get_lamports_per_signature(); - let last_valid_block_height = bank - .get_blockhash_last_valid_block_height(&blockhash) - .unwrap(); - ( - FeeCalculator::new(lamports_per_signature), - blockhash, - last_valid_block_height, - ) - } - async fn 
get_transaction_status_with_context( self, _: Context, diff --git a/bucket_map/Cargo.toml b/bucket_map/Cargo.toml index a37051e5d3054b..36d29140a025a1 100644 --- a/bucket_map/Cargo.toml +++ b/bucket_map/Cargo.toml @@ -12,7 +12,8 @@ edition = { workspace = true } [dependencies] bv = { workspace = true, features = ["serde"] } -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } log = { workspace = true } memmap2 = { workspace = true } modular-bitfield = { workspace = true } diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index 1748a27b2f458e..bdf61ae2bfe82f 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -1,7 +1,7 @@ //! Persistent info of disk index files to allow files to be reused on restart. use { crate::bucket_map::{BucketMapConfig, MAX_SEARCH_DEFAULT}, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, memmap2::MmapMut, std::{ collections::HashMap, diff --git a/ci/bench/part2.sh b/ci/bench/part2.sh index 44a6c46f2ed67f..76f1dfb50abffc 100755 --- a/ci/bench/part2.sh +++ b/ci/bench/part2.sh @@ -19,6 +19,8 @@ _ cargo +"$rust_nightly" bench --manifest-path runtime/Cargo.toml ${V:+--verbose _ cargo build --manifest-path=keygen/Cargo.toml export PATH="$PWD/target/debug":$PATH + _ make -C programs/sbf all + # Run sbf benches _ cargo +"$rust_nightly" bench --manifest-path programs/sbf/Cargo.toml ${V:+--verbose} --features=sbf_c \ -- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE" diff --git a/ci/do-audit.sh b/ci/do-audit.sh index 039df6b63cb635..df395e8a2bbc22 100755 --- a/ci/do-audit.sh +++ b/ci/do-audit.sh @@ -30,6 +30,9 @@ cargo_audit_ignores=( --ignore RUSTSEC-2023-0001 --ignore RUSTSEC-2022-0093 + + # curve25519-dalek + --ignore RUSTSEC-2024-0344 ) scripts/cargo-for-all-lock-files.sh audit "${cargo_audit_ignores[@]}" | $dep_tree_filter # we want the `cargo audit` exit code, not `$dep_tree_filter`'s diff --git 
a/ci/test-bench.sh b/ci/test-bench.sh index aacc82cffbb0a6..c145512d7b1cc6 100755 --- a/ci/test-bench.sh +++ b/ci/test-bench.sh @@ -61,6 +61,7 @@ _ $cargoNightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \ -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" # Run sbf benches +_ make -C programs/sbf all _ $cargoNightly bench --manifest-path programs/sbf/Cargo.toml ${V:+--verbose} --features=sbf_c \ -- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE" diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 40ee0ae2c40a74..acd4578542f23c 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -50,6 +50,9 @@ test-stable-sbf) cargo_build_sbf="$(realpath ./cargo-build-sbf)" cargo_test_sbf="$(realpath ./cargo-test-sbf)" + # platform-tools version + "$cargo_build_sbf" --version + # SBF solana-sdk legacy compile test "$cargo_build_sbf" --manifest-path sdk/Cargo.toml @@ -67,11 +70,9 @@ test-stable-sbf) exit 1 fi - # SBF C program system tests - _ make -C programs/sbf/c tests - _ cargo test \ - --manifest-path programs/sbf/Cargo.toml \ - --no-default-features --features=sbf_c,sbf_rust -- --nocapture + # SBF program tests + export SBF_OUT_DIR=target/sbf-solana-solana/release + _ make -C programs/sbf test # SBF Rust program unit tests for sbf_test in programs/sbf/rust/*; do @@ -99,14 +100,11 @@ test-stable-sbf) exit 1 fi - # platform-tools version - "$cargo_build_sbf" -V - # SBF program instruction count assertion sbf_target_path=programs/sbf/target _ cargo test \ --manifest-path programs/sbf/Cargo.toml \ - --no-default-features --features=sbf_c,sbf_rust assert_instruction_count \ + --features=sbf_c,sbf_rust assert_instruction_count \ -- --nocapture &> "${sbf_target_path}"/deploy/instruction_counts.txt sbf_dump_archive="sbf-dumps.tar.bz2" diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 5fc886134d0507..1aee7e38ec3bc8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -65,9 +65,6 @@ pub enum CliCommand { ClusterVersion, 
Feature(FeatureCliCommand), Inflation(InflationCliCommand), - Fees { - blockhash: Option, - }, FindProgramDerivedAddress { seeds: Vec>, program_id: Pubkey, @@ -640,12 +637,6 @@ pub fn parse_command( ("feature", Some(matches)) => { parse_feature_subcommand(matches, default_signer, wallet_manager) } - ("fees", Some(matches)) => { - let blockhash = value_of::(matches, "blockhash"); - Ok(CliCommandInfo::without_signers(CliCommand::Fees { - blockhash, - })) - } ("first-available-block", Some(_matches)) => Ok(CliCommandInfo::without_signers( CliCommand::FirstAvailableBlock, )), @@ -911,7 +902,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { seed, program_id, } => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id), - CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()), CliCommand::Feature(feature_subcommand) => { process_feature_subcommand(&rpc_client, config, feature_subcommand) } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index eaa8142c3b05f3..46067f29d38eb1 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -171,19 +171,6 @@ impl ClusterQuerySubCommands for App<'_, '_> { SubCommand::with_name("cluster-version") .about("Get the version of the cluster entrypoint"), ) - // Deprecated in v1.8.0 - .subcommand( - SubCommand::with_name("fees") - .about("Display current cluster fees (Deprecated in v1.8.0)") - .arg( - Arg::with_name("blockhash") - .long("blockhash") - .takes_value(true) - .value_name("BLOCKHASH") - .validator(is_hash) - .help("Query fees for BLOCKHASH instead of the most recent blockhash"), - ), - ) .subcommand( SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), @@ -982,42 +969,6 @@ pub fn process_cluster_version(rpc_client: &RpcClient, config: &CliConfig) -> Pr } } -pub fn process_fees( - rpc_client: &RpcClient, - config: &CliConfig, - blockhash: Option<&Hash>, -) -> ProcessResult 
{ - let fees = if let Some(recent_blockhash) = blockhash { - #[allow(deprecated)] - let result = rpc_client.get_fee_calculator_for_blockhash_with_commitment( - recent_blockhash, - config.commitment, - )?; - if let Some(fee_calculator) = result.value { - CliFees::some( - result.context.slot, - *recent_blockhash, - fee_calculator.lamports_per_signature, - None, - None, - ) - } else { - CliFees::none() - } - } else { - #[allow(deprecated)] - let result = rpc_client.get_fees_with_commitment(config.commitment)?; - CliFees::some( - result.context.slot, - result.value.blockhash, - result.value.fee_calculator.lamports_per_signature, - None, - Some(result.value.last_valid_block_height), - ) - }; - Ok(config.output_format.formatted_string(&fees)) -} - pub fn process_first_available_block(rpc_client: &RpcClient) -> ProcessResult { let first_available_block = rpc_client.get_first_available_block()?; Ok(format!("{first_available_block}")) @@ -1444,6 +1395,7 @@ pub fn process_largest_accounts( .get_largest_accounts_with_config(RpcLargestAccountsConfig { commitment: Some(config.commitment), filter, + sort_results: None, })? 
.value; let largest_accounts = CliAccountBalances { accounts }; @@ -2372,26 +2324,6 @@ mod tests { CliCommandInfo::without_signers(CliCommand::ClusterVersion) ); - let test_fees = test_commands.clone().get_matches_from(vec!["test", "fees"]); - assert_eq!( - parse_command(&test_fees, &default_signer, &mut None).unwrap(), - CliCommandInfo::without_signers(CliCommand::Fees { blockhash: None }) - ); - - let blockhash = Hash::new_unique(); - let test_fees = test_commands.clone().get_matches_from(vec![ - "test", - "fees", - "--blockhash", - &blockhash.to_string(), - ]); - assert_eq!( - parse_command(&test_fees, &default_signer, &mut None).unwrap(), - CliCommandInfo::without_signers(CliCommand::Fees { - blockhash: Some(blockhash) - }) - ); - let slot = 100; let test_get_block_time = test_commands diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 785ed93425e5be..c6e397d2e01088 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -819,7 +819,7 @@ fn feature_activation_allowed( )) } -fn status_from_account(account: Account) -> Option { +pub(super) fn status_from_account(account: Account) -> Option { feature::from_account(&account).map(|feature| match feature.activated_at { None => CliFeatureStatus::Pending, Some(activation_slot) => CliFeatureStatus::Active(activation_slot), diff --git a/cli/src/program.rs b/cli/src/program.rs index df4aa8731bdb69..880bab2e435431 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -9,6 +9,7 @@ use { simulate_and_update_compute_unit_limit, ComputeUnitConfig, UpdateComputeUnitLimitResult, WithComputeUnitConfig, }, + feature::{status_from_account, CliFeatureStatus}, }, bip39::{Language, Mnemonic, MnemonicType, Seed}, clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, @@ -47,6 +48,7 @@ use { client_error::ErrorKind as ClientErrorKind, config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig}, filter::{Memcmp, RpcFilterType}, + request::MAX_MULTIPLE_ACCOUNTS, }, 
solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ @@ -56,7 +58,7 @@ use { bpf_loader_upgradeable::{self, get_program_data_address, UpgradeableLoaderState}, commitment_config::CommitmentConfig, compute_budget, - feature_set::FeatureSet, + feature_set::{FeatureSet, FEATURE_NAMES}, instruction::{Instruction, InstructionError}, message::Message, packet::PACKET_DATA_SIZE, @@ -99,6 +101,7 @@ pub enum ProgramCliCommand { max_sign_attempts: usize, auto_extend: bool, use_rpc: bool, + skip_feature_verification: bool, }, Upgrade { fee_payer_signer_index: SignerIndex, @@ -108,6 +111,7 @@ pub enum ProgramCliCommand { sign_only: bool, dump_transaction_message: bool, blockhash_query: BlockhashQuery, + skip_feature_verification: bool, }, WriteBuffer { program_location: String, @@ -120,6 +124,7 @@ pub enum ProgramCliCommand { compute_unit_price: Option, max_sign_attempts: usize, use_rpc: bool, + skip_feature_verification: bool, }, SetBufferAuthority { buffer_pubkey: Pubkey, @@ -279,6 +284,14 @@ impl ProgramSubCommands for App<'_, '_> { .long("no-auto-extend") .takes_value(false) .help("Don't automatically extend the program's data account size"), + ) + .arg( + Arg::with_name("skip_feature_verify") + .long("skip-feature-verify") + .takes_value(false) + .help("Don't verify program against the activated feature set. \ + This setting means a program containing a syscall not yet active on \ + mainnet will succeed local verification, but fail during the last step of deployment.") ), ) .subcommand( @@ -309,6 +322,14 @@ impl ProgramSubCommands for App<'_, '_> { "Upgrade authority [default: the default configured keypair]", ), ) + .arg( + Arg::with_name("skip_feature_verify") + .long("skip-feature-verify") + .takes_value(false) + .help("Don't verify program against the activated feature set. 
\ + This setting means a program containing a syscall not yet active on \ + mainnet will succeed local verification, but fail during the last step of deployment.") + ) .offline_args(), ) .subcommand( @@ -375,7 +396,15 @@ impl ProgramSubCommands for App<'_, '_> { .arg(Arg::with_name("use_rpc").long("use-rpc").help( "Send transactions to the configured RPC instead of validator TPUs", )) - .arg(compute_unit_price_arg()), + .arg(compute_unit_price_arg()) + .arg( + Arg::with_name("skip_feature_verify") + .long("skip-feature-verify") + .takes_value(false) + .help("Don't verify program against the activated feature set. \ + This setting means a program containing a syscall not yet active on \ + mainnet will succeed local verification, but fail during the last step of deployment.") + ), ) .subcommand( SubCommand::with_name("set-buffer-authority") @@ -673,6 +702,8 @@ pub fn parse_program_subcommand( let auto_extend = !matches.is_present("no_auto_extend"); + let skip_feature_verify = matches.is_present("skip_feature_verify"); + CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location, @@ -691,6 +722,7 @@ pub fn parse_program_subcommand( max_sign_attempts, use_rpc: matches.is_present("use_rpc"), auto_extend, + skip_feature_verification: skip_feature_verify, }), signers: signer_info.signers, } @@ -720,6 +752,8 @@ pub fn parse_program_subcommand( let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + let skip_feature_verify = matches.is_present("skip_feature_verify"); + CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Upgrade { fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), @@ -731,6 +765,7 @@ pub fn parse_program_subcommand( sign_only, dump_transaction_message, blockhash_query, + skip_feature_verification: skip_feature_verify, }), signers: signer_info.signers, } @@ -764,6 +799,7 @@ pub fn parse_program_subcommand( let compute_unit_price = value_of(matches, 
"compute_unit_price"); let max_sign_attempts = value_of(matches, "max_sign_attempts").unwrap(); + let skip_feature_verify = matches.is_present("skip_feature_verify"); CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { @@ -779,6 +815,7 @@ pub fn parse_program_subcommand( compute_unit_price, max_sign_attempts, use_rpc: matches.is_present("use_rpc"), + skip_feature_verification: skip_feature_verify, }), signers: signer_info.signers, } @@ -979,6 +1016,7 @@ pub fn process_program_subcommand( max_sign_attempts, auto_extend, use_rpc, + skip_feature_verification, } => process_program_deploy( rpc_client, config, @@ -996,6 +1034,7 @@ pub fn process_program_subcommand( *max_sign_attempts, *auto_extend, *use_rpc, + *skip_feature_verification, ), ProgramCliCommand::Upgrade { fee_payer_signer_index, @@ -1005,6 +1044,7 @@ pub fn process_program_subcommand( sign_only, dump_transaction_message, blockhash_query, + skip_feature_verification, } => process_program_upgrade( rpc_client, config, @@ -1015,6 +1055,7 @@ pub fn process_program_subcommand( *sign_only, *dump_transaction_message, blockhash_query, + *skip_feature_verification, ), ProgramCliCommand::WriteBuffer { program_location, @@ -1027,6 +1068,7 @@ pub fn process_program_subcommand( compute_unit_price, max_sign_attempts, use_rpc, + skip_feature_verification, } => process_write_buffer( rpc_client, config, @@ -1040,6 +1082,7 @@ pub fn process_program_subcommand( *compute_unit_price, *max_sign_attempts, *use_rpc, + *skip_feature_verification, ), ProgramCliCommand::SetBufferAuthority { buffer_pubkey, @@ -1174,6 +1217,7 @@ fn process_program_deploy( max_sign_attempts: usize, auto_extend: bool, use_rpc: bool, + skip_feature_verification: bool, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; @@ -1265,9 +1309,15 @@ fn process_program_deploy( true }; + let feature_set = if 
skip_feature_verification { + FeatureSet::all_enabled() + } else { + fetch_feature_set(&rpc_client)? + }; + let (program_data, program_len, buffer_program_data) = if let Some(program_location) = program_location { - let program_data = read_and_verify_elf(program_location)?; + let program_data = read_and_verify_elf(program_location, feature_set)?; let program_len = program_data.len(); // If a buffer was provided, check if it has already been created and set up properly @@ -1290,6 +1340,7 @@ fn process_program_deploy( config, buffer_pubkey, upgrade_authority_signer.pubkey(), + feature_set, )?; (vec![], buffer_program_data.len(), Some(buffer_program_data)) @@ -1382,6 +1433,7 @@ fn fetch_verified_buffer_program_data( config: &CliConfig, buffer_pubkey: Pubkey, buffer_authority: Pubkey, + feature_set: FeatureSet, ) -> Result, Box> { let Some(buffer_program_data) = fetch_buffer_program_data(rpc_client, config, None, buffer_pubkey, buffer_authority)? @@ -1389,7 +1441,7 @@ fn fetch_verified_buffer_program_data( return Err(format!("Buffer account {buffer_pubkey} not found").into()); }; - verify_elf(&buffer_program_data).map_err(|err| { + verify_elf(&buffer_program_data, feature_set).map_err(|err| { format!( "Buffer account {buffer_pubkey} has invalid program data: {:?}", err @@ -1466,6 +1518,7 @@ fn process_program_upgrade( sign_only: bool, dump_transaction_message: bool, blockhash_query: &BlockhashQuery, + skip_feature_verification: bool, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; @@ -1496,11 +1549,18 @@ fn process_program_upgrade( }, ) } else { + let feature_set = if skip_feature_verification { + FeatureSet::all_enabled() + } else { + fetch_feature_set(&rpc_client)? 
+ }; + fetch_verified_buffer_program_data( &rpc_client, config, buffer_pubkey, upgrade_authority_signer.pubkey(), + feature_set, )?; let fee = rpc_client.get_fee_for_message(&message)?; @@ -1539,11 +1599,18 @@ fn process_write_buffer( compute_unit_price: Option, max_sign_attempts: usize, use_rpc: bool, + skip_feature_verification: bool, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let buffer_authority = config.signers[buffer_authority_signer_index]; - let program_data = read_and_verify_elf(program_location)?; + let feature_set = if skip_feature_verification { + FeatureSet::all_enabled() + } else { + fetch_feature_set(&rpc_client)? + }; + + let program_data = read_and_verify_elf(program_location, feature_set)?; let program_len = program_data.len(); // Create ephemeral keypair to use for Buffer account, if not provided @@ -2749,27 +2816,29 @@ fn extend_program_data_if_needed( Ok(()) } -fn read_and_verify_elf(program_location: &str) -> Result, Box> { +fn read_and_verify_elf( + program_location: &str, + feature_set: FeatureSet, +) -> Result, Box> { let mut file = File::open(program_location) .map_err(|err| format!("Unable to open program file: {err}"))?; let mut program_data = Vec::new(); file.read_to_end(&mut program_data) .map_err(|err| format!("Unable to read program file: {err}"))?; - verify_elf(&program_data)?; + verify_elf(&program_data, feature_set)?; Ok(program_data) } -fn verify_elf(program_data: &[u8]) -> Result<(), Box> { +fn verify_elf( + program_data: &[u8], + feature_set: FeatureSet, +) -> Result<(), Box> { // Verify the program - let program_runtime_environment = create_program_runtime_environment_v1( - &FeatureSet::all_enabled(), - &ComputeBudget::default(), - true, - false, - ) - .unwrap(); + let program_runtime_environment = + create_program_runtime_environment_v1(&feature_set, &ComputeBudget::default(), true, false) + .unwrap(); let executable = Executable::::from_elf(program_data, 
Arc::new(program_runtime_environment)) .map_err(|err| format!("ELF error: {err}"))?; @@ -2982,6 +3051,30 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) { eprintln!("[BUFFER_ACCOUNT_ADDRESS] argument to `solana program close`.\n{divider}"); } +fn fetch_feature_set(rpc_client: &RpcClient) -> Result> { + let mut feature_set = FeatureSet::default(); + for feature_ids in FEATURE_NAMES + .keys() + .cloned() + .collect::>() + .chunks(MAX_MULTIPLE_ACCOUNTS) + { + rpc_client + .get_multiple_accounts(feature_ids)? + .into_iter() + .zip(feature_ids) + .for_each(|(account, feature_id)| { + let activation_slot = account.and_then(status_from_account); + + if let Some(CliFeatureStatus::Active(slot)) = activation_slot { + feature_set.activate(feature_id, slot); + } + }); + } + + Ok(feature_set) +} + #[cfg(test)] mod tests { use { @@ -3043,6 +3136,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3074,6 +3168,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3107,6 +3202,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3142,6 +3238,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3176,6 +3273,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3213,6 +3311,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ 
Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3246,6 +3345,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3277,6 +3377,7 @@ mod tests { max_sign_attempts: 1, auto_extend: true, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3307,6 +3408,75 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: true, + skip_feature_verification: false, + }), + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], + } + ); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "deploy", + "/Users/test/program.so", + "--skip-feature-verify", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: None, + program_signer_index: None, + program_pubkey: None, + upgrade_authority_signer_index: 0, + is_final: false, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + auto_extend: true, + use_rpc: false, + skip_feature_verification: true, + }), + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], + } + ); + } + + #[test] + fn test_cli_parse_upgrade() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_key = Pubkey::new_unique(); + let buffer_key = Pubkey::new_unique(); + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + 
"program", + "upgrade", + format!("{}", buffer_key).as_str(), + format!("{}", program_key).as_str(), + "--skip-feature-verify", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index: 0, + program_pubkey: program_key, + buffer_pubkey: buffer_key, + upgrade_authority_signer_index: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + skip_feature_verification: true, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3344,6 +3514,7 @@ mod tests { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3372,6 +3543,7 @@ mod tests { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: false, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3403,6 +3575,7 @@ mod tests { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3437,6 +3610,7 @@ mod tests { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3476,6 +3650,7 @@ mod tests { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: false, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3508,6 +3683,35 @@ mod tests { compute_unit_price: None, max_sign_attempts: 10, use_rpc: false, + skip_feature_verification: false + }), + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], + } + ); + + // skip feature verification + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + 
"program", + "write-buffer", + "/Users/test/program.so", + "--skip-feature-verify", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::WriteBuffer { + program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: None, + buffer_authority_signer_index: 0, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + use_rpc: false, + skip_feature_verification: true, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -4050,6 +4254,7 @@ mod tests { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }), signers: vec![&default_keypair], output_format: OutputFormat::JsonCompact, diff --git a/cli/tests/fixtures/alt_bn128.so b/cli/tests/fixtures/alt_bn128.so new file mode 100755 index 00000000000000..6d20e91a8ac71c Binary files /dev/null and b/cli/tests/fixtures/alt_bn128.so differ diff --git a/cli/tests/fixtures/build.sh b/cli/tests/fixtures/build.sh index 872ddd7a849b18..76f248b1d03d0f 100755 --- a/cli/tests/fixtures/build.sh +++ b/cli/tests/fixtures/build.sh @@ -6,3 +6,4 @@ cd "$(dirname "$0")" make -C ../../../programs/sbf/c/ cp ../../../programs/sbf/c/out/noop.so . cat noop.so noop.so noop.so > noop_large.so +cp ../../../programs/sbf/c/out/alt_bn128.so . 
diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 7a6bc4ee30c9a2..45d243c5babcd2 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -25,6 +25,7 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, compute_budget::{self, ComputeBudgetInstruction}, + feature_set::enable_alt_bn128_syscall, fee_calculator::FeeRateGovernor, pubkey::Pubkey, rent::Rent, @@ -125,6 +126,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -174,6 +176,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); let account1 = rpc_client @@ -232,6 +235,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -258,6 +262,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -330,6 +335,7 @@ fn test_cli_program_deploy_no_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -360,6 +366,7 @@ fn test_cli_program_deploy_no_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -368,6 +375,298 @@ fn test_cli_program_deploy_no_authority() { ); } +#[test_case(true; "Feature enabled")] +#[test_case(false; "Feature disabled")] +fn test_cli_program_deploy_feature(enable_feature: bool) { + solana_logger::setup(); + + let mut program_path 
= PathBuf::from(env!("CARGO_MANIFEST_DIR")); + program_path.push("tests"); + program_path.push("fixtures"); + program_path.push("alt_bn128"); + program_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let mut genesis = TestValidatorGenesis::default(); + let mut test_validator_builder = genesis + .fee_rate_governor(FeeRateGovernor::new(0, 0)) + .rent(Rent { + lamports_per_byte_year: 1, + exemption_threshold: 1.0, + ..Rent::default() + }) + .faucet_addr(Some(faucet_addr)); + + // Deactivate the enable alt bn128 syscall and try to submit a program with that syscall + if !enable_feature { + test_validator_builder = + test_validator_builder.deactivate_features(&[enable_alt_bn128_syscall::id()]); + } + + let test_validator = test_validator_builder + .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) + .expect("validator start failed"); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let mut file = File::open(program_path.to_str().unwrap()).unwrap(); + let mut program_data = Vec::new(); + file.read_to_end(&mut program_data).unwrap(); + let max_len = program_data.len(); + let minimum_balance_for_programdata = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_len, + )) + .unwrap(); + let minimum_balance_for_program = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) + .unwrap(); + let upgrade_authority = Keypair::new(); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_programdata + minimum_balance_for_program, + }; + config.signers = vec![&keypair]; + process_command(&config).unwrap(); + + 
config.signers = vec![&keypair, &upgrade_authority]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(program_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: None, + upgrade_authority_signer_index: 1, + is_final: true, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + auto_extend: true, + use_rpc: false, + skip_feature_verification: false, + }); + config.output_format = OutputFormat::JsonCompact; + + if enable_feature { + let res = process_command(&config); + assert!(res.is_ok()); + } else { + expect_command_failure( + &config, + "Program contains a syscall from a deactivated feature", + "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)" + ); + + // If we bypass the verification, there should be no error + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(program_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: None, + upgrade_authority_signer_index: 1, + is_final: true, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + auto_extend: true, + use_rpc: false, + skip_feature_verification: true, + }); + + // When we skip verification, we fail at a later stage + let response = process_command(&config); + assert!(response + .err() + .unwrap() + .to_string() + .contains("Deploying program failed: RPC response error -32002:")); + } +} + +#[test_case(true; "Feature enabled")] +#[test_case(false; "Feature disabled")] +fn test_cli_program_upgrade_with_feature(enable_feature: bool) { + solana_logger::setup(); + + let mut noop_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + noop_path.push("tests"); + 
noop_path.push("fixtures"); + noop_path.push("noop"); + noop_path.set_extension("so"); + + let mut syscall_program_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + syscall_program_path.push("tests"); + syscall_program_path.push("fixtures"); + syscall_program_path.push("alt_bn128"); + syscall_program_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + + let mut genesis = TestValidatorGenesis::default(); + let mut test_validator_builder = genesis + .fee_rate_governor(FeeRateGovernor::new(0, 0)) + .rent(Rent { + lamports_per_byte_year: 1, + exemption_threshold: 1.0, + ..Rent::default() + }) + .faucet_addr(Some(faucet_addr)); + + // Deactivate the enable alt bn128 syscall and try to submit a program with that syscall + if !enable_feature { + test_validator_builder = + test_validator_builder.deactivate_features(&[enable_alt_bn128_syscall::id()]); + } + + let test_validator = test_validator_builder + .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) + .expect("validator start failed"); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + + let mut file = File::open(syscall_program_path.to_str().unwrap()).unwrap(); + let mut large_program_data = Vec::new(); + file.read_to_end(&mut large_program_data).unwrap(); + let max_program_data_len = large_program_data.len(); + let minimum_balance_for_large_buffer = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_program_data_len, + )) + .unwrap(); + + let mut config = CliConfig::recent_for_tests(); + config.json_rpc_url = test_validator.rpc_url(); + + let online_signer = Keypair::new(); + let offline_signer = Keypair::new(); + let buffer_signer = Keypair::new(); + // Typically, keypair for program signer should be 
different from online signer or + // offline signer keypairs. + let program_signer = Keypair::new(); + + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_large_buffer, // gotta be enough for this test + }; + config.signers = vec![&online_signer]; + process_command(&config).unwrap(); + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_large_buffer, // gotta be enough for this test + }; + config.signers = vec![&offline_signer]; + process_command(&config).unwrap(); + + // Deploy upgradeable program with authority set to offline signer + config.signers = vec![&online_signer, &offline_signer, &program_signer]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: Some(2), + program_pubkey: Some(program_signer.pubkey()), + buffer_signer_index: None, + buffer_pubkey: None, + upgrade_authority_signer_index: 1, // must be offline signer for security reasons + is_final: false, + max_len: Some(max_program_data_len), // allows for larger program size with future upgrades + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + auto_extend: true, + use_rpc: false, + skip_feature_verification: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); + + // Prepare buffer to upgrade deployed program to a larger program + create_buffer_with_offline_authority( + &rpc_client, + &syscall_program_path, + &mut config, + &online_signer, + &offline_signer, + &buffer_signer, + ); + + config.signers = vec![&offline_signer]; + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index: 0, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: true, + dump_transaction_message: false, + 
blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: false, + }); + config.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_pre_signer = sign_only.presigner_of(&offline_signer.pubkey()).unwrap(); + // Attempt to deploy from buffer using signature over correct message (should succeed) + config.signers = vec![&offline_pre_signer, &program_signer]; + + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index: 0, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: false, + }); + config.output_format = OutputFormat::JsonCompact; + if enable_feature { + let res = process_command(&config); + assert!(res.is_ok()); + } else { + expect_command_failure( + &config, + "Program contains a syscall to a disabled feature", + format!("Buffer account {} has invalid program data: \"ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)\"", buffer_signer.pubkey()).as_str(), + ); + + // If we skip verification, the failure should be at a later stage + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index: 0, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: true, + }); + config.output_format = OutputFormat::JsonCompact; + + let response = process_command(&config); + assert!(response + .err() + .unwrap() + .to_string() + .contains("Upgrading program failed: RPC 
response error -32002")); + } +} + #[test] fn test_cli_program_deploy_with_authority() { solana_logger::setup(); @@ -429,6 +728,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -481,6 +781,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -527,6 +828,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -605,6 +907,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -687,6 +990,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -711,6 +1015,7 @@ fn test_cli_program_deploy_with_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -830,6 +1135,7 @@ fn test_cli_program_upgrade_auto_extend() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -852,6 +1158,7 @@ fn test_cli_program_upgrade_auto_extend() { max_sign_attempts: 5, auto_extend: false, // 
--no-auto-extend flag is present use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -884,6 +1191,7 @@ fn test_cli_program_upgrade_auto_extend() { max_sign_attempts: 5, auto_extend: true, // --no-auto-extend flag is absent use_rpc: false, + skip_feature_verification: true, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -974,6 +1282,7 @@ fn test_cli_program_close_program() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1092,6 +1401,7 @@ fn test_cli_program_extend_program() { max_sign_attempts: 5, auto_extend: false, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1142,6 +1452,7 @@ fn test_cli_program_extend_program() { max_sign_attempts: 5, auto_extend: false, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -1188,6 +1499,7 @@ fn test_cli_program_extend_program() { max_sign_attempts: 5, auto_extend: false, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); } @@ -1255,6 +1567,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1294,6 +1607,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1360,6 +1674,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); let response = 
process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1402,6 +1717,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1484,6 +1800,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1533,6 +1850,7 @@ fn test_cli_program_write_buffer() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); config.signers = vec![&keypair, &buffer_keypair]; @@ -1551,6 +1869,7 @@ fn test_cli_program_write_buffer() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let buffer_account_len = { @@ -1573,6 +1892,111 @@ fn test_cli_program_write_buffer() { ); } +#[test_case(true; "Feature enabled")] +#[test_case(false; "Feature disabled")] +fn test_cli_program_write_buffer_feature(enable_feature: bool) { + solana_logger::setup(); + + let mut program_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + program_path.push("tests"); + program_path.push("fixtures"); + program_path.push("alt_bn128"); + program_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let mut genesis = TestValidatorGenesis::default(); + let mut test_validator_builder = genesis + .fee_rate_governor(FeeRateGovernor::new(0, 0)) + .rent(Rent { + lamports_per_byte_year: 1, + exemption_threshold: 1.0, + ..Rent::default() + }) + .faucet_addr(Some(faucet_addr)); + + // Deactivate the enable alt bn128 
syscall and try to submit a program with that syscall + if !enable_feature { + test_validator_builder = + test_validator_builder.deactivate_features(&[enable_alt_bn128_syscall::id()]); + } + + let test_validator = test_validator_builder + .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) + .expect("validator start failed"); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let mut file = File::open(program_path.to_str().unwrap()).unwrap(); + let mut program_data = Vec::new(); + file.read_to_end(&mut program_data).unwrap(); + let max_len = program_data.len(); + let minimum_balance_for_buffer = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_len, + )) + .unwrap(); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_buffer, + }; + process_command(&config).unwrap(); + + // Write a buffer with default params + config.signers = vec![&keypair]; + config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { + program_location: program_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: None, + buffer_authority_signer_index: 0, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + use_rpc: false, + skip_feature_verification: false, + }); + config.output_format = OutputFormat::JsonCompact; + + if enable_feature { + let response = process_command(&config); + assert!(response.is_ok()); + } else { + expect_command_failure( + &config, + "Program contains a syscall from a deactivated feature", + "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)" + ); + + // If we 
bypass the verification, there should be no error + config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { + program_location: program_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: None, + buffer_authority_signer_index: 0, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 5, + use_rpc: false, + skip_feature_verification: true, + }); + + // When we skip verification, we won't fail + let response = process_command(&config); + assert!(response.is_ok()); + } +} + #[test] fn test_cli_program_set_buffer_authority() { solana_logger::setup(); @@ -1626,6 +2050,7 @@ fn test_cli_program_set_buffer_authority() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1681,6 +2106,7 @@ fn test_cli_program_set_buffer_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; expect_command_failure( @@ -1737,6 +2163,7 @@ fn test_cli_program_set_buffer_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1796,6 +2223,7 @@ fn test_cli_program_mismatch_buffer_authority() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1823,6 +2251,7 @@ fn test_cli_program_mismatch_buffer_authority() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); expect_command_failure( &config, @@ -1851,6 +2280,7 @@ fn test_cli_program_mismatch_buffer_authority() { 
max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); } @@ -1937,6 +2367,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1967,6 +2398,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config).unwrap(); @@ -1988,6 +2420,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; expect_command_failure( @@ -2012,6 +2445,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config).unwrap(); @@ -2033,6 +2467,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -2110,6 +2545,7 @@ fn test_cli_program_show() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); 
process_command(&config).unwrap(); @@ -2174,6 +2610,7 @@ fn test_cli_program_show() { max_sign_attempts: 5, auto_extend: true, use_rpc: false, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let min_slot = rpc_client.get_slot().unwrap(); @@ -2305,6 +2742,7 @@ fn test_cli_program_dump() { compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); process_command(&config).unwrap(); @@ -2351,6 +2789,7 @@ fn create_buffer_with_offline_authority<'a>( compute_unit_price: None, max_sign_attempts: 5, use_rpc: false, + skip_feature_verification: true, }); process_command(config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_signer.pubkey()).unwrap(); @@ -2451,6 +2890,7 @@ fn test_cli_program_deploy_with_args(compute_unit_price: Option, use_rpc: b max_sign_attempts: 5, auto_extend: true, use_rpc, + skip_feature_verification: true, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 217e073f1c3ada..92a4b1ef109535 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -12,8 +12,7 @@ use { solana_faucet::faucet::run_local_faucet, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ - request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, - response::{RpcStakeActivation, StakeActivationState}, + request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, response::StakeActivationState, }, solana_rpc_client_nonce_utils::blockhash_query::{self, BlockhashQuery}, solana_sdk::{ @@ -29,8 +28,9 @@ use { stake::{ self, instruction::LockupArgs, - state::{Lockup, StakeAuthorize, StakeStateV2}, + state::{Delegation, Lockup, StakeActivationStatus, StakeAuthorize, StakeStateV2}, }, + sysvar::stake_history, }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::{TestValidator, TestValidatorGenesis}, @@ -163,6 +163,30 @@ fn test_stake_redelegation() { // wait for new epoch plus one 
additional slot for rewards payout wait_for_next_epoch_plus_n_slots(&rpc_client, 1); + let check_activation_status = |delegation: &Delegation, + expected_state: StakeActivationState, + expected_active_stake: u64| { + let stake_history_account = rpc_client.get_account(&stake_history::id()).unwrap(); + let stake_history = solana_sdk::account::from_account(&stake_history_account).unwrap(); + let current_epoch = rpc_client.get_epoch_info().unwrap().epoch; + let StakeActivationStatus { + effective, + activating, + deactivating, + } = delegation.stake_activating_and_deactivating(current_epoch, &stake_history, None); + let stake_activation_state = if deactivating > 0 { + StakeActivationState::Deactivating + } else if activating > 0 { + StakeActivationState::Activating + } else if effective > 0 { + StakeActivationState::Active + } else { + StakeActivationState::Inactive + }; + assert_eq!(stake_activation_state, expected_state); + assert_eq!(effective, expected_active_stake); + }; + // `stake_keypair` should now be delegated to `vote_keypair` and fully activated let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); let stake_state: StakeStateV2 = stake_account.state().unwrap(); @@ -170,21 +194,16 @@ fn test_stake_redelegation() { let rent_exempt_reserve = match stake_state { StakeStateV2::Stake(meta, stake, _) => { assert_eq!(stake.delegation.voter_pubkey, vote_keypair.pubkey()); + check_activation_status( + &stake.delegation, + StakeActivationState::Active, + 50_000_000_000 - meta.rent_exempt_reserve, + ); meta.rent_exempt_reserve } _ => panic!("Unexpected stake state!"), }; - assert_eq!( - rpc_client - .get_stake_activation(stake_keypair.pubkey(), None) - .unwrap(), - RpcStakeActivation { - state: StakeActivationState::Active, - active: 50_000_000_000 - rent_exempt_reserve, - inactive: 0 - } - ); check_balance!(50_000_000_000, &rpc_client, &stake_keypair.pubkey()); let stake2_keypair = Keypair::new(); @@ -226,28 +245,24 @@ fn 
test_stake_redelegation() { process_command(&config).unwrap(); // `stake_keypair` should now be deactivating - assert_eq!( - rpc_client - .get_stake_activation(stake_keypair.pubkey(), None) - .unwrap(), - RpcStakeActivation { - state: StakeActivationState::Deactivating, - active: 50_000_000_000 - rent_exempt_reserve, - inactive: 0, - } + let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); + let stake_state: StakeStateV2 = stake_account.state().unwrap(); + let StakeStateV2::Stake(_, stake, _) = stake_state else { + panic!() + }; + check_activation_status( + &stake.delegation, + StakeActivationState::Deactivating, + 50_000_000_000 - rent_exempt_reserve, ); // `stake_keypair2` should now be activating - assert_eq!( - rpc_client - .get_stake_activation(stake2_keypair.pubkey(), None) - .unwrap(), - RpcStakeActivation { - state: StakeActivationState::Activating, - active: 0, - inactive: 50_000_000_000 - rent_exempt_reserve, - } - ); + let stake_account = rpc_client.get_account(&stake2_keypair.pubkey()).unwrap(); + let stake_state: StakeStateV2 = stake_account.state().unwrap(); + let StakeStateV2::Stake(_, stake, _) = stake_state else { + panic!() + }; + check_activation_status(&stake.delegation, StakeActivationState::Activating, 0); // check that all the stake, save `rent_exempt_reserve`, have been moved from `stake_keypair` // to `stake2_keypair` @@ -258,38 +273,28 @@ fn test_stake_redelegation() { wait_for_next_epoch_plus_n_slots(&rpc_client, 1); // `stake_keypair` should now be deactivated - assert_eq!( - rpc_client - .get_stake_activation(stake_keypair.pubkey(), None) - .unwrap(), - RpcStakeActivation { - state: StakeActivationState::Inactive, - active: 0, - inactive: 0, - } - ); + let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); + let stake_state: StakeStateV2 = stake_account.state().unwrap(); + let StakeStateV2::Stake(_, stake, _) = stake_state else { + panic!() + }; + check_activation_status(&stake.delegation, 
StakeActivationState::Inactive, 0); // `stake2_keypair` should now be delegated to `vote2_keypair` and fully activated let stake2_account = rpc_client.get_account(&stake2_keypair.pubkey()).unwrap(); let stake2_state: StakeStateV2 = stake2_account.state().unwrap(); match stake2_state { - StakeStateV2::Stake(_meta, stake, _) => { + StakeStateV2::Stake(meta, stake, _) => { assert_eq!(stake.delegation.voter_pubkey, vote2_keypair.pubkey()); + check_activation_status( + &stake.delegation, + StakeActivationState::Active, + 50_000_000_000 - meta.rent_exempt_reserve, + ); } _ => panic!("Unexpected stake2 state!"), }; - - assert_eq!( - rpc_client - .get_stake_activation(stake2_keypair.pubkey(), None) - .unwrap(), - RpcStakeActivation { - state: StakeActivationState::Active, - active: 50_000_000_000 - rent_exempt_reserve, - inactive: 0 - } - ); } #[test] diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index a682b6e6db2247..107c7210082aab 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -16,7 +16,6 @@ use { solana_streamer::streamer::StakedNodes, solana_udp_client::{UdpConfig, UdpConnectionManager, UdpPool}, std::{ - error::Error, net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{Arc, RwLock}, }, @@ -110,29 +109,6 @@ impl ConnectionCache { } } - #[deprecated( - since = "1.15.0", - note = "This method does not do anything. Please use `new_with_client_options` instead to set the client certificate." - )] - pub fn update_client_certificate( - &mut self, - _keypair: &Keypair, - _ipaddr: IpAddr, - ) -> Result<(), Box> { - Ok(()) - } - - #[deprecated( - since = "1.15.0", - note = "This method does not do anything. Please use `new_with_client_options` instead to set staked nodes information." - )] - pub fn set_staked_nodes( - &mut self, - _staked_nodes: &Arc>, - _client_pubkey: &Pubkey, - ) { - } - pub fn with_udp(name: &'static str, connection_pool_size: usize) -> Self { // The minimum pool size is 1. 
let connection_pool_size = 1.max(connection_pool_size); diff --git a/client/src/lib.rs b/client/src/lib.rs index 889b0c4d279c08..f5e045ff531604 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -2,13 +2,10 @@ pub mod connection_cache; pub mod nonblocking; -pub mod quic_client; pub mod send_and_confirm_transactions_in_parallel; pub mod thin_client; pub mod tpu_client; -pub mod tpu_connection; pub mod transaction_executor; -pub mod udp_client; extern crate solana_metrics; @@ -47,9 +44,6 @@ pub mod rpc_config { pub mod rpc_custom_error { pub use solana_rpc_client_api::custom_error::*; } -pub mod rpc_deprecated_config { - pub use solana_rpc_client_api::deprecated_config::*; -} pub mod rpc_filter { pub use solana_rpc_client_api::filter::*; } diff --git a/client/src/nonblocking/mod.rs b/client/src/nonblocking/mod.rs index ab11ae5c6782b2..b62618c024b5ca 100644 --- a/client/src/nonblocking/mod.rs +++ b/client/src/nonblocking/mod.rs @@ -1,7 +1,4 @@ -pub mod quic_client; pub mod tpu_client; -pub mod tpu_connection; -pub mod udp_client; pub mod blockhash_query { pub use solana_rpc_client_nonce_utils::nonblocking::blockhash_query::*; diff --git a/client/src/nonblocking/quic_client.rs b/client/src/nonblocking/quic_client.rs deleted file mode 100644 index 28b9649289e2b4..00000000000000 --- a/client/src/nonblocking/quic_client.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_quic_client::nonblocking::quic_client::QuicClientConnection` instead." 
-)] -pub use solana_quic_client::nonblocking::quic_client::QuicClientConnection as QuicTpuConnection; -pub use solana_quic_client::nonblocking::quic_client::{ - QuicClient, QuicClientCertificate, QuicLazyInitializedEndpoint, -}; diff --git a/client/src/nonblocking/tpu_connection.rs b/client/src/nonblocking/tpu_connection.rs deleted file mode 100644 index b91a88853310b4..00000000000000 --- a/client/src/nonblocking/tpu_connection.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_connection_cache::nonblocking::client_connection::ClientConnection` instead." -)] -pub use solana_connection_cache::nonblocking::client_connection::ClientConnection as TpuConnection; diff --git a/client/src/nonblocking/udp_client.rs b/client/src/nonblocking/udp_client.rs deleted file mode 100644 index e880b1fb107cf8..00000000000000 --- a/client/src/nonblocking/udp_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_udp_client::nonblocking::udp_client::UdpClientConnection` instead." -)] -pub use solana_udp_client::nonblocking::udp_client::UdpClientConnection as UdpTpuConnection; diff --git a/client/src/quic_client.rs b/client/src/quic_client.rs deleted file mode 100644 index a32aa381cb10ef..00000000000000 --- a/client/src/quic_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_quic_client::quic_client::QuicClientConnection` instead." 
-)] -pub use solana_quic_client::quic_client::QuicClientConnection as QuicTpuConnection; diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 61f24018e8c778..596c9a13a6dbd3 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -11,10 +11,8 @@ use { solana_sdk::{ account::Account, client::{AsyncClient, Client, SyncClient}, - clock::Slot, commitment_config::CommitmentConfig, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, instruction::Instruction, message::Message, @@ -213,20 +211,6 @@ impl SyncClient for ThinClient { dispatch!(fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> TransportResult); - dispatch!(#[allow(deprecated)] fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)>); - - dispatch!(#[allow(deprecated)] fn get_recent_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig - ) -> TransportResult<(Hash, FeeCalculator, Slot)>); - - dispatch!(#[allow(deprecated)] fn get_fee_calculator_for_blockhash( - &self, - blockhash: &Hash - ) -> TransportResult>); - - dispatch!(#[allow(deprecated)] fn get_fee_rate_governor(&self) -> TransportResult); - dispatch!(fn get_signature_status( &self, signature: &Signature @@ -262,8 +246,6 @@ impl SyncClient for ThinClient { dispatch!(fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()>); - dispatch!(#[allow(deprecated)] fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)>); - dispatch!(fn get_latest_blockhash(&self) -> TransportResult); dispatch!(fn get_latest_blockhash_with_commitment( diff --git a/client/src/tpu_connection.rs b/client/src/tpu_connection.rs deleted file mode 100644 index 9e000612a51e03..00000000000000 --- a/client/src/tpu_connection.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_connection_cache::client_connection::ClientConnection` instead." 
-)] -pub use solana_connection_cache::client_connection::ClientConnection as TpuConnection; -pub use solana_connection_cache::client_connection::ClientStats; diff --git a/client/src/udp_client.rs b/client/src/udp_client.rs deleted file mode 100644 index c05b74b3640749..00000000000000 --- a/client/src/udp_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_udp_client::udp_client::UdpClientConnection` instead." -)] -pub use solana_udp_client::udp_client::UdpClientConnection as UdpTpuConnection; diff --git a/compute-budget/src/compute_budget.rs b/compute-budget/src/compute_budget.rs index 6b91affca4dd1c..24eeb46815372d 100644 --- a/compute-budget/src/compute_budget.rs +++ b/compute-budget/src/compute_budget.rs @@ -1,9 +1,4 @@ -use { - crate::compute_budget_processor::{ - self, process_compute_budget_instructions, DEFAULT_HEAP_COST, - }, - solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, transaction::Result}, -}; +use crate::compute_budget_processor::{self, ComputeBudgetLimits, DEFAULT_HEAP_COST}; #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { @@ -13,6 +8,16 @@ impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { } } +/// Max instruction stack depth. This is the maximum nesting of instructions that can happen during +/// a transaction. +pub const MAX_INSTRUCTION_STACK_DEPTH: usize = 5; + +/// Max call depth. This is the maximum nesting of SBF to SBF call that can happen within a program. +pub const MAX_CALL_DEPTH: usize = 64; + +/// The size of one SBF stack frame. 
+pub const STACK_FRAME_SIZE: usize = 4096; + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudget { /// Number of compute units that a transaction or individual instruction is @@ -26,11 +31,11 @@ pub struct ComputeBudget { /// Number of compute units consumed by an invoke call (not including the cost incurred by /// the called program) pub invoke_units: u64, - /// Maximum program instruction invocation stack height. Invocation stack - /// height starts at 1 for transaction instructions and the stack height is + /// Maximum program instruction invocation stack depth. Invocation stack + /// depth starts at 1 for transaction instructions and the stack depth is /// incremented each time a program invokes an instruction and decremented /// when a program returns. - pub max_invoke_stack_height: usize, + pub max_instruction_stack_depth: usize, /// Maximum cross-program invocation and instructions per transaction pub max_instruction_trace_length: usize, /// Base number of compute units consumed to call SHA256 @@ -126,6 +131,16 @@ impl Default for ComputeBudget { } } +impl From for ComputeBudget { + fn from(compute_budget_limits: ComputeBudgetLimits) -> Self { + ComputeBudget { + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), + heap_size: compute_budget_limits.updated_heap_bytes, + ..ComputeBudget::default() + } + } +} + impl ComputeBudget { pub fn new(compute_unit_limit: u64) -> Self { ComputeBudget { @@ -133,13 +148,13 @@ impl ComputeBudget { log_64_units: 100, create_program_address_units: 1500, invoke_units: 1000, - max_invoke_stack_height: 5, + max_instruction_stack_depth: MAX_INSTRUCTION_STACK_DEPTH, max_instruction_trace_length: 64, sha256_base_cost: 85, sha256_byte_cost: 1, sha256_max_slices: 20_000, - max_call_depth: 64, - stack_frame_size: 4_096, + max_call_depth: MAX_CALL_DEPTH, + stack_frame_size: STACK_FRAME_SIZE, log_pubkey_units: 100, max_cpi_instruction_size: 1280, // IPv6 Min MTU size cpi_bytes_per_unit: 250, // 
~50MB at 200,000 units @@ -176,17 +191,6 @@ impl ComputeBudget { } } - pub fn try_from_instructions<'a>( - instructions: impl Iterator, - ) -> Result { - let compute_budget_limits = process_compute_budget_instructions(instructions)?; - Ok(ComputeBudget { - compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), - heap_size: compute_budget_limits.updated_heap_bytes, - ..ComputeBudget::default() - }) - } - /// Returns cost of the Poseidon hash function for the given number of /// inputs is determined by the following quadratic function: /// diff --git a/compute-budget/src/compute_budget_processor.rs b/compute-budget/src/compute_budget_processor.rs index dc568f82f169bb..edd56e382a6bf2 100644 --- a/compute-budget/src/compute_budget_processor.rs +++ b/compute-budget/src/compute_budget_processor.rs @@ -3,7 +3,7 @@ use { solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, + entrypoint::HEAP_LENGTH, fee::FeeBudgetLimits, instruction::{CompiledInstruction, InstructionError}, pubkey::Pubkey, @@ -11,18 +11,19 @@ use { }, }; -const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; /// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the /// default heap page cost = 0.5 * 15 ~= 8CU/page pub const DEFAULT_HEAP_COST: u64 = 8; pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; +pub const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; +pub const MIN_HEAP_FRAME_BYTES: u32 = HEAP_LENGTH as u32; /// The total accounts data a transaction can load is limited to 64MiB to not break /// anyone in Mainnet-beta today. 
It can be set by set_loaded_accounts_data_size_limit instruction pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudgetLimits { pub updated_heap_bytes: u32, pub compute_unit_limit: u32, @@ -33,7 +34,7 @@ pub struct ComputeBudgetLimits { impl Default for ComputeBudgetLimits { fn default() -> Self { ComputeBudgetLimits { - updated_heap_bytes: u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap(), + updated_heap_bytes: MIN_HEAP_FRAME_BYTES, compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, compute_unit_price: 0, loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, @@ -122,7 +123,7 @@ pub fn process_compute_budget_instructions<'a>( // sanitize limits let updated_heap_bytes = requested_heap_size - .unwrap_or(u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()) // loader's default heap_size + .unwrap_or(MIN_HEAP_FRAME_BYTES) // loader's default heap_size .min(MAX_HEAP_FRAME_BYTES); let compute_unit_limit = updated_compute_unit_limit @@ -147,8 +148,7 @@ pub fn process_compute_budget_instructions<'a>( } fn sanitize_requested_heap_size(bytes: u32) -> bool { - (u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()..=MAX_HEAP_FRAME_BYTES).contains(&bytes) - && bytes % 1024 == 0 + (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes % 1024 == 0 } #[cfg(test)] @@ -377,7 +377,7 @@ mod tests { test!( &[ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), + ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES), ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), ], Err(TransactionError::DuplicateInstruction(2)) diff --git a/core/Cargo.toml b/core/Cargo.toml index 09aaa981310e68..d396697613fb54 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -46,6 +46,7 @@ solana-accounts-db = { workspace = true } solana-bloom = { workspace = true } 
solana-client = { workspace = true } solana-compute-budget = { workspace = true } +solana-connection-cache = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index 14010e8d91a875..6dd9eb5b8bf0fa 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -22,7 +22,6 @@ use { solana_runtime::bank::Bank, solana_sdk::{ account::{Account, ReadableAccount}, - feature_set::apply_cost_tracker_during_replay, signature::Keypair, signer::Signer, stake_history::Epoch, @@ -97,7 +96,7 @@ struct BenchFrame { signal_receiver: Receiver<(Arc, (Entry, u64))>, } -fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame { +fn setup() -> BenchFrame { let mint_total = u64::MAX; let GenesisConfigInfo { mut genesis_config, .. @@ -109,10 +108,6 @@ fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame { let mut bank = Bank::new_for_benches(&genesis_config); - if !apply_cost_tracker_during_replay { - bank.deactivate_feature(&apply_cost_tracker_during_replay::id()); - } - // Allow arbitrary transaction processing time for the purposes of this bench bank.ns_per_slot = u128::MAX; @@ -139,11 +134,7 @@ fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame { } } -fn bench_process_and_record_transactions( - bencher: &mut Bencher, - batch_size: usize, - apply_cost_tracker_during_replay: bool, -) { +fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usize) { const TRANSACTIONS_PER_ITERATION: usize = 64; assert_eq!( TRANSACTIONS_PER_ITERATION % batch_size, @@ -161,7 +152,7 @@ fn bench_process_and_record_transactions( poh_recorder, poh_service, signal_receiver: _signal_receiver, - } = setup(apply_cost_tracker_during_replay); + } = setup(); let consumer = create_consumer(&poh_recorder); let transactions = create_transactions(&bank, 2_usize.pow(20)); let mut transaction_iter 
= transactions.chunks(batch_size); @@ -186,30 +177,15 @@ fn bench_process_and_record_transactions( #[bench] fn bench_process_and_record_transactions_unbatched(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 1, true); + bench_process_and_record_transactions(bencher, 1); } #[bench] fn bench_process_and_record_transactions_half_batch(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 32, true); + bench_process_and_record_transactions(bencher, 32); } #[bench] fn bench_process_and_record_transactions_full_batch(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 64, true); -} - -#[bench] -fn bench_process_and_record_transactions_unbatched_disable_tx_cost_update(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 1, false); -} - -#[bench] -fn bench_process_and_record_transactions_half_batch_disable_tx_cost_update(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 32, false); -} - -#[bench] -fn bench_process_and_record_transactions_full_batch_disable_tx_cost_update(bencher: &mut Bencher) { - bench_process_and_record_transactions(bencher, 64, false); + bench_process_and_record_transactions(bencher, 64); } diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 2637d38c883f14..0ca1304a4560f5 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -6,7 +6,7 @@ use { }, solana_measure::measure_us, solana_runtime::{ - bank::{Bank, CommitTransactionCounts, TransactionBalancesSet}, + bank::{Bank, ExecutedTransactionCounts, TransactionBalancesSet}, bank_utils, prioritization_fee_cache::PrioritizationFeeCache, transaction_batch::TransactionBatch, @@ -92,10 +92,10 @@ impl Committer { execution_results, last_blockhash, lamports_per_signature, - CommitTransactionCounts { - committed_transactions_count: executed_transactions_count as u64, - committed_non_vote_transactions_count: 
executed_non_vote_transactions_count as u64, - committed_with_failure_result_count: executed_transactions_count + ExecutedTransactionCounts { + executed_transactions_count: executed_transactions_count as u64, + executed_non_vote_transactions_count: executed_non_vote_transactions_count as u64, + executed_with_failure_result_count: executed_transactions_count .saturating_sub(executed_with_successful_result_count) as u64, signature_count, diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 4a4b4e729bc114..f83ca6724d415e 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -318,6 +318,7 @@ impl ConsumeWorkerMetrics { invalid_account_for_fee, invalid_account_index, invalid_program_for_execution, + invalid_compute_budget, not_allowed_during_cluster_maintenance, invalid_writable_account, invalid_rent_paying_account, @@ -371,6 +372,9 @@ impl ConsumeWorkerMetrics { self.error_metrics .invalid_program_for_execution .fetch_add(*invalid_program_for_execution, Ordering::Relaxed); + self.error_metrics + .invalid_compute_budget + .fetch_add(*invalid_compute_budget, Ordering::Relaxed); self.error_metrics .not_allowed_during_cluster_maintenance .fetch_add(*not_allowed_during_cluster_maintenance, Ordering::Relaxed); @@ -561,6 +565,7 @@ struct ConsumeWorkerTransactionErrorMetrics { invalid_account_for_fee: AtomicUsize, invalid_account_index: AtomicUsize, invalid_program_for_execution: AtomicUsize, + invalid_compute_budget: AtomicUsize, not_allowed_during_cluster_maintenance: AtomicUsize, invalid_writable_account: AtomicUsize, invalid_rent_paying_account: AtomicUsize, @@ -644,6 +649,12 @@ impl ConsumeWorkerTransactionErrorMetrics { .swap(0, Ordering::Relaxed), i64 ), + ( + "invalid_compute_budget", + self.invalid_compute_budget + .swap(0, Ordering::Relaxed), + i64 + ), ( "not_allowed_during_cluster_maintenance", self.not_allowed_during_cluster_maintenance diff --git 
a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 2fd8524d7454b8..6ae0881da45d8a 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -518,27 +518,14 @@ impl Consumer { // Costs of all transactions are added to the cost_tracker before processing. // To ensure accurate tracking of compute units, transactions that ultimately - // were not included in the block should have their cost removed. - QosService::remove_costs( + // were not included in the block should have their cost removed, the rest + // should update with their actually consumed units. + QosService::remove_or_update_costs( transaction_qos_cost_results.iter(), commit_transactions_result.as_ref().ok(), bank, ); - // once feature `apply_cost_tracker_during_replay` is activated, leader shall no longer - // adjust block with executed cost (a behavior more inline with bankless leader), it - // should use requested, or default `compute_unit_limit` as transaction's execution cost. 
- if !bank - .feature_set - .is_active(&feature_set::apply_cost_tracker_during_replay::id()) - { - QosService::update_costs( - transaction_qos_cost_results.iter(), - commit_transactions_result.as_ref().ok(), - bank, - ); - } - retryable_transaction_indexes .iter_mut() .for_each(|x| *x += chunk_offset); @@ -606,6 +593,7 @@ impl Consumer { &mut execute_and_commit_timings.execute_timings, TransactionProcessingConfig { account_overrides: None, + check_program_modification_slot: bank.check_program_modification_slot(), compute_budget: bank.compute_budget(), log_messages_bytes_limit: self.log_messages_bytes_limit, limit_to_load_programs: true, @@ -1431,16 +1419,6 @@ mod tests { #[test] fn test_bank_process_and_record_transactions_cost_tracker() { - for apply_cost_tracker_during_replay_enabled in [true, false] { - bank_process_and_record_transactions_cost_tracker( - apply_cost_tracker_during_replay_enabled, - ); - } - } - - fn bank_process_and_record_transactions_cost_tracker( - apply_cost_tracker_during_replay_enabled: bool, - ) { solana_logger::setup(); let GenesisConfigInfo { genesis_config, @@ -1449,9 +1427,6 @@ mod tests { } = create_slow_genesis_config(10_000); let mut bank = Bank::new_for_tests(&genesis_config); bank.ns_per_slot = u128::MAX; - if !apply_cost_tracker_during_replay_enabled { - bank.deactivate_feature(&feature_set::apply_cost_tracker_during_replay::id()); - } let bank = bank.wrap_with_bank_forks_for_tests().0; let pubkey = solana_sdk::pubkey::new_rand(); @@ -1520,8 +1495,7 @@ mod tests { // TEST: it's expected that the allocation will execute but the transfer will not // because of a shared write-lock between mint_keypair. 
Ensure only the first transaction - // takes compute units in the block AND the apply_cost_tracker_during_replay_enabled feature - // is applied correctly + // takes compute units in the block let allocate_keypair = Keypair::new(); let transactions = sanitize_transactions(vec![ system_transaction::allocate( @@ -1560,7 +1534,7 @@ mod tests { ); assert_eq!(retryable_transaction_indexes, vec![1]); - let expected_block_cost = if !apply_cost_tracker_during_replay_enabled { + let expected_block_cost = { let (actual_programs_execution_cost, actual_loaded_accounts_data_size_cost) = match commit_transactions_result.first().unwrap() { CommitTransactionDetails::Committed { @@ -1586,8 +1560,6 @@ mod tests { } block_cost + cost.sum() - } else { - block_cost + CostModel::calculate_cost(&transactions[0], &bank.feature_set).sum() }; assert_eq!(get_block_cost(), expected_block_cost); diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 492ba94504558b..acb34b8b4dc1e9 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -10,7 +10,8 @@ use { next_leader::{next_leader, next_leader_tpu_vote}, tracer_packet_stats::TracerPacketStats, }, - solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection}, + solana_client::connection_cache::ConnectionCache, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::Packet}, diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index bd64b81f77ec3a..cb4561d50d8f08 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -3,7 +3,6 @@ use { solana_perf::packet::Packet, solana_runtime::compute_budget_details::{ComputeBudgetDetails, GetComputeBudgetDetails}, 
solana_sdk::{ - feature_set, hash::Hash, message::Message, pubkey::Pubkey, @@ -15,7 +14,7 @@ use { VersionedTransaction, }, }, - std::{cmp::Ordering, collections::HashSet, mem::size_of, sync::Arc}, + std::{cmp::Ordering, collections::HashSet, mem::size_of}, thiserror::Error, }; @@ -106,7 +105,6 @@ impl ImmutableDeserializedPacket { // messages. pub fn build_sanitized_transaction( &self, - _feature_set: &Arc, votes_only: bool, address_loader: impl AddressLoader, reserved_account_keys: &HashSet, diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 44ed870e3fb86b..084e4125b842ae 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -203,7 +203,10 @@ impl LatestUnprocessedVotes { let pubkey = vote.pubkey(); let slot = vote.slot(); let timestamp = vote.timestamp(); - if let Some(latest_vote) = self.get_entry(pubkey) { + + let with_latest_vote = |latest_vote: &RwLock, + vote: LatestValidatorVotePacket| + -> Option { let (latest_slot, latest_timestamp) = latest_vote .read() .map(|vote| (vote.slot(), vote.timestamp())) @@ -225,15 +228,24 @@ impl LatestUnprocessedVotes { } } } - return Some(vote); - } + Some(vote) + }; - // Should have low lock contention because this is only hit on the first few blocks of startup - // and when a new vote account starts voting. - let mut latest_votes_per_pubkey = self.latest_votes_per_pubkey.write().unwrap(); - latest_votes_per_pubkey.insert(pubkey, Arc::new(RwLock::new(vote))); - self.num_unprocessed_votes.fetch_add(1, Ordering::Relaxed); - None + if let Some(latest_vote) = self.get_entry(pubkey) { + with_latest_vote(&latest_vote, vote) + } else { + // Grab write-lock to insert new vote. 
+ match self.latest_votes_per_pubkey.write().unwrap().entry(pubkey) { + std::collections::hash_map::Entry::Occupied(entry) => { + with_latest_vote(entry.get(), vote) + } + std::collections::hash_map::Entry::Vacant(entry) => { + entry.insert(Arc::new(RwLock::new(vote))); + self.num_unprocessed_votes.fetch_add(1, Ordering::Relaxed); + None + } + } + } } #[cfg(test)] @@ -280,7 +292,6 @@ impl LatestUnprocessedVotes { let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); if let Some(sanitized_vote_transaction) = deserialized_vote_packet .build_sanitized_transaction( - &bank.feature_set, bank.vote_only_bank(), bank.as_ref(), bank.get_reserved_account_keys(), @@ -682,6 +693,47 @@ mod tests { ); } + #[test] + fn test_update_latest_vote_race() { + // There was a race condition in updating the same pubkey in the hashmap + // when the entry does not initially exist. + let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + + const NUM_VOTES: usize = 100; + let keypairs = Arc::new( + (0..NUM_VOTES) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect_vec(), + ); + + // Insert votes in parallel + let insert_vote = |latest_unprocessed_votes: &LatestUnprocessedVotes, + keypairs: &Arc>, + i: usize| { + let vote = from_slots(vec![(i as u64, 1)], VoteSource::Gossip, &keypairs[i], None); + latest_unprocessed_votes.update_latest_vote(vote); + }; + + let hdl = Builder::new() + .spawn({ + let latest_unprocessed_votes = latest_unprocessed_votes.clone(); + let keypairs = keypairs.clone(); + move || { + for i in 0..NUM_VOTES { + insert_vote(&latest_unprocessed_votes, &keypairs, i); + } + } + }) + .unwrap(); + + for i in 0..NUM_VOTES { + insert_vote(&latest_unprocessed_votes, &keypairs, i); + } + + hdl.join().unwrap(); + assert_eq!(NUM_VOTES, latest_unprocessed_votes.len()); + } + #[test] fn test_simulate_threads() { let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); diff --git a/core/src/banking_stage/qos_service.rs 
b/core/src/banking_stage/qos_service.rs index 23d4ebd97619bd..bf8b7df963e392 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -132,39 +132,28 @@ impl QosService { (select_results, num_included) } - /// Updates the transaction costs for committed transactions. Does not handle removing costs - /// for transactions that didn't get recorded or committed - pub fn update_costs<'a>( - transaction_cost_results: impl Iterator>, - transaction_committed_status: Option<&Vec>, - bank: &Bank, - ) { - if let Some(transaction_committed_status) = transaction_committed_status { - Self::update_committed_transaction_costs( - transaction_cost_results, - transaction_committed_status, - bank, - ) - } - } - - /// Removes transaction costs from the cost tracker if not committed or recorded - pub fn remove_costs<'a>( + /// Removes transaction costs from the cost tracker if not committed or recorded, or + /// updates the transaction costs for committed transactions. + pub fn remove_or_update_costs<'a>( transaction_cost_results: impl Iterator>, transaction_committed_status: Option<&Vec>, bank: &Bank, ) { match transaction_committed_status { - Some(transaction_committed_status) => Self::remove_uncommitted_transaction_costs( - transaction_cost_results, - transaction_committed_status, - bank, - ), - None => Self::remove_transaction_costs(transaction_cost_results, bank), + Some(transaction_committed_status) => { + Self::remove_or_update_recorded_transaction_costs( + transaction_cost_results, + transaction_committed_status, + bank, + ) + } + None => Self::remove_unrecorded_transaction_costs(transaction_cost_results, bank), } } - fn remove_uncommitted_transaction_costs<'a>( + /// For recorded transactions, remove units reserved by uncommitted transaction, or update + /// units for committed transactions. 
+ fn remove_or_update_recorded_transaction_costs<'a>( transaction_cost_results: impl Iterator>, transaction_committed_status: &Vec, bank: &Bank, @@ -178,45 +167,31 @@ impl QosService { // checked for update if let Ok(tx_cost) = tx_cost { num_included += 1; - if *transaction_committed_details == CommitTransactionDetails::NotCommitted { - cost_tracker.remove(tx_cost) + match transaction_committed_details { + CommitTransactionDetails::Committed { + compute_units, + loaded_accounts_data_size, + } => { + cost_tracker.update_execution_cost( + tx_cost, + *compute_units, + CostModel::calculate_loaded_accounts_data_size_cost( + *loaded_accounts_data_size, + &bank.feature_set, + ), + ); + } + CommitTransactionDetails::NotCommitted => { + cost_tracker.remove(tx_cost); + } } } }); cost_tracker.sub_transactions_in_flight(num_included); } - fn update_committed_transaction_costs<'a>( - transaction_cost_results: impl Iterator>, - transaction_committed_status: &Vec, - bank: &Bank, - ) { - let mut cost_tracker = bank.write_cost_tracker().unwrap(); - transaction_cost_results - .zip(transaction_committed_status) - .for_each(|(estimated_tx_cost, transaction_committed_details)| { - // Only transactions that the qos service included have to be - // checked for update - if let Ok(estimated_tx_cost) = estimated_tx_cost { - if let CommitTransactionDetails::Committed { - compute_units, - loaded_accounts_data_size, - } = transaction_committed_details - { - cost_tracker.update_execution_cost( - estimated_tx_cost, - *compute_units, - CostModel::calculate_loaded_accounts_data_size_cost( - *loaded_accounts_data_size, - &bank.feature_set, - ), - ) - } - } - }); - } - - fn remove_transaction_costs<'a>( + /// Remove reserved units for transaction batch that unsuccessfully recorded. 
+ fn remove_unrecorded_transaction_costs<'a>( transaction_cost_results: impl Iterator>, bank: &Bank, ) { @@ -784,18 +759,11 @@ mod tests { + (execute_units_adjustment + loaded_accounts_data_size_cost_adjustment) * transaction_count; - // All transactions are committed, no costs should be removed - QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank); - assert_eq!( - total_txs_cost, - bank.read_cost_tracker().unwrap().block_cost() - ); - assert_eq!( - transaction_count, - bank.read_cost_tracker().unwrap().transaction_count() + QosService::remove_or_update_costs( + qos_cost_results.iter(), + Some(&committed_status), + &bank, ); - - QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank); assert_eq!( final_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -843,18 +811,7 @@ mod tests { bank.read_cost_tracker().unwrap().block_cost() ); - // update costs doesn't impact non-committed - QosService::update_costs(qos_cost_results.iter(), None, &bank); - assert_eq!( - total_txs_cost, - bank.read_cost_tracker().unwrap().block_cost() - ); - assert_eq!( - transaction_count, - bank.read_cost_tracker().unwrap().transaction_count() - ); - - QosService::remove_costs(qos_cost_results.iter(), None, &bank); + QosService::remove_or_update_costs(qos_cost_results.iter(), None, &bank); assert_eq!(0, bank.read_cost_tracker().unwrap().block_cost()); assert_eq!(0, bank.read_cost_tracker().unwrap().transaction_count()); } @@ -926,8 +883,11 @@ mod tests { }) .collect(); - QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank); - QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank); + QosService::remove_or_update_costs( + qos_cost_results.iter(), + Some(&committed_status), + &bank, + ); // assert the final block cost let mut expected_final_txs_count = 0u64; diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index 
6d6b908249f168..4b1efc015e2bbf 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -1,15 +1,15 @@ use { + ahash::AHashSet, solana_sdk::{message::SanitizedMessage, pubkey::Pubkey}, - std::collections::HashSet, }; /// Wrapper struct to accumulate locks for a batch of transactions. #[derive(Debug, Default)] pub struct ReadWriteAccountSet { /// Set of accounts that are locked for read - read_set: HashSet, + read_set: AHashSet, /// Set of accounts that are locked for write - write_set: HashSet, + write_set: AHashSet, } impl ReadWriteAccountSet { diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 6dde96420c1d19..045d2cca1d8dba 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -358,12 +358,18 @@ impl PrioGraphScheduler { ) { let thread_id = self.in_flight_tracker.complete_batch(batch_id); for transaction in transactions { - let account_locks = transaction.get_account_locks_unchecked(); - self.account_locks.unlock_accounts( - account_locks.writable.into_iter(), - account_locks.readonly.into_iter(), - thread_id, - ); + let message = transaction.message(); + let account_keys = message.account_keys(); + let write_account_locks = account_keys + .iter() + .enumerate() + .filter_map(|(index, key)| message.is_writable(index).then_some(key)); + let read_account_locks = account_keys + .iter() + .enumerate() + .filter_map(|(index, key)| (!message.is_writable(index)).then_some(key)); + self.account_locks + .unlock_accounts(write_account_locks, read_account_locks, thread_id); } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 57a52f58a5c1cc..20462a2a1b42b2 100644 --- 
a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -475,7 +475,6 @@ impl SchedulerController { let bank = self.bank_forks.read().unwrap().working_bank(); let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch()); let transaction_account_lock_limit = bank.get_transaction_account_lock_limit(); - let feature_set = &bank.feature_set; let vote_only = bank.vote_only_bank(); const CHUNK_SIZE: usize = 128; @@ -493,7 +492,6 @@ impl SchedulerController { .filter_map(|packet| { packet .build_sanitized_transaction( - feature_set, vote_only, bank.as_ref(), bank.get_reserved_account_keys(), diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index ebb91773c49923..2bec44dbd0ea5e 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -316,7 +316,6 @@ mod tests { transaction::{SimpleAddressLoader, Transaction}, }, solana_vote_program::vote_transaction, - std::sync::Arc, }; fn simple_deserialized_packet() -> DeserializedPacket { @@ -465,7 +464,6 @@ mod tests { #[test] fn test_transaction_from_deserialized_packet() { - use solana_sdk::feature_set::FeatureSet; let keypair = Keypair::new(); let transfer_tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default()); @@ -488,7 +486,6 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), @@ -499,7 +496,6 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), 
@@ -519,7 +515,6 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), @@ -530,7 +525,6 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), @@ -550,7 +544,6 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), @@ -561,7 +554,6 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( - &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index dd429cdc03e9f0..bc2dec2a82c7c1 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -155,7 +155,6 @@ fn consume_scan_should_process_packet( // Try to sanitize the packet let (maybe_sanitized_transaction, sanitization_time_us) = measure_us!(packet .build_sanitized_transaction( - &bank.feature_set, bank.vote_only_bank(), bank, bank.get_reserved_account_keys(), @@ -775,7 +774,6 @@ impl ThreadLocalUnprocessedPackets { .filter_map(|(packet_index, deserialized_packet)| { deserialized_packet .build_sanitized_transaction( - &bank.feature_set, bank.vote_only_bank(), bank, bank.get_reserved_account_keys(), diff --git a/core/src/commitment_service.rs 
b/core/src/commitment_service.rs index e7e349d05c57a4..cae40c587cb572 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -8,7 +8,7 @@ use { bank::Bank, commitment::{BlockCommitment, BlockCommitmentCache, CommitmentSlots, VOTE_THRESHOLD_SIZE}, }, - solana_sdk::clock::Slot, + solana_sdk::{clock::Slot, pubkey::Pubkey}, solana_vote_program::vote_state::VoteState, std::{ cmp::max, @@ -26,14 +26,23 @@ pub struct CommitmentAggregationData { bank: Arc, root: Slot, total_stake: Stake, + // The latest local vote state of the node running this service. + // Used for commitment aggregation if the node's vote account is staked. + node_vote_state: (Pubkey, VoteState), } impl CommitmentAggregationData { - pub fn new(bank: Arc, root: Slot, total_stake: Stake) -> Self { + pub fn new( + bank: Arc, + root: Slot, + total_stake: Stake, + node_vote_state: (Pubkey, VoteState), + ) -> Self { Self { bank, root, total_stake, + node_vote_state, } } } @@ -139,8 +148,11 @@ impl AggregateCommitmentService { aggregation_data: CommitmentAggregationData, ancestors: Vec, ) -> CommitmentSlots { - let (block_commitment, rooted_stake) = - Self::aggregate_commitment(&ancestors, &aggregation_data.bank); + let (block_commitment, rooted_stake) = Self::aggregate_commitment( + &ancestors, + &aggregation_data.bank, + &aggregation_data.node_vote_state, + ); let highest_super_majority_root = get_highest_super_majority_root(rooted_stake, aggregation_data.total_stake); @@ -173,6 +185,7 @@ impl AggregateCommitmentService { pub fn aggregate_commitment( ancestors: &[Slot], bank: &Bank, + (node_vote_pubkey, node_vote_state): &(Pubkey, VoteState), ) -> (HashMap, Vec<(Slot, u64)>) { assert!(!ancestors.is_empty()); @@ -183,11 +196,17 @@ impl AggregateCommitmentService { let mut commitment = HashMap::new(); let mut rooted_stake: Vec<(Slot, u64)> = Vec::new(); - for (lamports, account) in bank.vote_accounts().values() { + for (pubkey, (lamports, account)) in bank.vote_accounts().iter() { 
if *lamports == 0 { continue; } - if let Ok(vote_state) = account.vote_state().as_ref() { + let vote_state = if pubkey == node_vote_pubkey { + // Override old vote_state in bank with latest one for my own vote pubkey + Ok(node_vote_state) + } else { + account.vote_state() + }; + if let Ok(vote_state) = vote_state { Self::aggregate_commitment_for_vote_account( &mut commitment, &mut rooted_stake, @@ -382,8 +401,7 @@ mod tests { assert_eq!(rooted_stake[0], (root, lamports)); } - #[test] - fn test_aggregate_commitment_validity() { + fn do_test_aggregate_commitment_validity(with_node_vote_state: bool) { let ancestors = vec![3, 4, 5, 7, 9, 10, 11]; let GenesisConfigInfo { mut genesis_config, .. @@ -447,9 +465,11 @@ mod tests { let mut vote_state1 = vote_state::from(&vote_account1).unwrap(); process_slot_vote_unchecked(&mut vote_state1, 3); process_slot_vote_unchecked(&mut vote_state1, 5); - let versioned = VoteStateVersions::new_current(vote_state1); - vote_state::to(&versioned, &mut vote_account1).unwrap(); - bank.store_account(&pk1, &vote_account1); + if !with_node_vote_state { + let versioned = VoteStateVersions::new_current(vote_state1.clone()); + vote_state::to(&versioned, &mut vote_account1).unwrap(); + bank.store_account(&pk1, &vote_account1); + } let mut vote_state2 = vote_state::from(&vote_account2).unwrap(); process_slot_vote_unchecked(&mut vote_state2, 9); @@ -470,8 +490,18 @@ mod tests { vote_state::to(&versioned, &mut vote_account4).unwrap(); bank.store_account(&pk4, &vote_account4); - let (commitment, rooted_stake) = - AggregateCommitmentService::aggregate_commitment(&ancestors, &bank); + let node_vote_pubkey = if with_node_vote_state { + pk1 + } else { + // Use some random pubkey as dummy to suppress the override. 
+ solana_sdk::pubkey::new_rand() + }; + + let (commitment, rooted_stake) = AggregateCommitmentService::aggregate_commitment( + &ancestors, + &bank, + &(node_vote_pubkey, vote_state1), + ); for a in ancestors { if a <= 3 { @@ -499,17 +529,21 @@ mod tests { assert_eq!(get_highest_super_majority_root(rooted_stake, 100), 1) } + #[test] + fn test_aggregate_commitment_validity_with_node_vote_state() { + do_test_aggregate_commitment_validity(true) + } + + #[test] + fn test_aggregate_commitment_validity_without_node_vote_state() { + do_test_aggregate_commitment_validity(false); + } + #[test] fn test_highest_super_majority_root_advance() { - fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Bank) -> Slot { + fn get_vote_state(vote_pubkey: Pubkey, bank: &Bank) -> VoteState { let vote_account = bank.get_vote_account(&vote_pubkey).unwrap(); - let slot = vote_account - .vote_state() - .as_ref() - .unwrap() - .root_slot - .unwrap(); - slot + vote_account.vote_state().cloned().unwrap() } let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests()); @@ -547,10 +581,10 @@ mod tests { } let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_pubkey = validator_vote_keypairs.vote_keypair.pubkey(); + let root = get_vote_state(vote_pubkey, &working_bank) + .root_slot + .unwrap(); for x in 0..root { bank_forks .write() @@ -579,10 +613,8 @@ mod tests { bank34.process_transaction(&vote33).unwrap(); let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_state = get_vote_state(vote_pubkey, &working_bank); + let root = vote_state.root_slot.unwrap(); let ancestors = working_bank.status_cache_ancestors(); let _ = AggregateCommitmentService::update_commitment_cache( &block_commitment_cache, @@ -590,6 +622,7 @@ mod 
tests { bank: working_bank, root: 0, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state.clone()), }, ancestors, ); @@ -628,6 +661,7 @@ mod tests { bank: working_bank, root: 1, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state), }, ancestors, ); @@ -662,10 +696,9 @@ mod tests { } let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_state = + get_vote_state(validator_vote_keypairs.vote_keypair.pubkey(), &working_bank); + let root = vote_state.root_slot.unwrap(); let ancestors = working_bank.status_cache_ancestors(); let _ = AggregateCommitmentService::update_commitment_cache( &block_commitment_cache, @@ -673,6 +706,7 @@ mod tests { bank: working_bank, root: 0, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state), }, ancestors, ); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 4861b7893e5554..627e0175c89e71 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -76,7 +76,7 @@ use { timing::timestamp, transaction::Transaction, }, - solana_vote_program::vote_state::VoteTransaction, + solana_vote_program::vote_state::{VoteState, VoteTransaction}, std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, @@ -2406,10 +2406,28 @@ impl ReplayStage { } let mut update_commitment_cache_time = Measure::start("update_commitment_cache"); + // Send (voted) bank along with the updated vote account state for this node, the vote + // state is always newer than the one in the bank by definition, because banks can't + // contain vote transactions which are voting on its own slot. + // + // It should be acceptable to aggressively use the vote for our own _local view_ of + // commitment aggregation, although it's not guaranteed that the new vote transaction is + // observed by other nodes at this point. 
+ // + // The justification stems from the assumption of the sensible voting behavior from the + // consensus subsystem. That's because it means there would be a slashing possibility + // otherwise. + // + // This behavior isn't significant normally for mainnet-beta, because staked nodes aren't + // servicing RPC requests. However, this eliminates artificial 1-slot delay of the + // `finalized` confirmation if a node is materially staked and servicing RPC requests at + // the same time for development purposes. + let node_vote_state = (*vote_account_pubkey, tower.vote_state.clone()); Self::update_commitment_cache( bank.clone(), bank_forks.read().unwrap().root(), progress.get_fork_stats(bank.slot()).unwrap().total_stake, + node_vote_state, lockouts_sender, ); update_commitment_cache_time.stop(); @@ -2699,11 +2717,15 @@ impl ReplayStage { bank: Arc, root: Slot, total_stake: Stake, + node_vote_state: (Pubkey, VoteState), lockouts_sender: &Sender, ) { - if let Err(e) = - lockouts_sender.send(CommitmentAggregationData::new(bank, root, total_stake)) - { + if let Err(e) = lockouts_sender.send(CommitmentAggregationData::new( + bank, + root, + total_stake, + node_vote_state, + )) { trace!("lockouts_sender failed: {:?}", e); } } @@ -3009,10 +3031,14 @@ impl ReplayStage { .expect("Bank fork progress entry missing for completed bank"); let replay_stats = bank_progress.replay_stats.clone(); + let mut is_unified_scheduler_enabled = false; if let Some((result, completed_execute_timings)) = bank.wait_for_completed_scheduler() { + // It's guaranteed that wait_for_completed_scheduler() returns Some(_), iff the + // unified scheduler is enabled for the bank. 
+ is_unified_scheduler_enabled = true; let metrics = ExecuteBatchesInternalMetrics::new_with_timings_from_all_threads( completed_execute_timings, ); @@ -3020,7 +3046,7 @@ impl ReplayStage { .write() .unwrap() .batch_execute - .accumulate(metrics); + .accumulate(metrics, is_unified_scheduler_enabled); if let Err(err) = result { let root = bank_forks.read().unwrap().root(); @@ -3219,6 +3245,7 @@ impl ReplayStage { r_replay_progress.num_entries, r_replay_progress.num_shreds, bank_complete_time.as_us(), + is_unified_scheduler_enabled, ); execute_timings.accumulate(&r_replay_stats.batch_execute.totals); } else { @@ -4393,10 +4420,10 @@ impl ReplayStage { fn record_rewards(bank: &Bank, rewards_recorder_sender: &Option) { if let Some(rewards_recorder_sender) = rewards_recorder_sender { - let rewards = bank.rewards.read().unwrap(); - if !rewards.is_empty() { + let rewards = bank.get_rewards_and_num_partitions(); + if rewards.should_record() { rewards_recorder_sender - .send(RewardsMessage::Batch((bank.slot(), rewards.clone()))) + .send(RewardsMessage::Batch((bank.slot(), rewards))) .unwrap_or_else(|err| warn!("rewards_recorder_sender failed: {:?}", err)); } rewards_recorder_sender @@ -5276,13 +5303,14 @@ pub(crate) mod tests { #[test] fn test_replay_commitment_cache() { - fn leader_vote(vote_slot: Slot, bank: &Bank, pubkey: &Pubkey) { + fn leader_vote(vote_slot: Slot, bank: &Bank, pubkey: &Pubkey) -> (Pubkey, VoteState) { let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = vote_state::from(&leader_vote_account).unwrap(); vote_state::process_slot_vote_unchecked(&mut vote_state, vote_slot); - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_current(vote_state.clone()); vote_state::to(&versioned, &mut leader_vote_account).unwrap(); bank.store_account(pubkey, &leader_vote_account); + (*pubkey, vote_state) } let leader_pubkey = solana_sdk::pubkey::new_rand(); @@ -5348,11 +5376,12 @@ pub(crate) 
mod tests { } let arc_bank = bank_forks.read().unwrap().get(i).unwrap(); - leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); + let node_vote_state = leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); ReplayStage::update_commitment_cache( arc_bank.clone(), 0, leader_lamports, + node_vote_state, &lockouts_sender, ); arc_bank.freeze(); diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index 3fc2c8dc5b5149..044fd2de53adc7 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -1,8 +1,9 @@ use { crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, solana_ledger::blockstore::Blockstore, - solana_sdk::{clock::Slot, pubkey::Pubkey, reward_info::RewardInfo}, - solana_transaction_status::Reward, + solana_runtime::bank::KeyedRewardsAndNumPartitions, + solana_sdk::clock::Slot, + solana_transaction_status::{Reward, RewardsAndNumPartitions}, std::{ sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -13,7 +14,7 @@ use { }, }; -pub type RewardsBatch = (Slot, Vec<(Pubkey, RewardInfo)>); +pub type RewardsBatch = (Slot, KeyedRewardsAndNumPartitions); pub type RewardsRecorderReceiver = Receiver; pub type RewardsRecorderSender = Sender; @@ -55,7 +56,13 @@ impl RewardsRecorderService { blockstore: &Blockstore, ) -> Result<(), RecvTimeoutError> { match rewards_receiver.recv_timeout(Duration::from_secs(1))? 
{ - RewardsMessage::Batch((slot, rewards)) => { + RewardsMessage::Batch(( + slot, + KeyedRewardsAndNumPartitions { + keyed_rewards: rewards, + num_partitions, + }, + )) => { let rpc_rewards = rewards .into_iter() .map(|(pubkey, reward_info)| Reward { @@ -68,7 +75,13 @@ impl RewardsRecorderService { .collect(); blockstore - .write_rewards(slot, rpc_rewards) + .write_rewards( + slot, + RewardsAndNumPartitions { + rewards: rpc_rewards, + num_partitions, + }, + ) .expect("Expect database write to succeed"); } RewardsMessage::Complete(slot) => { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 6ba61f578b5671..4dcd7bbfa3e589 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -352,6 +352,7 @@ impl Tvu { leader_schedule_cache.clone(), bank_forks.clone(), duplicate_slots_sender, + tvu_config.shred_version, ), ); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index b7f196661ba150..e4a67cbe993169 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -3,10 +3,8 @@ use { rand::{thread_rng, Rng}, - solana_client::{ - connection_cache::{ConnectionCache, Protocol}, - tpu_connection::TpuConnection, - }, + solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_gossip::cluster_info::ClusterInfo, solana_poh::poh_recorder::PohRecorder, std::{ diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index ba40811d033a31..c444ad0885566f 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -15,7 +15,7 @@ use { solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{self, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -43,13 +43,45 @@ impl CostModel { 
Self::get_signature_cost(&mut tx_cost, transaction); Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); - tx_cost.account_data_size = Self::calculate_account_data_size(transaction); + tx_cost.allocated_accounts_data_size = + Self::calculate_allocated_accounts_data_size(transaction); debug!("transaction {:?} has cost {:?}", transaction, tx_cost); TransactionCost::Transaction(tx_cost) } } + // Calculate executed transaction CU cost, with actual execution and loaded accounts size + // costs. + pub fn calculate_cost_for_executed_transaction( + transaction: &SanitizedTransaction, + actual_programs_execution_cost: u64, + actual_loaded_accounts_data_size_bytes: usize, + feature_set: &FeatureSet, + ) -> TransactionCost { + if transaction.is_simple_vote_transaction() { + TransactionCost::SimpleVote { + writable_accounts: Self::get_writable_accounts(transaction), + } + } else { + let mut tx_cost = UsageCostDetails::new_with_default_capacity(); + + Self::get_signature_cost(&mut tx_cost, transaction); + Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); + Self::get_instructions_data_cost(&mut tx_cost, transaction); + tx_cost.allocated_accounts_data_size = + Self::calculate_allocated_accounts_data_size(transaction); + + tx_cost.programs_execution_cost = actual_programs_execution_cost; + tx_cost.loaded_accounts_data_size_cost = Self::calculate_loaded_accounts_data_size_cost( + actual_loaded_accounts_data_size_bytes, + feature_set, + ); + + TransactionCost::Transaction(tx_cost) + } + } + fn get_signature_cost(tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction) { let signatures_count_detail = transaction.message().get_signature_details(); tx_cost.num_transaction_signatures = signatures_count_detail.num_transaction_signatures(); @@ -168,15 +200,25 @@ impl CostModel { tx_cost.data_bytes_cost = data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST; } + fn 
get_instructions_data_cost( + tx_cost: &mut UsageCostDetails, + transaction: &SanitizedTransaction, + ) { + let ix_data_bytes_len_total: u64 = transaction + .message() + .instructions() + .iter() + .map(|instruction| instruction.data.len() as u64) + .sum(); + + tx_cost.data_bytes_cost = ix_data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST; + } + pub fn calculate_loaded_accounts_data_size_cost( loaded_accounts_data_size: usize, - feature_set: &FeatureSet, + _feature_set: &FeatureSet, ) -> u64 { - if feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { - FeeStructure::calculate_memory_usage_cost(loaded_accounts_data_size, DEFAULT_HEAP_COST) - } else { - 0 - } + FeeStructure::calculate_memory_usage_cost(loaded_accounts_data_size, DEFAULT_HEAP_COST) } fn calculate_account_data_size_on_deserialized_system_instruction( @@ -222,7 +264,7 @@ impl CostModel { /// eventually, potentially determine account data size of all writable accounts /// at the moment, calculate account data size of account creation - fn calculate_account_data_size(transaction: &SanitizedTransaction) -> u64 { + fn calculate_allocated_accounts_data_size(transaction: &SanitizedTransaction) -> u64 { transaction .message() .program_instructions_iter() @@ -571,8 +613,6 @@ mod tests { let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS .get(&system_program::id()) .unwrap(); - // feature `include_loaded_accounts_data_size_in_fee_calculation` enabled, using - // default loaded_accounts_data_size_limit const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = solana_compute_budget::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES @@ -590,35 +630,6 @@ mod tests { ); } - #[test] - fn test_cost_model_calculate_cost_disabled_feature() { - let (mint_keypair, start_hash) = test_setup(); - let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &Keypair::new().pubkey(), - 2, - start_hash, - )); - 
- let feature_set = FeatureSet::default(); - assert!(!feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id())); - let expected_account_cost = WRITE_LOCK_UNITS * 2; - let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS - .get(&system_program::id()) - .unwrap(); - // feature `include_loaded_accounts_data_size_in_fee_calculation` not enabled - let expected_loaded_accounts_data_size_cost = 0; - - let tx_cost = CostModel::calculate_cost(&tx, &feature_set); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); - assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost()); - assert_eq!(2, tx_cost.writable_accounts().len()); - assert_eq!( - expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost() - ); - } - #[test] fn test_cost_model_calculate_cost_with_limit() { let (mint_keypair, start_hash) = test_setup(); @@ -636,7 +647,6 @@ mod tests { )); let feature_set = FeatureSet::all_enabled(); - assert!(feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id())); let expected_account_cost = WRITE_LOCK_UNITS * 2; let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS .get(&system_program::id()) @@ -644,8 +654,6 @@ mod tests { + BUILT_IN_INSTRUCTION_COSTS .get(&compute_budget::id()) .unwrap(); - // feature `include_loaded_accounts_data_size_in_fee_calculation` is enabled, accounts data - // size limit is set. 
let expected_loaded_accounts_data_size_cost = (data_limit as u64) / (32 * 1024) * 8; let tx_cost = CostModel::calculate_cost(&tx, &feature_set); diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index f891cb22a5697b..49ccbbb035f9bb 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -67,7 +67,7 @@ pub struct CostTracker { block_cost: u64, vote_cost: u64, transaction_count: u64, - account_data_size: u64, + allocated_accounts_data_size: u64, transaction_signature_count: u64, secp256k1_instruction_signature_count: u64, ed25519_instruction_signature_count: u64, @@ -96,7 +96,7 @@ impl Default for CostTracker { block_cost: 0, vote_cost: 0, transaction_count: 0, - account_data_size: 0, + allocated_accounts_data_size: 0, transaction_signature_count: 0, secp256k1_instruction_signature_count: 0, ed25519_instruction_signature_count: 0, @@ -111,7 +111,7 @@ impl CostTracker { self.block_cost = 0; self.vote_cost = 0; self.transaction_count = 0; - self.account_data_size = 0; + self.allocated_accounts_data_size = 0; self.transaction_signature_count = 0; self.secp256k1_instruction_signature_count = 0; self.ed25519_instruction_signature_count = 0; @@ -213,7 +213,11 @@ impl CostTracker { // ("number_of_accounts", self.number_of_accounts() as i64, i64), // ("costliest_account", costliest_account.to_string(), String), // ("costliest_account_cost", costliest_account_cost as i64, i64), - // ("account_data_size", self.account_data_size, i64), + // ( + // "allocated_accounts_data_size", + // self.allocated_accounts_data_size, + // i64 + // ), // ( // "transaction_signature_count", // self.transaction_signature_count, @@ -265,11 +269,11 @@ impl CostTracker { return Err(CostTrackerError::WouldExceedAccountMaxLimit); } - let account_data_size = self - .account_data_size - .saturating_add(tx_cost.account_data_size()); + let allocated_accounts_data_size = self + .allocated_accounts_data_size + 
.saturating_add(tx_cost.allocated_accounts_data_size()); - if account_data_size > MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA { + if allocated_accounts_data_size > MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA { return Err(CostTrackerError::WouldExceedAccountDataBlockLimit); } @@ -292,7 +296,10 @@ impl CostTracker { // Returns the highest account cost for all write-lock accounts `TransactionCost` updated fn add_transaction_cost(&mut self, tx_cost: &TransactionCost) -> u64 { - saturating_add_assign!(self.account_data_size, tx_cost.account_data_size()); + saturating_add_assign!( + self.allocated_accounts_data_size, + tx_cost.allocated_accounts_data_size() + ); saturating_add_assign!(self.transaction_count, 1); saturating_add_assign!( self.transaction_signature_count, @@ -312,9 +319,9 @@ impl CostTracker { fn remove_transaction_cost(&mut self, tx_cost: &TransactionCost) { let cost = tx_cost.sum(); self.sub_transaction_execution_cost(tx_cost, cost); - self.account_data_size = self - .account_data_size - .saturating_sub(tx_cost.account_data_size()); + self.allocated_accounts_data_size = self + .allocated_accounts_data_size + .saturating_sub(tx_cost.allocated_accounts_data_size()); self.transaction_count = self.transaction_count.saturating_sub(1); self.transaction_signature_count = self .transaction_signature_count @@ -504,7 +511,7 @@ mod tests { let (mint_keypair, start_hash) = test_setup(); let (_tx, mut tx_cost) = build_simple_transaction(&mint_keypair, &start_hash); if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost { - usage_cost.account_data_size = 1; + usage_cost.allocated_accounts_data_size = 1; } else { unreachable!(); } @@ -513,9 +520,9 @@ mod tests { // build testee to have capacity for one simple transaction let mut testee = CostTracker::new(cost, cost, cost); assert!(testee.would_fit(&tx_cost).is_ok()); - let old = testee.account_data_size; + let old = testee.allocated_accounts_data_size; testee.add_transaction_cost(&tx_cost); - assert_eq!(old + 1, 
testee.account_data_size); + assert_eq!(old + 1, testee.allocated_accounts_data_size); } #[test] @@ -652,12 +659,12 @@ mod tests { let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost1 { - usage_cost.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; + usage_cost.allocated_accounts_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; } else { unreachable!(); } if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost2 { - usage_cost.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; + usage_cost.allocated_accounts_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; } else { unreachable!(); } @@ -945,7 +952,7 @@ mod tests { assert_eq!(1, cost_tracker.number_of_accounts()); assert_eq!(cost, cost_tracker.block_cost); assert_eq!(0, cost_tracker.vote_cost); - assert_eq!(0, cost_tracker.account_data_size); + assert_eq!(0, cost_tracker.allocated_accounts_data_size); cost_tracker.remove_transaction_cost(&tx_cost); // assert cost_tracker is reverted to default @@ -953,6 +960,6 @@ mod tests { assert_eq!(0, cost_tracker.number_of_accounts()); assert_eq!(0, cost_tracker.block_cost); assert_eq!(0, cost_tracker.vote_cost); - assert_eq!(0, cost_tracker.account_data_size); + assert_eq!(0, cost_tracker.allocated_accounts_data_size); } } diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index ee280c873312f9..4951e50036ca8b 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -56,10 +56,10 @@ impl TransactionCost { } } - pub fn account_data_size(&self) -> u64 { + pub fn allocated_accounts_data_size(&self) -> u64 { match self { Self::SimpleVote { .. 
} => 0, - Self::Transaction(usage_cost) => usage_cost.account_data_size, + Self::Transaction(usage_cost) => usage_cost.allocated_accounts_data_size, } } @@ -125,7 +125,7 @@ pub struct UsageCostDetails { pub data_bytes_cost: u64, pub programs_execution_cost: u64, pub loaded_accounts_data_size_cost: u64, - pub account_data_size: u64, + pub allocated_accounts_data_size: u64, pub num_transaction_signatures: u64, pub num_secp256k1_instruction_signatures: u64, pub num_ed25519_instruction_signatures: u64, @@ -140,7 +140,7 @@ impl Default for UsageCostDetails { data_bytes_cost: 0u64, programs_execution_cost: 0u64, loaded_accounts_data_size_cost: 0u64, - account_data_size: 0u64, + allocated_accounts_data_size: 0u64, num_transaction_signatures: 0u64, num_secp256k1_instruction_signatures: 0u64, num_ed25519_instruction_signatures: 0u64, @@ -160,7 +160,7 @@ impl PartialEq for UsageCostDetails { && self.data_bytes_cost == other.data_bytes_cost && self.programs_execution_cost == other.programs_execution_cost && self.loaded_accounts_data_size_cost == other.loaded_accounts_data_size_cost - && self.account_data_size == other.account_data_size + && self.allocated_accounts_data_size == other.allocated_accounts_data_size && self.num_transaction_signatures == other.num_transaction_signatures && self.num_secp256k1_instruction_signatures == other.num_secp256k1_instruction_signatures diff --git a/curves/curve25519/.gitignore b/curves/curve25519/.gitignore new file mode 100644 index 00000000000000..b645148aa9118c --- /dev/null +++ b/curves/curve25519/.gitignore @@ -0,0 +1 @@ +/farf/ diff --git a/curves/curve25519/Cargo.toml b/curves/curve25519/Cargo.toml new file mode 100644 index 00000000000000..fb04c29b60171b --- /dev/null +++ b/curves/curve25519/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-curve25519" +description = "Solana Curve25519 Syscalls" +documentation = "https://docs.rs/solana-curve25519" +version = { workspace = true } +authors = { workspace = true } +repository = { 
workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } +solana-program = { workspace = true } +thiserror = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dependencies] +curve25519-dalek = { workspace = true, features = ["serde"] } diff --git a/zk-token-sdk/src/curve25519/curve_syscall_traits.rs b/curves/curve25519/src/curve_syscall_traits.rs similarity index 100% rename from zk-token-sdk/src/curve25519/curve_syscall_traits.rs rename to curves/curve25519/src/curve_syscall_traits.rs diff --git a/zk-token-sdk/src/curve25519/edwards.rs b/curves/curve25519/src/edwards.rs similarity index 98% rename from zk-token-sdk/src/curve25519/edwards.rs rename to curves/curve25519/src/edwards.rs index 0dd019b1910d0a..4de6bf81456601 100644 --- a/zk-token-sdk/src/curve25519/edwards.rs +++ b/curves/curve25519/src/edwards.rs @@ -1,4 +1,4 @@ -use bytemuck::{Pod, Zeroable}; +use bytemuck_derive::{Pod, Zeroable}; pub use target_arch::*; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] @@ -9,7 +9,7 @@ pub struct PodEdwardsPoint(pub [u8; 32]); mod target_arch { use { super::*, - crate::curve25519::{ + crate::{ curve_syscall_traits::{GroupOperations, MultiScalarMultiplication, PointValidation}, errors::Curve25519Error, scalar::PodScalar, @@ -134,10 +134,11 @@ mod target_arch { mod target_arch { use { super::*, - crate::curve25519::{ + crate::{ curve_syscall_traits::{ADD, CURVE25519_EDWARDS, MUL, SUB}, scalar::PodScalar, }, + bytemuck::Zeroable, }; pub fn validate_edwards(point: &PodEdwardsPoint) -> bool { @@ -245,7 +246,7 @@ mod target_arch { mod tests { use { super::*, - crate::curve25519::scalar::PodScalar, + crate::scalar::PodScalar, curve25519_dalek::{ constants::ED25519_BASEPOINT_POINT as G, edwards::EdwardsPoint, traits::Identity, }, diff --git a/zk-token-sdk/src/curve25519/errors.rs 
b/curves/curve25519/src/errors.rs similarity index 100% rename from zk-token-sdk/src/curve25519/errors.rs rename to curves/curve25519/src/errors.rs diff --git a/curves/curve25519/src/lib.rs b/curves/curve25519/src/lib.rs new file mode 100644 index 00000000000000..d0ab9d4709da11 --- /dev/null +++ b/curves/curve25519/src/lib.rs @@ -0,0 +1,8 @@ +#![allow(clippy::arithmetic_side_effects, clippy::op_ref)] +//! Syscall operations for curve25519 + +pub mod curve_syscall_traits; +pub mod edwards; +pub mod errors; +pub mod ristretto; +pub mod scalar; diff --git a/zk-token-sdk/src/curve25519/ristretto.rs b/curves/curve25519/src/ristretto.rs similarity index 98% rename from zk-token-sdk/src/curve25519/ristretto.rs rename to curves/curve25519/src/ristretto.rs index 772441a32aa65f..e0b47c15f1dfbe 100644 --- a/zk-token-sdk/src/curve25519/ristretto.rs +++ b/curves/curve25519/src/ristretto.rs @@ -1,4 +1,4 @@ -use bytemuck::{Pod, Zeroable}; +use bytemuck_derive::{Pod, Zeroable}; pub use target_arch::*; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] @@ -9,7 +9,7 @@ pub struct PodRistrettoPoint(pub [u8; 32]); mod target_arch { use { super::*, - crate::curve25519::{ + crate::{ curve_syscall_traits::{GroupOperations, MultiScalarMultiplication, PointValidation}, errors::Curve25519Error, scalar::PodScalar, @@ -135,10 +135,11 @@ mod target_arch { mod target_arch { use { super::*, - crate::curve25519::{ + crate::{ curve_syscall_traits::{ADD, CURVE25519_RISTRETTO, MUL, SUB}, scalar::PodScalar, }, + bytemuck::Zeroable, }; pub fn validate_ristretto(point: &PodRistrettoPoint) -> bool { @@ -247,7 +248,7 @@ mod target_arch { mod tests { use { super::*, - crate::curve25519::scalar::PodScalar, + crate::scalar::PodScalar, curve25519_dalek::{ constants::RISTRETTO_BASEPOINT_POINT as G, ristretto::RistrettoPoint, traits::Identity, }, diff --git a/zk-token-sdk/src/curve25519/scalar.rs b/curves/curve25519/src/scalar.rs similarity index 52% rename from 
zk-token-sdk/src/curve25519/scalar.rs rename to curves/curve25519/src/scalar.rs index e154851902a043..f840a27c1b4980 100644 --- a/zk-token-sdk/src/curve25519/scalar.rs +++ b/curves/curve25519/src/scalar.rs @@ -1,4 +1,4 @@ -pub use bytemuck::{Pod, Zeroable}; +pub use bytemuck_derive::{Pod, Zeroable}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] #[repr(transparent)] @@ -6,7 +6,7 @@ pub struct PodScalar(pub [u8; 32]); #[cfg(not(target_os = "solana"))] mod target_arch { - use {super::*, crate::curve25519::errors::Curve25519Error, curve25519_dalek::scalar::Scalar}; + use {super::*, crate::errors::Curve25519Error, curve25519_dalek::scalar::Scalar}; impl From<&Scalar> for PodScalar { fn from(scalar: &Scalar) -> Self { @@ -21,4 +21,18 @@ mod target_arch { Scalar::from_canonical_bytes(pod.0).ok_or(Curve25519Error::PodConversion) } } + + impl From for PodScalar { + fn from(scalar: Scalar) -> Self { + Self(scalar.to_bytes()) + } + } + + impl TryFrom for Scalar { + type Error = Curve25519Error; + + fn try_from(pod: PodScalar) -> Result { + Scalar::from_canonical_bytes(pod.0).ok_or(Curve25519Error::PodConversion) + } + } } diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index fbcd894660ccd8..c9a5c682d40592 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -19,26 +19,26 @@ on your preferred workflow: - Open your favorite Terminal application -- Install the Solana release - [LATEST_SOLANA_RELEASE_VERSION](https://github.com/anza-xyz/agave/releases/tag/LATEST_SOLANA_RELEASE_VERSION) +- Install the Agave release + [LATEST_AGAVE_RELEASE_VERSION](https://github.com/anza-xyz/agave/releases/tag/LATEST_AGAVE_RELEASE_VERSION) on your machine by running: ```bash -sh -c "$(curl -sSfL https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/install)" +sh -c "$(curl -sSfL https://release.anza.xyz/LATEST_AGAVE_RELEASE_VERSION/install)" ``` -- You can replace `LATEST_SOLANA_RELEASE_VERSION` with the release tag matching +- You can 
replace `LATEST_AGAVE_RELEASE_VERSION` with the release tag matching the software version of your desired release, or use one of the three symbolic channel names: `stable`, `beta`, or `edge`. - The following output indicates a successful update: ```text -downloading LATEST_SOLANA_RELEASE_VERSION installer +downloading LATEST_AGAVE_RELEASE_VERSION installer Configuration: /home/solana/.config/solana/install/config.yml Active release directory: /home/solana/.local/share/solana/install/active_release -* Release version: LATEST_SOLANA_RELEASE_VERSION -* Release URL: https://github.com/anza-xyz/agave/releases/download/LATEST_SOLANA_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 +* Release version: LATEST_AGAVE_RELEASE_VERSION +* Release URL: https://github.com/anza-xyz/agave/releases/download/LATEST_AGAVE_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 Update successful ``` @@ -74,7 +74,7 @@ solana --version installer into a temporary directory: ```bash -cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" +cmd /c "curl https://release.anza.xyz/LATEST_AGAVE_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest @@ -82,7 +82,7 @@ cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-inst to allow the program to run. ```bash -C:\agave-install-tmp\agave-install-init.exe LATEST_SOLANA_RELEASE_VERSION +C:\agave-install-tmp\agave-install-init.exe LATEST_AGAVE_RELEASE_VERSION ``` - When the installer is finished, press Enter. 
diff --git a/docs/src/runtime/zk-docs/ciphertext_validity.pdf b/docs/src/runtime/zk-docs/ciphertext_validity.pdf new file mode 100644 index 00000000000000..41d9c318112499 Binary files /dev/null and b/docs/src/runtime/zk-docs/ciphertext_validity.pdf differ diff --git a/docs/src/runtime/zk-docs/zero_proof.pdf b/docs/src/runtime/zk-docs/zero_proof.pdf index 1415a5d8a9e9f6..8227b131f1f204 100644 Binary files a/docs/src/runtime/zk-docs/zero_proof.pdf and b/docs/src/runtime/zk-docs/zero_proof.pdf differ diff --git a/docs/src/runtime/zk-token-proof.md b/docs/src/runtime/zk-elgamal-proof.md similarity index 65% rename from docs/src/runtime/zk-token-proof.md rename to docs/src/runtime/zk-elgamal-proof.md index 35384f17c9396b..e3372f03f383fb 100644 --- a/docs/src/runtime/zk-token-proof.md +++ b/docs/src/runtime/zk-elgamal-proof.md @@ -1,25 +1,22 @@ --- -title: Solana ZK Token Proof Program -pagination_label: Native ZK Token Proof Program -sidebar_label: ZK Token Proof Program +title: Solana ZK ElGamal Proof Program +pagination_label: Native ZK ElGamal Proof Program +sidebar_label: ZK ElGamal Proof Program --- - -The native Solana ZK Token proof program verifies a number of zero-knowledge +The native Solana ZK ElGamal Proof program verifies a number of zero-knowledge proofs that are tailored to work with Pedersen commitments and ElGamal encryption over the elliptic curve -[curve25519](https://www.rfc-editor.org/rfc/rfc7748#section-4.1). The program -was originally designed to verify the zero-knowledge proofs that are required -for the [SPL Token 2022](https://spl.solana.com/token-2022) program. However, -the zero-knowledge proofs in the proof program can be used in more general -contexts outside of SPL Token 2022 as well. +[curve25519](https://www.rfc-editor.org/rfc/rfc7748#section-4.1). The proof +verification instructions in the ZK ElGamal Proof program are flexibly designed +so that they can be combined to enable a number different applications. 
-- Program id: `ZkTokenProof1111111111111111111111111111111` +- Program id: `ZkE1Gama1Proof11111111111111111111111111111` - Instructions: - [ProofInstruction](https://github.com/solana-labs/solana/blob/master/zk-token-sdk/src/zk_token_proof_instruction.rs) + [ProofInstruction](https://github.com/anza-xyz/agave/blob/master/zk-sdk/src/zk_elgamal_proof_program/instruction.rs) ### Pedersen commitments and ElGamal encryption -The ZK Token proof program verifies zero-knowledge proofs for Pedersen +The ZK ElGamal Proof program verifies zero-knowledge proofs for Pedersen commitments and ElGamal encryption, which are common cryptographic primitives that are incorporated in many existing cryptographic protocols. @@ -48,24 +45,25 @@ treatment of Pedersen commitment and the (twisted) ElGamal encryption schemes. of the SPL Token 2022 confidential extension - Pretty Good Confidentiality [research paper](https://eprint.iacr.org/2019/319) -The ZK Token proof program contains proof verification instructions on various +The ZK ElGamal Proof program contains proof verification instructions on various zero-knowledge proofs for working with the Pedersen commitment and ElGamal -encryption schemes. For example, the `VerifyRangeProofU64` instruction verifies -a zero-knowledge proof certifying that a Pedersen commitment contains an -unsigned 64-bit number as the message. The `VerifyPubkeyValidity` instruction +encryption schemes. For example, the `VerifyBatchedRangeProofU64` instruction +verifies a zero-knowledge proof certifying that a Pedersen commitment contains +an unsigned 64-bit number as the message. The `VerifyPubkeyValidity` instruction verifies a zero-knowledge proof certifying that an ElGamal public key is a properly formed public key. 
### Context Data -The proof data associated with each of the ZK Token proof instructions are +The proof data associated with each of the ZK ElGamal Proof instructions are logically divided into two parts: - The context component contains the data that a zero-knowledge proof - is certifying. For example, context component for a `VerifyRangeProofU64` - instruction data is the Pedersen commitment that holds an unsigned 64-bit - number. The context component for a `VerifyPubkeyValidity` instruction data is - the ElGamal public key that is properly formed. + is certifying. For example, context component for a + `VerifyBatchedRangeProofU64` instruction data is the Pedersen commitment that + holds an unsigned 64-bit number. The context component for a + `VerifyPubkeyValidity` instruction data is the ElGamal public key that is + properly formed. - The proof component contains the actual mathematical pieces that certify different properties of the context data. @@ -90,7 +88,8 @@ to fit inside a single transaction. ## Proof Instructions -The ZK Token proof program supports the following list of zero-knowledge proofs. +The ZK ElGamal Proof program supports the following list of zero-knowledge +proofs. #### Proofs on ElGamal encryption @@ -99,14 +98,14 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ElGamal public-key validity proof instruction certifies that an ElGamal public-key is a properly formed public key. - Mathematical description and proof of security: - [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/pubkey_proof.pdf) + [[Notes]](https://github.com/anza-xyz/agave/blob/master/docs/src/runtime/zk-docs/pubkey_proof.pdf) -- `VerifyZeroBalance`: +- `VerifyZeroCiphertext`: - - The zero-balance proof certifies that an ElGamal ciphertext encrypts the + - The zero-ciphertext proof certifies that an ElGamal ciphertext encrypts the number zero. 
- Mathematical description and proof of security: - [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/zero_proof.pdf) + [[Notes]](https://github.com/anza-xyz/agave/blob/master/docs/src/runtime/zk-docs/zero_proof.pdf) @@ -115,11 +114,20 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ciphertext-commitment equality proof certifies that an ElGamal ciphertext and a Pedersen commitment encode the same message. - Mathematical description and proof of security: - [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf) + [[Notes]](https://github.com/anza-xyz/agave/blob/master/docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf) - `VerifyCiphertextCiphertextEquality`: - The ciphertext-ciphertext equality proof certifies that two ElGamal ciphertexts encrypt the same message. - Mathematical description and proof of security: - [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf) + [[Notes]](https://github.com/anza-xyz/agave/blob/master/docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf) + +#### Ciphertext Validity proofs + +- `VerifyGroupedCiphertextValidity`: + + - The grouped ciphertext validity proof certifies that a grouped ElGamal + ciphertext is well-formed + - Mathematical description and proof of security: + [[Notes]](https://github.com/anza-xyz/agave/blob/master/docs/src/runtime/zk-docs/ciphertext_validity.pdf) diff --git a/dos/Cargo.toml b/dos/Cargo.toml index a59dce7b337239..535653e3386e6c 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -19,6 +19,7 @@ rand = { workspace = true } serde = { workspace = true } solana-bench-tps = { workspace = true } solana-client = { workspace = true } +solana-connection-cache = { workspace = true } solana-core = { workspace = true } solana-faucet = { workspace = true } solana-gossip = { 
workspace = true } diff --git a/dos/src/main.rs b/dos/src/main.rs index 033de4bf4b49da..0b299718467134 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -46,10 +46,8 @@ use { log::*, rand::{thread_rng, Rng}, solana_bench_tps::bench::generate_and_fund_keypairs, - solana_client::{ - connection_cache::ConnectionCache, tpu_client::TpuClientWrapper, - tpu_connection::TpuConnection, - }, + solana_client::{connection_cache::ConnectionCache, tpu_client::TpuClientWrapper}, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair}, solana_dos::cli::*, solana_gossip::{ diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index b9a7ed964d8a5b..60df441a7e3cef 100644 --- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -33,11 +33,9 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl { pubkey: &Pubkey, write_version: u64, ) { - if let Some(account_info) = - self.accountinfo_from_shared_account_data(account, txn, pubkey, write_version) - { - self.notify_plugins_of_account_update(account_info, slot, false); - } + let account_info = + self.accountinfo_from_shared_account_data(account, txn, pubkey, write_version); + self.notify_plugins_of_account_update(account_info, slot, false); } fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) { @@ -54,9 +52,8 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl { 100000 ); - if let Some(account_info) = account { - self.notify_plugins_of_account_update(account_info, slot, true); - } + self.notify_plugins_of_account_update(account, slot, true); + measure_all.stop(); inc_new_counter_debug!( @@ -110,8 +107,8 @@ impl AccountsUpdateNotifierImpl { txn: &'a Option<&'a SanitizedTransaction>, pubkey: &'a Pubkey, write_version: u64, 
- ) -> Option> { - Some(ReplicaAccountInfoV3 { + ) -> ReplicaAccountInfoV3<'a> { + ReplicaAccountInfoV3 { pubkey: pubkey.as_ref(), lamports: account.lamports(), owner: account.owner().as_ref(), @@ -120,13 +117,13 @@ impl AccountsUpdateNotifierImpl { data: account.data(), write_version, txn: *txn, - }) + } } fn accountinfo_from_stored_account_meta<'a>( &self, stored_account_meta: &'a StoredAccountMeta, - ) -> Option> { + ) -> ReplicaAccountInfoV3<'a> { // We do not need to rely on the specific write_version read from the append vec. // So, overwrite the write_version with something that works. // There is already only entry per pubkey. @@ -134,7 +131,7 @@ impl AccountsUpdateNotifierImpl { // so it doesn't matter what value it gets here. // Passing 0 for everyone's write_version is sufficiently correct. let write_version = 0; - Some(ReplicaAccountInfoV3 { + ReplicaAccountInfoV3 { pubkey: stored_account_meta.pubkey().as_ref(), lamports: stored_account_meta.lamports(), owner: stored_account_meta.owner().as_ref(), @@ -143,7 +140,7 @@ impl AccountsUpdateNotifierImpl { data: stored_account_meta.data(), write_version, txn: None, - }) + } } fn notify_plugins_of_account_update( diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 8b98b5ff3580ba..a9a14b5557239a 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -13,12 +13,6 @@ //! //! 
Bank needs to provide an interface for us to query the stake weight -#[deprecated( - since = "1.10.6", - note = "Please use `solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}` instead" -)] -#[allow(deprecated)] -pub use solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}; use { crate::{ cluster_info_metrics::{ @@ -55,7 +49,7 @@ use { solana_measure::measure::Measure, solana_net_utils::{ bind_common, bind_common_in_range, bind_in_range, bind_two_in_range_with_offset, - find_available_port_in_range, multi_bind_in_range, PortRange, + find_available_port_in_range, multi_bind_in_range, PortRange, VALIDATOR_PORT_RANGE, }, solana_perf::{ data_budget::DataBudget, @@ -1260,6 +1254,7 @@ impl ClusterInfo { other_payload, None:: Option>, // Leader schedule DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + self.my_shred_version(), )?; Ok(()) } @@ -3639,6 +3634,7 @@ mod tests { Some(leader_schedule), timestamp(), DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + version, ) .unwrap() .collect(); diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 3300f3ec4d1d79..788e9f9af87a02 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -90,6 +90,7 @@ impl CrdsGossip { leader_schedule: Option, // Maximum serialized size of each DuplicateShred chunk payload. max_payload_size: usize, + shred_version: u16, ) -> Result<(), duplicate_shred::Error> where F: FnOnce(Slot) -> Option, @@ -114,6 +115,7 @@ impl CrdsGossip { leader_schedule, timestamp(), max_payload_size, + shred_version, )?; // Find the index of oldest duplicate shred. 
let mut num_dup_shreds = 0; diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 4c270f61421aed..adfd89595a03db 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -69,6 +69,8 @@ pub enum Error { InvalidErasureMetaConflict, #[error("invalid last index conflict")] InvalidLastIndexConflict, + #[error("invalid shred version: {0}")] + InvalidShredVersion(u16), #[error("invalid signature")] InvalidSignature, #[error("invalid size limit")] @@ -93,6 +95,7 @@ pub enum Error { /// Check that `shred1` and `shred2` indicate a valid duplicate proof /// - Must be for the same slot +/// - Must match the expected shred version /// - Must both sigverify for the correct leader /// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type` /// - If `shred1` and `shred2` share the same index they must be not equal @@ -101,7 +104,12 @@ pub enum Error { /// LAST_SHRED_IN_SLOT, however the other shred must have a higher index. /// - If `shred1` and `shred2` do not share the same index and are coding shreds /// verify that they have conflicting erasure metas -fn check_shreds(leader_schedule: Option, shred1: &Shred, shred2: &Shred) -> Result<(), Error> +fn check_shreds( + leader_schedule: Option, + shred1: &Shred, + shred2: &Shred, + shred_version: u16, +) -> Result<(), Error> where F: FnOnce(Slot) -> Option, { @@ -109,6 +117,13 @@ where return Err(Error::SlotMismatch); } + if shred1.version() != shred_version { + return Err(Error::InvalidShredVersion(shred1.version())); + } + if shred2.version() != shred_version { + return Err(Error::InvalidShredVersion(shred2.version())); + } + if let Some(leader_schedule) = leader_schedule { let slot_leader = leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?; @@ -168,6 +183,7 @@ pub(crate) fn from_shred( leader_schedule: Option, wallclock: u64, max_size: usize, // Maximum serialized size of each DuplicateShred. 
+ shred_version: u16, ) -> Result, Error> where F: FnOnce(Slot) -> Option, @@ -176,7 +192,7 @@ where return Err(Error::InvalidDuplicateShreds); } let other_shred = Shred::new_from_serialized_shred(other_payload)?; - check_shreds(leader_schedule, &shred, &other_shred)?; + check_shreds(leader_schedule, &shred, &other_shred, shred_version)?; let slot = shred.slot(); let proof = DuplicateSlotProof { shred1: shred.into_payload(), @@ -229,6 +245,7 @@ fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result pub(crate) fn into_shreds( slot_leader: &Pubkey, chunks: impl IntoIterator, + shred_version: u16, ) -> Result<(Shred, Shred), Error> { let mut chunks = chunks.into_iter(); let DuplicateShred { @@ -264,10 +281,16 @@ pub(crate) fn into_shreds( } let shred1 = Shred::new_from_serialized_shred(proof.shred1)?; let shred2 = Shred::new_from_serialized_shred(proof.shred2)?; + if shred1.slot() != slot || shred2.slot() != slot { Err(Error::SlotMismatch) } else { - check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?; + check_shreds( + Some(|_| Some(slot_leader).copied()), + &shred1, + &shred2, + shred_version, + )?; Ok((shred1, shred2)) } } @@ -490,11 +513,12 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .unwrap() .collect(); assert!(chunks.len() > 4); - let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap(); assert_eq!(shred1, shred3); assert_eq!(shred2, shred4); } @@ -545,6 +569,7 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .err() .unwrap(), @@ -563,7 +588,9 @@ pub(crate) mod tests { assert!(chunks.len() > 4); assert_matches!( - into_shreds(&leader.pubkey(), chunks).err().unwrap(), + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), Error::InvalidDuplicateSlotProof ); } @@ -632,11 +659,12 @@ pub(crate) mod 
tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .unwrap() .collect(); assert!(chunks.len() > 4); - let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap(); assert_eq!(shred1, &shred3); assert_eq!(shred2, &shred4); } @@ -740,6 +768,7 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .err() .unwrap(), @@ -758,7 +787,9 @@ pub(crate) mod tests { assert!(chunks.len() > 4); assert_matches!( - into_shreds(&leader.pubkey(), chunks).err().unwrap(), + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), Error::InvalidLastIndexConflict ); } @@ -817,11 +848,12 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .unwrap() .collect(); assert!(chunks.len() > 4); - let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap(); assert_eq!(shred1, shred3); assert_eq!(shred2, shred4); } @@ -898,6 +930,7 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .err() .unwrap(), @@ -916,7 +949,9 @@ pub(crate) mod tests { assert!(chunks.len() > 4); assert_matches!( - into_shreds(&leader.pubkey(), chunks).err().unwrap(), + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), Error::InvalidErasureMetaConflict ); } @@ -989,11 +1024,12 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // max_size + version, ) .unwrap() .collect(); assert!(chunks.len() > 4); - let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap(); assert_eq!(shred1, shred3); assert_eq!(shred2, shred4); } @@ -1080,6 +1116,7 @@ pub(crate) mod tests { Some(leader_schedule), rng.gen(), // wallclock 512, // 
max_size + version, ) .err() .unwrap(), @@ -1098,9 +1135,124 @@ pub(crate) mod tests { assert!(chunks.len() > 4); assert_matches!( - into_shreds(&leader.pubkey(), chunks).err().unwrap(), + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), Error::ShredTypeMismatch ); } } + + #[test] + fn test_shred_version() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..31_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + // Wrong shred version 1 + let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap(); + let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + // Wrong shred version 2 + let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap(); + let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + let test_cases = vec![ + // One correct shred version, one wrong + (coding_shreds[0].clone(), wrong_coding_shreds_1[0].clone()), + (coding_shreds[0].clone(), wrong_data_shreds_1[0].clone()), + (data_shreds[0].clone(), wrong_coding_shreds_1[0].clone()), + (data_shreds[0].clone(), wrong_data_shreds_1[0].clone()), + // Both wrong shred version + ( + wrong_coding_shreds_2[0].clone(), + wrong_coding_shreds_1[0].clone(), + ), + ( + wrong_coding_shreds_2[0].clone(), + wrong_data_shreds_1[0].clone(), + ), + ( + 
wrong_data_shreds_2[0].clone(), + wrong_coding_shreds_1[0].clone(), + ), + ( + wrong_data_shreds_2[0].clone(), + wrong_data_shreds_1[0].clone(), + ), + ]; + + for (shred1, shred2) in test_cases.into_iter() { + assert_matches!( + from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + version, + ) + .err() + .unwrap(), + Error::InvalidShredVersion(_) + ); + + let chunks: Vec<_> = from_shred_bypass_checks( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.clone(), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + + assert_matches!( + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), + Error::InvalidShredVersion(_) + ); + } + } } diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs index 883d0a7da00504..edf62aaf4276fc 100644 --- a/gossip/src/duplicate_shred_handler.rs +++ b/gossip/src/duplicate_shred_handler.rs @@ -48,6 +48,7 @@ pub struct DuplicateShredHandler { cached_slots_in_epoch: u64, // Used to notify duplicate consensus state machine duplicate_slots_sender: Sender, + shred_version: u16, } impl DuplicateShredHandlerTrait for DuplicateShredHandler { @@ -68,6 +69,7 @@ impl DuplicateShredHandler { leader_schedule_cache: Arc, bank_forks: Arc>, duplicate_slots_sender: Sender, + shred_version: u16, ) -> Self { Self { buffer: HashMap::<(Slot, Pubkey), BufferEntry>::default(), @@ -80,6 +82,7 @@ impl DuplicateShredHandler { leader_schedule_cache, bank_forks, duplicate_slots_sender, + shred_version, } } @@ -130,7 +133,8 @@ impl DuplicateShredHandler { .leader_schedule_cache .slot_leader_at(slot, /*bank:*/ None) .ok_or(Error::UnknownSlotLeader(slot))?; - let (shred1, shred2) = duplicate_shred::into_shreds(&pubkey, chunks)?; + let (shred1, shred2) = + duplicate_shred::into_shreds(&pubkey, chunks, self.shred_version)?; if 
!self.blockstore.has_duplicate_shreds_in_slot(slot) { self.blockstore.store_duplicate_slot( slot, @@ -255,16 +259,17 @@ mod tests { slot: u64, expected_error: Option, chunk_size: usize, + shred_version: u16, ) -> Result, Error> { let my_keypair = match expected_error { Some(Error::InvalidSignature) => Arc::new(Keypair::new()), _ => keypair, }; let mut rng = rand::thread_rng(); - let shredder = Shredder::new(slot, slot - 1, 0, 0).unwrap(); + let shredder = Shredder::new(slot, slot - 1, 0, shred_version).unwrap(); let next_shred_index = 353; let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder, &my_keypair); - let shredder1 = Shredder::new(slot + 1, slot, 0, 0).unwrap(); + let shredder1 = Shredder::new(slot + 1, slot, 0, shred_version).unwrap(); let shred2 = match expected_error { Some(Error::SlotMismatch) => { new_rand_shred(&mut rng, next_shred_index, &shredder1, &my_keypair) @@ -283,6 +288,7 @@ mod tests { None:: Option>, timestamp(), // wallclock chunk_size, // max_size + shred_version, )?; Ok(chunks) } @@ -295,6 +301,7 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let my_keypair = Arc::new(Keypair::new()); let my_pubkey = my_keypair.pubkey(); + let shred_version = 0; let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. 
} = genesis_config_info; let mut bank = Bank::new_for_tests(&genesis_config); @@ -322,6 +329,7 @@ mod tests { leader_schedule_cache, bank_forks_arc, sender, + shred_version, ); let chunks = create_duplicate_proof( my_keypair.clone(), @@ -329,6 +337,7 @@ mod tests { start_slot, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) .unwrap(); let chunks1 = create_duplicate_proof( @@ -337,6 +346,7 @@ mod tests { start_slot + 1, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) .unwrap(); assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot)); @@ -365,6 +375,7 @@ mod tests { start_slot + 2, Some(error), DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) { Err(_) => (), Ok(chunks) => { @@ -386,6 +397,7 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let my_keypair = Arc::new(Keypair::new()); let my_pubkey = my_keypair.pubkey(); + let shred_version = 0; let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; let mut bank = Bank::new_for_tests(&genesis_config); @@ -410,6 +422,7 @@ mod tests { leader_schedule_cache, bank_forks_arc, sender, + shred_version, ); // The feature will only be activated at Epoch 1. let start_slot: Slot = slots_in_epoch + 1; @@ -421,6 +434,7 @@ mod tests { start_slot, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE / 2, + shred_version, ) .unwrap(); for chunk in chunks { @@ -438,6 +452,7 @@ mod tests { future_slot, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) .unwrap(); for chunk in chunks { @@ -454,6 +469,7 @@ mod tests { start_slot, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) .unwrap(); // handle chunk 0 of the first proof. 
@@ -474,6 +490,7 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let my_keypair = Arc::new(Keypair::new()); let my_pubkey = my_keypair.pubkey(); + let shred_version = 0; let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; let mut bank = Bank::new_for_tests(&genesis_config); @@ -492,6 +509,7 @@ mod tests { leader_schedule_cache, bank_forks_arc, sender, + shred_version, ); let chunks = create_duplicate_proof( my_keypair.clone(), @@ -499,6 +517,7 @@ mod tests { 1, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + shred_version, ) .unwrap(); assert!(!blockstore.has_duplicate_shreds_in_slot(1)); diff --git a/gossip/src/main.rs b/gossip/src/main.rs index 2e98516583778d..7c8b4c51490c27 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -29,6 +29,23 @@ fn parse_matches() -> ArgMatches<'static> { .default_value("0") .help("Filter gossip nodes by this shred version"); + let gossip_port_arg = clap::Arg::with_name("gossip_port") + .long("gossip-port") + .value_name("PORT") + .takes_value(true) + .validator(is_port) + .help("Gossip port number for the node"); + + let gossip_host_arg = clap::Arg::with_name("gossip_host") + .long("gossip-host") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host) + .help( + "Gossip DNS name or IP address for the node to advertise in gossip \ + [default: ask --entrypoint, or 127.0.0.1 when --entrypoint is not provided]", + ); + App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) @@ -75,6 +92,8 @@ fn parse_matches() -> ArgMatches<'static> { .help("Timeout in seconds"), ) .arg(&shred_version_arg) + .arg(&gossip_port_arg) + .arg(&gossip_host_arg) .setting(AppSettings::DisableVersion), ) .subcommand( @@ -90,23 +109,6 @@ fn parse_matches() -> ArgMatches<'static> { .validator(solana_net_utils::is_host_port) .help("Rendezvous with the cluster 
at this entrypoint"), ) - .arg( - clap::Arg::with_name("gossip_port") - .long("gossip-port") - .value_name("PORT") - .takes_value(true) - .validator(is_port) - .help("Gossip port number for the node"), - ) - .arg( - clap::Arg::with_name("gossip_host") - .long("gossip-host") - .value_name("HOST") - .takes_value(true) - .validator(solana_net_utils::is_host) - .help("Gossip DNS name or IP address for the node to advertise in gossip \ - [default: ask --entrypoint, or 127.0.0.1 when --entrypoint is not provided]"), - ) .arg( Arg::with_name("identity") .short("i") @@ -144,6 +146,8 @@ fn parse_matches() -> ArgMatches<'static> { .help("Public key of a specific node to wait for"), ) .arg(&shred_version_arg) + .arg(&gossip_port_arg) + .arg(&gossip_host_arg) .arg( Arg::with_name("timeout") .long("timeout") @@ -226,21 +230,9 @@ fn process_spy(matches: &ArgMatches, socket_addr_space: SocketAddrSpace) -> std: let pubkeys = pubkeys_of(matches, "node_pubkey"); let shred_version = value_t_or_exit!(matches, "shred_version", u16); let identity_keypair = keypair_of(matches, "identity"); - let entrypoint_addr = parse_entrypoint(matches); + let gossip_addr = get_gossip_address(matches, entrypoint_addr); - let gossip_host = parse_gossip_host(matches, entrypoint_addr); - - let gossip_addr = SocketAddr::new( - gossip_host, - value_t!(matches, "gossip_port", u16).unwrap_or_else(|_| { - solana_net_utils::find_available_port_in_range( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - (0, 1), - ) - .expect("unable to find an available gossip port") - }), - ); let discover_timeout = Duration::from_secs(timeout.unwrap_or(u64::MAX)); let (_all_peers, validators) = discover( identity_keypair, @@ -280,9 +272,11 @@ fn process_rpc_url( ) -> std::io::Result<()> { let any = matches.is_present("any"); let all = matches.is_present("all"); - let entrypoint_addr = parse_entrypoint(matches); let timeout = value_t_or_exit!(matches, "timeout", u64); let shred_version = value_t_or_exit!(matches, "shred_version", u16); + 
let entrypoint_addr = parse_entrypoint(matches); + let gossip_addr = get_gossip_address(matches, entrypoint_addr); + let (_all_peers, validators) = discover( None, // keypair entrypoint_addr.as_ref(), @@ -290,7 +284,7 @@ fn process_rpc_url( Duration::from_secs(timeout), None, // find_nodes_by_pubkey entrypoint_addr.as_ref(), // find_node_by_gossip_addr - None, // my_gossip_addr + Some(&gossip_addr), // my_gossip_addr shred_version, socket_addr_space, )?; @@ -323,6 +317,20 @@ fn process_rpc_url( Ok(()) } +fn get_gossip_address(matches: &ArgMatches, entrypoint_addr: Option) -> SocketAddr { + let gossip_host = parse_gossip_host(matches, entrypoint_addr); + SocketAddr::new( + gossip_host, + value_t!(matches, "gossip_port", u16).unwrap_or_else(|_| { + solana_net_utils::find_available_port_in_range( + IpAddr::V4(Ipv4Addr::UNSPECIFIED), + (0, 1), + ) + .expect("unable to find an available gossip port") + }), + ) +} + fn main() -> Result<(), Box> { solana_logger::setup_with_default_filter(); diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 351aa44462cf1e..d9e55d9771b36c 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -48,6 +48,7 @@ solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } solana-transaction-status = { workspace = true } +solana-type-overrides = { workspace = true } solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index ce0c8a5d7d3f36..94d0baa4f7b4e5 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -1,13 +1,17 @@ use { crate::LEDGER_TOOL_DIRECTORY, - clap::{value_t, value_t_or_exit, values_t, values_t_or_exit, ArgMatches}, + clap::{value_t, value_t_or_exit, values_t, values_t_or_exit, Arg, ArgMatches}, solana_accounts_db::{ accounts_db::{AccountsDb, AccountsDbConfig}, 
accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, utils::create_and_canonicalize_directories, }, - solana_clap_utils::input_parsers::pubkeys_of, + solana_clap_utils::{ + hidden_unless_forced, + input_parsers::pubkeys_of, + input_validators::{is_parsable, is_pow2}, + }, solana_ledger::{ blockstore_processor::ProcessOptions, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, @@ -21,6 +25,141 @@ use { }, }; +/// Returns the arguments that configure AccountsDb +pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { + vec![ + Arg::with_name("account_paths") + .long("accounts") + .value_name("PATHS") + .takes_value(true) + .help( + "Persistent accounts location. May be specified multiple times. \ + [default: /accounts]", + ), + Arg::with_name("accounts_index_path") + .long("accounts-index-path") + .value_name("PATH") + .takes_value(true) + .multiple(true) + .help( + "Persistent accounts-index location. May be specified multiple times. \ + [default: /accounts_index]", + ), + Arg::with_name("accounts_hash_cache_path") + .long("accounts-hash-cache-path") + .value_name("PATH") + .takes_value(true) + .help( + "Use PATH as accounts hash cache location [default: /accounts_hash_cache]", + ), + Arg::with_name("accounts_index_bins") + .long("accounts-index-bins") + .value_name("BINS") + .validator(is_pow2) + .takes_value(true) + .help("Number of bins to divide the accounts index into"), + Arg::with_name("accounts_index_memory_limit_mb") + .long("accounts-index-memory-limit-mb") + .value_name("MEGABYTES") + .validator(is_parsable::) + .takes_value(true) + .help( + "How much memory the accounts index can consume. If this is exceeded, some \ + account index entries will be stored on disk.", + ), + Arg::with_name("disable_accounts_disk_index") + .long("disable-accounts-disk-index") + .help( + "Disable the disk-based accounts index. It is enabled by default. 
The entire \ + accounts index will be kept in memory.", + ) + .conflicts_with("accounts_index_memory_limit_mb"), + Arg::with_name("accounts_db_skip_shrink") + .long("accounts-db-skip-shrink") + .help( + "Enables faster starting of ledger-tool by skipping shrink. This option is for \ + use during testing.", + ), + Arg::with_name("accounts_db_verify_refcounts") + .long("accounts-db-verify-refcounts") + .help( + "Debug option to scan all AppendVecs and verify account index refcounts prior to \ + clean", + ) + .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in \ + bank delta hash calculation", + ) + .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_skip_initial_hash_calculation") + .long("accounts-db-skip-initial-hash-calculation") + .help("Do not verify accounts hash at startup.") + .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_ancient_append_vecs") + .long("accounts-db-ancient-append-vecs") + .value_name("SLOT-OFFSET") + .validator(is_parsable::) + .takes_value(true) + .help( + "AppendVecs that are older than (slots_per_epoch - SLOT-OFFSET) are squashed \ + together.", + ) + .hidden(hidden_unless_forced()), + ] + .into_boxed_slice() +} + +// For our current version of CLAP, the value passed to Arg::default_value() +// must be a &str. But, we can't convert an integer to a &str at compile time. 
+// So, declare this constant and enforce equality with the following unit test +// test_max_genesis_archive_unpacked_size_constant +const MAX_GENESIS_ARCHIVE_UNPACKED_SIZE_STR: &str = "10485760"; + +/// Returns the arguments that configure loading genesis +pub fn load_genesis_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name("max_genesis_archive_unpacked_size") + .long("max-genesis-archive-unpacked-size") + .value_name("NUMBER") + .takes_value(true) + .default_value(MAX_GENESIS_ARCHIVE_UNPACKED_SIZE_STR) + .help("maximum total uncompressed size of unpacked genesis archive") +} + +/// Returns the arguments that configure snapshot loading +pub fn snapshot_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { + vec![ + Arg::with_name("no_snapshot") + .long("no-snapshot") + .takes_value(false) + .help("Do not start from a local snapshot if present"), + Arg::with_name("snapshots") + .long("snapshots") + .alias("snapshot-archive-path") + .alias("full-snapshot-archive-path") + .value_name("DIR") + .takes_value(true) + .global(true) + .help("Use DIR for snapshot location [default: --ledger value]"), + Arg::with_name("incremental_snapshot_archive_path") + .long("incremental-snapshot-archive-path") + .value_name("DIR") + .takes_value(true) + .global(true) + .help("Use DIR for separate incremental snapshot location"), + Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) + .long(use_snapshot_archives_at_startup::cli::LONG_ARG) + .takes_value(true) + .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) + .default_value(use_snapshot_archives_at_startup::cli::default_value_for_ledger_tool()) + .help(use_snapshot_archives_at_startup::cli::HELP) + .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP), + ] + .into_boxed_slice() +} + /// Parse a `ProcessOptions` from subcommand arguments. This function attempts /// to parse all flags related to `ProcessOptions`; however, subcommands that /// use this function may not support all flags. 
@@ -155,3 +294,18 @@ pub fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> { None } } + +#[cfg(test)] +mod tests { + use {super::*, solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}; + + #[test] + fn test_max_genesis_archive_unpacked_size_constant() { + assert_eq!( + MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + MAX_GENESIS_ARCHIVE_UNPACKED_SIZE_STR + .parse::<u64>() + .unwrap() + ); + } +} diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 17f7ba598fa473..a79645e4282e08 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -1,18 +1,21 @@ //! The `bigtable` subcommand use { crate::{ + args::{load_genesis_arg, snapshot_args}, ledger_path::canonicalize_ledger_path, + load_and_process_ledger_or_exit, open_genesis_config_by, output::{ encode_confirmed_block, CliBlockWithEntries, CliEntries, EncodedConfirmedBlockWithEntries, }, + parse_process_options, LoadAndProcessLedgerOutput, }, clap::{ value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, }, crossbeam_channel::unbounded, futures::stream::FuturesUnordered, - log::{debug, error, info}, + log::{debug, error, info, warn}, serde_json::json, solana_clap_utils::{ input_parsers::pubkey_of, @@ -22,11 +25,17 @@ use { display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation, OutputFormat, }, + solana_entry::entry::{create_ticks, Entry}, solana_ledger::{ - bigtable_upload::ConfirmedBlockUploadConfig, blockstore::Blockstore, + bigtable_upload::ConfirmedBlockUploadConfig, + blockstore::Blockstore, blockstore_options::AccessType, + shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, + }, + solana_sdk::{ + clock::Slot, hash::Hash, pubkey::Pubkey, shred_version::compute_shred_version, + signature::Signature, signer::keypair::keypair_from_seed, }, - solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, solana_storage_bigtable::CredentialType,
solana_transaction_status::{ConfirmedBlock, UiTransactionEncoding, VersionedConfirmedBlock}, std::{ @@ -164,6 +173,170 @@ async fn entries( Ok(()) } +struct ShredConfig { + shred_version: u16, + num_hashes_per_tick: u64, + num_ticks_per_slot: u64, + allow_mock_poh: bool, +} + +async fn shreds( + blockstore: Arc<Blockstore>, + starting_slot: Slot, + ending_slot: Slot, + shred_config: ShredConfig, + config: solana_storage_bigtable::LedgerStorageConfig, +) -> Result<(), Box<dyn std::error::Error>> { + let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) + .await + .map_err(|err| format!("Failed to connect to storage: {err:?}"))?; + + // Make the range inclusive of both starting and ending slot + let limit = ending_slot.saturating_sub(starting_slot).saturating_add(1) as usize; + let mut slots = bigtable.get_confirmed_blocks(starting_slot, limit).await?; + slots.retain(|&slot| slot <= ending_slot); + + // Create a "dummy" keypair to sign the shreds that will later be created. + // + // The validator shred ingestion path sigverifies shreds from the network + // using the known leader for any given slot. It is unlikely that a user of + // this tool will have access to these leader identity keypairs. However, + // shred sigverify occurs prior to Blockstore::insert_shreds(). Thus, the + // shreds being signed with the "dummy" keypair can still be inserted and + // later read/replayed/etc + let keypair = keypair_from_seed(&[0; 64])?; + let ShredConfig { + shred_version, + num_hashes_per_tick, + num_ticks_per_slot, + allow_mock_poh, + } = shred_config; + + for slot in slots.iter() { + let block = bigtable.get_confirmed_block(*slot).await?; + let entry_summaries = match bigtable.get_entries(*slot).await { + Ok(summaries) => Some(summaries), + Err(err) => { + let err_msg = format!("Failed to get PoH entries for {slot}: {err}"); + + if allow_mock_poh { + warn!("{err_msg}. Will create mock PoH entries instead."); + } else { + return Err(format!( + "{err_msg}. 
Try passing --allow-mock-poh to allow \ + creation of shreds with mocked PoH entries" + ))?; + } + None + } + }; + + let entries = match entry_summaries { + Some(entry_summaries) => entry_summaries + .enumerate() + .map(|(i, entry_summary)| { + let num_hashes = entry_summary.num_hashes; + let hash = entry_summary.hash; + let starting_transaction_index = entry_summary.starting_transaction_index; + let num_transactions = entry_summary.num_transactions as usize; + + let Some(transactions) = block.transactions.get( + starting_transaction_index..starting_transaction_index + num_transactions, + ) else { + let num_block_transactions = block.transactions.len(); + return Err(format!( + "Entry summary {i} for slot {slot} with starting_transaction_index \ + {starting_transaction_index} and num_transactions {num_transactions} \ + is in conflict with the block, which has {num_block_transactions} \ + transactions" + )); + }; + let transactions = transactions + .iter() + .map(|tx_with_meta| tx_with_meta.get_transaction()) + .collect(); + + Ok(Entry { + num_hashes, + hash, + transactions, + }) + }) + .collect::, std::string::String>>()?, + None => { + let num_total_ticks = ((slot - block.parent_slot) * num_ticks_per_slot) as usize; + let num_total_entries = num_total_ticks + block.transactions.len(); + let mut entries = Vec::with_capacity(num_total_entries); + + // Create virtual tick entries for any skipped slots + // + // These ticks are necessary so that the tick height is + // advanced to the proper value when this block is processed. + // + // Additionally, a blockhash will still be inserted into the + // recent blockhashes sysvar for skipped slots. 
So, these + // virtual ticks will have the proper PoH + let num_skipped_slots = slot - block.parent_slot - 1; + if num_skipped_slots > 0 { + let num_virtual_ticks = num_skipped_slots * num_ticks_per_slot; + let parent_blockhash = Hash::from_str(&block.previous_blockhash)?; + let virtual_ticks_entries = + create_ticks(num_virtual_ticks, num_hashes_per_tick, parent_blockhash); + entries.extend(virtual_ticks_entries.into_iter()); + } + + // Create transaction entries + // + // Keep it simple and just do one transaction per Entry + let transaction_entries = block.transactions.iter().map(|tx_with_meta| Entry { + num_hashes: 0, + hash: Hash::default(), + transactions: vec![tx_with_meta.get_transaction()], + }); + entries.extend(transaction_entries.into_iter()); + + // Create the tick entries for this slot + // + // We do not know the intermediate hashes, so just use default + // hash for all ticks. The exception is the final tick; the + // final tick determines the blockhash so set it the known + // blockhash from the bigtable block + let blockhash = Hash::from_str(&block.blockhash)?; + let tick_entries = (0..num_ticks_per_slot).map(|idx| { + let hash = if idx == num_ticks_per_slot - 1 { + blockhash + } else { + Hash::default() + }; + Entry { + num_hashes: 0, + hash, + transactions: vec![], + } + }); + entries.extend(tick_entries.into_iter()); + + entries + } + }; + + let shredder = Shredder::new(*slot, block.parent_slot, 0, shred_version)?; + let (data_shreds, _coding_shreds) = shredder.entries_to_shreds( + &keypair, + &entries, + true, // last_in_slot + None, // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index + false, // merkle_variant + &ReedSolomonCache::default(), + &mut ProcessShredsStats::default(), + ); + blockstore.insert_shreds(data_shreds, None, false)?; + } + Ok(()) +} + async fn blocks( starting_slot: Slot, limit: usize, @@ -848,6 +1021,45 @@ impl BigTableSubCommand for App<'_, '_> { .required(true), ), ) + .subcommand( + 
SubCommand::with_name("shreds") + .about( + "Get confirmed blocks from BigTable, reassemble the transactions \ + and entries, shred the block and then insert the shredded blocks into \ + the local Blockstore", + ) + .arg(load_genesis_arg()) + .args(&snapshot_args()) + .arg( + Arg::with_name("starting_slot") + .long("starting-slot") + .validator(is_slot) + .value_name("SLOT") + .takes_value(true) + .required(true) + .help("Start shred creation at this slot (inclusive)"), + ) + .arg( + Arg::with_name("ending_slot") + .long("ending-slot") + .validator(is_slot) + .value_name("SLOT") + .takes_value(true) + .required(true) + .help("Stop shred creation at this slot (inclusive)"), + ) + .arg( + Arg::with_name("allow_mock_poh") + .long("allow-mock-poh") + .takes_value(false) + .help( + "For slots where PoH entries are unavailable, allow the \ + generation of mock PoH entries. The mock PoH entries enable \ + the shredded block(s) to be replayable if PoH verification is \ + disabled.", + ), + ), + ) .subcommand( SubCommand::with_name("confirm") .about("Confirm transaction by signature") @@ -1142,6 +1354,87 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { }; runtime.block_on(entries(slot, output_format, config)) } + ("shreds", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + if starting_slot > ending_slot { + eprintln!( + "The specified --starting-slot {starting_slot} must be less than or equal to \ + the specified --ending-slot {ending_slot}." 
+ ); + exit(1); + } + let allow_mock_poh = arg_matches.is_present("allow_mock_poh"); + + let ledger_path = canonicalize_ledger_path(ledger_path); + let process_options = parse_process_options(&ledger_path, arg_matches); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = Arc::new(crate::open_blockstore( + &ledger_path, + arg_matches, + AccessType::Primary, + )); + let LoadAndProcessLedgerOutput { bank_forks, .. } = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + blockstore.clone(), + process_options, + None, + ); + + let bank = bank_forks.read().unwrap().working_bank(); + // If mock PoH is allowed, ensure that the requested slots are in + // the same epoch as the working bank. This will ensure the values + // extracted from the Bank are accurate for the slot range + if allow_mock_poh { + let working_bank_epoch = bank.epoch(); + let epoch_schedule = bank.epoch_schedule(); + let starting_epoch = epoch_schedule.get_epoch(starting_slot); + let ending_epoch = epoch_schedule.get_epoch(ending_slot); + if starting_epoch != ending_epoch { + eprintln!( + "The specified --starting-slot and --ending-slot must be in the \ + same epoch. --starting-slot {starting_slot} is in epoch {starting_epoch}, \ + but --ending-slot {ending_slot} is in epoch {ending_epoch}." + ); + exit(1); + } + if starting_epoch != working_bank_epoch { + eprintln!( + "The range of slots between --starting-slot and --ending-slot is in a \ + different epoch than the working bank. The specified range is in epoch \ + {starting_epoch}, but the working bank is in {working_bank_epoch}."
+ ); + exit(1); + } + } + + let shred_version = + compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())); + let num_hashes_per_tick = bank.hashes_per_tick().unwrap_or(0); + let num_ticks_per_slot = bank.ticks_per_slot(); + let shred_config = ShredConfig { + shred_version, + num_hashes_per_tick, + num_ticks_per_slot, + allow_mock_poh, + }; + + let config = solana_storage_bigtable::LedgerStorageConfig { + read_only: true, + instance_name, + app_profile_id, + ..solana_storage_bigtable::LedgerStorageConfig::default() + }; + + runtime.block_on(shreds( + blockstore, + starting_slot, + ending_slot, + shred_config, + config, + )) + } ("blocks", Some(arg_matches)) => { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 44b6667f0516f5..98a647e21f2851 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -54,6 +54,19 @@ use { thiserror::Error, }; +pub struct LoadAndProcessLedgerOutput { + pub bank_forks: Arc>, + pub starting_snapshot_hashes: Option, + // Typically, we would want to join all threads before returning. However, + // AccountsBackgroundService (ABS) performs several long running operations + // that don't respond to the exit flag. Blocking on these operations could + // significantly delay getting results that do not need ABS to finish. So, + // skip joining ABS and instead let the caller decide whether to block or + // not. It is safe to let ABS continue in the background, and ABS will stop + // if/when it finally checks the exit flag + pub accounts_background_service: AccountsBackgroundService, +} + const PROCESS_SLOTS_HELP_STRING: &str = "The starting slot is either the latest found snapshot slot, or genesis (slot 0) if the \ --no-snapshot flag was specified or if no snapshots were found. 
\ @@ -98,17 +111,13 @@ pub fn load_and_process_ledger_or_exit( genesis_config: &GenesisConfig, blockstore: Arc, process_options: ProcessOptions, - snapshot_archive_path: Option, - incremental_snapshot_archive_path: Option, transaction_status_sender: Option, -) -> (Arc>, Option) { +) -> LoadAndProcessLedgerOutput { load_and_process_ledger( arg_matches, genesis_config, blockstore, process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, transaction_status_sender, ) .unwrap_or_else(|err| { @@ -122,10 +131,8 @@ pub fn load_and_process_ledger( genesis_config: &GenesisConfig, blockstore: Arc, process_options: ProcessOptions, - snapshot_archive_path: Option, - incremental_snapshot_archive_path: Option, transaction_status_sender: Option, -) -> Result<(Arc>, Option), LoadAndProcessLedgerError> { +) -> Result { let bank_snapshots_dir = if blockstore.is_primary_access() { blockstore.ledger_path().join("snapshot") } else { @@ -139,10 +146,15 @@ pub fn load_and_process_ledger( let snapshot_config = if arg_matches.is_present("no_snapshot") { None } else { - let full_snapshot_archives_dir = - snapshot_archive_path.unwrap_or_else(|| blockstore.ledger_path().to_path_buf()); + let full_snapshot_archives_dir = value_t!(arg_matches, "snapshots", String) + .ok() + .map(PathBuf::from) + .unwrap_or_else(|| blockstore.ledger_path().to_path_buf()); let incremental_snapshot_archives_dir = - incremental_snapshot_archive_path.unwrap_or_else(|| full_snapshot_archives_dir.clone()); + value_t!(arg_matches, "incremental_snapshot_archive_path", String) + .ok() + .map(PathBuf::from) + .unwrap_or_else(|| full_snapshot_archives_dir.clone()); if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir) { @@ -403,11 +415,14 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though &accounts_background_request_sender, ) - .map(|_| (bank_forks, starting_snapshot_hashes)) + .map(|_| LoadAndProcessLedgerOutput 
{ + bank_forks, + starting_snapshot_hashes, + accounts_background_service, + }) .map_err(LoadAndProcessLedgerError::ProcessBlockstoreFromRoot); exit.store(true, Ordering::Relaxed); - accounts_background_service.join().unwrap(); accounts_hash_verifier.join().unwrap(); if let Some(service) = transaction_status_service { service.join().unwrap(); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index fee913c82b593d..8339c0a14d07ff 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -8,6 +8,7 @@ use { ledger_utils::*, output::{ output_account, AccountsOutputConfig, AccountsOutputMode, AccountsOutputStreamer, + SlotBankHash, }, program::*, }, @@ -19,15 +20,12 @@ use { log::*, serde_derive::Serialize, solana_account_decoder::UiAccountEncoding, - solana_accounts_db::{ - accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig, - hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, - }, + solana_accounts_db::{accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig}, solana_clap_utils::{ hidden_unless_forced, input_parsers::{cluster_type_of, pubkey_of, pubkeys_of}, input_validators::{ - is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, + is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, is_within_range, }, }, @@ -43,7 +41,6 @@ use { blockstore_processor::{ ProcessSlotCallback, TransactionStatusMessage, TransactionStatusSender, }, - use_snapshot_archives_at_startup, }, solana_measure::{measure, measure::Measure}, solana_runtime::{ @@ -562,76 +559,10 @@ fn main() { solana_logger::setup_with_default_filter(); - let no_snapshot_arg = Arg::with_name("no_snapshot") - .long("no-snapshot") - .takes_value(false) - .help("Do not start from a local snapshot if present"); - let accounts_index_bins = Arg::with_name("accounts_index_bins") - .long("accounts-index-bins") - .value_name("BINS") - .validator(is_pow2) - .takes_value(true) - .help("Number of bins to divide the 
accounts index into"); - let accounts_index_limit = Arg::with_name("accounts_index_memory_limit_mb") - .long("accounts-index-memory-limit-mb") - .value_name("MEGABYTES") - .validator(is_parsable::) - .takes_value(true) - .help( - "How much memory the accounts index can consume. If this is exceeded, some account \ - index entries will be stored on disk.", - ); - let disable_disk_index = Arg::with_name("disable_accounts_disk_index") - .long("disable-accounts-disk-index") - .help( - "Disable the disk-based accounts index. It is enabled by default. The entire accounts \ - index will be kept in memory.", - ) - .conflicts_with("accounts_index_memory_limit_mb"); - let accountsdb_skip_shrink = Arg::with_name("accounts_db_skip_shrink") - .long("accounts-db-skip-shrink") - .help( - "Enables faster starting of ledger-tool by skipping shrink. This option is for use \ - during testing.", - ); - let accountsdb_verify_refcounts = Arg::with_name("accounts_db_verify_refcounts") - .long("accounts-db-verify-refcounts") - .help( - "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", - ) - .hidden(hidden_unless_forced()); - let accounts_db_test_skip_rewrites_but_include_in_bank_hash = - Arg::with_name("accounts_db_test_skip_rewrites") - .long("accounts-db-test-skip-rewrites") - .help( - "Debug option to skip rewrites for rent-exempt accounts but still add them in \ - bank delta hash calculation", - ) - .hidden(hidden_unless_forced()); - let account_paths_arg = Arg::with_name("account_paths") - .long("accounts") - .value_name("PATHS") - .takes_value(true) - .help( - "Persistent accounts location. \ - May be specified multiple times. 
\ - [default: /accounts]", - ); - let accounts_hash_cache_path_arg = Arg::with_name("accounts_hash_cache_path") - .long("accounts-hash-cache-path") - .value_name("PATH") - .takes_value(true) - .help("Use PATH as accounts hash cache location [default: /accounts_hash_cache]"); - let accounts_index_path_arg = Arg::with_name("accounts_index_path") - .long("accounts-index-path") - .value_name("PATH") - .takes_value(true) - .multiple(true) - .help( - "Persistent accounts-index location. \ - May be specified multiple times. \ - [default: /accounts_index]", - ); + let load_genesis_config_arg = load_genesis_arg(); + let accounts_db_config_args = accounts_db_args(); + let snapshot_config_args = snapshot_args(); + let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") .help("Enable hash calculation test"); @@ -644,20 +575,6 @@ fn main() { let os_memory_stats_reporting_arg = Arg::with_name("os_memory_stats_reporting") .long("os-memory-stats-reporting") .help("Enable reporting of OS memory statistics."); - let accounts_db_skip_initial_hash_calc_arg = - Arg::with_name("accounts_db_skip_initial_hash_calculation") - .long("accounts-db-skip-initial-hash-calculation") - .help("Do not verify accounts hash at startup.") - .hidden(hidden_unless_forced()); - let ancient_append_vecs = Arg::with_name("accounts_db_ancient_append_vecs") - .long("accounts-db-ancient-append-vecs") - .value_name("SLOT-OFFSET") - .validator(is_parsable::) - .takes_value(true) - .help( - "AppendVecs that are older than (slots_per_epoch - SLOT-OFFSET) are squashed together.", - ) - .hidden(hidden_unless_forced()); let halt_at_slot_store_hash_raw_data = Arg::with_name("halt_at_slot_store_hash_raw_data") .long("halt-at-slot-store-hash-raw-data") .help( @@ -690,13 +607,6 @@ fn main() { .long("allow-dead-slots") .takes_value(false) .help("Output dead slots as well"); - let default_genesis_archive_unpacked_size = 
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string(); - let max_genesis_archive_unpacked_size_arg = Arg::with_name("max_genesis_archive_unpacked_size") - .long("max-genesis-archive-unpacked-size") - .value_name("NUMBER") - .takes_value(true) - .default_value(&default_genesis_archive_unpacked_size) - .help("maximum total uncompressed size of unpacked genesis archive"); let hashes_per_tick = Arg::with_name("hashes_per_tick") .long("hashes-per-tick") .value_name("NUM_HASHES|\"sleep\"") @@ -719,14 +629,6 @@ fn main() { .multiple(true) .takes_value(true) .help("Log when transactions are processed that reference the given key(s)."); - let use_snapshot_archives_at_startup = - Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) - .long(use_snapshot_archives_at_startup::cli::LONG_ARG) - .takes_value(true) - .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) - .default_value(use_snapshot_archives_at_startup::cli::default_value_for_ledger_tool()) - .help(use_snapshot_archives_at_startup::cli::HELP) - .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP); let geyser_plugin_args = Arg::with_name("geyser_plugin_config") .long("geyser-plugin-config") @@ -811,24 +713,6 @@ fn main() { run fine with a reduced file descriptor limit while others will not", ), ) - .arg( - Arg::with_name("snapshots") - .long("snapshots") - .alias("snapshot-archive-path") - .alias("full-snapshot-archive-path") - .value_name("DIR") - .takes_value(true) - .global(true) - .help("Use DIR for snapshot location [default: --ledger value]"), - ) - .arg( - Arg::with_name("incremental_snapshot_archive_path") - .long("incremental-snapshot-archive-path") - .value_name("DIR") - .takes_value(true) - .global(true) - .help("Use DIR for separate incremental snapshot location"), - ) .arg( Arg::with_name("block_verification_method") .long("block-verification-method") @@ -836,7 +720,6 @@ fn main() { .takes_value(true) .possible_values(BlockVerificationMethod::cli_names()) .global(true) - 
.hidden(hidden_unless_forced()) .help(BlockVerificationMethod::cli_message()), ) .arg( @@ -846,7 +729,6 @@ fn main() { .takes_value(true) .validator(|s| is_within_range(s, 1..)) .global(true) - .hidden(hidden_unless_forced()) .help(DefaultSchedulerPool::cli_message()), ) .arg( @@ -879,7 +761,7 @@ fn main() { .subcommand( SubCommand::with_name("genesis") .about("Prints the ledger's genesis config") - .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&load_genesis_config_arg) .arg( Arg::with_name("accounts") .long("accounts") @@ -898,12 +780,12 @@ fn main() { .subcommand( SubCommand::with_name("genesis-hash") .about("Prints the ledger's genesis hash") - .arg(&max_genesis_archive_unpacked_size_arg), + .arg(&load_genesis_config_arg) ) .subcommand( SubCommand::with_name("modify-genesis") .about("Modifies genesis parameters") - .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&load_genesis_config_arg) .arg(&hashes_per_tick) .arg( Arg::with_name("cluster_type") @@ -923,57 +805,36 @@ fn main() { .subcommand( SubCommand::with_name("shred-version") .about("Prints the ledger's shred hash") + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&use_snapshot_archives_at_startup), ) .subcommand( SubCommand::with_name("bank-hash") .about("Prints the hash of the working bank after reading the ledger") - .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&halt_at_slot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - 
.arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&use_snapshot_archives_at_startup), ) .subcommand( SubCommand::with_name("verify") .about("Verify the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_path_arg) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&halt_at_slot_arg) .arg(&limit_load_slot_count_from_snapshot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_skip_shrink) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&verify_index_arg) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&ancient_append_vecs) .arg(&halt_at_slot_store_hash_raw_data) .arg(&hard_forks_arg) .arg(&accounts_db_test_hash_calculation_arg) .arg(&os_memory_stats_reporting_arg) .arg(&allow_dead_slots_arg) - .arg(&max_genesis_archive_unpacked_size_arg) .arg(&debug_key_arg) .arg(&geyser_plugin_args) .arg(&log_messages_bytes_limit_arg) - .arg(&use_snapshot_archives_at_startup) .arg( Arg::with_name("skip_poh_verify") .long("skip-poh-verify") @@ -1096,19 +957,11 @@ fn main() { .subcommand( SubCommand::with_name("graph") .about("Create a Graphviz rendering of the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&use_snapshot_archives_at_startup) .arg( Arg::with_name("include_all_votes") 
.long("include-all-votes") @@ -1137,23 +990,13 @@ fn main() { .subcommand( SubCommand::with_name("create-snapshot") .about("Create a new ledger snapshot") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accountsdb_skip_shrink) - .arg(&ancient_append_vecs) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) .arg(&snapshot_version_arg) .arg(&geyser_plugin_args) .arg(&log_messages_bytes_limit_arg) - .arg(&use_snapshot_archives_at_startup) .arg( Arg::with_name("snapshot_slot") .index(1) @@ -1353,22 +1196,14 @@ fn main() { .subcommand( SubCommand::with_name("accounts") .about("Print account stats and contents after processing the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) .arg(&geyser_plugin_args) .arg(&log_messages_bytes_limit_arg) .arg(&accounts_data_encoding_arg) - .arg(&use_snapshot_archives_at_startup) - .arg(&max_genesis_archive_unpacked_size_arg) .arg( Arg::with_name("include_sysvars") .long("include-sysvars") @@ -1415,21 +1250,13 @@ fn main() { .subcommand( SubCommand::with_name("capitalization") .about("Print capitalization (aka, total supply) while checksumming it") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - 
.arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) .arg(&geyser_plugin_args) .arg(&log_messages_bytes_limit_arg) - .arg(&use_snapshot_archives_at_startup) .arg( Arg::with_name("warp_epoch") .required(false) @@ -1500,14 +1327,6 @@ fn main() { info!("{} {}", crate_name!(), solana_version::version!()); let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger_path", String)); - let snapshot_archive_path = value_t!(matches, "snapshots", String) - .ok() - .map(PathBuf::from); - let incremental_snapshot_archive_path = - value_t!(matches, "incremental_snapshot_archive_path", String) - .ok() - .map(PathBuf::from); - let verbose_level = matches.occurrences_of("verbose"); // Name the rayon global thread pool @@ -1610,15 +1429,14 @@ fn main() { arg_matches, get_access_type(&process_options), ); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - None, - ); + let LoadAndProcessLedgerOutput { bank_forks, .. 
} = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + None, + ); println!( "{}", @@ -1791,9 +1609,12 @@ fn main() { process_options.slot_callback = slot_callback; + let output_format = + OutputFormat::from_matches(arg_matches, "output_format", false); let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); let print_bank_hash = arg_matches.is_present("print_bank_hash"); let write_bank_file = arg_matches.is_present("write_bank_file"); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); info!("genesis hash: {}", genesis_config.hash()); @@ -1802,26 +1623,25 @@ fn main() { arg_matches, get_access_type(&process_options), ); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - transaction_status_sender, - ); + let LoadAndProcessLedgerOutput { bank_forks, .. 
} = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + transaction_status_sender, + ); let working_bank = bank_forks.read().unwrap().working_bank(); if print_accounts_stats { working_bank.print_accounts_stats(); } if print_bank_hash { - println!( - "Bank hash for slot {}: {}", - working_bank.slot(), - working_bank.hash() - ); + let slot_bank_hash = SlotBankHash { + slot: working_bank.slot(), + hash: working_bank.hash().to_string(), + }; + println!("{}", output_format.formatted_string(&slot_bank_hash)); } if write_bank_file { bank_hash_details::write_bank_hash_details_file(&working_bank) @@ -1870,15 +1690,14 @@ fn main() { arg_matches, get_access_type(&process_options), ); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - None, - ); + let LoadAndProcessLedgerOutput { bank_forks, .. 
} = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + None, + ); let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); let extension = Path::new(&output_file).extension(); @@ -1901,6 +1720,13 @@ fn main() { let is_minimized = arg_matches.is_present("minimized"); let output_directory = value_t!(arg_matches, "output_directory", PathBuf) .unwrap_or_else(|_| { + let snapshot_archive_path = value_t!(matches, "snapshots", String) + .ok() + .map(PathBuf::from); + let incremental_snapshot_archive_path = + value_t!(matches, "incremental_snapshot_archive_path", String) + .ok() + .map(PathBuf::from); match ( is_incremental, &snapshot_archive_path, @@ -2034,15 +1860,23 @@ fn main() { output_directory.display() ); - let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit( + let LoadAndProcessLedgerOutput { + bank_forks, + starting_snapshot_hashes, + accounts_background_service, + } = load_and_process_ledger_or_exit( arg_matches, &genesis_config, blockstore.clone(), process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, None, ); + // Snapshot creation will implicitly perform AccountsDb + // flush and clean operations. These operations cannot be + // run concurrently, so ensure ABS is stopped to avoid that + // possibility. 
+ accounts_background_service.join().unwrap(); + let mut bank = bank_forks .read() .unwrap() @@ -2091,7 +1925,7 @@ fn main() { if remove_stake_accounts { for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .get_program_accounts(&stake::program::id(), &ScanConfig::new(false)) .unwrap() .into_iter() { @@ -2141,7 +1975,7 @@ fn main() { if !vote_accounts_to_destake.is_empty() { for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .get_program_accounts(&stake::program::id(), &ScanConfig::new(false)) .unwrap() .into_iter() { @@ -2181,7 +2015,7 @@ fn main() { for (address, mut account) in bank .get_program_accounts( &solana_vote_program::id(), - &ScanConfig::default(), + &ScanConfig::new(false), ) .unwrap() .into_iter() @@ -2424,15 +2258,14 @@ fn main() { arg_matches, get_access_type(&process_options), ); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - None, - ); + let LoadAndProcessLedgerOutput { bank_forks, .. } = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + None, + ); let bank = bank_forks.read().unwrap().working_bank(); let include_sysvars = arg_matches.is_present("include_sysvars"); @@ -2477,15 +2310,14 @@ fn main() { arg_matches, get_access_type(&process_options), ); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - None, - ); + let LoadAndProcessLedgerOutput { bank_forks, .. 
} = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + None, + ); let bank_forks = bank_forks.read().unwrap(); let slot = bank_forks.working_bank().slot(); let bank = bank_forks.get(slot).unwrap_or_else(|| { diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 3de08cec989806..fd0b64eb58c84a 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -101,6 +101,22 @@ impl Display for SlotBounds<'_> { } } +#[derive(Serialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct SlotBankHash { + pub slot: Slot, + pub hash: String, +} + +impl VerboseDisplay for SlotBankHash {} +impl QuietDisplay for SlotBankHash {} + +impl Display for SlotBankHash { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + writeln!(f, "Bank hash for slot {}: {}", self.slot, self.hash) + } +} + fn writeln_entry(f: &mut dyn fmt::Write, i: usize, entry: &CliEntry, prefix: &str) -> fmt::Result { writeln!( f, @@ -656,7 +672,7 @@ impl AccountsScanner { match &self.config.mode { AccountsOutputMode::All => { - self.bank.scan_all_accounts(scan_func).unwrap(); + self.bank.scan_all_accounts(scan_func, true).unwrap(); } AccountsOutputMode::Individual(pubkeys) => pubkeys.iter().for_each(|pubkey| { if let Some((account, slot)) = self @@ -676,7 +692,7 @@ impl AccountsScanner { }), AccountsOutputMode::Program(program_pubkey) => self .bank - .get_program_accounts(program_pubkey, &ScanConfig::default()) + .get_program_accounts(program_pubkey, &ScanConfig::new(false)) .unwrap() .iter() .filter(|(_, account)| self.should_process_account(account)) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 15750290fbed2a..463d017b17dbed 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -1,6 +1,6 @@ use { crate::{args::*, canonicalize_ledger_path, ledger_utils::*}, - clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand}, + clap::{App, AppSettings, Arg, 
ArgMatches, SubCommand}, log::*, serde_derive::{Deserialize, Serialize}, serde_json::Result, @@ -9,7 +9,7 @@ use { syscalls::create_program_runtime_environment_v1, }, solana_cli_output::{OutputFormat, QuietDisplay, VerboseDisplay}, - solana_ledger::{blockstore_options::AccessType, use_snapshot_archives_at_startup}, + solana_ledger::blockstore_options::AccessType, solana_program_runtime::{ invoke_context::InvokeContext, loaded_programs::{ @@ -36,7 +36,7 @@ use { fmt::{self, Debug, Formatter}, fs::File, io::{Read, Seek, Write}, - path::{Path, PathBuf}, + path::Path, process::exit, sync::Arc, time::{Duration, Instant}, @@ -75,24 +75,15 @@ fn load_accounts(path: &Path) -> Result { fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc { let process_options = parse_process_options(ledger_path, arg_matches); - let snapshot_archive_path = value_t!(arg_matches, "snapshots", String) - .ok() - .map(PathBuf::from); - let incremental_snapshot_archive_path = - value_t!(arg_matches, "incremental_snapshot_archive_path", String) - .ok() - .map(PathBuf::from); let genesis_config = open_genesis_config_by(ledger_path, arg_matches); info!("genesis hash: {}", genesis_config.hash()); let blockstore = open_blockstore(ledger_path, arg_matches, AccessType::Secondary); - let (bank_forks, ..) = load_and_process_ledger_or_exit( + let LoadAndProcessLedgerOutput { bank_forks, .. 
} = load_and_process_ledger_or_exit( arg_matches, &genesis_config, Arc::new(blockstore), process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, None, ); let bank = bank_forks.read().unwrap().working_bank(); @@ -112,22 +103,9 @@ impl ProgramSubCommand for App<'_, '_> { ) .required(true) .index(1); - let max_genesis_arg = Arg::with_name("max_genesis_archive_unpacked_size") - .long("max-genesis-archive-unpacked-size") - .value_name("NUMBER") - .takes_value(true) - .default_value("10485760") - .help("maximum total uncompressed size of unpacked genesis archive"); - let use_snapshot_archives_at_startup = - Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) - .long(use_snapshot_archives_at_startup::cli::LONG_ARG) - .takes_value(true) - .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) - .default_value( - use_snapshot_archives_at_startup::cli::default_value_for_ledger_tool(), - ) - .help(use_snapshot_archives_at_startup::cli::HELP) - .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP); + + let load_genesis_config_arg = load_genesis_arg(); + let snapshot_config_args = snapshot_args(); self.subcommand( SubCommand::with_name("program") @@ -179,8 +157,8 @@ and the following fields are required .takes_value(true) .default_value("0"), ) - .arg(&max_genesis_arg) - .arg(&use_snapshot_archives_at_startup) + .arg(&load_genesis_config_arg) + .args(&snapshot_config_args) .arg( Arg::with_name("memory") .help("Heap memory for the program to run on") @@ -579,7 +557,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { account_lengths, &mut invoke_context, ); - let mut vm = vm.unwrap(); + let (mut vm, _, _) = vm.unwrap(); let start_time = Instant::now(); if matches.value_of("mode").unwrap() == "debugger" { vm.debug_port = Some(matches.value_of("port").unwrap().parse::().unwrap()); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 450f7b4f005f3c..0342a323905876 100644 --- 
a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -57,8 +57,9 @@ use { solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta}, solana_transaction_status::{ ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, Rewards, - TransactionStatusMeta, TransactionWithStatusMeta, VersionedConfirmedBlock, - VersionedConfirmedBlockWithEntries, VersionedTransactionWithStatusMeta, + RewardsAndNumPartitions, TransactionStatusMeta, TransactionWithStatusMeta, + VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, + VersionedTransactionWithStatusMeta, }, std::{ borrow::Cow, @@ -2678,7 +2679,7 @@ impl Blockstore { Hash::default() }; - let rewards = self + let (rewards, num_partitions) = self .rewards_cf .get_protobuf_or_bincode::(slot)? .unwrap_or_default() @@ -2699,6 +2700,7 @@ impl Blockstore { transactions: self .map_transactions_to_statuses(slot, slot_transaction_iterator)?, rewards, + num_partitions, block_time, block_height, }; @@ -3371,7 +3373,7 @@ impl Blockstore { .map(|result| result.map(|option| option.into())) } - pub fn write_rewards(&self, index: Slot, rewards: Rewards) -> Result<()> { + pub fn write_rewards(&self, index: Slot, rewards: RewardsAndNumPartitions) -> Result<()> { let rewards = rewards.into(); self.rewards_cf.put_protobuf(index, &rewards) } @@ -8302,6 +8304,7 @@ pub mod tests { blockhash: blockhash.to_string(), previous_blockhash: Hash::default().to_string(), rewards: vec![], + num_partitions: None, block_time: None, block_height: None, }; @@ -8316,6 +8319,7 @@ pub mod tests { blockhash: blockhash.to_string(), previous_blockhash: blockhash.to_string(), rewards: vec![], + num_partitions: None, block_time: None, block_height: None, }; @@ -8333,6 +8337,7 @@ pub mod tests { blockhash: blockhash.to_string(), previous_blockhash: blockhash.to_string(), rewards: vec![], + num_partitions: None, block_time: None, block_height: None, }; diff --git a/ledger/src/blockstore_processor.rs 
b/ledger/src/blockstore_processor.rs index 30abc9bf6eaea6..0e1c247b1af1e9 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -62,7 +62,8 @@ use { solana_svm::{ transaction_processor::ExecutionRecordingConfig, transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, + TransactionExecutionDetails, TransactionExecutionResult, + TransactionLoadedAccountsStats, TransactionResults, }, }, solana_transaction_status::token_balances::TransactionTokenBalancesSet, @@ -80,6 +81,7 @@ use { time::{Duration, Instant}, }, thiserror::Error, + ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen}, }; pub struct TransactionBatchWithIndexes<'a, 'b> { @@ -180,11 +182,33 @@ pub fn execute_batch( let TransactionResults { fee_collection_results, + loaded_accounts_stats, execution_results, rent_debits, .. } = tx_results; + let (check_block_cost_limits_result, check_block_cost_limits_time): (Result<()>, Measure) = + measure!(if bank + .feature_set + .is_active(&feature_set::apply_cost_tracker_during_replay::id()) + { + check_block_cost_limits( + bank, + &loaded_accounts_stats, + &execution_results, + batch.sanitized_transactions(), + ) + } else { + Ok(()) + }); + + timings.saturating_add_in_place( + ExecuteTimingType::CheckBlockLimitsUs, + check_block_cost_limits_time.as_us(), + ); + check_block_cost_limits_result?; + let executed_transactions = execution_results .iter() .zip(batch.sanitized_transactions()) @@ -219,6 +243,49 @@ pub fn execute_batch( first_err.map(|(result, _)| result).unwrap_or(Ok(())) } +// collect transactions actual execution costs, subject to block limits; +// block will be marked as dead if exceeds cost limits, details will be +// reported to metric `replay-stage-mark_dead_slot` +fn check_block_cost_limits( + bank: &Bank, + loaded_accounts_stats: &[Result], + execution_results: &[TransactionExecutionResult], + sanitized_transactions: &[SanitizedTransaction], +) -> Result<()> { + 
assert_eq!(loaded_accounts_stats.len(), execution_results.len()); + + let tx_costs_with_actual_execution_units: Vec<_> = execution_results + .iter() + .zip(loaded_accounts_stats) + .zip(sanitized_transactions) + .filter_map(|((execution_result, loaded_accounts_stats), tx)| { + if let Some(details) = execution_result.details() { + let tx_cost = CostModel::calculate_cost_for_executed_transaction( + tx, + details.executed_units, + loaded_accounts_stats + .as_ref() + .map_or(0, |stats| stats.loaded_accounts_data_size), + &bank.feature_set, + ); + Some(tx_cost) + } else { + None + } + }) + .collect(); + + { + let mut cost_tracker = bank.write_cost_tracker().unwrap(); + for tx_cost in &tx_costs_with_actual_execution_units { + cost_tracker + .try_add(tx_cost) + .map_err(TransactionError::from)?; + } + } + Ok(()) +} + #[derive(Default)] pub struct ExecuteBatchesInternalMetrics { execution_timings_per_thread: HashMap, @@ -455,22 +522,10 @@ fn rebatch_and_execute_batches( let cost = tx_cost.sum(); minimal_tx_cost = std::cmp::min(minimal_tx_cost, cost); total_cost = total_cost.saturating_add(cost); - tx_cost + cost }) .collect::>(); - if bank - .feature_set - .is_active(&feature_set::apply_cost_tracker_during_replay::id()) - { - let mut cost_tracker = bank.write_cost_tracker().unwrap(); - for tx_cost in &tx_costs { - cost_tracker - .try_add(tx_cost) - .map_err(TransactionError::from)?; - } - } - let target_batch_count = get_thread_count() as u64; let mut tx_batches: Vec = vec![]; @@ -478,26 +533,23 @@ fn rebatch_and_execute_batches( let target_batch_cost = total_cost / target_batch_count; let mut batch_cost: u64 = 0; let mut slice_start = 0; - tx_costs - .into_iter() - .enumerate() - .for_each(|(index, tx_cost)| { - let next_index = index + 1; - batch_cost = batch_cost.saturating_add(tx_cost.sum()); - if batch_cost >= target_batch_cost || next_index == sanitized_txs.len() { - let tx_batch = rebatch_transactions( - &lock_results, - bank, - &sanitized_txs, - slice_start, - 
index, - &transaction_indexes, - ); - slice_start = next_index; - tx_batches.push(tx_batch); - batch_cost = 0; - } - }); + tx_costs.into_iter().enumerate().for_each(|(index, cost)| { + let next_index = index + 1; + batch_cost = batch_cost.saturating_add(cost); + if batch_cost >= target_batch_cost || next_index == sanitized_txs.len() { + let tx_batch = rebatch_transactions( + &lock_results, + bank, + &sanitized_txs, + slice_start, + index, + &transaction_indexes, + ); + slice_start = next_index; + tx_batches.push(tx_batch); + batch_cost = 0; + } + }); &tx_batches[..] } else { batches @@ -513,7 +565,8 @@ fn rebatch_and_execute_batches( prioritization_fee_cache, )?; - timing.accumulate(execute_batches_internal_metrics); + // Pass false because this code-path is never touched by unified scheduler. + timing.accumulate(execute_batches_internal_metrics, false); Ok(()) } @@ -1079,11 +1132,15 @@ pub struct ConfirmationTiming { /// and replay. As replay can run in parallel with the verification, this value can not be /// recovered from the `replay_elapsed` and or `{poh,transaction}_verify_elapsed`. This /// includes failed cases, when `confirm_slot_entries` exist with an error. In microseconds. + /// When unified scheduler is enabled, replay excludes the transaction execution, only + /// accounting for task creation and submission to the scheduler. pub confirmation_elapsed: u64, /// Wall clock time used by the entry replay code. Does not include the PoH or the transaction /// signature/precompiles verification, but can overlap with the PoH and signature verification. /// In microseconds. + /// When unified scheduler is enabled, replay excludes the transaction execution, only + /// accounting for task creation and submission to the scheduler. pub replay_elapsed: u64, /// Wall clock times, used for the PoH verification of entries. In microseconds. @@ -1129,42 +1186,59 @@ pub struct BatchExecutionTiming { /// Wall clock time used by the transaction execution part of pipeline. 
/// [`ConfirmationTiming::replay_elapsed`] includes this time. In microseconds. - pub wall_clock_us: u64, + wall_clock_us: u64, /// Time used to execute transactions, via `execute_batch()`, in the thread that consumed the - /// most time. - pub slowest_thread: ThreadExecuteTimings, + /// most time (in terms of total_thread_us) among rayon threads. Note that the slowest thread + /// is determined each time a given group of batches is newly processed. So, this is a coarse + /// approximation of wall-time single-threaded linearized metrics, discarding all metrics other + /// than the arbitrary set of batches mixed with various transactions, which replayed slowest + /// as a whole for each rayon processing session, also after blockstore_processor's rebatching. + /// + /// When unified scheduler is enabled, this field isn't maintained, because it's not batched at + /// all. + slowest_thread: ThreadExecuteTimings, } impl BatchExecutionTiming { - pub fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) { + pub fn accumulate( + &mut self, + new_batch: ExecuteBatchesInternalMetrics, + is_unified_scheduler_enabled: bool, + ) { let Self { totals, wall_clock_us, slowest_thread, } = self; - saturating_add_assign!(*wall_clock_us, new_batch.execute_batches_us); + // These metric fields aren't applicable for the unified scheduler + if !is_unified_scheduler_enabled { + saturating_add_assign!(*wall_clock_us, new_batch.execute_batches_us); - use ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen}; - totals.saturating_add_in_place(TotalBatchesLen, new_batch.total_batches_len); - totals.saturating_add_in_place(NumExecuteBatches, 1); + totals.saturating_add_in_place(TotalBatchesLen, new_batch.total_batches_len); + totals.saturating_add_in_place(NumExecuteBatches, 1); + } for thread_times in new_batch.execution_timings_per_thread.values() { totals.accumulate(&thread_times.execute_timings); } - let slowest = new_batch - .execution_timings_per_thread - .values() - 
.max_by_key(|thread_times| thread_times.total_thread_us); - - if let Some(slowest) = slowest { - slowest_thread.accumulate(slowest); - slowest_thread - .execute_timings - .saturating_add_in_place(NumExecuteBatches, 1); - }; + // This whole metric (replay-slot-end-to-end-stats) isn't applicable for the unified + // scheduler. + if !is_unified_scheduler_enabled { + let slowest = new_batch + .execution_timings_per_thread + .values() + .max_by_key(|thread_times| thread_times.total_thread_us); + + if let Some(slowest) = slowest { + slowest_thread.accumulate(slowest); + slowest_thread + .execute_timings + .saturating_add_in_place(NumExecuteBatches, 1); + }; + } } } @@ -1185,7 +1259,8 @@ impl ThreadExecuteTimings { ("total_transactions_executed", self.total_transactions_executed as i64, i64), // Everything inside the `eager!` block will be eagerly expanded before // evaluation of the rest of the surrounding macro. - eager!{report_execute_timings!(self.execute_timings)} + // Pass false because this code-path is never touched by unified scheduler. + eager!{report_execute_timings!(self.execute_timings, false)} ); }; } @@ -1222,7 +1297,24 @@ impl ReplaySlotStats { num_entries: usize, num_shreds: u64, bank_complete_time_us: u64, + is_unified_scheduler_enabled: bool, ) { + let confirmation_elapsed = if is_unified_scheduler_enabled { + "confirmation_without_replay_us" + } else { + "confirmation_time_us" + }; + let replay_elapsed = if is_unified_scheduler_enabled { + "task_submission_us" + } else { + "replay_time" + }; + let execute_batches_us = if is_unified_scheduler_enabled { + None + } else { + Some(self.batch_execute.wall_clock_us as i64) + }; + lazy! 
{ datapoint_info!( "replay-slot-stats", @@ -1243,9 +1335,9 @@ impl ReplaySlotStats { self.transaction_verify_elapsed as i64, i64 ), - ("confirmation_time_us", self.confirmation_elapsed as i64, i64), - ("replay_time", self.replay_elapsed as i64, i64), - ("execute_batches_us", self.batch_execute.wall_clock_us as i64, i64), + (confirmation_elapsed, self.confirmation_elapsed as i64, i64), + (replay_elapsed, self.replay_elapsed as i64, i64), + ("execute_batches_us", execute_batches_us, Option), ( "replay_total_elapsed", self.started.elapsed().as_micros() as i64, @@ -1257,11 +1349,17 @@ impl ReplaySlotStats { ("total_shreds", num_shreds as i64, i64), // Everything inside the `eager!` block will be eagerly expanded before // evaluation of the rest of the surrounding macro. - eager!{report_execute_timings!(self.batch_execute.totals)} + eager!{report_execute_timings!(self.batch_execute.totals, is_unified_scheduler_enabled)} ); }; - self.batch_execute.slowest_thread.report_stats(slot); + // Skip reporting replay-slot-end-to-end-stats entirely if unified scheduler is enabled, + // because the whole metrics itself is only meaningful for rayon-based worker threads. + // + // See slowest_thread doc comment for details. + if !is_unified_scheduler_enabled { + self.batch_execute.slowest_thread.report_stats(slot); + } let mut per_pubkey_timings: Vec<_> = self .batch_execute @@ -2151,6 +2249,7 @@ pub mod tests { }, assert_matches::assert_matches, rand::{thread_rng, Rng}, + solana_cost_model::transaction_cost::TransactionCost, solana_entry::entry::{create_ticks, next_entry, next_entry_mut}, solana_program_runtime::declare_process_instruction, solana_runtime::{ @@ -4975,4 +5074,69 @@ pub mod tests { } } } + + #[test] + fn test_check_block_cost_limit() { + let dummy_leader_pubkey = solana_sdk::pubkey::new_rand(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); + let bank = Bank::new_for_tests(&genesis_config); + + let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &Pubkey::new_unique(), + 1, + genesis_config.hash(), + )); + let mut tx_cost = CostModel::calculate_cost(&tx, &bank.feature_set); + let actual_execution_cu = 1; + let actual_loaded_accounts_data_size = 64 * 1024; + let TransactionCost::Transaction(ref mut usage_cost_details) = tx_cost else { + unreachable!("test tx is non-vote tx"); + }; + usage_cost_details.programs_execution_cost = actual_execution_cu; + usage_cost_details.loaded_accounts_data_size_cost = + CostModel::calculate_loaded_accounts_data_size_cost( + actual_loaded_accounts_data_size, + &bank.feature_set, + ); + // set block-limit to be able to just have one transaction + let block_limit = tx_cost.sum(); + + bank.write_cost_tracker() + .unwrap() + .set_limits(u64::MAX, block_limit, u64::MAX); + let txs = vec![tx.clone(), tx]; + let results = vec![ + TransactionExecutionResult::Executed { + details: TransactionExecutionDetails { + status: Ok(()), + log_messages: None, + inner_instructions: None, + fee_details: solana_sdk::fee::FeeDetails::default(), + return_data: None, + executed_units: actual_execution_cu, + accounts_data_len_delta: 0, + }, + programs_modified_by_tx: HashMap::new(), + }, + TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), + ]; + let loaded_accounts_stats = vec![ + Ok(TransactionLoadedAccountsStats { + loaded_accounts_data_size: actual_loaded_accounts_data_size, + loaded_accounts_count: 2 + }); + 2 + ]; + + assert!(check_block_cost_limits(&bank, &loaded_accounts_stats, &results, &txs).is_ok()); + assert_eq!( + Err(TransactionError::WouldExceedMaxBlockCostLimit), + check_block_cost_limits(&bank, &loaded_accounts_stats, &results, &txs) + ); + } } diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 
0d72a867c60ac1..3e4cbc0e366531 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -48,6 +48,7 @@ serial_test = { workspace = true } solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-download-utils = { workspace = true } solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } +solana-local-cluster = { path = ".", features = ["dev-context-only-utils"] } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index b31c874b9e6db3..9b80c15824f494 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -4,14 +4,11 @@ /// discover the rest of the network. use log::*; use { - crate::cluster::QuicTpuClient, + crate::{cluster::QuicTpuClient, local_cluster::LocalCluster}, rand::{thread_rng, Rng}, rayon::{prelude::*, ThreadPool}, solana_client::connection_cache::{ConnectionCache, Protocol}, - solana_core::consensus::{ - tower_storage::{FileTowerStorage, SavedTower, SavedTowerVersions, TowerStorage}, - VOTE_THRESHOLD_DEPTH, - }, + solana_core::consensus::VOTE_THRESHOLD_DEPTH, solana_entry::entry::{self, Entry, EntrySlice}, solana_gossip::{ cluster_info::{self, ClusterInfo}, @@ -44,7 +41,7 @@ use { std::{ collections::{HashMap, HashSet, VecDeque}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, - path::{Path, PathBuf}, + path::Path, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -53,6 +50,13 @@ use { time::{Duration, Instant}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use { + solana_core::consensus::tower_storage::{ + FileTowerStorage, SavedTower, SavedTowerVersions, TowerStorage, + }, + std::path::PathBuf, +}; pub fn get_client_facing_addr( protocol: Protocol, @@ -101,12 +105,17 @@ pub fn spend_and_verify_all_nodes( .rpc_client() .get_latest_blockhash_with_commitment(CommitmentConfig::confirmed()) .unwrap(); - let 
transaction = + let mut transaction = system_transaction::transfer(funding_keypair, &random_keypair.pubkey(), 1, blockhash); let confs = VOTE_THRESHOLD_DEPTH + 1; - client - .send_transaction_to_upcoming_leaders(&transaction) - .unwrap(); + LocalCluster::send_transaction_with_retries( + &client, + &[funding_keypair], + &mut transaction, + 10, + confs, + ) + .unwrap(); for validator in &cluster_nodes { if ignore_nodes.contains(validator.pubkey()) { continue; @@ -160,16 +169,21 @@ pub fn send_many_transactions( .unwrap(); let transfer_amount = thread_rng().gen_range(1..max_tokens_per_transfer); - let transaction = system_transaction::transfer( + let mut transaction = system_transaction::transfer( funding_keypair, &random_keypair.pubkey(), transfer_amount, blockhash, ); - client - .send_transaction_to_upcoming_leaders(&transaction) - .unwrap(); + LocalCluster::send_transaction_with_retries( + &client, + &[funding_keypair], + &mut transaction, + 5, + 0, + ) + .unwrap(); expected_balances.insert(random_keypair.pubkey(), transfer_amount); } @@ -292,7 +306,7 @@ pub fn kill_entry_and_spend_and_verify_rest( .rpc_client() .get_latest_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); - let transaction = system_transaction::transfer( + let mut transaction = system_transaction::transfer( funding_keypair, &random_keypair.pubkey(), 1, @@ -300,16 +314,29 @@ pub fn kill_entry_and_spend_and_verify_rest( ); let confs = VOTE_THRESHOLD_DEPTH + 1; - if let Err(e) = client.send_transaction_to_upcoming_leaders(&transaction) { - result = Err(e); - continue; - } + let sig = { + let sig = LocalCluster::send_transaction_with_retries( + &client, + &[funding_keypair], + &mut transaction, + 5, + confs, + ); + match sig { + Err(e) => { + result = Err(e); + continue; + } + + Ok(sig) => sig, + } + }; info!("poll_all_nodes_for_signature()"); match poll_all_nodes_for_signature( entry_point_info, &cluster_nodes, connection_cache, - &transaction.signatures[0], + &sig, confs, ) { 
Err(e) => { @@ -325,6 +352,7 @@ pub fn kill_entry_and_spend_and_verify_rest( } } +#[cfg(feature = "dev-context-only-utils")] pub fn apply_votes_to_tower(node_keypair: &Keypair, votes: Vec<(Slot, Hash)>, tower_path: PathBuf) { let tower_storage = FileTowerStorage::new(tower_path); let mut tower = tower_storage.load(&node_keypair.pubkey()).unwrap(); diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 64fefb1114f99e..f6bef8bff3c63d 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -28,7 +28,7 @@ use { }, solana_sdk::{ account::{Account, AccountSharedData}, - clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, + clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, feature_set, @@ -36,13 +36,15 @@ use { message::Message, poh_config::PohConfig, pubkey::Pubkey, - signature::{Keypair, Signer}, + signature::{Keypair, Signature, Signer}, + signers::Signers, stake::{ instruction as stake_instruction, state::{Authorized, Lockup}, }, system_transaction, transaction::Transaction, + transport::TransportError, }, solana_stake_program::stake_state, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, @@ -61,6 +63,7 @@ use { net::{IpAddr, Ipv4Addr, UdpSocket}, path::{Path, PathBuf}, sync::{Arc, RwLock}, + time::Instant, }, }; @@ -187,44 +190,43 @@ impl LocalCluster { pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self { assert_eq!(config.validator_configs.len(), config.node_stakes.len()); - let connection_cache = match config.tpu_use_quic { - true => { - let client_keypair = Keypair::new(); - let stake = DEFAULT_NODE_STAKE; + let connection_cache = if config.tpu_use_quic { + let client_keypair = Keypair::new(); + let stake = DEFAULT_NODE_STAKE; - for validator_config in config.validator_configs.iter_mut() { - let mut overrides = 
HashMap::new(); - overrides.insert(client_keypair.pubkey(), stake); - validator_config.staked_nodes_overrides = Arc::new(RwLock::new(overrides)); - } - - assert!( - config.tpu_use_quic, - "no support for staked override forwarding without quic" - ); - - let total_stake = config.node_stakes.iter().sum::(); - let stakes = HashMap::from([ - (client_keypair.pubkey(), stake), - (Pubkey::new_unique(), total_stake.saturating_sub(stake)), - ]); - let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( - Arc::new(stakes), - HashMap::::default(), // overrides - ))); - - Arc::new(ConnectionCache::new_with_client_options( - "connection_cache_local_cluster_quic_staked", - config.tpu_connection_pool_size, - None, - Some((&client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))), - Some((&staked_nodes, &client_keypair.pubkey())), - )) + for validator_config in config.validator_configs.iter_mut() { + let mut overrides = HashMap::new(); + overrides.insert(client_keypair.pubkey(), stake); + validator_config.staked_nodes_overrides = Arc::new(RwLock::new(overrides)); } - false => Arc::new(ConnectionCache::with_udp( + + assert!( + config.tpu_use_quic, + "no support for staked override forwarding without quic" + ); + + let total_stake = config.node_stakes.iter().sum::(); + let stakes = HashMap::from([ + (client_keypair.pubkey(), stake), + (Pubkey::new_unique(), total_stake.saturating_sub(stake)), + ]); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( + Arc::new(stakes), + HashMap::::default(), // overrides + ))); + + Arc::new(ConnectionCache::new_with_client_options( + "connection_cache_local_cluster_quic_staked", + config.tpu_connection_pool_size, + None, + Some((&client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))), + Some((&staked_nodes, &client_keypair.pubkey())), + )) + } else { + Arc::new(ConnectionCache::with_udp( "connection_cache_local_cluster_udp", config.tpu_connection_pool_size, - )), + )) }; let mut validator_keys = { @@ -665,6 +667,53 @@ impl LocalCluster { 
info!("{} done waiting for roots", test_name); } + /// Attempt to send and confirm tx "attempts" times + /// Wait for signature confirmation before returning + /// Return the transaction signature + pub fn send_transaction_with_retries( + client: &QuicTpuClient, + keypairs: &T, + transaction: &mut Transaction, + attempts: usize, + pending_confirmations: usize, + ) -> std::result::Result { + for attempt in 0..attempts { + let now = Instant::now(); + let mut num_confirmed = 0; + let mut wait_time = MAX_PROCESSING_AGE; + + while now.elapsed().as_secs() < wait_time as u64 { + if num_confirmed == 0 { + client.send_transaction_to_upcoming_leaders(transaction)?; + } + + if let Ok(confirmed_blocks) = client.rpc_client().poll_for_signature_confirmation( + &transaction.signatures[0], + pending_confirmations, + ) { + num_confirmed = confirmed_blocks; + if confirmed_blocks >= pending_confirmations { + return Ok(transaction.signatures[0]); + } + // Since network has seen the transaction, wait longer to receive + // all pending confirmations. 
Resending the transaction could result into + // extra transaction fees + wait_time = wait_time.max( + MAX_PROCESSING_AGE * pending_confirmations.saturating_sub(num_confirmed), + ); + } + } + info!("{attempt} tries failed transfer"); + let blockhash = client.rpc_client().get_latest_blockhash()?; + transaction.sign(keypairs, blockhash); + } + Err(std::io::Error::new( + std::io::ErrorKind::Other, + "failed to confirm transaction".to_string(), + ) + .into()) + } + fn transfer_with_client( client: &QuicTpuClient, source_keypair: &Keypair, @@ -676,7 +725,7 @@ impl LocalCluster { .rpc_client() .get_latest_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); - let tx = system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); + let mut tx = system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); info!( "executing transfer of {} from {} to {}", lamports, @@ -684,8 +733,7 @@ impl LocalCluster { *dest_pubkey ); - client - .send_transaction_to_upcoming_leaders(&tx) + LocalCluster::send_transaction_with_retries(client, &[source_keypair], &mut tx, 10, 0) .expect("client transfer should succeed"); client .rpc_client() @@ -749,7 +797,7 @@ impl LocalCluster { }, ); let message = Message::new(&instructions, Some(&from_account.pubkey())); - let transaction = Transaction::new( + let mut transaction = Transaction::new( &[from_account.as_ref(), vote_account], message, client @@ -758,9 +806,14 @@ impl LocalCluster { .unwrap() .0, ); - client - .send_transaction_to_upcoming_leaders(&transaction) - .expect("fund vote"); + LocalCluster::send_transaction_with_retries( + client, + &[from_account], + &mut transaction, + 10, + 0, + ) + .expect("should fund vote"); client .rpc_client() .wait_for_balance_with_commitment( @@ -779,7 +832,7 @@ impl LocalCluster { amount, ); let message = Message::new(&instructions, Some(&from_account.pubkey())); - let transaction = Transaction::new( + let mut transaction = Transaction::new( 
&[from_account.as_ref(), &stake_account_keypair], message, client @@ -789,9 +842,14 @@ impl LocalCluster { .0, ); - client - .send_transaction_to_upcoming_leaders(&transaction) - .expect("delegate stake"); + LocalCluster::send_transaction_with_retries( + client, + &[from_account.as_ref(), &stake_account_keypair], + &mut transaction, + 5, + 0, + ) + .expect("should delegate stake"); client .rpc_client() .wait_for_balance_with_commitment( diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index f0613a0d0e5469..8826ca0a17ed8c 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -222,7 +222,7 @@ fn test_local_cluster_signature_subscribe() { .get_latest_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); - let transaction = system_transaction::transfer( + let mut transaction = system_transaction::transfer( &cluster.funding_keypair, &solana_sdk::pubkey::new_rand(), 10, @@ -239,9 +239,14 @@ fn test_local_cluster_signature_subscribe() { ) .unwrap(); - tx_client - .send_transaction_to_upcoming_leaders(&transaction) - .unwrap(); + LocalCluster::send_transaction_with_retries( + &tx_client, + &[&cluster.funding_keypair], + &mut transaction, + 5, + 0, + ) + .unwrap(); let mut got_received_notification = false; loop { @@ -1788,12 +1793,9 @@ fn test_validator_saves_tower() { // Wait for the first new root let last_replayed_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!("current root: {}", root); if root > 0 { @@ -1820,12 +1822,9 @@ fn test_validator_saves_tower() { // Wait for a new root, demonstrating the validator was able to make progress from the older `tower1` let 
new_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!( "current root: {}, last_replayed_root: {}", @@ -1856,12 +1855,9 @@ fn test_validator_saves_tower() { // Wait for another new root let new_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!("current root: {}, last tower root: {}", root, tower3_root); if root > tower3_root { @@ -2669,6 +2665,7 @@ fn test_oc_bad_signatures() { // 3) Start up a spy to listen for and push votes to leader TPU let client = cluster.build_tpu_quic_client().unwrap(); + let cluster_funding_keypair = cluster.funding_keypair.insecure_clone(); let voter_thread_sleep_ms: usize = 100; let num_votes_simulated = Arc::new(AtomicUsize::new(0)); let gossip_voter = cluster_tests::start_gossip_voter( @@ -2703,7 +2700,7 @@ fn test_oc_bad_signatures() { let vote_slots: Vec = vec![vote_slot]; let bad_authorized_signer_keypair = Keypair::new(); - let vote_tx = vote_transaction::new_vote_transaction( + let mut vote_tx = vote_transaction::new_vote_transaction( vote_slots, vote_hash, leader_vote_tx.message.recent_blockhash, @@ -2713,9 +2710,14 @@ fn test_oc_bad_signatures() { &bad_authorized_signer_keypair, None, ); - client - .send_transaction_to_upcoming_leaders(&vote_tx) - .unwrap(); + LocalCluster::send_transaction_with_retries( + &client, + &[&cluster_funding_keypair], + &mut vote_tx, + 5, + 0, + ) + .unwrap(); 
num_votes_simulated.fetch_add(1, Ordering::Relaxed); } diff --git a/net/gce.sh b/net/gce.sh index 058dcdcf0e7590..2c48cb7132f4a8 100755 --- a/net/gce.sh +++ b/net/gce.sh @@ -805,6 +805,7 @@ $( install-certbot.sh \ install-earlyoom.sh \ install-iftop.sh \ + install-jq.sh \ install-libssl-compatability.sh \ install-rsync.sh \ install-perf.sh \ diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index 71378019730f05..882f7891702cde 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -263,7 +263,7 @@ EOF agave-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version if [[ -n "$maybeWaitForSupermajority" ]]; then - bankHash=$(agave-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) + bankHash=$(agave-ledger-tool -l config/bootstrap-validator verify --halt-at-slot 0 --print-bank-hash --output json | jq -r ".hash") shredVersion="$(cat "$SOLANA_CONFIG_DIR"/shred-version)" extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash --expected-shred-version $shredVersion" echo "$bankHash" > config/bank-hash diff --git a/net/scripts/install-jq.sh b/net/scripts/install-jq.sh new file mode 100755 index 00000000000000..9d7c837c8dd9f1 --- /dev/null +++ b/net/scripts/install-jq.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# +# jq setup +# +set -ex + +[[ $(uname) = Linux ]] || exit 1 +[[ $USER = root ]] || exit 1 + +apt-get --assume-yes install jq diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index d84114e90213f3..8c8a1010051105 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -28,6 +28,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-measure = { workspace = true } # solana-metrics = { workspace = true } solana-sdk = { workspace = true } +solana-type-overrides = { workspace = true } solana-vote = { workspace = true } solana_rbpf = { workspace = true } thiserror = { workspace = true 
} @@ -55,3 +56,4 @@ frozen-abi = [ "solana-compute-budget/frozen-abi", "solana-sdk/frozen-abi", ] +shuttle-test = ["solana-type-overrides/shuttle-test"] diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 293214a870f13a..05404aa51c2c98 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -36,13 +36,13 @@ use { IndexOfAccount, InstructionAccount, TransactionAccount, TransactionContext, }, }, + solana_type_overrides::sync::{atomic::Ordering, Arc}, solana_vote::vote_account::VoteAccountsHashMap, std::{ alloc::Layout, cell::RefCell, fmt::{self, Debug}, rc::Rc, - sync::{atomic::Ordering, Arc}, }, }; @@ -193,7 +193,7 @@ pub struct InvokeContext<'a> { /// Information about the currently executing transaction. pub transaction_context: &'a mut TransactionContext, /// The local program cache for the transaction batch. - pub program_cache_for_tx_batch: &'a ProgramCacheForTxBatch, + pub program_cache_for_tx_batch: &'a mut ProgramCacheForTxBatch, /// Runtime configurations used to provision the invocation environment. pub environment_config: EnvironmentConfig<'a>, /// The compute budget for the current invocation. @@ -202,7 +202,6 @@ pub struct InvokeContext<'a> { /// the designated compute budget during program execution. 
compute_meter: RefCell, log_collector: Option>>, - pub programs_modified_by_tx: &'a mut ProgramCacheForTxBatch, /// Latest measurement not yet accumulated in [ExecuteDetailsTimings::execute_us] pub execute_time: Option, pub timings: ExecuteDetailsTimings, @@ -214,11 +213,10 @@ impl<'a> InvokeContext<'a> { #[allow(clippy::too_many_arguments)] pub fn new( transaction_context: &'a mut TransactionContext, - program_cache_for_tx_batch: &'a ProgramCacheForTxBatch, + program_cache_for_tx_batch: &'a mut ProgramCacheForTxBatch, environment_config: EnvironmentConfig<'a>, log_collector: Option>>, compute_budget: ComputeBudget, - programs_modified_by_tx: &'a mut ProgramCacheForTxBatch, ) -> Self { Self { transaction_context, @@ -227,7 +225,6 @@ impl<'a> InvokeContext<'a> { log_collector, compute_budget, compute_meter: RefCell::new(compute_budget.compute_unit_limit), - programs_modified_by_tx, execute_time: None, timings: ExecuteDetailsTimings::default(), syscall_context: Vec::new(), @@ -235,14 +232,6 @@ impl<'a> InvokeContext<'a> { } } - pub fn find_program_in_cache(&self, pubkey: &Pubkey) -> Option> { - // First lookup the cache of the programs modified by the current transaction. If not found, lookup - // the cache of the cache of the programs that are loaded for the transaction batch. - self.programs_modified_by_tx - .find(pubkey) - .or_else(|| self.program_cache_for_tx_batch.find(pubkey)) - } - pub fn get_environments_for_slot( &self, effective_slot: Slot, @@ -692,7 +681,7 @@ macro_rules! with_mock_invoke_context { account::ReadableAccount, feature_set::FeatureSet, hash::Hash, sysvar::rent::Rent, transaction_context::TransactionContext, }, - std::sync::Arc, + solana_type_overrides::sync::Arc, $crate::{ invoke_context::{EnvironmentConfig, InvokeContext}, loaded_programs::ProgramCacheForTxBatch, @@ -704,7 +693,7 @@ macro_rules! 
with_mock_invoke_context { let mut $transaction_context = TransactionContext::new( $transaction_accounts, Rent::default(), - compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); let mut sysvar_cache = SysvarCache::default(); @@ -733,15 +722,13 @@ macro_rules! with_mock_invoke_context { 0, &sysvar_cache, ); - let program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); + let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); let mut $invoke_context = InvokeContext::new( &mut $transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, Some(LogCollector::new_ref()), compute_budget, - &mut programs_modified_by_tx, ); }; } @@ -802,7 +789,7 @@ pub fn mock_process_instruction>, + loading_entries: Mutex>, }, } @@ -663,6 +666,8 @@ pub struct ProgramCacheForTxBatch { /// Pubkey is the address of a program. /// ProgramCacheEntry is the corresponding program entry valid for the slot in which a transaction is being executed. entries: HashMap>, + /// Program entries modified during the transaction batch. + modified_entries: HashMap>, slot: Slot, pub environments: ProgramRuntimeEnvironments, /// Anticipated replacement for `environments` at the next epoch. 
@@ -689,6 +694,7 @@ impl ProgramCacheForTxBatch { ) -> Self { Self { entries: HashMap::new(), + modified_entries: HashMap::new(), slot, environments, upcoming_environments, @@ -706,6 +712,7 @@ impl ProgramCacheForTxBatch { ) -> Self { Self { entries: HashMap::new(), + modified_entries: HashMap::new(), slot, environments: cache.get_environments_for_epoch(epoch), upcoming_environments: cache.get_upcoming_environments_for_epoch(epoch), @@ -716,14 +723,6 @@ impl ProgramCacheForTxBatch { } } - pub fn entries(&self) -> &HashMap> { - &self.entries - } - - pub fn take_entries(&mut self) -> HashMap> { - std::mem::take(&mut self.entries) - } - /// Returns the current environments depending on the given epoch pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments { if epoch != self.latest_root_epoch { @@ -747,21 +746,39 @@ impl ProgramCacheForTxBatch { (self.entries.insert(key, entry.clone()).is_some(), entry) } + /// Store an entry in `modified_entries` for a program modified during the + /// transaction batch. + pub fn store_modified_entry(&mut self, key: Pubkey, entry: Arc) { + self.modified_entries.insert(key, entry); + } + + /// Drain the program cache's modified entries, returning the owned + /// collection. + pub fn drain_modified_entries(&mut self) -> HashMap> { + std::mem::take(&mut self.modified_entries) + } + pub fn find(&self, key: &Pubkey) -> Option> { - self.entries.get(key).map(|entry| { - if entry.is_implicit_delay_visibility_tombstone(self.slot) { - // Found a program entry on the current fork, but it's not effective - // yet. It indicates that the program has delayed visibility. Return - // the tombstone to reflect that. - Arc::new(ProgramCacheEntry::new_tombstone( - entry.deployment_slot, - entry.account_owner, - ProgramCacheEntryType::DelayVisibility, - )) - } else { - entry.clone() - } - }) + // First lookup the cache of the programs modified by the current + // transaction. 
If not found, lookup the cache of the cache of the + // programs that are loaded for the transaction batch. + self.modified_entries + .get(key) + .or(self.entries.get(key)) + .map(|entry| { + if entry.is_implicit_delay_visibility_tombstone(self.slot) { + // Found a program entry on the current fork, but it's not effective + // yet. It indicates that the program has delayed visibility. Return + // the tombstone to reflect that. + Arc::new(ProgramCacheEntry::new_tombstone( + entry.deployment_slot, + entry.account_owner, + ProgramCacheEntryType::DelayVisibility, + )) + } else { + entry.clone() + } + }) } pub fn slot(&self) -> Slot { @@ -1108,7 +1125,7 @@ impl ProgramCache { if let Entry::Vacant(entry) = entry { entry.insert(( loaded_programs_for_tx_batch.slot, - std::thread::current().id(), + thread::current().id(), )); cooperative_loading_task = Some((*key, *usage_count)); } @@ -1142,7 +1159,7 @@ impl ProgramCache { loading_entries, .. } => { let loading_thread = loading_entries.get_mut().unwrap().remove(&key); - debug_assert_eq!(loading_thread, Some((slot, std::thread::current().id()))); + debug_assert_eq!(loading_thread, Some((slot, thread::current().id()))); // Check that it will be visible to our own fork once inserted if loaded_program.deployment_slot > self.latest_root_slot && !matches!( diff --git a/program-runtime/src/mem_pool.rs b/program-runtime/src/mem_pool.rs new file mode 100644 index 00000000000000..398de5bfa64c57 --- /dev/null +++ b/program-runtime/src/mem_pool.rs @@ -0,0 +1,146 @@ +use { + solana_compute_budget::{ + compute_budget::{MAX_CALL_DEPTH, MAX_INSTRUCTION_STACK_DEPTH, STACK_FRAME_SIZE}, + compute_budget_processor::{MAX_HEAP_FRAME_BYTES, MIN_HEAP_FRAME_BYTES}, + }, + solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, + std::array, +}; + +trait Reset { + fn reset(&mut self); +} + +struct Pool { + items: [Option; SIZE], + next_empty: usize, +} + +impl Pool { + fn new(items: [T; SIZE]) -> Self { + Self { + items: items.map(|i| 
Some(i)), + next_empty: SIZE, + } + } + + fn len(&self) -> usize { + SIZE + } + + fn get(&mut self) -> Option { + if self.next_empty == 0 { + return None; + } + self.next_empty = self.next_empty.saturating_sub(1); + self.items + .get_mut(self.next_empty) + .and_then(|item| item.take()) + } + + fn put(&mut self, mut value: T) -> bool { + self.items + .get_mut(self.next_empty) + .map(|item| { + value.reset(); + item.replace(value); + self.next_empty = self.next_empty.saturating_add(1); + true + }) + .unwrap_or(false) + } +} + +impl Reset for AlignedMemory<{ HOST_ALIGN }> { + fn reset(&mut self) { + self.as_slice_mut().fill(0) + } +} + +pub struct VmMemoryPool { + stack: Pool, MAX_INSTRUCTION_STACK_DEPTH>, + heap: Pool, MAX_INSTRUCTION_STACK_DEPTH>, +} + +impl VmMemoryPool { + pub fn new() -> Self { + Self { + stack: Pool::new(array::from_fn(|_| { + AlignedMemory::zero_filled(STACK_FRAME_SIZE * MAX_CALL_DEPTH) + })), + heap: Pool::new(array::from_fn(|_| { + AlignedMemory::zero_filled(MAX_HEAP_FRAME_BYTES as usize) + })), + } + } + + pub fn stack_len(&self) -> usize { + self.stack.len() + } + + pub fn heap_len(&self) -> usize { + self.heap.len() + } + + pub fn get_stack(&mut self, size: usize) -> AlignedMemory<{ HOST_ALIGN }> { + debug_assert!(size == STACK_FRAME_SIZE * MAX_CALL_DEPTH); + self.stack + .get() + .unwrap_or_else(|| AlignedMemory::zero_filled(size)) + } + + pub fn put_stack(&mut self, stack: AlignedMemory<{ HOST_ALIGN }>) -> bool { + self.stack.put(stack) + } + + pub fn get_heap(&mut self, heap_size: u32) -> AlignedMemory<{ HOST_ALIGN }> { + debug_assert!((MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&heap_size)); + self.heap + .get() + .unwrap_or_else(|| AlignedMemory::zero_filled(MAX_HEAP_FRAME_BYTES as usize)) + } + + pub fn put_heap(&mut self, heap: AlignedMemory<{ HOST_ALIGN }>) -> bool { + let heap_size = heap.len(); + debug_assert!( + heap_size >= MIN_HEAP_FRAME_BYTES as usize + && heap_size <= MAX_HEAP_FRAME_BYTES as usize + ); + 
self.heap.put(heap) + } +} + +impl Default for VmMemoryPool { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[derive(Debug, Eq, PartialEq)] + struct Item(u8, u8); + impl Reset for Item { + fn reset(&mut self) { + self.1 = 0; + } + } + + #[test] + fn test_pool() { + let mut pool = Pool::::new([Item(0, 1), Item(1, 1)]); + assert_eq!(pool.get(), Some(Item(1, 1))); + assert_eq!(pool.get(), Some(Item(0, 1))); + assert_eq!(pool.get(), None); + pool.put(Item(1, 1)); + assert_eq!(pool.get(), Some(Item(1, 0))); + pool.put(Item(2, 2)); + pool.put(Item(3, 3)); + assert!(!pool.put(Item(4, 4))); + assert_eq!(pool.get(), Some(Item(3, 0))); + assert_eq!(pool.get(), Some(Item(2, 0))); + assert_eq!(pool.get(), None); + } +} diff --git a/program-runtime/src/sysvar_cache.rs b/program-runtime/src/sysvar_cache.rs index 313faec6e11b02..1a270484410531 100644 --- a/program-runtime/src/sysvar_cache.rs +++ b/program-runtime/src/sysvar_cache.rs @@ -13,7 +13,7 @@ use { }, transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, }, - std::sync::Arc, + solana_type_overrides::sync::Arc, }; #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index f1966ba00151e0..9ffc4702178676 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -53,6 +53,7 @@ pub enum ExecuteTimingType { TotalBatchesLen, UpdateTransactionStatuses, ProgramCacheUs, + CheckBlockLimitsUs, } pub struct Metrics([u64; ExecuteTimingType::CARDINALITY]); @@ -88,7 +89,7 @@ impl core::fmt::Debug for Metrics { eager_macro_rules! { $eager_1 #[macro_export] macro_rules! report_execute_timings { - ($self: expr) => { + ($self: expr, $is_unified_scheduler_enabled: expr) => { ( "validate_transactions_us", *$self @@ -149,19 +150,25 @@ eager_macro_rules! 
{ $eager_1 ), ( "total_batches_len", - *$self - - .metrics - .index(ExecuteTimingType::TotalBatchesLen), - i64 + (if $is_unified_scheduler_enabled { + None + } else { + Some(*$self + .metrics + .index(ExecuteTimingType::TotalBatchesLen)) + }), + Option ), ( "num_execute_batches", - *$self - - .metrics - .index(ExecuteTimingType::NumExecuteBatches), - i64 + (if $is_unified_scheduler_enabled { + None + } else { + Some(*$self + .metrics + .index(ExecuteTimingType::NumExecuteBatches)) + }), + Option ), ( "update_transaction_statuses", @@ -171,6 +178,11 @@ eager_macro_rules! { $eager_1 .index(ExecuteTimingType::UpdateTransactionStatuses), i64 ), + ( + "check_block_limits_us", + *$self.metrics.index(ExecuteTimingType::CheckBlockLimitsUs), + i64 + ), ( "execute_details_serialize_us", $self.details.serialize_us, @@ -235,13 +247,6 @@ eager_macro_rules! { $eager_1 .feature_set_clone_us, i64 ), - ( - "execute_accessories_compute_budget_process_transaction_us", - $self - .execute_accessories - .compute_budget_process_transaction_us, - i64 - ), ( "execute_accessories_get_executors_us", $self.execute_accessories.get_executors_us, @@ -342,7 +347,6 @@ impl ExecuteProcessInstructionTimings { #[derive(Default, Debug)] pub struct ExecuteAccessoryTimings { pub feature_set_clone_us: u64, - pub compute_budget_process_transaction_us: u64, pub get_executors_us: u64, pub process_message_us: u64, pub update_executors_us: u64, @@ -352,10 +356,6 @@ pub struct ExecuteAccessoryTimings { impl ExecuteAccessoryTimings { pub fn accumulate(&mut self, other: &ExecuteAccessoryTimings) { saturating_add_assign!(self.feature_set_clone_us, other.feature_set_clone_us); - saturating_add_assign!( - self.compute_budget_process_transaction_us, - other.compute_budget_process_transaction_us - ); saturating_add_assign!(self.get_executors_us, other.get_executors_us); saturating_add_assign!(self.process_message_us, other.process_message_us); saturating_add_assign!(self.update_executors_us, 
other.update_executors_us); diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 37453f986b9519..dff52faea8cb51 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -34,7 +34,7 @@ use { clock::{Epoch, Slot}, entrypoint::{deserialize, ProgramResult, SUCCESS}, feature_set::FEATURE_NAMES, - fee_calculator::{FeeCalculator, FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, + fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, genesis_config::{ClusterType, GenesisConfig}, hash::Hash, instruction::{Instruction, InstructionError}, @@ -545,13 +545,6 @@ impl ProgramTest { self.transaction_account_lock_limit = Some(transaction_account_lock_limit); } - /// Override the SBF compute budget - #[allow(deprecated)] - #[deprecated(since = "1.8.0", note = "please use `set_compute_max_units` instead")] - pub fn set_bpf_compute_max_units(&mut self, bpf_compute_max_units: u64) { - self.set_compute_max_units(bpf_compute_max_units); - } - /// Add an account to the test environment's genesis config. 
pub fn add_genesis_account(&mut self, address: Pubkey, account: Account) { self.genesis_accounts @@ -972,47 +965,12 @@ impl ProgramTest { #[async_trait] pub trait ProgramTestBanksClientExt { - /// Get a new blockhash, similar in spirit to RpcClient::get_new_blockhash() - /// - /// This probably should eventually be moved into BanksClient proper in some form - #[deprecated( - since = "1.9.0", - note = "Please use `get_new_latest_blockhash `instead" - )] - async fn get_new_blockhash(&mut self, blockhash: &Hash) -> io::Result<(Hash, FeeCalculator)>; /// Get a new latest blockhash, similar in spirit to RpcClient::get_latest_blockhash() async fn get_new_latest_blockhash(&mut self, blockhash: &Hash) -> io::Result; } #[async_trait] impl ProgramTestBanksClientExt for BanksClient { - async fn get_new_blockhash(&mut self, blockhash: &Hash) -> io::Result<(Hash, FeeCalculator)> { - let mut num_retries = 0; - let start = Instant::now(); - while start.elapsed().as_secs() < 5 { - #[allow(deprecated)] - if let Ok((fee_calculator, new_blockhash, _slot)) = self.get_fees().await { - if new_blockhash != *blockhash { - return Ok((new_blockhash, fee_calculator)); - } - } - debug!("Got same blockhash ({:?}), will retry...", blockhash); - - tokio::time::sleep(Duration::from_millis(200)).await; - num_retries += 1; - } - - Err(io::Error::new( - io::ErrorKind::Other, - format!( - "Unable to get new blockhash after {}ms (retried {} times), stuck at {}", - start.elapsed().as_millis(), - num_retries, - blockhash - ), - )) - } - async fn get_new_latest_blockhash(&mut self, blockhash: &Hash) -> io::Result { let mut num_retries = 0; let start = Instant::now(); diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index 737c35e4c4b2f4..e146dd184b5385 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -3,13 +3,3 @@ #[cfg(not(target_os = "solana"))] pub mod processor; - -#[deprecated( - since = "1.17.0", - 
note = "Please use `solana_program::address_lookup_table` instead" -)] -pub use solana_program::address_lookup_table::{ - error, instruction, - program::{check_id, id, ID}, - state, -}; diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index aac0f10e7cd57e..148c0c92333995 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -16,11 +16,12 @@ libsecp256k1 = { workspace = true } log = { workspace = true } scopeguard = { workspace = true } solana-compute-budget = { workspace = true } +solana-curve25519 = { workspace = true } solana-measure = { workspace = true } solana-poseidon = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } -solana-zk-token-sdk = { workspace = true } +solana-type-overrides = { workspace = true } solana_rbpf = { workspace = true } thiserror = { workspace = true } @@ -38,3 +39,6 @@ name = "solana_bpf_loader_program" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[features] +shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test"] diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 8f14544ffcf0b3..2c2b16245c88ce 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -5,6 +5,7 @@ pub mod serialization; pub mod syscalls; use { + solana_compute_budget::compute_budget::MAX_INSTRUCTION_STACK_DEPTH, solana_measure::measure::Measure, solana_program_runtime::{ ic_logger_msg, ic_msg, @@ -14,13 +15,13 @@ use { DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, + mem_pool::VmMemoryPool, stable_log, sysvar_cache::get_sysvar_with_account_check, }, solana_rbpf::{ - aligned_memory::AlignedMemory, declare_builtin_function, - ebpf::{self, HOST_ALIGN, MM_HEAP_START}, + ebpf::{self, MM_HEAP_START}, elf::Executable, error::{EbpfError, ProgramResult}, memory_region::{AccessType, MemoryCowCallback, MemoryMapping, MemoryRegion}, @@ -46,12 
+47,8 @@ use { system_instruction::{self, MAX_PERMITTED_DATA_LENGTH}, transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, }, - std::{ - cell::RefCell, - mem, - rc::Rc, - sync::{atomic::Ordering, Arc}, - }, + solana_type_overrides::sync::{atomic::Ordering, Arc}, + std::{cell::RefCell, mem, rc::Rc}, syscalls::{create_program_runtime_environment_v1, morph_into_deployment_environment_v1}, }; @@ -59,6 +56,10 @@ pub const DEFAULT_LOADER_COMPUTE_UNITS: u64 = 570; pub const DEPRECATED_LOADER_COMPUTE_UNITS: u64 = 1_140; pub const UPGRADEABLE_LOADER_COMPUTE_UNITS: u64 = 2_370; +thread_local! { + pub static MEMORY_POOL: RefCell = RefCell::new(VmMemoryPool::new()); +} + #[allow(clippy::too_many_arguments)] pub fn load_program_from_bytes( log_collector: Option>>, @@ -152,7 +153,7 @@ macro_rules! deploy_program { environments.program_runtime_v1.clone(), true, )?; - if let Some(old_entry) = $invoke_context.find_program_in_cache(&$program_id) { + if let Some(old_entry) = $invoke_context.program_cache_for_tx_batch.find(&$program_id) { executor.tx_usage_counter.store( old_entry.tx_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed @@ -165,7 +166,7 @@ macro_rules! deploy_program { $drop load_program_metrics.program_id = $program_id.to_string(); load_program_metrics.submit_datapoint(&mut $invoke_context.timings); - $invoke_context.programs_modified_by_tx.replenish($program_id, Arc::new(executor)); + $invoke_context.program_cache_for_tx_batch.store_modified_entry($program_id, Arc::new(executor)); }}; } @@ -244,8 +245,8 @@ pub fn create_vm<'a, 'b>( regions: Vec, accounts_metadata: Vec, invoke_context: &'a mut InvokeContext<'b>, - stack: &mut AlignedMemory, - heap: &mut AlignedMemory, + stack: &mut [u8], + heap: &mut [u8], ) -> Result>, Box> { let stack_size = stack.len(); let heap_size = heap.len(); @@ -299,24 +300,23 @@ macro_rules! 
create_vm { heap_size, invoke_context.get_compute_budget().heap_cost, )); - let mut allocations = None; let $vm = heap_cost_result.and_then(|_| { - let mut stack = solana_rbpf::aligned_memory::AlignedMemory::< - { solana_rbpf::ebpf::HOST_ALIGN }, - >::zero_filled(stack_size); - let mut heap = solana_rbpf::aligned_memory::AlignedMemory::< - { solana_rbpf::ebpf::HOST_ALIGN }, - >::zero_filled(usize::try_from(heap_size).unwrap()); + let (mut stack, mut heap) = $crate::MEMORY_POOL + .with_borrow_mut(|pool| (pool.get_stack(stack_size), pool.get_heap(heap_size))); let vm = $crate::create_vm( $program, $regions, $accounts_metadata, $invoke_context, - &mut stack, - &mut heap, + stack + .as_slice_mut() + .get_mut(..stack_size) + .expect("invalid stack size"), + heap.as_slice_mut() + .get_mut(..heap_size as usize) + .expect("invalid heap size"), ); - allocations = Some((stack, heap)); - vm + vm.map(|vm| (vm, stack, heap)) }); }; } @@ -324,7 +324,7 @@ macro_rules! create_vm { #[macro_export] macro_rules! mock_create_vm { ($vm:ident, $additional_regions:expr, $accounts_metadata:expr, $invoke_context:expr $(,)?) => { - let loader = std::sync::Arc::new(BuiltinProgram::new_mock()); + let loader = solana_type_overrides::sync::Arc::new(BuiltinProgram::new_mock()); let function_registry = solana_rbpf::program::FunctionRegistry::default(); let executable = solana_rbpf::elf::Executable::::from_text_bytes( &[0x95, 0, 0, 0, 0, 0, 0, 0], @@ -343,13 +343,14 @@ macro_rules! 
mock_create_vm { $accounts_metadata, $invoke_context, ); + let $vm = $vm.map(|(vm, _, _)| vm); }; } fn create_memory_mapping<'a, 'b, C: ContextObject>( executable: &'a Executable, - stack: &'b mut AlignedMemory<{ HOST_ALIGN }>, - heap: &'b mut AlignedMemory<{ HOST_ALIGN }>, + stack: &'b mut [u8], + heap: &'b mut [u8], additional_regions: Vec, cow_cb: Option, ) -> Result, Box> { @@ -358,7 +359,7 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>( let regions: Vec = vec![ executable.get_ro_region(), MemoryRegion::new_writable_gapped( - stack.as_slice_mut(), + stack, ebpf::MM_STACK_START, if !sbpf_version.dynamic_stack_frames() && config.enable_stack_frame_gaps { config.stack_frame_size as u64 @@ -366,7 +367,7 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>( 0 }, ), - MemoryRegion::new_writable(heap.as_slice_mut(), MM_HEAP_START), + MemoryRegion::new_writable(heap, MM_HEAP_START), ] .into_iter() .chain(additional_regions) @@ -437,7 +438,8 @@ pub fn process_instruction_inner( let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let executor = invoke_context - .find_program_in_cache(program_account.get_key()) + .program_cache_for_tx_batch + .find(program_account.get_key()) .ok_or_else(|| { ic_logger_msg!(log_collector, "Program is not cached"); InstructionError::InvalidAccountData @@ -1109,14 +1111,16 @@ fn process_loader_upgradeable_instruction( &log_collector, )?; let clock = invoke_context.get_sysvar_cache().get_clock()?; - invoke_context.programs_modified_by_tx.replenish( - program_key, - Arc::new(ProgramCacheEntry::new_tombstone( - clock.slot, - ProgramCacheEntryOwner::LoaderV3, - ProgramCacheEntryType::Closed, - )), - ); + invoke_context + .program_cache_for_tx_batch + .store_modified_entry( + program_key, + Arc::new(ProgramCacheEntry::new_tombstone( + clock.slot, + ProgramCacheEntryOwner::LoaderV3, + ProgramCacheEntryType::Closed, + )), + ); } _ => { ic_logger_msg!(log_collector, "Invalid Program account"); @@ -1388,7 
+1392,7 @@ fn execute<'a, 'b: 'a>( let execution_result = { let compute_meter_prev = invoke_context.get_remaining(); create_vm!(vm, executable, regions, accounts_metadata, invoke_context); - let mut vm = match vm { + let (mut vm, stack, heap) = match vm { Ok(info) => info, Err(e) => { ic_logger_msg!(log_collector, "Failed to create SBF VM: {}", e); @@ -1399,6 +1403,12 @@ fn execute<'a, 'b: 'a>( vm.context_object_pointer.execute_time = Some(Measure::start("execute")); let (compute_units_consumed, result) = vm.execute_program(executable, !use_jit); + MEMORY_POOL.with_borrow_mut(|memory_pool| { + memory_pool.put_stack(stack); + memory_pool.put_heap(heap); + debug_assert!(memory_pool.stack_len() <= MAX_INSTRUCTION_STACK_DEPTH); + debug_assert!(memory_pool.heap_len() <= MAX_INSTRUCTION_STACK_DEPTH); + }); drop(vm); if let Some(execute_time) = invoke_context.execute_time.as_mut() { execute_time.stop(); @@ -1543,11 +1553,11 @@ pub mod test_utils { false, ) { invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .set_slot_for_tests(DELAY_VISIBILITY_SLOT_OFFSET); invoke_context - .programs_modified_by_tx - .replenish(*pubkey, Arc::new(loaded_program)); + .program_cache_for_tx_batch + .store_modified_entry(*pubkey, Arc::new(loaded_program)); } } } @@ -3763,7 +3773,7 @@ mod tests { latest_access_slot: AtomicU64::new(0), }; invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .replenish(program_id, Arc::new(program)); assert_matches!( @@ -3772,7 +3782,7 @@ mod tests { ); let updated_program = invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .find(&program_id) .expect("Didn't find upgraded program in the cache"); @@ -3807,7 +3817,7 @@ mod tests { latest_access_slot: AtomicU64::new(0), }; invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .replenish(program_id, Arc::new(program)); let program_id2 = Pubkey::new_unique(); @@ -3817,7 +3827,7 @@ mod tests { ); let program2 = invoke_context - 
.programs_modified_by_tx + .program_cache_for_tx_batch .find(&program_id2) .expect("Didn't find upgraded program in the cache"); diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 71d5736f895e70..94046f5f741560 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -925,7 +925,7 @@ where // account (caller_account). We need to update the corresponding // BorrowedAccount (callee_account) so the callee can see the // changes. - update_callee_account( + let update_caller = update_callee_account( invoke_context, memory_mapping, is_loader_deprecated, @@ -934,7 +934,7 @@ where direct_mapping, )?; - let caller_account = if instruction_account.is_writable { + let caller_account = if instruction_account.is_writable || update_caller { Some(caller_account) } else { None @@ -1173,6 +1173,9 @@ fn cpi_common( // // This method updates callee_account so the CPI callee can see the caller's // changes. +// +// When true is returned, the caller account must be updated after CPI. This +// is only set for direct mapping when the pointer may have changed. 
fn update_callee_account( invoke_context: &InvokeContext, memory_mapping: &MemoryMapping, @@ -1180,7 +1183,9 @@ fn update_callee_account( caller_account: &CallerAccount, mut callee_account: BorrowedAccount<'_>, direct_mapping: bool, -) -> Result<(), Error> { +) -> Result { + let mut must_update_caller = false; + if callee_account.get_lamports() != *caller_account.lamports { callee_account.set_lamports(*caller_account.lamports)?; } @@ -1198,7 +1203,11 @@ fn update_callee_account( if is_loader_deprecated && realloc_bytes_used > 0 { return Err(InstructionError::InvalidRealloc.into()); } - callee_account.set_data_length(post_len)?; + if prev_len != post_len { + callee_account.set_data_length(post_len)?; + // pointer to data may have changed, so caller must be updated + must_update_caller = true; + } if realloc_bytes_used > 0 { let serialized_data = translate_slice::( memory_mapping, @@ -1239,7 +1248,7 @@ fn update_callee_account( callee_account.set_owner(caller_account.owner.as_ref())?; } - Ok(()) + Ok(must_update_caller) } fn update_caller_account_perms( diff --git a/programs/bpf_loader/src/syscalls/logging.rs b/programs/bpf_loader/src/syscalls/logging.rs index 1555053f87d7ae..522145b1d71408 100644 --- a/programs/bpf_loader/src/syscalls/logging.rs +++ b/programs/bpf_loader/src/syscalls/logging.rs @@ -80,7 +80,7 @@ declare_builtin_function!( ); declare_builtin_function!( - /// Log 5 64-bit values + /// Log a [`Pubkey`] as a base58 string SyscallLogPubkey, fn rust( invoke_context: &mut InvokeContext, diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index ddf655f48d4225..e982728e398e50 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -58,12 +58,12 @@ use { sysvar::{Sysvar, SysvarId}, transaction_context::{IndexOfAccount, InstructionAccount}, }, + solana_type_overrides::sync::Arc, std::{ alloc::Layout, mem::{align_of, size_of}, slice::from_raw_parts_mut, str::{from_utf8, 
Utf8Error}, - sync::Arc, }, thiserror::Error as ThisError, }; @@ -902,7 +902,7 @@ declare_builtin_function!( _arg5: u64, memory_mapping: &mut MemoryMapping, ) -> Result { - use solana_zk_token_sdk::curve25519::{curve_syscall_traits::*, edwards, ristretto}; + use solana_curve25519::{curve_syscall_traits::*, edwards, ristretto}; match curve_id { CURVE25519_EDWARDS => { let cost = invoke_context @@ -968,9 +968,7 @@ declare_builtin_function!( result_point_addr: u64, memory_mapping: &mut MemoryMapping, ) -> Result { - use solana_zk_token_sdk::curve25519::{ - curve_syscall_traits::*, edwards, ristretto, scalar, - }; + use solana_curve25519::{curve_syscall_traits::*, edwards, ristretto, scalar}; match curve_id { CURVE25519_EDWARDS => match group_op { ADD => { @@ -1196,9 +1194,7 @@ declare_builtin_function!( result_point_addr: u64, memory_mapping: &mut MemoryMapping, ) -> Result { - use solana_zk_token_sdk::curve25519::{ - curve_syscall_traits::*, edwards, ristretto, scalar, - }; + use solana_curve25519::{curve_syscall_traits::*, edwards, ristretto, scalar}; if points_len > 512 { return Err(Box::new(SyscallError::InvalidLength)); @@ -2766,7 +2762,7 @@ mod tests { #[test] fn test_syscall_edwards_curve_point_validation() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::CURVE25519_EDWARDS; + use solana_curve25519::curve_syscall_traits::CURVE25519_EDWARDS; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); @@ -2839,7 +2835,7 @@ mod tests { #[test] fn test_syscall_ristretto_curve_point_validation() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::CURVE25519_RISTRETTO; + use solana_curve25519::curve_syscall_traits::CURVE25519_RISTRETTO; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); @@ -2912,9 +2908,7 @@ mod tests { #[test] fn test_syscall_edwards_curve_group_ops() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::{ - ADD, CURVE25519_EDWARDS, MUL, 
SUB, - }; + use solana_curve25519::curve_syscall_traits::{ADD, CURVE25519_EDWARDS, MUL, SUB}; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); @@ -3069,9 +3063,7 @@ mod tests { #[test] fn test_syscall_ristretto_curve_group_ops() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::{ - ADD, CURVE25519_RISTRETTO, MUL, SUB, - }; + use solana_curve25519::curve_syscall_traits::{ADD, CURVE25519_RISTRETTO, MUL, SUB}; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); @@ -3228,9 +3220,7 @@ mod tests { #[test] fn test_syscall_multiscalar_multiplication() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::{ - CURVE25519_EDWARDS, CURVE25519_RISTRETTO, - }; + use solana_curve25519::curve_syscall_traits::{CURVE25519_EDWARDS, CURVE25519_RISTRETTO}; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); @@ -3336,9 +3326,7 @@ mod tests { #[test] fn test_syscall_multiscalar_multiplication_maximum_length_exceeded() { - use solana_zk_token_sdk::curve25519::curve_syscall_traits::{ - CURVE25519_EDWARDS, CURVE25519_RISTRETTO, - }; + use solana_curve25519::curve_syscall_traits::{CURVE25519_EDWARDS, CURVE25519_RISTRETTO}; let config = Config::default(); prepare_mockup!(invoke_context, program_id, bpf_loader::id()); diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index da7d5c10c72a51..79c078f29785b5 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -14,6 +14,7 @@ solana-compute-budget = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-type-overrides = { workspace = true } solana_rbpf = { workspace = true } [dev-dependencies] @@ -25,3 +26,6 @@ name = "solana_loader_v4_program" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[features] +shuttle-test = 
["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test"] diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index b564e84c43a9d4..161f72a478cc07 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -30,11 +30,8 @@ use { saturating_add_assign, transaction_context::{BorrowedAccount, InstructionContext}, }, - std::{ - cell::RefCell, - rc::Rc, - sync::{atomic::Ordering, Arc}, - }, + solana_type_overrides::sync::{atomic::Ordering, Arc}, + std::{cell::RefCell, rc::Rc}, }; pub const DEFAULT_COMPUTE_UNITS: u64 = 2_000; @@ -449,7 +446,10 @@ pub fn process_instruction_deploy( state.slot = current_slot; state.status = LoaderV4Status::Deployed; - if let Some(old_entry) = invoke_context.find_program_in_cache(program.get_key()) { + if let Some(old_entry) = invoke_context + .program_cache_for_tx_batch + .find(program.get_key()) + { executor.tx_usage_counter.store( old_entry.tx_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, @@ -460,8 +460,8 @@ pub fn process_instruction_deploy( ); } invoke_context - .programs_modified_by_tx - .replenish(*program.get_key(), Arc::new(executor)); + .program_cache_for_tx_batch + .store_modified_entry(*program.get_key(), Arc::new(executor)); Ok(()) } @@ -592,7 +592,8 @@ pub fn process_instruction_inner( } let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let loaded_program = invoke_context - .find_program_in_cache(program.get_key()) + .program_cache_for_tx_batch + .find(program.get_key()) .ok_or_else(|| { ic_logger_msg!(log_collector, "Program is not cached"); InstructionError::InvalidAccountData @@ -661,7 +662,7 @@ mod tests { if let Ok(loaded_program) = ProgramCacheEntry::new( &loader_v4::id(), invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .environments .program_runtime_v2 .clone(), @@ -671,10 +672,12 @@ mod tests { account.data().len(), &mut load_program_metrics, ) { - 
invoke_context.programs_modified_by_tx.set_slot_for_tests(0); invoke_context - .programs_modified_by_tx - .replenish(*pubkey, Arc::new(loaded_program)); + .program_cache_for_tx_batch + .set_slot_for_tests(0); + invoke_context + .program_cache_for_tx_batch + .store_modified_entry(*pubkey, Arc::new(loaded_program)); } } } @@ -708,7 +711,7 @@ mod tests { Entrypoint::vm, |invoke_context| { invoke_context - .programs_modified_by_tx + .program_cache_for_tx_batch .environments .program_runtime_v2 = Arc::new(create_program_runtime_environment_v2( &ComputeBudget::default(), diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 27023f608791db..fe8f9472463d97 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -65,7 +65,7 @@ dependencies = [ [[package]] name = "agave-geyser-plugin-interface" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-sdk", @@ -75,7 +75,7 @@ dependencies = [ [[package]] name = "agave-validator" -version = "2.0.0" +version = "2.0.2" dependencies = [ "agave-geyser-plugin-interface", "chrono", @@ -417,7 +417,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure 0.12.6", + "synstructure", ] [[package]] @@ -847,22 +847,22 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.4.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" +checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.58", ] [[package]] @@ -2210,124 +2210,6 @@ dependencies = 
[ "winapi 0.3.9", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" -dependencies = [ - "displaydoc", - "icu_collections", 
- "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -2347,14 +2229,12 @@ dependencies = [ [[package]] name = "idna" -version = "1.0.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", - "smallvec", - "utf8_iter", + "unicode-bidi", + "unicode-normalization", ] [[package]] @@ -2844,12 +2724,6 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" -[[package]] -name = "litemap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" - [[package]] name = "lock_api" version = "0.4.10" @@ -4074,7 +3948,7 @@ dependencies = [ "tokio-rustls", "tokio-util 0.7.1", "tower-service", - 
"url 2.5.1", + "url 2.5.2", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4646,7 +4520,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -4669,12 +4543,13 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "blake3", "bv", "bytemuck", + "bytemuck_derive", "bzip2", "crossbeam-channel", "dashmap", @@ -4711,7 +4586,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "bytemuck", @@ -4727,7 +4602,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "borsh 1.5.1", "futures 0.3.30", @@ -4742,7 +4617,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "2.0.0" +version = "2.0.2" dependencies = [ "serde", "serde_derive", @@ -4752,7 +4627,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "crossbeam-channel", @@ -4770,7 +4645,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bv", "fnv", @@ -4785,7 +4660,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "byteorder 1.5.0", @@ -4793,21 +4668,23 @@ dependencies = [ "log", "scopeguard", "solana-compute-budget", + "solana-curve25519", "solana-measure", "solana-poseidon", "solana-program-runtime", "solana-sdk", - "solana-zk-token-sdk", + "solana-type-overrides", "solana_rbpf", "thiserror", ] [[package]] name = "solana-bucket-map" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bv", "bytemuck", + "bytemuck_derive", "log", "memmap2", "modular-bitfield", @@ -4820,7 +4697,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = 
"2.0.0" +version = "2.0.2" dependencies = [ "chrono", "clap 2.33.3", @@ -4830,12 +4707,12 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.5.1", + "url 2.5.2", ] [[package]] name = "solana-cli-config" -version = "2.0.0" +version = "2.0.2" dependencies = [ "dirs-next", "lazy_static", @@ -4844,12 +4721,12 @@ dependencies = [ "serde_yaml", "solana-clap-utils", "solana-sdk", - "url 2.5.1", + "url 2.5.2", ] [[package]] name = "solana-cli-output" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -4874,7 +4751,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -4905,7 +4782,7 @@ dependencies = [ [[package]] name = "solana-compute-budget" -version = "2.0.0" +version = "2.0.2" dependencies = [ "rustc_version", "solana-sdk", @@ -4913,7 +4790,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -4921,7 +4798,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "chrono", @@ -4933,7 +4810,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -4952,7 +4829,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.10", "base64 0.22.1", @@ -4987,6 +4864,7 @@ dependencies = [ "solana-bloom", "solana-client", "solana-compute-budget", + "solana-connection-cache", "solana-cost-model", "solana-entry", "solana-geyser-plugin-manager", @@ -5027,7 +4905,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.10", "lazy_static", @@ -5046,9 +4924,20 @@ dependencies = [ "solana-vote-program", ] +[[package]] +name = 
"solana-curve25519" +version = "2.0.2" +dependencies = [ + "bytemuck", + "bytemuck_derive", + "curve25519-dalek", + "solana-program", + "thiserror", +] + [[package]] name = "solana-download-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "console", "indicatif", @@ -5060,7 +4949,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "crossbeam-channel", @@ -5080,7 +4969,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "byteorder 1.5.0", @@ -5102,7 +4991,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-accounts-db", @@ -5113,7 +5002,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "2.0.0" +version = "2.0.2" dependencies = [ "agave-geyser-plugin-interface", "bs58", @@ -5138,7 +5027,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -5184,7 +5073,7 @@ dependencies = [ [[package]] name = "solana-inline-spl" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "rustc_version", @@ -5193,7 +5082,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "bincode", @@ -5259,19 +5148,20 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-compute-budget", "solana-measure", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", "solana_rbpf", ] [[package]] name = "solana-logger" -version = "2.0.0" +version = "2.0.2" dependencies = [ "env_logger", "lazy_static", @@ -5280,7 +5170,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "solana-sdk", @@ -5288,7 +5178,7 @@ 
dependencies = [ [[package]] name = "solana-merkle-tree" -version = "2.0.0" +version = "2.0.2" dependencies = [ "fast-math", "solana-program", @@ -5296,7 +5186,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "2.0.0" +version = "2.0.2" dependencies = [ "crossbeam-channel", "gethostname", @@ -5309,7 +5199,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "clap 3.2.25", @@ -5325,7 +5215,7 @@ dependencies = [ "solana-version", "static_assertions", "tokio", - "url 2.5.1", + "url 2.5.2", ] [[package]] @@ -5336,7 +5226,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-perf" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ahash 0.8.10", "bincode", @@ -5361,7 +5251,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "2.0.0" +version = "2.0.2" dependencies = [ "core_affinity", "crossbeam-channel", @@ -5377,7 +5267,7 @@ dependencies = [ [[package]] name = "solana-poseidon" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ark-bn254", "light-poseidon", @@ -5386,7 +5276,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "ark-bn254", "ark-ec", @@ -5401,12 +5291,11 @@ dependencies = [ "bs58", "bv", "bytemuck", - "cc", + "bytemuck_derive", "console_error_panic_hook", "console_log", "curve25519-dalek", "getrandom 0.2.10", - "itertools 0.12.1", "js-sys", "lazy_static", "libsecp256k1 0.6.0", @@ -5431,7 +5320,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -5450,6 +5339,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-sdk", + "solana-type-overrides", "solana-vote", "solana_rbpf", "thiserror", @@ -5457,7 +5347,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"assert_matches", "async-trait", @@ -5487,7 +5377,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "crossbeam-channel", "futures-util", @@ -5505,12 +5395,12 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.5.1", + "url 2.5.2", ] [[package]] name = "solana-quic-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-mutex", "async-trait", @@ -5534,7 +5424,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "2.0.0" +version = "2.0.2" dependencies = [ "lazy_static", "num_cpus", @@ -5542,7 +5432,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "2.0.0" +version = "2.0.2" dependencies = [ "console", "dialoguer", @@ -5559,7 +5449,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -5616,7 +5506,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "base64 0.22.1", @@ -5641,7 +5531,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "base64 0.22.1", @@ -5663,7 +5553,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "2.0.0" +version = "2.0.2" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -5674,7 +5564,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "2.0.0" +version = "2.0.2" dependencies = [ "aquamarine", "arrayref", @@ -5736,6 +5626,8 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "solana-zk-elgamal-proof-program", + "solana-zk-sdk", "solana-zk-token-proof-program", "solana-zk-token-sdk", "static_assertions", @@ -5750,7 +5642,7 @@ dependencies = [ [[package]] name = "solana-sbf-programs" -version = "2.0.0" +version = "2.0.2" dependencies = [ "agave-validator", "bincode", @@ 
-5770,26 +5662,20 @@ dependencies = [ "solana-measure", "solana-program", "solana-program-runtime", - "solana-program-test", "solana-runtime", "solana-sbf-rust-invoke-dep", - "solana-sbf-rust-mem", "solana-sbf-rust-realloc-dep", "solana-sbf-rust-realloc-invoke-dep", - "solana-sbf-rust-remaining-compute-units", - "solana-sbf-rust-sanity", - "solana-sbf-rust-simulation", - "solana-sbf-rust-sysvar", "solana-sdk", "solana-svm", "solana-transaction-status", + "solana-type-overrides", "solana_rbpf", - "walkdir", ] [[package]] name = "solana-sbf-rust-128bit" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-128bit-dep", @@ -5797,21 +5683,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alloc" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alt-bn128" -version = "2.0.0" +version = "2.0.2" dependencies = [ "array-bytes", "solana-program", @@ -5819,7 +5705,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128-compression" -version = "2.0.0" +version = "2.0.2" dependencies = [ "array-bytes", "solana-program", @@ -5827,7 +5713,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-big-mod-exp" -version = "2.0.0" +version = "2.0.2" dependencies = [ "array-bytes", "serde", @@ -5838,36 +5724,37 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-depth" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-caller-access" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-curve25519" -version = "2.0.0" +version = "2.0.2" dependencies = [ + "solana-curve25519", "solana-program", "solana-zk-token-sdk", ] [[package]] name = "solana-sbf-rust-custom-heap" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"solana-program", ] [[package]] name = "solana-sbf-rust-dep-crate" -version = "2.0.0" +version = "2.0.2" dependencies = [ "byteorder 1.5.0", "solana-program", @@ -5875,21 +5762,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-deprecated-loader" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dup-accounts" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-error-handling" -version = "2.0.0" +version = "2.0.2" dependencies = [ "num-derive", "num-traits", @@ -5899,42 +5786,42 @@ dependencies = [ [[package]] name = "solana-sbf-rust-external-spend" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-finalize" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-get-minimum-delegation" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-inner_instruction_alignment_check" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-instruction-introspection" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke" -version = "2.0.0" +version = "2.0.2" dependencies = [ "rustversion", "solana-program", @@ -5945,32 +5832,32 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-error" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-ok" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-return" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-dep" -version = "2.0.0" +version = "2.0.2" [[package]] name = "solana-sbf-rust-invoked" -version = 
"2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-invoked-dep", @@ -5978,28 +5865,28 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoked-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-iter" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-log-data" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-many-args" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-many-args-dep", @@ -6007,14 +5894,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-mem" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-mem-dep", @@ -6022,14 +5909,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-mem-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-membuiltins" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-mem-dep", @@ -6037,21 +5924,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-noop" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-panic" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-param-passing" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-param-passing-dep", @@ -6059,14 +5946,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-poseidon" -version = "2.0.0" +version = "2.0.2" dependencies = [ "array-bytes", 
"solana-poseidon", @@ -6075,7 +5962,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-rand" -version = "2.0.0" +version = "2.0.2" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", @@ -6084,7 +5971,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-realloc-dep", @@ -6092,14 +5979,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc-dep" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-realloc-invoke" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", "solana-sbf-rust-realloc-dep", @@ -6108,39 +5995,39 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc-invoke-dep" -version = "2.0.0" +version = "2.0.2" [[package]] name = "solana-sbf-rust-remaining-compute-units" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-ro-account_modify" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-ro-modify" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sanity" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-secp256k1-recover" -version = "2.0.0" +version = "2.0.2" dependencies = [ "libsecp256k1 0.7.0", "solana-program", @@ -6148,7 +6035,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sha" -version = "2.0.0" +version = "2.0.2" dependencies = [ "blake3", "solana-program", @@ -6156,69 +6043,70 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-inner-instructions" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sibling-instructions" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = 
"solana-sbf-rust-simulation" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-spoof1" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-spoof1-system" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sysvar" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-upgradeable" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-upgraded" -version = "2.0.0" +version = "2.0.2" dependencies = [ "solana-program", ] [[package]] name = "solana-sdk" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "bitflags 2.5.0", "borsh 1.5.1", "bs58", "bytemuck", + "bytemuck_derive", "byteorder 1.5.0", "chrono", "derivation-path", @@ -6259,7 +6147,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bs58", "proc-macro2", @@ -6276,11 +6164,12 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "2.0.0" +version = "2.0.2" dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-connection-cache", "solana-measure", "solana-metrics", "solana-runtime", @@ -6290,7 +6179,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -6298,12 +6187,13 @@ dependencies = [ "solana-config-program", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", "solana-vote-program", ] [[package]] name = "solana-storage-bigtable" -version = "2.0.0" +version = "2.0.2" dependencies = [ "backoff", "bincode", @@ -6335,7 +6225,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "2.0.0" +version = "2.0.2" dependencies = [ 
"bincode", "bs58", @@ -6350,7 +6240,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-channel", "bytes", @@ -6382,7 +6272,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = "2.0.0" +version = "2.0.2" dependencies = [ "itertools 0.12.1", "log", @@ -6400,12 +6290,13 @@ dependencies = [ "solana-program-runtime", "solana-sdk", "solana-system-program", + "solana-type-overrides", "solana-vote", ] [[package]] name = "solana-system-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -6413,11 +6304,12 @@ dependencies = [ "serde_derive", "solana-program-runtime", "solana-sdk", + "solana-type-overrides", ] [[package]] name = "solana-test-validator" -version = "2.0.0" +version = "2.0.2" dependencies = [ "base64 0.22.1", "bincode", @@ -6447,7 +6339,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -6460,7 +6352,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "bincode", @@ -6482,7 +6374,7 @@ dependencies = [ [[package]] name = "solana-transaction-metrics-tracker" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -6496,7 +6388,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "2.0.0" +version = "2.0.2" dependencies = [ "Inflector", "base64 0.22.1", @@ -6521,7 +6413,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "bytes", @@ -6553,9 +6445,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-type-overrides" +version = "2.0.2" +dependencies = [ + "lazy_static", + "rand 0.8.5", +] + [[package]] name = "solana-udp-client" -version = "2.0.0" +version = "2.0.2" dependencies = [ "async-trait", "solana-connection-cache", @@ -6568,7 +6468,7 @@ 
dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "solana-sdk", @@ -6577,7 +6477,7 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-pool" -version = "2.0.0" +version = "2.0.2" dependencies = [ "assert_matches", "crossbeam-channel", @@ -6596,7 +6496,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "2.0.0" +version = "2.0.2" dependencies = [ "log", "rustc_version", @@ -6608,7 +6508,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "2.0.0" +version = "2.0.2" dependencies = [ "itertools 0.12.1", "log", @@ -6621,7 +6521,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bincode", "log", @@ -6639,7 +6539,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "2.0.0" +version = "2.0.2" dependencies = [ "anyhow", "log", @@ -6660,9 +6560,48 @@ dependencies = [ "solana-vote-program", ] +[[package]] +name = "solana-zk-elgamal-proof-program" +version = "2.0.2" +dependencies = [ + "bytemuck", + "num-derive", + "num-traits", + "solana-program-runtime", + "solana-sdk", + "solana-zk-sdk", +] + +[[package]] +name = "solana-zk-sdk" +version = "2.0.2" +dependencies = [ + "aes-gcm-siv", + "base64 0.22.1", + "bincode", + "bytemuck", + "bytemuck_derive", + "curve25519-dalek", + "itertools 0.12.1", + "lazy_static", + "merlin", + "num-derive", + "num-traits", + "rand 0.7.3", + "serde", + "serde_derive", + "serde_json", + "sha3 0.9.1", + "solana-program", + "solana-sdk", + "subtle", + "thiserror", + "zeroize", +] + [[package]] name = "solana-zk-token-proof-program" -version = "2.0.0" +version = "2.0.2" dependencies = [ "bytemuck", "num-derive", @@ -6674,15 +6613,15 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "2.0.0" +version = "2.0.2" dependencies = [ "aes-gcm-siv", "base64 0.22.1", "bincode", "bytemuck", + "bytemuck_derive", "byteorder 
1.5.0", "curve25519-dalek", - "getrandom 0.1.14", "itertools 0.12.1", "lazy_static", "merlin", @@ -6693,6 +6632,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3 0.9.1", + "solana-curve25519", "solana-program", "solana-sdk", "subtle", @@ -6733,9 +6673,9 @@ checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" [[package]] name = "spl-associated-token-account" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2e688554bac5838217ffd1fab7845c573ff106b6336bf7d290db7c98d5a8efd" +checksum = "68034596cf4804880d265f834af1ff2f821ad5293e41fa0f8f59086c181fc38e" dependencies = [ "assert_matches", "borsh 1.5.1", @@ -6749,9 +6689,9 @@ dependencies = [ [[package]] name = "spl-discriminator" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d1814406e98b08c5cd02c1126f83fd407ad084adce0b05fda5730677822eac" +checksum = "a38ea8b6dedb7065887f12d62ed62c1743aa70749e8558f963609793f6fb12bc" dependencies = [ "bytemuck", "solana-program", @@ -6784,21 +6724,22 @@ dependencies = [ [[package]] name = "spl-memo" -version = "4.0.1" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e9bae02de3405079a057fe244c867a08f92d48327d231fc60da831f94caf0a" +checksum = "a0dba2f2bb6419523405d21c301a32c9f9568354d4742552e7972af801f4bdb3" dependencies = [ "solana-program", ] [[package]] name = "spl-pod" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ce669f48cf2eca1ec518916d8725596bfb655beb1c74374cf71dc6cb773c9" +checksum = "e6166a591d93af33afd75bbd8573c5fd95fb1213f1bf254f0508c89fdb5ee156" dependencies = [ "borsh 1.5.1", "bytemuck", + "bytemuck_derive", "solana-program", "solana-zk-token-sdk", "spl-program-error", @@ -6806,9 +6747,9 @@ dependencies = [ [[package]] name = "spl-program-error" -version = "0.4.1" +version = "0.5.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49065093ea91f57b9b2bd81493ff705e2ad4e64507a07dbc02b085778e02770e" +checksum = "d7b28bed65356558133751cc32b48a7a5ddfc59ac4e941314630bbed1ac10532" dependencies = [ "num-derive", "num-traits", @@ -6831,9 +6772,9 @@ dependencies = [ [[package]] name = "spl-tlv-account-resolution" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cace91ba08984a41556efe49cbf2edca4db2f577b649da7827d3621161784bf8" +checksum = "37a75a5f0fcc58126693ed78a17042e9dc53f07e357d6be91789f7d62aff61a4" dependencies = [ "bytemuck", "solana-program", @@ -6845,9 +6786,9 @@ dependencies = [ [[package]] name = "spl-token" -version = "4.0.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ae123223633a389f95d1da9d49c2d0a50d499e7060b9624626a69e536ad2a4" +checksum = "70a0f06ac7f23dc0984931b1fe309468f14ea58e32660439c1cef19456f5d0e3" dependencies = [ "arrayref", "bytemuck", @@ -6860,9 +6801,9 @@ dependencies = [ [[package]] name = "spl-token-2022" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5412f99ae7ee6e0afde00defaa354e6228e47e30c0e3adf553e2e01e6abb584" +checksum = "d9c10f3483e48679619c76598d4e4aebb955bc49b0a5cc63323afbf44135c9bf" dependencies = [ "arrayref", "bytemuck", @@ -6884,9 +6825,9 @@ dependencies = [ [[package]] name = "spl-token-group-interface" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419b5cfa3ee8e0f2386fd7e02a33b3ec8a7db4a9c7064a2ea24849dc4a273b6" +checksum = "df8752b85a5ecc1d9f3a43bce3dd9a6a053673aacf5deb513d1cbb88d3534ffd" dependencies = [ "bytemuck", "solana-program", @@ -6897,9 +6838,9 @@ dependencies = [ [[package]] name = "spl-token-metadata-interface" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "30179c47e93625680dabb620c6e7931bd12d62af390f447bc7beb4a3a9b5feee" +checksum = "c6c2318ddff97e006ed9b1291ebec0750a78547f870f62a69c56fe3b46a5d8fc" dependencies = [ "borsh 1.5.1", "solana-program", @@ -6911,9 +6852,9 @@ dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a98359769cd988f7b35c02558daa56d496a7e3bd8626e61f90a7c757eedb9b" +checksum = "a110f33d941275d9f868b96daaa993f1e73b6806cc8836e43075b4d3ad8338a7" dependencies = [ "arrayref", "bytemuck", @@ -6927,9 +6868,9 @@ dependencies = [ [[package]] name = "spl-type-length-value" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ce13429dbd41d2cee8a73931c05fda0b0c8ca156a8b0c19445642550bb61a" +checksum = "bdcd73ec187bc409464c60759232e309f83b52a18a9c5610bf281c9c6432918c" dependencies = [ "bytemuck", "solana-program", @@ -6938,12 +6879,6 @@ dependencies = [ "spl-program-error", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -7059,17 +6994,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", -] - [[package]] name = "sys-info" version = "0.9.1" @@ -7312,16 +7236,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - 
[[package]] name = "tinyvec" version = "1.6.0" @@ -7658,7 +7572,7 @@ dependencies = [ "rustls", "sha1", "thiserror", - "url 2.5.1", + "url 2.5.2", "utf-8", "webpki-roots 0.24.0", ] @@ -7777,12 +7691,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 1.0.0", + "idna 0.5.0", "percent-encoding 2.3.1", ] @@ -7792,18 +7706,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "valuable" version = "0.1.0" @@ -8169,18 +8071,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "x509-parser" version = "0.14.0" @@ -8210,30 +8100,6 @@ dependencies = [ "rustix", ] -[[package]] -name = "yoke" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" 
-dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", - "synstructure 0.13.1", -] - [[package]] name = "zerocopy" version = "0.7.31" @@ -8254,27 +8120,6 @@ dependencies = [ "syn 2.0.58", ] -[[package]] -name = "zerofrom" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", - "synstructure 0.13.1", -] - [[package]] name = "zeroize" version = "1.3.0" @@ -8295,28 +8140,6 @@ dependencies = [ "syn 2.0.58", ] -[[package]] -name = "zerovec" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", -] - [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 6e8f54a427a986..a2880b5b1c6d9c 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "2.0.0" +version = "2.0.2" description = "Solana SBF test program written 
in Rust" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -26,37 +26,33 @@ rustversion = "1.0.14" serde = "1.0.112" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_derive = "1.0.112" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.56" -solana-account-decoder = { path = "../../account-decoder", version = "=2.0.0" } -solana-accounts-db = { path = "../../accounts-db", version = "=2.0.0" } -solana-bpf-loader-program = { path = "../bpf_loader", version = "=2.0.0" } -solana-cli-output = { path = "../../cli-output", version = "=2.0.0" } -solana-compute-budget = { path = "../../compute-budget", version = "=2.0.0" } -solana-ledger = { path = "../../ledger", version = "=2.0.0" } -solana-logger = { path = "../../logger", version = "=2.0.0" } -solana-measure = { path = "../../measure", version = "=2.0.0" } -solana-poseidon = { path = "../../poseidon/", version = "=2.0.0" } -solana-program = { path = "../../sdk/program", version = "=2.0.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=2.0.0" } -solana-program-test = { path = "../../program-test", version = "=2.0.0" } -solana-runtime = { path = "../../runtime", version = "=2.0.0" } -solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=2.0.0" } -solana-sbf-rust-invoke-dep = { path = "rust/invoke_dep", version = "=2.0.0" } -solana-sbf-rust-invoked-dep = { path = "rust/invoked_dep", version = "=2.0.0" } -solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=2.0.0" } -solana-sbf-rust-mem = { path = "rust/mem", version = "=2.0.0" } -solana-sbf-rust-mem-dep = { path = "rust/mem_dep", version = "=2.0.0" } -solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=2.0.0" } -solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = "=2.0.0" } 
-solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=2.0.0" } -solana-sbf-rust-remaining-compute-units = { path = "rust/remaining_compute_units", version = "=2.0.0" } -solana-sbf-rust-sanity = { path = "rust/sanity", version = "=2.0.0" } -solana-sbf-rust-simulation = { path = "rust/simulation", version = "=2.0.0" } -solana-sbf-rust-sysvar = { path = "rust/sysvar", version = "=2.0.0" } -solana-sdk = { path = "../../sdk", version = "=2.0.0" } -solana-svm = { path = "../../svm", version = "=2.0.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=2.0.0" } -agave-validator = { path = "../../validator", version = "=2.0.0" } -solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=2.0.0" } +solana-account-decoder = { path = "../../account-decoder", version = "=2.0.2" } +solana-accounts-db = { path = "../../accounts-db", version = "=2.0.2" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=2.0.2" } +solana-cli-output = { path = "../../cli-output", version = "=2.0.2" } +solana-compute-budget = { path = "../../compute-budget", version = "=2.0.2" } +solana-curve25519 = { path = "../../curves/curve25519", version = "=2.0.2" } +solana-ledger = { path = "../../ledger", version = "=2.0.2" } +solana-logger = { path = "../../logger", version = "=2.0.2" } +solana-measure = { path = "../../measure", version = "=2.0.2" } +solana-poseidon = { path = "../../poseidon/", version = "=2.0.2" } +solana-program = { path = "../../sdk/program", version = "=2.0.2" } +solana-program-runtime = { path = "../../program-runtime", version = "=2.0.2" } +solana-runtime = { path = "../../runtime", version = "=2.0.2" } +solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=2.0.2" } +solana-sbf-rust-invoke-dep = { path = "rust/invoke_dep", version = "=2.0.2" } +solana-sbf-rust-invoked-dep = { path = "rust/invoked_dep", version = "=2.0.2" } +solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", 
version = "=2.0.2" } +solana-sbf-rust-mem-dep = { path = "rust/mem_dep", version = "=2.0.2" } +solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=2.0.2" } +solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = "=2.0.2" } +solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=2.0.2" } +solana-sdk = { path = "../../sdk", version = "=2.0.2" } +solana-svm = { path = "../../svm", version = "=2.0.2" } +solana-transaction-status = { path = "../../transaction-status", version = "=2.0.2" } +solana-type-overrides = { path = "../../type-overrides", version = "=2.0.2" } +agave-validator = { path = "../../validator", version = "=2.0.2" } +solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=2.0.2" } solana_rbpf = "=0.8.1" thiserror = "1.0" @@ -73,14 +69,16 @@ homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } +[profile.release] +# The test programs are build in release mode +# Minimize their file size so that they fit into the account size limit +strip = true + [features] sbf_c = [] sbf_rust = [] dummy-for-ci-check = ["sbf_c", "sbf_rust"] -[build-dependencies] -walkdir = "2" - [dev-dependencies] agave-validator = { workspace = true } bincode = { workspace = true } @@ -100,19 +98,14 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program = { workspace = true } solana-program-runtime = { workspace = true } -solana-program-test = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sbf-rust-invoke-dep = { workspace = true } -solana-sbf-rust-mem = { workspace = true } solana-sbf-rust-realloc-dep = { workspace = true } solana-sbf-rust-realloc-invoke-dep = { workspace = true } -solana-sbf-rust-remaining-compute-units = { workspace = true } -solana-sbf-rust-sanity = { workspace = true } -solana-sbf-rust-simulation = { workspace = true } -solana-sbf-rust-sysvar = { 
workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-svm = { workspace = true } solana-transaction-status = { workspace = true } +solana-type-overrides = { workspace = true } solana_rbpf = { workspace = true } [[bench]] @@ -216,5 +209,6 @@ members = [ # # There is a similar override in `../../Cargo.toml`. Please keep both comments # and the overrides in sync. +solana-curve25519 = { path = "../../curves/curve25519" } solana-program = { path = "../../sdk/program" } solana-zk-token-sdk = { path = "../../zk-token-sdk" } diff --git a/programs/sbf/Makefile b/programs/sbf/Makefile new file mode 100755 index 00000000000000..3f213d352a2995 --- /dev/null +++ b/programs/sbf/Makefile @@ -0,0 +1,13 @@ +SBF_SDK_PATH := ../../sdk/sbf +SRC_DIR := c/src +OUT_DIR := target/sbf-solana-solana/release + +test: rust all + SBF_OUT_DIR=$(OUT_DIR) cargo test --features="sbf_rust,sbf_c" $(TEST_ARGS) + +rust: + cargo +solana build --release --target sbf-solana-solana --workspace + +.PHONY: rust + +include $(SBF_SDK_PATH)/c/sbf.mk diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 489c5224f24ee1..ab5e950faab874 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -139,7 +139,7 @@ fn bench_program_alu(bencher: &mut Bencher) { vec![], &mut invoke_context, ); - let mut vm = vm.unwrap(); + let (mut vm, _, _) = vm.unwrap(); println!("Interpreted:"); vm.context_object_pointer @@ -314,7 +314,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { account_lengths, &mut invoke_context, ); - let mut vm = vm.unwrap(); + let (mut vm, _, _) = vm.unwrap(); let mut measure = Measure::start("tune"); let (instructions, _result) = vm.execute_program(&executable, true); diff --git a/programs/sbf/build.rs b/programs/sbf/build.rs deleted file mode 100644 index 97f2423162aee7..00000000000000 --- a/programs/sbf/build.rs +++ /dev/null @@ -1,130 +0,0 @@ -extern crate walkdir; - -use { - 
std::{env, path::Path, process::Command}, - walkdir::WalkDir, -}; - -fn rerun_if_changed(files: &[&str], directories: &[&str], excludes: &[&str]) { - let mut all_files: Vec<_> = files.iter().map(|f| f.to_string()).collect(); - - for directory in directories { - let files_in_directory: Vec<_> = WalkDir::new(directory) - .into_iter() - .map(|entry| entry.unwrap()) - .filter(|entry| { - if !entry.file_type().is_file() { - return false; - } - for exclude in excludes.iter() { - if entry.path().to_str().unwrap().contains(exclude) { - return false; - } - } - true - }) - .map(|f| f.path().to_str().unwrap().to_owned()) - .collect(); - all_files.extend_from_slice(&files_in_directory[..]); - } - - for file in all_files { - if !Path::new(&file).is_file() { - panic!("{file} is not a file"); - } - println!("cargo:rerun-if-changed={file}"); - } -} - -fn main() { - if env::var("CARGO_FEATURE_DUMMY_FOR_CI_CHECK").is_ok() { - println!("cargo:warning=(not a warning) Compiling with host toolchain for CI..."); - return; - } - - let build_profile = env::var("PROFILE").expect("`PROFILE` envvar to be set"); - let install_dir = format!("target/{build_profile}/sbf"); - let sbf_c = env::var("CARGO_FEATURE_SBF_C").is_ok(); - if sbf_c { - let install_dir = format!("OUT_DIR=../{install_dir}"); - println!("cargo:warning=(not a warning) Building C-based on-chain programs"); - assert!(Command::new("make") - .current_dir("c") - .arg("programs") - .arg(&install_dir) - .status() - .expect("Failed to build C-based SBF programs") - .success()); - - rerun_if_changed(&["c/makefile"], &["c/src", "../../sdk"], &["/target/"]); - } - - let sbf_rust = env::var("CARGO_FEATURE_SBF_RUST").is_ok(); - if sbf_rust { - let rust_programs = [ - "128bit", - "alloc", - "alt_bn128", - "alt_bn128_compression", - "big_mod_exp", - "call_depth", - "caller_access", - "curve25519", - "custom_heap", - "dep_crate", - "deprecated_loader", - "dup_accounts", - "error_handling", - "log_data", - "external_spend", - "finalize", - 
"get_minimum_delegation", - "inner_instruction_alignment_check", - "instruction_introspection", - "invoke", - "invoke_and_error", - "invoke_and_ok", - "invoke_and_return", - "invoked", - "iter", - "many_args", - "mem", - "membuiltins", - "noop", - "panic", - "param_passing", - "poseidon", - "rand", - "realloc", - "realloc_invoke", - "remaining_compute_units", - "ro_modify", - "ro_account_modify", - "sanity", - "secp256k1_recover", - "sha", - "sibling_inner_instructions", - "sibling_instructions", - "simulation", - "spoof1", - "spoof1_system", - "upgradeable", - "upgraded", - ]; - for program in rust_programs.iter() { - println!("cargo:warning=(not a warning) Building Rust-based on-chain programs: solana_sbf_rust_{program}"); - assert!(Command::new("../../cargo-build-sbf") - .args([ - "--manifest-path", - &format!("rust/{program}/Cargo.toml"), - "--sbf-out-dir", - &install_dir - ]) - .status() - .expect("Error calling cargo-build-sbf from build.rs") - .success()); - } - - rerun_if_changed(&[], &["rust", "../../sdk", &install_dir], &["/target/"]); - } -} diff --git a/programs/sbf/c/makefile b/programs/sbf/c/makefile deleted file mode 100644 index 77b774c2a6bf82..00000000000000 --- a/programs/sbf/c/makefile +++ /dev/null @@ -1,2 +0,0 @@ -SBF_SDK := ../../../sdk/sbf/c -include $(SBF_SDK)/sbf.mk diff --git a/programs/sbf/rust/curve25519/Cargo.toml b/programs/sbf/rust/curve25519/Cargo.toml index c75477788e0dca..ad555810ff203e 100644 --- a/programs/sbf/rust/curve25519/Cargo.toml +++ b/programs/sbf/rust/curve25519/Cargo.toml @@ -9,6 +9,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +solana-curve25519 = { workspace = true } solana-program = { workspace = true } solana-zk-token-sdk = { workspace = true } diff --git a/programs/sbf/rust/curve25519/src/lib.rs b/programs/sbf/rust/curve25519/src/lib.rs index a8096d65b34710..42718278c2c685 100644 --- a/programs/sbf/rust/curve25519/src/lib.rs +++ b/programs/sbf/rust/curve25519/src/lib.rs @@ -2,8 
+2,8 @@ extern crate solana_program; use { + solana_curve25519::{edwards, ristretto, scalar}, solana_program::{custom_heap_default, custom_panic_default, msg}, - solana_zk_token_sdk::curve25519::{edwards, ristretto, scalar}, }; #[no_mangle] diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index 72d23245ce7150..1f1eb97abf8281 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -1350,6 +1350,100 @@ fn process_instruction<'a>( let byte_index = usize::from_le_bytes(instruction_data[2..10].try_into().unwrap()); target_account.data.borrow_mut()[byte_index] = instruction_data[10]; } + TEST_CALLEE_ACCOUNT_UPDATES => { + msg!("TEST_CALLEE_ACCOUNT_UPDATES"); + + if instruction_data.len() < 2 + 2 * std::mem::size_of::() { + return Ok(()); + } + + let writable = instruction_data[1] != 0; + let resize = usize::from_le_bytes(instruction_data[2..10].try_into().unwrap()); + let write_offset = usize::from_le_bytes(instruction_data[10..18].try_into().unwrap()); + let invoke_struction = &instruction_data[18..]; + + let account = &accounts[ARGUMENT_INDEX]; + + if resize != 0 { + account.realloc(resize, false).unwrap(); + } + + if !invoke_struction.is_empty() { + // Invoke another program. With direct mapping, before CPI the callee will update the accounts (incl resizing) + // so the pointer may change. 
+ let invoked_program_id = accounts[INVOKED_PROGRAM_INDEX].key; + + invoke( + &create_instruction( + *invoked_program_id, + &[ + (accounts[MINT_INDEX].key, false, false), + (accounts[ARGUMENT_INDEX].key, writable, false), + (invoked_program_id, false, false), + ], + invoke_struction.to_vec(), + ), + accounts, + ) + .unwrap(); + } + + if write_offset != 0 { + // Ensure we still have access to the correct account + account.data.borrow_mut()[write_offset] ^= 0xe5; + } + } + TEST_STACK_HEAP_ZEROED => { + msg!("TEST_STACK_HEAP_ZEROED"); + const MM_STACK_START: u64 = 0x200000000; + const MM_HEAP_START: u64 = 0x300000000; + const ZEROS: [u8; 256 * 1024] = [0; 256 * 1024]; + const STACK_FRAME_SIZE: usize = 4096; + const MAX_CALL_DEPTH: usize = 64; + + // Check that the heap is always zeroed. + // + // At this point the code up to here will have allocated some values on the heap. The + // bump allocator writes the current heap pointer to the start of the memory region. We + // read it to find the slice of unallocated memory and check that it's zeroed. We then + // fill this memory with a sentinel value, and in the next nested invocation check that + // it's been zeroed as expected. + let heap_len = usize::from_le_bytes(instruction_data[1..9].try_into().unwrap()); + let heap = unsafe { slice::from_raw_parts_mut(MM_HEAP_START as *mut u8, heap_len) }; + let pos = usize::from_le_bytes(heap[0..8].try_into().unwrap()) + .saturating_sub(MM_HEAP_START as usize); + assert!(heap[8..pos] == ZEROS[8..pos], "heap not zeroed"); + heap[8..pos].fill(42); + + // Check that the stack is zeroed too. + // + // We don't know in which frame we are now, so we skip a few (10) frames at the start + // which might have been used by the current call stack. We check that the memory for + // the 10..MAX_CALL_DEPTH frames is zeroed. Then we write a sentinel value, and in the + // next nested invocation check that it's been zeroed. 
+ let stack = + unsafe { slice::from_raw_parts_mut(MM_STACK_START as *mut u8, 0x100000000) }; + for i in 10..MAX_CALL_DEPTH { + let stack = &mut stack[i * STACK_FRAME_SIZE..][..STACK_FRAME_SIZE]; + assert!(stack == &ZEROS[..STACK_FRAME_SIZE], "stack not zeroed"); + stack.fill(42); + } + + // Recurse to check that the stack and heap are zeroed. + // + // We recurse until we go over max CPI depth and error out. Stack and heap allocations + // are reused across CPI, by going over max depth we ensure that it's impossible to get + // non-zeroed regions through execution. + invoke( + &create_instruction( + *program_id, + &[(program_id, false, false)], + instruction_data.to_vec(), + ), + accounts, + ) + .unwrap(); + } _ => panic!("unexpected program data"), } diff --git a/programs/sbf/rust/invoke_dep/src/lib.rs b/programs/sbf/rust/invoke_dep/src/lib.rs index b335fb52f5b6b1..066e900b7f9d2e 100644 --- a/programs/sbf/rust/invoke_dep/src/lib.rs +++ b/programs/sbf/rust/invoke_dep/src/lib.rs @@ -39,6 +39,8 @@ pub const TEST_CPI_INVALID_LAMPORTS_POINTER: u8 = 36; pub const TEST_CPI_INVALID_DATA_POINTER: u8 = 37; pub const TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION: u8 = 38; pub const TEST_WRITE_ACCOUNT: u8 = 39; +pub const TEST_CALLEE_ACCOUNT_UPDATES: u8 = 40; +pub const TEST_STACK_HEAP_ZEROED: u8 = 41; pub const MINT_INDEX: usize = 0; pub const ARGUMENT_INDEX: usize = 1; diff --git a/programs/sbf/rust/mem/Cargo.toml b/programs/sbf/rust/mem/Cargo.toml index ab05428bcd0a26..9b99a551c7c8ec 100644 --- a/programs/sbf/rust/mem/Cargo.toml +++ b/programs/sbf/rust/mem/Cargo.toml @@ -13,4 +13,4 @@ solana-program = { workspace = true } solana-sbf-rust-mem-dep = { workspace = true } [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/remaining_compute_units/Cargo.toml b/programs/sbf/rust/remaining_compute_units/Cargo.toml index 403177a8df61d6..c35ed06152b234 100644 --- a/programs/sbf/rust/remaining_compute_units/Cargo.toml +++ 
b/programs/sbf/rust/remaining_compute_units/Cargo.toml @@ -12,4 +12,4 @@ edition = { workspace = true } solana-program = { workspace = true } [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/sanity/Cargo.toml b/programs/sbf/rust/sanity/Cargo.toml index 435acceddfef36..f01dd7501e6906 100644 --- a/programs/sbf/rust/sanity/Cargo.toml +++ b/programs/sbf/rust/sanity/Cargo.toml @@ -12,4 +12,4 @@ edition = { workspace = true } solana-program = { workspace = true } [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/simulation/Cargo.toml b/programs/sbf/rust/simulation/Cargo.toml index 3114e2a1a75c8b..3fd65622c876a4 100644 --- a/programs/sbf/rust/simulation/Cargo.toml +++ b/programs/sbf/rust/simulation/Cargo.toml @@ -12,4 +12,4 @@ edition = { workspace = true } solana-program = { workspace = true } [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/sysvar/Cargo.toml b/programs/sbf/rust/sysvar/Cargo.toml index 1144ebde960cae..bb9683fa5a63dd 100644 --- a/programs/sbf/rust/sysvar/Cargo.toml +++ b/programs/sbf/rust/sysvar/Cargo.toml @@ -12,4 +12,4 @@ edition = { workspace = true } solana-program = { workspace = true } [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/sysvar/src/lib.rs b/programs/sbf/rust/sysvar/src/lib.rs index d460b5ca635da2..88b7a4aa404b4e 100644 --- a/programs/sbf/rust/sysvar/src/lib.rs +++ b/programs/sbf/rust/sysvar/src/lib.rs @@ -2,8 +2,6 @@ extern crate solana_program; #[allow(deprecated)] -use solana_program::sysvar::fees::Fees; -#[allow(deprecated)] use solana_program::sysvar::recent_blockhashes::RecentBlockhashes; use solana_program::{ account_info::AccountInfo, @@ -31,7 +29,7 @@ pub fn process_instruction( sysvar::clock::id().log(); let clock = Clock::from_account_info(&accounts[2]).unwrap(); assert_ne!(clock, Clock::default()); - let got_clock = Clock::get()?; + let got_clock = 
Clock::get().unwrap(); assert_eq!(clock, got_clock); } @@ -41,7 +39,7 @@ pub fn process_instruction( sysvar::epoch_schedule::id().log(); let epoch_schedule = EpochSchedule::from_account_info(&accounts[3]).unwrap(); assert_eq!(epoch_schedule, EpochSchedule::default()); - let got_epoch_schedule = EpochSchedule::get()?; + let got_epoch_schedule = EpochSchedule::get().unwrap(); assert_eq!(epoch_schedule, got_epoch_schedule); } @@ -49,8 +47,9 @@ pub fn process_instruction( msg!("Instructions identifier:"); sysvar::instructions::id().log(); assert_eq!(*accounts[4].owner, sysvar::id()); - let index = instructions::load_current_index_checked(&accounts[4])?; - let instruction = instructions::load_instruction_at_checked(index as usize, &accounts[4])?; + let index = instructions::load_current_index_checked(&accounts[4]).unwrap(); + let instruction = + instructions::load_instruction_at_checked(index as usize, &accounts[4]).unwrap(); assert_eq!(0, index); assert_eq!( instruction, @@ -69,7 +68,6 @@ pub fn process_instruction( AccountMeta::new_readonly(*accounts[8].key, false), AccountMeta::new_readonly(*accounts[9].key, false), AccountMeta::new_readonly(*accounts[10].key, false), - AccountMeta::new_readonly(*accounts[11].key, false), ], ) ); @@ -88,8 +86,7 @@ pub fn process_instruction( msg!("Rent identifier:"); sysvar::rent::id().log(); let rent = Rent::from_account_info(&accounts[6]).unwrap(); - assert_eq!(rent, Rent::default()); - let got_rent = Rent::get()?; + let got_rent = Rent::get().unwrap(); assert_eq!(rent, got_rent); } @@ -114,22 +111,12 @@ pub fn process_instruction( sysvar::stake_history::id().log(); let _ = StakeHistory::from_account_info(&accounts[9]).unwrap(); - // Fees - #[allow(deprecated)] - if instruction_data[0] == 1 { - msg!("Fee identifier:"); - sysvar::fees::id().log(); - let fees = Fees::from_account_info(&accounts[10]).unwrap(); - let got_fees = Fees::get()?; - assert_eq!(fees, got_fees); - } - // Epoch Rewards { msg!("EpochRewards identifier:"); 
sysvar::epoch_rewards::id().log(); - let epoch_rewards = EpochRewards::from_account_info(&accounts[11]).unwrap(); - let got_epoch_rewards = EpochRewards::get()?; + let epoch_rewards = EpochRewards::from_account_info(&accounts[10]).unwrap(); + let got_epoch_rewards = EpochRewards::get().unwrap(); assert_eq!(epoch_rewards, got_epoch_rewards); } diff --git a/programs/sbf/tests/mem.rs b/programs/sbf/tests/mem.rs deleted file mode 100644 index 530ad158a6b3e7..00000000000000 --- a/programs/sbf/tests/mem.rs +++ /dev/null @@ -1,27 +0,0 @@ -#![cfg(feature = "test-bpf")] - -use { - solana_program_test::*, - solana_sbf_rust_mem::process_instruction, - solana_sdk::{ - instruction::Instruction, pubkey::Pubkey, signature::Signer, transaction::Transaction, - }, -}; - -#[tokio::test] -async fn test_mem() { - let program_id = Pubkey::new_unique(); - let program_test = ProgramTest::new( - "solana_sbf_rust_mem", - program_id, - processor!(process_instruction), - ); - let (mut banks_client, payer, recent_blockhash) = program_test.start().await; - - let mut transaction = Transaction::new_with_payer( - &[Instruction::new_with_bincode(program_id, &(), vec![])], - Some(&payer.pubkey()), - ); - transaction.sign(&[&payer], recent_blockhash); - banks_client.process_transaction(transaction).await.unwrap(); -} diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 62f12bcef1d823..b26c2e74e230ff 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -18,10 +18,15 @@ use { compute_budget_processor::process_compute_budget_instructions, }, solana_ledger::token_balances::collect_token_balances, - solana_program_runtime::timings::ExecuteTimings, + solana_program_runtime::{invoke_context::mock_process_instruction, timings::ExecuteTimings}, solana_rbpf::vm::ContextObject, solana_runtime::{ - bank::TransactionBalancesSet, + bank::{Bank, TransactionBalancesSet}, + bank_client::BankClient, + genesis_utils::{ + bootstrap_validator_stake_lamports, 
create_genesis_config, + create_genesis_config_with_leader_ex, GenesisConfigInfo, + }, loader_utils::{ create_program, load_program_from_file, load_upgradeable_buffer, load_upgradeable_program, load_upgradeable_program_and_advance_slot, @@ -32,60 +37,44 @@ use { solana_sbf_rust_realloc_dep::*, solana_sbf_rust_realloc_invoke_dep::*, solana_sdk::{ - account::{ReadableAccount, WritableAccount}, + account::{AccountSharedData, ReadableAccount, WritableAccount}, account_utils::StateMut, - bpf_loader_upgradeable, - clock::MAX_PROCESSING_AGE, + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + client::SyncClient, + clock::{UnixTimestamp, MAX_PROCESSING_AGE}, compute_budget::ComputeBudgetInstruction, entrypoint::MAX_PERMITTED_DATA_INCREASE, feature_set::{self, FeatureSet}, fee::FeeStructure, - message::{v0::LoadedAddresses, SanitizedMessage}, - signature::keypair_from_seed, + fee_calculator::FeeRateGovernor, + genesis_config::ClusterType, + hash::Hash, + instruction::{AccountMeta, Instruction, InstructionError}, + message::{v0::LoadedAddresses, Message, SanitizedMessage}, + pubkey::Pubkey, + rent::Rent, + reserved_account_keys::ReservedAccountKeys, + signature::{keypair_from_seed, Keypair, Signer}, stake, system_instruction::{self, MAX_PERMITTED_DATA_LENGTH}, + system_program, sysvar::{self, clock}, - transaction::VersionedTransaction, + transaction::{SanitizedTransaction, Transaction, TransactionError, VersionedTransaction}, }, - solana_svm::transaction_processor::ExecutionRecordingConfig, - solana_svm::transaction_results::{ - InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, - TransactionResults, + solana_svm::{ + transaction_processor::ExecutionRecordingConfig, + transaction_results::{ + InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, + TransactionResults, + }, }, solana_transaction_status::{ map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta, TransactionWithStatusMeta, 
VersionedTransactionWithStatusMeta, }, - std::collections::HashMap, -}; -use { - solana_program_runtime::invoke_context::mock_process_instruction, - solana_runtime::{ - bank::Bank, - bank_client::BankClient, - genesis_utils::{ - bootstrap_validator_stake_lamports, create_genesis_config, - create_genesis_config_with_leader_ex, GenesisConfigInfo, - }, - }, - solana_sdk::{ - account::AccountSharedData, - bpf_loader, bpf_loader_deprecated, - client::SyncClient, - clock::UnixTimestamp, - fee_calculator::FeeRateGovernor, - genesis_config::ClusterType, - hash::Hash, - instruction::{AccountMeta, Instruction, InstructionError}, - message::Message, - pubkey::Pubkey, - rent::Rent, - reserved_account_keys::ReservedAccountKeys, - signature::{Keypair, Signer}, - system_program, - transaction::{SanitizedTransaction, Transaction, TransactionError}, + std::{ + assert_eq, cell::RefCell, collections::HashMap, str::FromStr, sync::Arc, time::Duration, }, - std::{cell::RefCell, str::FromStr, sync::Arc, time::Duration}, }; #[cfg(feature = "sbf_rust")] @@ -227,6 +216,9 @@ fn execute_transactions( .collect() } +#[cfg(feature = "sbf_rust")] +const LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST: u32 = 64 * 1024 * 1024; + #[test] #[cfg(any(feature = "sbf_c", feature = "sbf_rust"))] fn test_program_sbf_sanity() { @@ -270,12 +262,14 @@ fn test_program_sbf_sanity() { ("solana_sbf_rust_external_spend", false), ("solana_sbf_rust_iter", true), ("solana_sbf_rust_many_args", true), + ("solana_sbf_rust_mem", true), ("solana_sbf_rust_membuiltins", true), ("solana_sbf_rust_noop", true), ("solana_sbf_rust_panic", false), ("solana_sbf_rust_param_passing", true), ("solana_sbf_rust_poseidon", true), ("solana_sbf_rust_rand", true), + ("solana_sbf_rust_remaining_compute_units", true), ("solana_sbf_rust_sanity", true), ("solana_sbf_rust_secp256k1_recover", true), ("solana_sbf_rust_sha", true), @@ -2764,7 +2758,18 @@ fn test_program_sbf_realloc() { instruction.accounts[0].is_writable = false; assert_eq!( bank_client 
- .send_and_confirm_message(signer, Message::new(&[instruction], Some(&mint_pubkey),),) + .send_and_confirm_message( + signer, + Message::new( + &[ + instruction, + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], + Some(&mint_pubkey), + ), + ) .unwrap_err() .unwrap(), TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified) @@ -2776,7 +2781,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, usize::MAX, &mut bump)], + &[ + realloc(&program_id, &pubkey, usize::MAX, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ), ) @@ -2790,7 +2800,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2803,12 +2818,17 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc_extend_and_undo( - &program_id, - &pubkey, - MAX_PERMITTED_DATA_INCREASE, - &mut bump, - )], + &[ + realloc_extend_and_undo( + &program_id, + &pubkey, + MAX_PERMITTED_DATA_INCREASE, + &mut bump, + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2822,12 +2842,17 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc_extend_and_undo( - &program_id, - &pubkey, - MAX_PERMITTED_DATA_INCREASE + 1, - &mut bump, - )], + &[ + realloc_extend_and_undo( + &program_id, + &pubkey, + MAX_PERMITTED_DATA_INCREASE + 1, + &mut bump, + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + 
LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ), ) @@ -2842,12 +2867,17 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc( - &program_id, - &pubkey, - MAX_PERMITTED_DATA_INCREASE + 1, - &mut bump - )], + &[ + realloc( + &program_id, + &pubkey, + MAX_PERMITTED_DATA_INCREASE + 1, + &mut bump + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ), ) @@ -2863,13 +2893,18 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc_extend_and_fill( - &program_id, - &pubkey, - MAX_PERMITTED_DATA_INCREASE, - 1, - &mut bump, - )], + &[ + realloc_extend_and_fill( + &program_id, + &pubkey, + MAX_PERMITTED_DATA_INCREASE, + 1, + &mut bump, + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2887,12 +2922,17 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc_extend( - &program_id, - &pubkey, - MAX_PERMITTED_DATA_INCREASE, - &mut bump - )], + &[ + realloc_extend( + &program_id, + &pubkey, + MAX_PERMITTED_DATA_INCREASE, + &mut bump + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -2906,7 +2946,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, 6, &mut bump)], + &[ + realloc(&program_id, &pubkey, 6, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2920,11 +2965,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[extend_and_write_u64( - &program_id, - &pubkey, - 0x1122334455667788, - )], + &[ + extend_and_write_u64(&program_id, 
&pubkey, 0x1122334455667788), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2938,7 +2984,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2951,11 +3002,16 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - program_id, - &[REALLOC_AND_ASSIGN], - vec![AccountMeta::new(pubkey, false)], - )], + &[ + Instruction::new_with_bytes( + program_id, + &[REALLOC_AND_ASSIGN], + vec![AccountMeta::new(pubkey, false)], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -2971,7 +3027,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ), ) @@ -2986,14 +3047,19 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( &[&mint_keypair, &keypair], Message::new( - &[Instruction::new_with_bytes( - program_id, - &[REALLOC_AND_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM], - vec![ - AccountMeta::new(pubkey, true), - AccountMeta::new(solana_sdk::system_program::id(), false), - ], - )], + &[ + Instruction::new_with_bytes( + program_id, + &[REALLOC_AND_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM], + vec![ + AccountMeta::new(pubkey, true), + AccountMeta::new(solana_sdk::system_program::id(), false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + 
LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3007,14 +3073,19 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( &[&mint_keypair, &keypair], Message::new( - &[Instruction::new_with_bytes( - program_id, - &[ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM_AND_REALLOC], - vec![ - AccountMeta::new(pubkey, true), - AccountMeta::new(solana_sdk::system_program::id(), false), - ], - )], + &[ + Instruction::new_with_bytes( + program_id, + &[ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM_AND_REALLOC], + vec![ + AccountMeta::new(pubkey, true), + AccountMeta::new(solana_sdk::system_program::id(), false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3029,7 +3100,12 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( signer, Message::new( - &[realloc(&program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3042,11 +3118,16 @@ fn test_program_sbf_realloc() { .send_and_confirm_message( &[&mint_keypair, &keypair], Message::new( - &[Instruction::new_with_bytes( - program_id, - &[ZERO_INIT], - vec![AccountMeta::new(pubkey, true)], - )], + &[ + Instruction::new_with_bytes( + program_id, + &[ZERO_INIT], + vec![AccountMeta::new(pubkey, true)], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3104,14 +3185,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_ZERO_RO], - vec![ - AccountMeta::new_readonly(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + 
&[INVOKE_REALLOC_ZERO_RO], + vec![ + AccountMeta::new_readonly(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3127,7 +3213,12 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[realloc(&realloc_program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&realloc_program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3143,14 +3234,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_MAX_PLUS_ONE], - vec![ - AccountMeta::new(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_MAX_PLUS_ONE], + vec![ + AccountMeta::new(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3165,14 +3261,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_MAX_TWICE], - vec![ - AccountMeta::new(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_MAX_TWICE], + vec![ + AccountMeta::new(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ 
-3186,7 +3287,12 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[realloc(&realloc_program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&realloc_program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3201,14 +3307,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_AND_ASSIGN], - vec![ - AccountMeta::new(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_AND_ASSIGN], + vec![ + AccountMeta::new(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3224,7 +3335,12 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[realloc(&realloc_program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&realloc_program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3239,15 +3355,20 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( &[&mint_keypair, &keypair], Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_AND_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM], - vec![ - AccountMeta::new(pubkey, true), - AccountMeta::new_readonly(realloc_program_id, false), - AccountMeta::new_readonly(solana_sdk::system_program::id(), false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_AND_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM], + vec![ + AccountMeta::new(pubkey, 
true), + AccountMeta::new_readonly(realloc_program_id, false), + AccountMeta::new_readonly(solana_sdk::system_program::id(), false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3261,15 +3382,20 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( &[&mint_keypair, &keypair], Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM_AND_REALLOC], - vec![ - AccountMeta::new(pubkey, true), - AccountMeta::new_readonly(realloc_program_id, false), - AccountMeta::new_readonly(solana_sdk::system_program::id(), false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_ASSIGN_TO_SELF_VIA_SYSTEM_PROGRAM_AND_REALLOC], + vec![ + AccountMeta::new(pubkey, true), + AccountMeta::new_readonly(realloc_program_id, false), + AccountMeta::new_readonly(solana_sdk::system_program::id(), false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3284,7 +3410,12 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[realloc(&realloc_program_id, &pubkey, 0, &mut bump)], + &[ + realloc(&realloc_program_id, &pubkey, 0, &mut bump), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3299,14 +3430,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_INVOKE_CHECK], - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_INVOKE_CHECK], + vec![ + 
AccountMeta::new(invoke_pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3333,16 +3469,21 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( &[&mint_keypair, &new_keypair], Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &instruction_data, - vec![ - AccountMeta::new(mint_pubkey, true), - AccountMeta::new(new_pubkey, true), - AccountMeta::new(solana_sdk::system_program::id(), false), - AccountMeta::new_readonly(realloc_invoke_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &instruction_data, + vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(new_pubkey, true), + AccountMeta::new(solana_sdk::system_program::id(), false), + AccountMeta::new_readonly(realloc_invoke_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3365,15 +3506,20 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &instruction_data, - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_invoke_program_id, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &instruction_data, + vec![ + AccountMeta::new(invoke_pubkey, false), + AccountMeta::new_readonly(realloc_invoke_program_id, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3395,14 +3541,19 @@ fn test_program_sbf_realloc_invoke() { 
.send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_MAX_INVOKE_MAX], - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_MAX_INVOKE_MAX], + vec![ + AccountMeta::new(invoke_pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3429,14 +3580,19 @@ fn test_program_sbf_realloc_invoke() { let result = bank_client.send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &instruction_data, - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_invoke_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &instruction_data, + vec![ + AccountMeta::new(invoke_pubkey, false), + AccountMeta::new_readonly(realloc_invoke_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ); @@ -3464,15 +3620,20 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_INVOKE_MAX_TWICE], - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_invoke_program_id, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_INVOKE_MAX_TWICE], + vec![ + AccountMeta::new(invoke_pubkey, false), + AccountMeta::new_readonly(realloc_invoke_program_id, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + 
ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3500,14 +3661,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_EXTEND_MAX, 1, i as u8, (i / 255) as u8], - vec![ - AccountMeta::new(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_EXTEND_MAX, 1, i as u8, (i / 255) as u8], + vec![ + AccountMeta::new(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -3525,14 +3691,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &[INVOKE_REALLOC_EXTEND_MAX, 2, 1, 1], - vec![ - AccountMeta::new(pubkey, false), - AccountMeta::new_readonly(realloc_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &[INVOKE_REALLOC_EXTEND_MAX, 2, 1, 1], + vec![ + AccountMeta::new(pubkey, false), + AccountMeta::new_readonly(realloc_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST + ), + ], Some(&mint_pubkey), ) ) @@ -3553,14 +3724,19 @@ fn test_program_sbf_realloc_invoke() { .send_and_confirm_message( signer, Message::new( - &[Instruction::new_with_bytes( - realloc_invoke_program_id, - &instruction_data, - vec![ - AccountMeta::new(invoke_pubkey, false), - AccountMeta::new_readonly(realloc_invoke_program_id, false), - ], - )], + &[ + Instruction::new_with_bytes( + realloc_invoke_program_id, + &instruction_data, + vec![ + AccountMeta::new(invoke_pubkey, 
false), + AccountMeta::new_readonly(realloc_invoke_program_id, false), + ], + ), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + LOADED_ACCOUNTS_DATA_SIZE_LIMIT_FOR_TEST, + ), + ], Some(&mint_pubkey), ), ) @@ -4463,3 +4639,282 @@ fn test_deny_executable_write() { ); } } + +#[test] +fn test_update_callee_account() { + // Test that fn update_callee_account() works and we are updating the callee account on CPI. + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(100_123_456_789); + + for direct_mapping in [false, true] { + let mut bank = Bank::new_for_tests(&genesis_config); + let feature_set = Arc::make_mut(&mut bank.feature_set); + // by default test banks have all features enabled, so we only need to + // disable when needed + if !direct_mapping { + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + } + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank.clone()); + let authority_keypair = Keypair::new(); + + let (bank, invoke_program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + + let mint_pubkey = mint_keypair.pubkey(); + + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + // I. 
do CPI with account in read only (separate code path with direct mapping) + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id); + let data: Vec = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 0]; + instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + // instruction data for inner CPI (2x) + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok()); + + let data = bank_client + .get_account_data(&account_keypair.pubkey()) + .unwrap() + .unwrap(); + + assert_eq!(data.len(), 20480); + + data.iter().enumerate().for_each(|(i, v)| { + let expected = match i { + ..=10240 => i as u8, + 16384 => 0xe5, + _ => 0, + }; + + assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); + }); + + // II. 
do CPI with account with resize to smaller and write + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id); + let data: Vec = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(19480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8129usize.to_le_bytes().as_ref()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok()); + + let data = bank_client + .get_account_data(&account_keypair.pubkey()) + .unwrap() + .unwrap(); + + assert_eq!(data.len(), 19480); + + data.iter().enumerate().for_each(|(i, v)| { + let expected = match i { + 8129 => (i as u8) ^ 0xe5, + ..=10240 => i as u8, + 16384 => 0xe5, + _ => 0, + }; + + assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); + }); + + // III. 
do CPI with account with resize to larger and write + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id); + let data: Vec = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16385usize.to_le_bytes().as_ref()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok()); + + let data = bank_client + .get_account_data(&account_keypair.pubkey()) + .unwrap() + .unwrap(); + + assert_eq!(data.len(), 20480); + + data.iter().enumerate().for_each(|(i, v)| { + let expected = match i { + ..=10240 => i as u8, + 16384 | 16385 => 0xe5, + _ => 0, + }; + + assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); + }); + + // IV. 
do CPI with account with resize to larger and write + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id); + let data: Vec = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + // instruction data for inner CPI (2x) + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(16385usize.to_le_bytes().as_ref()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok()); + + let data = bank_client + .get_account_data(&account_keypair.pubkey()) + .unwrap() + .unwrap(); + + assert_eq!(data.len(), 20480); + + data.iter().enumerate().for_each(|(i, v)| { + let expected = match i { + ..=10240 => i as u8, + 16384 | 16385 => 0xe5, + _ => 0, + }; + + assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); + }); + } +} + +#[test] +fn test_stack_heap_zeroed() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(100_123_456_789); + + let bank = Bank::new_for_tests(&genesis_config); + + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank); + let authority_keypair = Keypair::new(); + + let (bank, invoke_program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + // Check multiple heap sizes. It's generally a good idea, and also it's needed to ensure that + // pooled heap and stack values are reused - and therefore zeroed - across executions. + for heap_len in [32usize * 1024, 64 * 1024, 128 * 1024, 256 * 1024] { + // TEST_STACK_HEAP_ZEROED will recursively check that stack and heap are zeroed until it + // reaches max CPI invoke depth. We make it fail at max depth so we're sure that there's no + // legit way to access non-zeroed stack and heap regions. 
+ let mut instruction_data = vec![TEST_STACK_HEAP_ZEROED]; + instruction_data.extend_from_slice(&heap_len.to_le_bytes()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + + let message = Message::new( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1_400_000), + ComputeBudgetInstruction::request_heap_frame(heap_len as u32), + instruction, + ], + Some(&mint_pubkey), + ); + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + let (result, _, logs) = process_transaction_and_record_inner(&bank, tx); + assert!(result.is_err(), "{result:?}"); + assert!( + logs.iter() + .any(|log| log.contains("Cross-program invocation call depth too deep")), + "{logs:?}" + ); + } +} diff --git a/programs/sbf/tests/remaining_compute_units.rs b/programs/sbf/tests/remaining_compute_units.rs deleted file mode 100644 index 30da15b2953a53..00000000000000 --- a/programs/sbf/tests/remaining_compute_units.rs +++ /dev/null @@ -1,27 +0,0 @@ -#![cfg(feature = "test-bpf")] - -use { - solana_program_test::*, - solana_sbf_rust_remaining_compute_units::process_instruction, - solana_sdk::{ - instruction::Instruction, pubkey::Pubkey, signature::Signer, transaction::Transaction, - }, -}; - -#[tokio::test] -async fn test_remaining_compute_units() { - let program_id = Pubkey::new_unique(); - let program_test = ProgramTest::new( - "solana_sbf_rust_remaining_compute_units", - program_id, - processor!(process_instruction), - ); - let (mut banks_client, payer, recent_blockhash) = program_test.start().await; - - let mut transaction = Transaction::new_with_payer( - &[Instruction::new_with_bincode(program_id, &(), vec![])], - Some(&payer.pubkey()), - ); - transaction.sign(&[&payer], recent_blockhash); - banks_client.process_transaction(transaction).await.unwrap(); -} diff --git a/programs/sbf/tests/sanity.rs b/programs/sbf/tests/sanity.rs deleted file mode 100644 index 6a561bcae1c395..00000000000000 
--- a/programs/sbf/tests/sanity.rs +++ /dev/null @@ -1,37 +0,0 @@ -#![cfg(feature = "test-bpf")] - -use { - solana_program_test::*, - solana_sbf_rust_sanity::process_instruction, - solana_sdk::{ - instruction::{AccountMeta, Instruction}, - pubkey::Pubkey, - signature::{Keypair, Signer}, - transaction::Transaction, - }, -}; - -#[tokio::test] -async fn test_sanity() { - let program_id = Pubkey::new_unique(); - let program_test = ProgramTest::new( - "solana_sbf_rust_sanity", - program_id, - processor!(process_instruction), - ); - let (mut banks_client, payer_keypair, recent_blockhash) = program_test.start().await; - - let mut transaction = Transaction::new_with_payer( - &[Instruction::new_with_bincode( - program_id, - &(), - vec![ - AccountMeta::new(payer_keypair.pubkey(), true), - AccountMeta::new(Keypair::new().pubkey(), false), - ], - )], - Some(&payer_keypair.pubkey()), - ); - transaction.sign(&[&payer_keypair], recent_blockhash); - banks_client.process_transaction(transaction).await.unwrap(); -} diff --git a/programs/sbf/tests/simulation.rs b/programs/sbf/tests/simulation.rs index f27cf52eeb25f9..6b799c05690c0b 100644 --- a/programs/sbf/tests/simulation.rs +++ b/programs/sbf/tests/simulation.rs @@ -1,44 +1,86 @@ -#![cfg(feature = "test-bpf")] - use { - solana_program_test::{processor, tokio, ProgramTest}, - solana_sbf_rust_simulation::process_instruction, + agave_validator::test_validator::*, + solana_runtime::{ + bank::Bank, + bank_client::BankClient, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + loader_utils::load_upgradeable_program_and_advance_slot, + }, solana_sdk::{ instruction::{AccountMeta, Instruction}, + message::Message, pubkey::Pubkey, - signature::Signer, - sysvar, - transaction::Transaction, + signature::{Keypair, Signer}, + sysvar::{clock, slot_history}, + transaction::{SanitizedTransaction, Transaction}, }, }; -#[tokio::test] -async fn no_panic_banks_client() { - let program_id = Pubkey::new_unique(); - let program_test = 
ProgramTest::new( +#[test] +#[cfg(feature = "sbf_rust")] +fn test_no_panic_banks_client() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(50); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank.clone()); + let authority_keypair = Keypair::new(); + let (bank, program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, "solana_sbf_rust_simulation", + ); + bank.freeze(); + + let instruction = Instruction::new_with_bincode( program_id, - processor!(process_instruction), + &[0u8; 0], + vec![ + AccountMeta::new_readonly(slot_history::id(), false), + AccountMeta::new_readonly(clock::id(), false), + ], ); + let blockhash = bank.last_blockhash(); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let transaction = Transaction::new(&[&mint_keypair], message, blockhash); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + let result = bank.simulate_transaction(&sanitized_tx, false); + assert!(result.result.is_ok()); +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_no_panic_rpc_client() { + solana_logger::setup(); + + let program_id = Pubkey::new_unique(); + let (test_validator, payer) = TestValidatorGenesis::default() + .add_program("solana_sbf_rust_simulation", program_id) + .start(); + let rpc_client = test_validator.get_rpc_client(); + let blockhash = rpc_client.get_latest_blockhash().unwrap(); - let mut context = program_test.start_with_context().await; let transaction = Transaction::new_signed_with_payer( &[Instruction { program_id, accounts: vec![ - AccountMeta::new_readonly(sysvar::slot_history::id(), false), - AccountMeta::new_readonly(sysvar::clock::id(), false), + AccountMeta::new_readonly(slot_history::id(), false), + AccountMeta::new_readonly(clock::id(), false), ], 
data: vec![], }], - Some(&context.payer.pubkey()), - &[&context.payer], - context.last_blockhash, + Some(&payer.pubkey()), + &[&payer], + blockhash, ); - context - .banks_client - .process_transaction_with_preflight(transaction) - .await + rpc_client + .send_and_confirm_transaction(&transaction) .unwrap(); } diff --git a/programs/sbf/tests/simulation_validator.rs b/programs/sbf/tests/simulation_validator.rs deleted file mode 100644 index 17de51e665e3ec..00000000000000 --- a/programs/sbf/tests/simulation_validator.rs +++ /dev/null @@ -1,41 +0,0 @@ -#![cfg(feature = "test-bpf")] - -use { - agave_validator::test_validator::*, - solana_program::{ - instruction::{AccountMeta, Instruction}, - pubkey::Pubkey, - sysvar, - }, - solana_sdk::{signature::Signer, transaction::Transaction}, -}; - -#[test] -fn no_panic_rpc_client() { - solana_logger::setup_with_default("solana_program_runtime=debug"); - let program_id = Pubkey::new_unique(); - - let (test_validator, payer) = TestValidatorGenesis::default() - .add_program("solana_sbf_rust_simulation", program_id) - .start(); - let rpc_client = test_validator.get_rpc_client(); - let blockhash = rpc_client.get_latest_blockhash().unwrap(); - - let transaction = Transaction::new_signed_with_payer( - &[Instruction { - program_id, - accounts: vec![ - AccountMeta::new_readonly(sysvar::slot_history::id(), false), - AccountMeta::new_readonly(sysvar::clock::id(), false), - ], - data: vec![], - }], - Some(&payer.pubkey()), - &[&payer], - blockhash, - ); - - rpc_client - .send_and_confirm_transaction(&transaction) - .unwrap(); -} diff --git a/programs/sbf/tests/sysvar.rs b/programs/sbf/tests/sysvar.rs index ffa2f625b6d03d..4e9dcc566dde63 100644 --- a/programs/sbf/tests/sysvar.rs +++ b/programs/sbf/tests/sysvar.rs @@ -1,31 +1,36 @@ -#![cfg(feature = "test-bpf")] - use { - solana_program_test::*, - solana_sbf_rust_sysvar::process_instruction, + solana_runtime::{ + bank::Bank, + bank_client::BankClient, + genesis_utils::{create_genesis_config, 
GenesisConfigInfo}, + loader_utils::load_upgradeable_program_and_advance_slot, + }, solana_sdk::{ feature_set::disable_fees_sysvar, instruction::{AccountMeta, Instruction}, + message::Message, pubkey::Pubkey, - signature::Signer, + signature::{Keypair, Signer}, sysvar::{ - clock, epoch_rewards, epoch_schedule, fees, instructions, recent_blockhashes, rent, + clock, epoch_rewards, epoch_schedule, instructions, recent_blockhashes, rent, slot_hashes, slot_history, stake_history, }, - transaction::Transaction, + transaction::{SanitizedTransaction, Transaction}, }, }; -#[tokio::test] -async fn test_sysvars() { - let program_id = Pubkey::new_unique(); - - let mut program_test = ProgramTest::new( - "solana_sbf_rust_sysvar", - program_id, - processor!(process_instruction), - ); +#[test] +#[cfg(feature = "sbf_rust")] +fn test_sysvar_syscalls() { + solana_logger::setup(); + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. + } = create_genesis_config(50); + genesis_config.accounts.remove(&disable_fees_sysvar::id()); + let bank = Bank::new_for_tests(&genesis_config); let epoch_rewards = epoch_rewards::EpochRewards { distribution_starting_block_height: 42, total_rewards: 100, @@ -33,50 +38,25 @@ async fn test_sysvars() { active: true, ..epoch_rewards::EpochRewards::default() }; - program_test.add_sysvar_account(epoch_rewards::id(), &epoch_rewards); - let (mut banks_client, payer, recent_blockhash) = program_test.start().await; - - let mut transaction = Transaction::new_with_payer( - &[Instruction::new_with_bincode( - program_id, - &[0u8], - vec![ - AccountMeta::new(payer.pubkey(), true), - AccountMeta::new(Pubkey::new_unique(), false), - AccountMeta::new_readonly(clock::id(), false), - AccountMeta::new_readonly(epoch_schedule::id(), false), - AccountMeta::new_readonly(instructions::id(), false), - #[allow(deprecated)] - AccountMeta::new_readonly(recent_blockhashes::id(), false), - AccountMeta::new_readonly(rent::id(), false), - 
AccountMeta::new_readonly(slot_hashes::id(), false), - AccountMeta::new_readonly(slot_history::id(), false), - AccountMeta::new_readonly(stake_history::id(), false), - #[allow(deprecated)] - AccountMeta::new_readonly(fees::id(), false), - AccountMeta::new_readonly(epoch_rewards::id(), false), - ], - )], - Some(&payer.pubkey()), - ); - transaction.sign(&[&payer], recent_blockhash); - banks_client.process_transaction(transaction).await.unwrap(); - - let mut program_test = ProgramTest::new( + bank.set_sysvar_for_tests(&epoch_rewards); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank); + let authority_keypair = Keypair::new(); + let (bank, program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, "solana_sbf_rust_sysvar", - program_id, - processor!(process_instruction), ); - program_test.deactivate_feature(disable_fees_sysvar::id()); - program_test.add_sysvar_account(epoch_rewards::id(), &epoch_rewards); - let (mut banks_client, payer, recent_blockhash) = program_test.start().await; + bank.freeze(); - let mut transaction = Transaction::new_with_payer( - &[Instruction::new_with_bincode( + for instruction_data in &[0u8, 1u8] { + let instruction = Instruction::new_with_bincode( program_id, - &[1u8], + &[instruction_data], vec![ - AccountMeta::new(payer.pubkey(), true), + AccountMeta::new(mint_keypair.pubkey(), true), AccountMeta::new(Pubkey::new_unique(), false), AccountMeta::new_readonly(clock::id(), false), AccountMeta::new_readonly(epoch_schedule::id(), false), @@ -87,13 +67,14 @@ async fn test_sysvars() { AccountMeta::new_readonly(slot_hashes::id(), false), AccountMeta::new_readonly(slot_history::id(), false), AccountMeta::new_readonly(stake_history::id(), false), - #[allow(deprecated)] - AccountMeta::new_readonly(fees::id(), false), AccountMeta::new_readonly(epoch_rewards::id(), false), ], - )], - Some(&payer.pubkey()), 
- ); - transaction.sign(&[&payer], recent_blockhash); - banks_client.process_transaction(transaction).await.unwrap(); + ); + let blockhash = bank.last_blockhash(); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let transaction = Transaction::new(&[&mint_keypair], message, blockhash); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + let result = bank.simulate_transaction(&sanitized_tx, false); + assert!(result.result.is_ok()); + } } diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 1ccd2e857c7521..b55c904b4b17a6 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -15,6 +15,7 @@ log = { workspace = true } solana-config-program = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-type-overrides = { workspace = true } solana-vote-program = { workspace = true } [dev-dependencies] diff --git a/programs/system/Cargo.toml b/programs/system/Cargo.toml index a3366d5c8f3f4f..1e5643587c8f88 100644 --- a/programs/system/Cargo.toml +++ b/programs/system/Cargo.toml @@ -16,6 +16,7 @@ serde = { workspace = true } serde_derive = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-type-overrides = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index d455fb84ba5c12..db14cc9c5ebba7 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -542,7 +542,10 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| mod tests { #[allow(deprecated)] use solana_sdk::{ - account::{self, Account, AccountSharedData, ReadableAccount}, + account::{ + self, create_account_shared_data_with_fields, to_account, Account, AccountSharedData, + ReadableAccount, 
DUMMY_INHERITABLE_ACCOUNT_FIELDS, + }, fee_calculator::FeeCalculator, hash::{hash, Hash}, instruction::{AccountMeta, Instruction, InstructionError}, @@ -552,8 +555,12 @@ mod tests { Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, }, }, - nonce_account, recent_blockhashes_account, system_instruction, system_program, - sysvar::{self, recent_blockhashes::IterItem, rent::Rent}, + nonce_account, system_instruction, system_program, + sysvar::{ + self, + recent_blockhashes::{IntoIterSorted, IterItem, RecentBlockhashes, MAX_ENTRIES}, + rent::Rent, + }, }; use { super::*, @@ -562,6 +569,7 @@ mod tests { solana_program_runtime::{ invoke_context::mock_process_instruction, with_mock_invoke_context, }, + std::collections::BinaryHeap, }; impl From for Address { @@ -595,11 +603,30 @@ mod tests { fn create_default_account() -> AccountSharedData { AccountSharedData::new(0, 0, &Pubkey::new_unique()) } + #[allow(deprecated)] + fn create_recent_blockhashes_account_for_test<'a, I>( + recent_blockhash_iter: I, + ) -> AccountSharedData + where + I: IntoIterator>, + { + let mut account = create_account_shared_data_with_fields::( + &RecentBlockhashes::default(), + DUMMY_INHERITABLE_ACCOUNT_FIELDS, + ); + let sorted = BinaryHeap::from_iter(recent_blockhash_iter); + let sorted_iter = IntoIterSorted::new(sorted); + let recent_blockhash_iter = sorted_iter.take(MAX_ENTRIES); + let recent_blockhashes: RecentBlockhashes = recent_blockhash_iter.collect(); + to_account(&recent_blockhashes, &mut account); + account + } fn create_default_recent_blockhashes_account() -> AccountSharedData { #[allow(deprecated)] - recent_blockhashes_account::create_account_with_data_for_test( - vec![IterItem(0u64, &Hash::default(), 0); sysvar::recent_blockhashes::MAX_ENTRIES], - ) + create_recent_blockhashes_account_for_test(vec![ + IterItem(0u64, &Hash::default(), 0); + sysvar::recent_blockhashes::MAX_ENTRIES + ]) } fn create_default_rent_account() -> AccountSharedData { 
account::create_account_shared_data_for_test(&Rent::free()) @@ -1551,10 +1578,10 @@ mod tests { ); let blockhash = hash(&serialize(&0).unwrap()); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test( - vec![IterItem(0u64, &blockhash, 0); sysvar::recent_blockhashes::MAX_ENTRIES], - ); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![ + IterItem(0u64, &blockhash, 0); + sysvar::recent_blockhashes::MAX_ENTRIES + ]); mock_process_instruction( &system_program::id(), Vec::new(), @@ -1837,8 +1864,7 @@ mod tests { #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(vec![]); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![]); process_instruction( &serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(), vec![ @@ -1900,8 +1926,7 @@ mod tests { Ok(()), ); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(vec![]); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![]); mock_process_instruction( &system_program::id(), Vec::new(), diff --git a/programs/zk-elgamal-proof/Cargo.toml b/programs/zk-elgamal-proof/Cargo.toml index c6d795adeb467b..059f5481e91460 100644 --- a/programs/zk-elgamal-proof/Cargo.toml +++ b/programs/zk-elgamal-proof/Cargo.toml @@ -9,7 +9,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } solana-program-runtime = { workspace = true } diff --git a/programs/zk-token-proof-tests/Cargo.toml 
b/programs/zk-token-proof-tests/Cargo.toml index 1a20cae3730767..a00c98b20e4d2b 100644 --- a/programs/zk-token-proof-tests/Cargo.toml +++ b/programs/zk-token-proof-tests/Cargo.toml @@ -8,7 +8,7 @@ license = { workspace = true } edition = { workspace = true } [dev-dependencies] -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } curve25519-dalek = { workspace = true } solana-compute-budget = { workspace = true } solana-program-test = { workspace = true } diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index ec577487a9f5c6..29f53ec069209f 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -9,7 +9,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } solana-program-runtime = { workspace = true } diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index b79e91f681b97f..44663b3372cb2c 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -183,10 +183,9 @@ use { RpcTransactionLogsFilter, }, error_object::RpcErrorObject, - filter::maybe_map_filters, response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, - RpcSignatureResult, RpcVersionInfo, RpcVote, SlotInfo, SlotUpdate, + RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, }, }, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, @@ -194,7 +193,7 @@ use { thiserror::Error, tokio::{ net::TcpStream, - sync::{mpsc, oneshot, RwLock}, + sync::{mpsc, oneshot}, task::JoinHandle, time::{sleep, Duration}, }, @@ -265,9 +264,8 @@ type RequestMsg = ( #[derive(Debug)] pub struct PubsubClient { subscribe_sender: mpsc::UnboundedSender, - request_sender: mpsc::UnboundedSender, + 
_request_sender: mpsc::UnboundedSender, shutdown_sender: oneshot::Sender<()>, - node_version: RwLock>, ws: JoinHandle, } @@ -279,14 +277,14 @@ impl PubsubClient { .map_err(PubsubClientError::ConnectionError)?; let (subscribe_sender, subscribe_receiver) = mpsc::unbounded_channel(); - let (request_sender, request_receiver) = mpsc::unbounded_channel(); + let (_request_sender, request_receiver) = mpsc::unbounded_channel(); let (shutdown_sender, shutdown_receiver) = oneshot::channel(); + #[allow(clippy::used_underscore_binding)] Ok(Self { subscribe_sender, - request_sender, + _request_sender, shutdown_sender, - node_version: RwLock::new(None), ws: tokio::spawn(PubsubClient::run_ws( ws, subscribe_receiver, @@ -301,43 +299,11 @@ impl PubsubClient { self.ws.await.unwrap() // WS future should not be cancelled or panicked } - pub async fn set_node_version(&self, version: semver::Version) -> Result<(), ()> { - let mut w_node_version = self.node_version.write().await; - *w_node_version = Some(version); + #[deprecated(since = "2.0.2", note = "PubsubClient::node_version is no longer used")] + pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> { Ok(()) } - async fn get_node_version(&self) -> PubsubClientResult { - let r_node_version = self.node_version.read().await; - if let Some(version) = &*r_node_version { - Ok(version.clone()) - } else { - drop(r_node_version); - let mut w_node_version = self.node_version.write().await; - let node_version = self.get_version().await?; - *w_node_version = Some(node_version.clone()); - Ok(node_version) - } - } - - async fn get_version(&self) -> PubsubClientResult { - let (response_sender, response_receiver) = oneshot::channel(); - self.request_sender - .send(("getVersion".to_string(), Value::Null, response_sender)) - .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))?; - let result = response_receiver - .await - .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))??; - let 
node_version: RpcVersionInfo = serde_json::from_value(result)?; - let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| { - PubsubClientError::RequestFailed { - reason: format!("failed to parse cluster version: {e}"), - message: "getVersion".to_string(), - } - })?; - Ok(node_version) - } - async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T> where T: DeserializeOwned + Send + 'a, @@ -426,22 +392,8 @@ impl PubsubClient { pub async fn program_subscribe( &self, pubkey: &Pubkey, - mut config: Option, + config: Option, ) -> SubscribeResult<'_, RpcResponse> { - if let Some(ref mut config) = config { - if let Some(ref mut filters) = config.filters { - let node_version = self.get_node_version().await.ok(); - // If node does not support the pubsub `getVersion` method, assume version is old - // and filters should be mapped (node_version.is_none()). - maybe_map_filters(node_version, filters).map_err(|e| { - PubsubClientError::RequestFailed { - reason: e, - message: "maybe_map_filters".to_string(), - } - })?; - } - } - let params = json!([pubkey.to_string(), config]); self.subscribe("program", params).await } diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index 70769619db1f4d..5247bdb8b9e263 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -103,7 +103,6 @@ use { RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter, }, - filter, response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, @@ -207,35 +206,6 @@ where .map_err(|err| err.into()) } - fn get_version( - writable_socket: &Arc>>>, - ) -> Result { - writable_socket.write().unwrap().send(Message::Text( - json!({ - "jsonrpc":"2.0","id":1,"method":"getVersion", - }) - .to_string(), - ))?; - let message = writable_socket.write().unwrap().read()?; - let 
message_text = &message.into_text()?; - - if let Ok(json_msg) = serde_json::from_str::>(message_text) { - if let Some(Object(version_map)) = json_msg.get("result") { - if let Some(node_version) = version_map.get("solana-core") { - if let Some(node_version) = node_version.as_str() { - if let Ok(parsed) = semver::Version::parse(node_version) { - return Ok(parsed); - } - } - } - } - } - - Err(PubsubClientError::UnexpectedGetVersionResponse(format!( - "msg={message_text}" - ))) - } - fn read_message( writable_socket: &Arc>>>, ) -> Result, PubsubClientError> { @@ -523,7 +493,7 @@ impl PubsubClient { pub fn program_subscribe( url: &str, pubkey: &Pubkey, - mut config: Option, + config: Option, ) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -534,16 +504,6 @@ impl PubsubClient { let exit = Arc::new(AtomicBool::new(false)); let exit_clone = exit.clone(); - if let Some(ref mut config) = config { - if let Some(ref mut filters) = config.filters { - let node_version = PubsubProgramClientSubscription::get_version(&socket_clone).ok(); - // If node does not support the pubsub `getVersion` method, assume version is old - // and filters should be mapped (node_version.is_none()). 
- filter::maybe_map_filters(node_version, filters) - .map_err(PubsubClientError::RequestError)?; - } - } - let body = json!({ "jsonrpc":"2.0", "id":1, diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index c8d1eaad8b7959..22a883244c709e 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -28,6 +28,7 @@ solana-version = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +const_format = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/rpc-client-api/src/config.rs b/rpc-client-api/src/config.rs index 9bf1819b32d0a2..db13ea1280d829 100644 --- a/rpc-client-api/src/config.rs +++ b/rpc-client-api/src/config.rs @@ -119,6 +119,7 @@ pub struct RpcLargestAccountsConfig { #[serde(flatten)] pub commitment: Option, pub filter: Option, + pub sort_results: Option, } #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index b6175a9230bdcc..62857b1ee55c16 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -24,6 +24,7 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013 pub const JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET: i64 = -32014; pub const JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION: i64 = -32015; pub const JSON_RPC_SERVER_ERROR_MIN_CONTEXT_SLOT_NOT_REACHED: i64 = -32016; +pub const JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE: i64 = -32017; #[derive(Error, Debug)] pub enum RpcCustomError { @@ -65,6 +66,12 @@ pub enum RpcCustomError { UnsupportedTransactionVersion(u8), #[error("MinContextSlotNotReached")] MinContextSlotNotReached { context_slot: Slot }, + #[error("EpochRewardsPeriodActive")] + EpochRewardsPeriodActive { + slot: Slot, + current_block_height: u64, + rewards_complete_block_height: u64, + }, } #[derive(Debug, Serialize, Deserialize)] @@ -79,6 +86,13 @@ 
pub struct MinContextSlotNotReachedErrorData { pub context_slot: Slot, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EpochRewardsPeriodActiveErrorData { + pub current_block_height: u64, + pub rewards_complete_block_height: u64, +} + impl From for RpcCustomError { fn from(err: EncodeError) -> Self { match err { @@ -206,6 +220,14 @@ impl From for Error { context_slot, })), }, + RpcCustomError::EpochRewardsPeriodActive { slot, current_block_height, rewards_complete_block_height } => Self { + code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE), + message: format!("Epoch rewards period still active at slot {slot}"), + data: Some(serde_json::json!(EpochRewardsPeriodActiveErrorData { + current_block_height, + rewards_complete_block_height, + })), + }, } } } diff --git a/rpc-client-api/src/deprecated_config.rs b/rpc-client-api/src/deprecated_config.rs deleted file mode 100644 index ab562aa0e8c05b..00000000000000 --- a/rpc-client-api/src/deprecated_config.rs +++ /dev/null @@ -1,122 +0,0 @@ -#![allow(deprecated)] -use { - crate::config::{ - EncodingConfig, RpcBlockConfig, RpcEncodingConfigWrapper, RpcTransactionConfig, - }, - solana_sdk::{clock::Slot, commitment_config::CommitmentConfig}, - solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, -}; - -#[deprecated( - since = "1.7.0", - note = "Please use RpcSignaturesForAddressConfig instead" -)] -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct RpcGetConfirmedSignaturesForAddress2Config { - pub before: Option, // Signature as base-58 string - pub until: Option, // Signature as base-58 string - pub limit: Option, - #[serde(flatten)] - pub commitment: Option, -} - -#[deprecated(since = "1.7.0", note = "Please use RpcBlockConfig instead")] -#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct 
RpcConfirmedBlockConfig { - pub encoding: Option, - pub transaction_details: Option, - pub rewards: Option, - #[serde(flatten)] - pub commitment: Option, -} - -impl EncodingConfig for RpcConfirmedBlockConfig { - fn new_with_encoding(encoding: &Option) -> Self { - Self { - encoding: *encoding, - ..Self::default() - } - } -} - -impl RpcConfirmedBlockConfig { - pub fn rewards_only() -> Self { - Self { - transaction_details: Some(TransactionDetails::None), - ..Self::default() - } - } - - pub fn rewards_with_commitment(commitment: Option) -> Self { - Self { - transaction_details: Some(TransactionDetails::None), - commitment, - ..Self::default() - } - } -} - -impl From for RpcEncodingConfigWrapper { - fn from(config: RpcConfirmedBlockConfig) -> Self { - RpcEncodingConfigWrapper::Current(Some(config)) - } -} - -impl From for RpcBlockConfig { - fn from(config: RpcConfirmedBlockConfig) -> Self { - Self { - encoding: config.encoding, - transaction_details: config.transaction_details, - rewards: config.rewards, - commitment: config.commitment, - max_supported_transaction_version: None, - } - } -} - -#[deprecated(since = "1.7.0", note = "Please use RpcTransactionConfig instead")] -#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct RpcConfirmedTransactionConfig { - pub encoding: Option, - #[serde(flatten)] - pub commitment: Option, -} - -impl EncodingConfig for RpcConfirmedTransactionConfig { - fn new_with_encoding(encoding: &Option) -> Self { - Self { - encoding: *encoding, - ..Self::default() - } - } -} - -impl From for RpcTransactionConfig { - fn from(config: RpcConfirmedTransactionConfig) -> Self { - Self { - encoding: config.encoding, - commitment: config.commitment, - max_supported_transaction_version: None, - } - } -} - -#[deprecated(since = "1.7.0", note = "Please use RpcBlocksConfigWrapper instead")] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum 
RpcConfirmedBlocksConfigWrapper { - EndSlotOnly(Option), - CommitmentOnly(Option), -} - -impl RpcConfirmedBlocksConfigWrapper { - pub fn unzip(&self) -> (Option, Option) { - match &self { - RpcConfirmedBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None), - RpcConfirmedBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment), - } - } -} diff --git a/rpc-client-api/src/filter.rs b/rpc-client-api/src/filter.rs index 368d7ce7d7f855..bef8d1d16e8e67 100644 --- a/rpc-client-api/src/filter.rs +++ b/rpc-client-api/src/filter.rs @@ -1,6 +1,6 @@ -#![allow(deprecated)] use { - crate::version_req::VersionReq, + base64::{prelude::BASE64_STANDARD, Engine}, + serde::Deserialize, solana_inline_spl::{token::GenericTokenAccount, token_2022::Account}, solana_sdk::account::{AccountSharedData, ReadableAccount}, std::borrow::Cow, @@ -24,62 +24,46 @@ impl RpcFilterType { match self { RpcFilterType::DataSize(_) => Ok(()), RpcFilterType::Memcmp(compare) => { - let encoding = compare.encoding.as_ref().unwrap_or(&MemcmpEncoding::Binary); - match encoding { - MemcmpEncoding::Binary => { - use MemcmpEncodedBytes::*; - match &compare.bytes { - // DEPRECATED - Binary(bytes) => { - if bytes.len() > MAX_DATA_BASE58_SIZE { - return Err(RpcFilterError::Base58DataTooLarge); - } - let bytes = bs58::decode(&bytes) - .into_vec() - .map_err(RpcFilterError::DecodeError)?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::Base58DataTooLarge) - } else { - Ok(()) - } - } - Base58(bytes) => { - if bytes.len() > MAX_DATA_BASE58_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - let bytes = bs58::decode(&bytes).into_vec()?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::DataTooLarge) - } else { - Ok(()) - } - } - Base64(bytes) => { - if bytes.len() > MAX_DATA_BASE64_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - let bytes = base64::decode(bytes)?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::DataTooLarge) - } else { - Ok(()) - } - } - 
Bytes(bytes) => { - if bytes.len() > MAX_DATA_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - Ok(()) - } + use MemcmpEncodedBytes::*; + match &compare.bytes { + Base58(bytes) => { + if bytes.len() > MAX_DATA_BASE58_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + let bytes = bs58::decode(&bytes).into_vec()?; + if bytes.len() > MAX_DATA_SIZE { + Err(RpcFilterError::DataTooLarge) + } else { + Ok(()) + } + } + Base64(bytes) => { + if bytes.len() > MAX_DATA_BASE64_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + let bytes = BASE64_STANDARD.decode(bytes)?; + if bytes.len() > MAX_DATA_SIZE { + Err(RpcFilterError::DataTooLarge) + } else { + Ok(()) } } + Bytes(bytes) => { + if bytes.len() > MAX_DATA_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + Ok(()) + } } } RpcFilterType::TokenAccountState => Ok(()), } } - #[deprecated = "Use solana_rpc::filter::filter_allows instead"] + #[deprecated( + since = "2.0.0", + note = "Use solana_rpc::filter::filter_allows instead" + )] pub fn allows(&self, account: &AccountSharedData) -> bool { match self { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, @@ -93,65 +77,68 @@ impl RpcFilterType { pub enum RpcFilterError { #[error("encoded binary data should be less than 129 bytes")] DataTooLarge, - #[deprecated( - since = "1.8.1", - note = "Error for MemcmpEncodedBytes::Binary which is deprecated" - )] - #[error("encoded binary (base 58) data should be less than 129 bytes")] - Base58DataTooLarge, - #[deprecated( - since = "1.8.1", - note = "Error for MemcmpEncodedBytes::Binary which is deprecated" - )] - #[error("bs58 decode error")] - DecodeError(bs58::decode::Error), #[error("base58 decode error")] Base58DecodeError(#[from] bs58::decode::Error), #[error("base64 decode error")] Base64DecodeError(#[from] base64::DecodeError), } -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum MemcmpEncoding { - Binary, -} - 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] +#[serde(rename_all = "camelCase", tag = "encoding", content = "bytes")] pub enum MemcmpEncodedBytes { - #[deprecated( - since = "1.8.1", - note = "Please use MemcmpEncodedBytes::Base58 instead" - )] - Binary(String), Base58(String), Base64(String), Bytes(Vec), } +impl<'de> Deserialize<'de> for MemcmpEncodedBytes { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(untagged)] + enum DataType { + Encoded(String), + Raw(Vec), + } + + #[derive(Deserialize)] + #[serde(rename_all = "camelCase")] + enum RpcMemcmpEncoding { + Base58, + Base64, + Bytes, + } + + #[derive(Deserialize)] + struct RpcMemcmpInner { + bytes: DataType, + encoding: Option, + } + + let data = RpcMemcmpInner::deserialize(deserializer)?; + + let memcmp_encoded_bytes = match data.bytes { + DataType::Encoded(bytes) => match data.encoding.unwrap_or(RpcMemcmpEncoding::Base58) { + RpcMemcmpEncoding::Base58 => MemcmpEncodedBytes::Base58(bytes), + RpcMemcmpEncoding::Base64 => MemcmpEncodedBytes::Base64(bytes), + _ => unreachable!(), + }, + DataType::Raw(bytes) => MemcmpEncodedBytes::Bytes(bytes), + }; + + Ok(memcmp_encoded_bytes) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(into = "RpcMemcmp", from = "RpcMemcmp")] pub struct Memcmp { /// Data offset to begin match - #[deprecated( - since = "1.15.0", - note = "Field will be made private in future. Please use a constructor method instead." - )] - pub offset: usize, - /// Bytes, encoded with specified encoding, or default Binary - #[deprecated( - since = "1.15.0", - note = "Field will be made private in future. Please use a constructor method instead." 
- )] - pub bytes: MemcmpEncodedBytes, - /// Optional encoding specification - #[deprecated( - since = "1.11.2", - note = "Field has no server-side effect. Specify encoding with `MemcmpEncodedBytes` variant instead. \ - Field will be made private in future. Please use a constructor method instead." - )] - pub encoding: Option, + offset: usize, + /// Bytes, encoded with specified encoding + #[serde(flatten)] + bytes: MemcmpEncodedBytes, } impl Memcmp { @@ -159,7 +146,6 @@ impl Memcmp { Self { offset, bytes: encoded_bytes, - encoding: None, } } @@ -167,7 +153,6 @@ impl Memcmp { Self { offset, bytes: MemcmpEncodedBytes::Bytes(bytes), - encoding: None, } } @@ -175,15 +160,18 @@ impl Memcmp { Self { offset, bytes: MemcmpEncodedBytes::Base58(bs58::encode(bytes).into_string()), - encoding: None, } } + pub fn offset(&self) -> usize { + self.offset + } + pub fn bytes(&self) -> Option>> { use MemcmpEncodedBytes::*; match &self.bytes { - Binary(bytes) | Base58(bytes) => bs58::decode(bytes).into_vec().ok().map(Cow::Owned), - Base64(bytes) => base64::decode(bytes).ok().map(Cow::Owned), + Base58(bytes) => bs58::decode(bytes).into_vec().ok().map(Cow::Owned), + Base64(bytes) => BASE64_STANDARD.decode(bytes).ok().map(Cow::Owned), Bytes(bytes) => Some(Cow::Borrowed(bytes)), } } @@ -191,13 +179,13 @@ impl Memcmp { pub fn convert_to_raw_bytes(&mut self) -> Result<(), RpcFilterError> { use MemcmpEncodedBytes::*; match &self.bytes { - Binary(bytes) | Base58(bytes) => { + Base58(bytes) => { let bytes = bs58::decode(bytes).into_vec()?; self.bytes = Bytes(bytes); Ok(()) } Base64(bytes) => { - let bytes = base64::decode(bytes)?; + let bytes = BASE64_STANDARD.decode(bytes)?; self.bytes = Bytes(bytes); Ok(()) } @@ -219,120 +207,34 @@ impl Memcmp { None => false, } } -} - -// Internal struct to hold Memcmp filter data as either encoded String or raw Bytes -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(untagged)] -enum DataType { - Encoded(String), - Raw(Vec), -} 
- -// Internal struct used to specify explicit Base58 and Base64 encoding -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -enum RpcMemcmpEncoding { - Base58, - Base64, - // This variant exists only to preserve backward compatibility with generic `Memcmp` serde - #[serde(other)] - Binary, -} - -// Internal struct to enable Memcmp filters with explicit Base58 and Base64 encoding. The From -// implementations emulate `#[serde(tag = "encoding", content = "bytes")]` for -// `MemcmpEncodedBytes`. On the next major version, all these internal elements should be removed -// and replaced with adjacent tagging of `MemcmpEncodedBytes`. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -struct RpcMemcmp { - offset: usize, - bytes: DataType, - encoding: Option, -} - -impl From for RpcMemcmp { - fn from(memcmp: Memcmp) -> RpcMemcmp { - let (bytes, encoding) = match memcmp.bytes { - MemcmpEncodedBytes::Binary(string) => { - (DataType::Encoded(string), Some(RpcMemcmpEncoding::Binary)) - } - MemcmpEncodedBytes::Base58(string) => { - (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base58)) - } - MemcmpEncodedBytes::Base64(string) => { - (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base64)) - } - MemcmpEncodedBytes::Bytes(vector) => (DataType::Raw(vector), None), - }; - RpcMemcmp { - offset: memcmp.offset, - bytes, - encoding, - } - } -} - -impl From for Memcmp { - fn from(memcmp: RpcMemcmp) -> Memcmp { - let encoding = memcmp.encoding.unwrap_or(RpcMemcmpEncoding::Binary); - let bytes = match (encoding, memcmp.bytes) { - (RpcMemcmpEncoding::Binary, DataType::Encoded(string)) - | (RpcMemcmpEncoding::Base58, DataType::Encoded(string)) => { - MemcmpEncodedBytes::Base58(string) - } - (RpcMemcmpEncoding::Binary, DataType::Raw(vector)) => MemcmpEncodedBytes::Bytes(vector), - (RpcMemcmpEncoding::Base64, DataType::Encoded(string)) => { - MemcmpEncodedBytes::Base64(string) - } - _ => unreachable!(), - }; 
- Memcmp { - offset: memcmp.offset, - bytes, - encoding: None, - } - } -} -pub fn maybe_map_filters( - node_version: Option, - filters: &mut [RpcFilterType], -) -> Result<(), String> { - let version_reqs = VersionReq::from_strs(&["<1.11.2", "~1.13"])?; - let needs_mapping = node_version - .map(|version| version_reqs.matches_any(&version)) - .unwrap_or(true); - if needs_mapping { - for filter in filters.iter_mut() { - if let RpcFilterType::Memcmp(memcmp) = filter { - match &memcmp.bytes { - MemcmpEncodedBytes::Base58(string) => { - memcmp.bytes = MemcmpEncodedBytes::Binary(string.clone()); - } - MemcmpEncodedBytes::Base64(_) => { - return Err("RPC node on old version does not support base64 \ - encoding for memcmp filters" - .to_string()); - } - _ => {} - } - } + /// Returns reference to bytes if variant is MemcmpEncodedBytes::Bytes; + /// otherwise returns None. Used exclusively by solana-rpc to check + /// SPL-token filters. + pub fn raw_bytes_as_ref(&self) -> Option<&[u8]> { + use MemcmpEncodedBytes::*; + if let Bytes(bytes) = &self.bytes { + Some(bytes) + } else { + None } } - Ok(()) } #[cfg(test)] mod tests { - use super::*; + use { + super::*, + const_format::formatcp, + serde_json::{json, Value}, + }; #[test] fn test_worst_case_encoded_tx_goldens() { let ff_data = vec![0xffu8; MAX_DATA_SIZE]; let data58 = bs58::encode(&ff_data).into_string(); assert_eq!(data58.len(), MAX_DATA_BASE58_SIZE); - let data64 = base64::encode(&ff_data); + let data64 = BASE64_STANDARD.encode(&ff_data); assert_eq!(data64.len(), MAX_DATA_BASE64_SIZE); } @@ -344,7 +246,6 @@ mod tests { assert!(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1, 2, 3, 4, 5]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -352,7 +253,6 @@ mod tests { assert!(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1, 2]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -360,7 +260,6 @@ mod tests { assert!(Memcmp { offset: 2, bytes: 
MemcmpEncodedBytes::Base58(bs58::encode(vec![3, 4]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -368,7 +267,6 @@ mod tests { assert!(!Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![2]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -376,7 +274,6 @@ mod tests { assert!(!Memcmp { offset: 2, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![3, 4, 5, 6]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -384,7 +281,6 @@ mod tests { assert!(!Memcmp { offset: 6, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![5]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -392,7 +288,6 @@ mod tests { assert!(!Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58("III".to_string()), - encoding: None, } .bytes_match(&data)); } @@ -407,7 +302,6 @@ mod tests { RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(base58_bytes.to_string()), - encoding: None, }) .verify(), Ok(()) @@ -422,10 +316,118 @@ mod tests { RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(base58_bytes.to_string()), - encoding: None, }) .verify(), Err(RpcFilterError::DataTooLarge) ); } + + const BASE58_STR: &str = "Bpf4ERpEvSFmCSTNh1PzTWTkALrKXvMXEdthxHuwCQcf"; + const BASE64_STR: &str = "oMoycDvJzrjQpCfukbO4VW/FLGLfnbqBEc9KUEVgj2g="; + const BYTES: [u8; 4] = [0, 1, 2, 3]; + const OFFSET: usize = 42; + const DEFAULT_ENCODING_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET}}}"#); + const BINARY_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET},"encoding":"binary"}}"#); + const BASE58_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET},"encoding":"base58"}}"#); + const BASE64_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE64_STR}","offset":{OFFSET},"encoding":"base64"}}"#); + const BYTES_FILTER: &str = + formatcp!(r#"{{"bytes":[0, 1, 2, 3],"offset":{OFFSET},"encoding":null}}"#); + const BYTES_FILTER_WITH_ENCODING: &str 
= + formatcp!(r#"{{"bytes":[0, 1, 2, 3],"offset":{OFFSET},"encoding":"bytes"}}"#); + + #[test] + fn test_filter_deserialize() { + // Base58 is the default encoding + let default: Memcmp = serde_json::from_str(DEFAULT_ENCODING_FILTER).unwrap(); + assert_eq!( + default, + Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()), + } + ); + + // Binary input is no longer supported + let binary = serde_json::from_str::(BINARY_FILTER); + assert!(binary.is_err()); + + // Base58 input + let base58_filter: Memcmp = serde_json::from_str(BASE58_FILTER).unwrap(); + assert_eq!( + base58_filter, + Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()), + } + ); + + // Base64 input + let base64_filter: Memcmp = serde_json::from_str(BASE64_FILTER).unwrap(); + assert_eq!( + base64_filter, + Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Base64(BASE64_STR.to_string()), + } + ); + + // Raw bytes input + let bytes_filter: Memcmp = serde_json::from_str(BYTES_FILTER).unwrap(); + assert_eq!( + bytes_filter, + Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()), + } + ); + + let bytes_filter: Memcmp = serde_json::from_str(BYTES_FILTER_WITH_ENCODING).unwrap(); + assert_eq!( + bytes_filter, + Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()), + } + ); + } + + #[test] + fn test_filter_serialize() { + // Base58 + let base58 = Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()), + }; + let serialized_json = json!(base58); + assert_eq!( + serialized_json, + serde_json::from_str::(BASE58_FILTER).unwrap() + ); + + // Base64 + let base64 = Memcmp { + offset: OFFSET, + bytes: MemcmpEncodedBytes::Base64(BASE64_STR.to_string()), + }; + let serialized_json = json!(base64); + assert_eq!( + serialized_json, + serde_json::from_str::(BASE64_FILTER).unwrap() + ); + + // Bytes + let bytes = Memcmp { + offset: OFFSET, + bytes: 
MemcmpEncodedBytes::Bytes(BYTES.to_vec()), + }; + let serialized_json = json!(bytes); + assert_eq!( + serialized_json, + serde_json::from_str::(BYTES_FILTER_WITH_ENCODING).unwrap() + ); + } } diff --git a/rpc-client-api/src/lib.rs b/rpc-client-api/src/lib.rs index 6386a433f719b7..b2484637766ce7 100644 --- a/rpc-client-api/src/lib.rs +++ b/rpc-client-api/src/lib.rs @@ -3,12 +3,10 @@ pub mod client_error; pub mod config; pub mod custom_error; -pub mod deprecated_config; pub mod error_object; pub mod filter; pub mod request; pub mod response; -pub mod version_req; #[macro_use] extern crate serde_derive; diff --git a/rpc-client-api/src/request.rs b/rpc-client-api/src/request.rs index f0bbe0af3e78c6..fe032a858deb47 100644 --- a/rpc-client-api/src/request.rs +++ b/rpc-client-api/src/request.rs @@ -8,9 +8,7 @@ use { #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum RpcRequest { - Custom { - method: &'static str, - }, + Custom { method: &'static str }, DeregisterNode, GetAccountInfo, GetBalance, @@ -21,43 +19,9 @@ pub enum RpcRequest { GetBlocksWithLimit, GetBlockTime, GetClusterNodes, - #[deprecated(since = "1.7.0", note = "Please use RpcRequest::GetBlock instead")] - GetConfirmedBlock, - #[deprecated(since = "1.7.0", note = "Please use RpcRequest::GetBlocks instead")] - GetConfirmedBlocks, - #[deprecated( - since = "1.7.0", - note = "Please use RpcRequest::GetBlocksWithLimit instead" - )] - GetConfirmedBlocksWithLimit, - #[deprecated( - since = "1.7.0", - note = "Please use RpcRequest::GetSignaturesForAddress instead" - )] - GetConfirmedSignaturesForAddress2, - #[deprecated( - since = "1.7.0", - note = "Please use RpcRequest::GetTransaction instead" - )] - GetConfirmedTransaction, GetEpochInfo, GetEpochSchedule, - #[deprecated( - since = "1.9.0", - note = "Please use RpcRequest::GetFeeForMessage instead" - )] - GetFeeCalculatorForBlockhash, GetFeeForMessage, - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the 
future" - )] - GetFeeRateGovernor, - #[deprecated( - since = "1.9.0", - note = "Please use RpcRequest::GetFeeForMessage instead" - )] - GetFees, GetFirstAvailableBlock, GetGenesisHash, GetHealth, @@ -73,19 +37,9 @@ pub enum RpcRequest { GetMinimumBalanceForRentExemption, GetMultipleAccounts, GetProgramAccounts, - #[deprecated( - since = "1.9.0", - note = "Please use RpcRequest::GetLatestBlockhash instead" - )] - GetRecentBlockhash, GetRecentPerformanceSamples, GetRecentPrioritizationFees, GetHighestSnapshotSlot, - #[deprecated( - since = "1.9.0", - note = "Please use RpcRequest::GetHighestSnapshotSlot instead" - )] - GetSnapshotSlot, GetSignaturesForAddress, GetSignatureStatuses, GetSlot, @@ -94,7 +48,6 @@ pub enum RpcRequest { GetStorageTurn, GetStorageTurnRate, GetSlotsPerSegment, - GetStakeActivation, GetStakeMinimumDelegation, GetStoragePubkeysForSlot, GetSupply, @@ -131,17 +84,9 @@ impl fmt::Display for RpcRequest { RpcRequest::GetBlocksWithLimit => "getBlocksWithLimit", RpcRequest::GetBlockTime => "getBlockTime", RpcRequest::GetClusterNodes => "getClusterNodes", - RpcRequest::GetConfirmedBlock => "getConfirmedBlock", - RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks", - RpcRequest::GetConfirmedBlocksWithLimit => "getConfirmedBlocksWithLimit", - RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2", - RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction", RpcRequest::GetEpochInfo => "getEpochInfo", RpcRequest::GetEpochSchedule => "getEpochSchedule", - RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash", RpcRequest::GetFeeForMessage => "getFeeForMessage", - RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor", - RpcRequest::GetFees => "getFees", RpcRequest::GetFirstAvailableBlock => "getFirstAvailableBlock", RpcRequest::GetGenesisHash => "getGenesisHash", RpcRequest::GetHealth => "getHealth", @@ -157,17 +102,14 @@ impl fmt::Display for RpcRequest { 
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption", RpcRequest::GetMultipleAccounts => "getMultipleAccounts", RpcRequest::GetProgramAccounts => "getProgramAccounts", - RpcRequest::GetRecentBlockhash => "getRecentBlockhash", RpcRequest::GetRecentPerformanceSamples => "getRecentPerformanceSamples", RpcRequest::GetRecentPrioritizationFees => "getRecentPrioritizationFees", RpcRequest::GetHighestSnapshotSlot => "getHighestSnapshotSlot", - RpcRequest::GetSnapshotSlot => "getSnapshotSlot", RpcRequest::GetSignaturesForAddress => "getSignaturesForAddress", RpcRequest::GetSignatureStatuses => "getSignatureStatuses", RpcRequest::GetSlot => "getSlot", RpcRequest::GetSlotLeader => "getSlotLeader", RpcRequest::GetSlotLeaders => "getSlotLeaders", - RpcRequest::GetStakeActivation => "getStakeActivation", RpcRequest::GetStakeMinimumDelegation => "getStakeMinimumDelegation", RpcRequest::GetStorageTurn => "getStorageTurn", RpcRequest::GetStorageTurnRate => "getStorageTurnRate", @@ -303,20 +245,9 @@ mod tests { let request = test_request.build_request_json(1, Value::Null); assert_eq!(request["method"], "getEpochInfo"); - #[allow(deprecated)] - let test_request = RpcRequest::GetRecentBlockhash; - let request = test_request.build_request_json(1, Value::Null); - assert_eq!(request["method"], "getRecentBlockhash"); - - #[allow(deprecated)] - let test_request = RpcRequest::GetFeeCalculatorForBlockhash; - let request = test_request.build_request_json(1, json!([addr])); - assert_eq!(request["method"], "getFeeCalculatorForBlockhash"); - - #[allow(deprecated)] - let test_request = RpcRequest::GetFeeRateGovernor; + let test_request = RpcRequest::GetLatestBlockhash; let request = test_request.build_request_json(1, Value::Null); - assert_eq!(request["method"], "getFeeRateGovernor"); + assert_eq!(request["method"], "getLatestBlockhash"); let test_request = RpcRequest::GetSlot; let request = test_request.build_request_json(1, Value::Null); @@ -347,8 +278,7 @@ mod 
tests { let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"); // Test request with CommitmentConfig and no params - #[allow(deprecated)] - let test_request = RpcRequest::GetRecentBlockhash; + let test_request = RpcRequest::GetLatestBlockhash; let request = test_request.build_request_json(1, json!([commitment_config])); assert_eq!(request["params"], json!([commitment_config.clone()])); diff --git a/rpc-client-api/src/response.rs b/rpc-client-api/src/response.rs index f9d3085e83c2d9..fcb330103057e4 100644 --- a/rpc-client-api/src/response.rs +++ b/rpc-client-api/src/response.rs @@ -5,7 +5,6 @@ use { solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, fee_calculator::{FeeCalculator, FeeRateGovernor}, - hash::Hash, inflation::Inflation, transaction::{Result, TransactionError}, }, @@ -119,31 +118,6 @@ pub struct RpcBlockhash { pub last_valid_block_height: u64, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct RpcFees { - pub blockhash: String, - pub fee_calculator: FeeCalculator, - pub last_valid_slot: Slot, - pub last_valid_block_height: u64, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct DeprecatedRpcFees { - pub blockhash: String, - pub fee_calculator: FeeCalculator, - pub last_valid_slot: Slot, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Fees { - pub blockhash: Hash, - pub fee_calculator: FeeCalculator, - pub last_valid_block_height: u64, -} - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct RpcFeeCalculator { @@ -294,10 +268,20 @@ pub struct RpcContactInfo { pub pubkey: String, /// Gossip port pub gossip: Option, + /// Tvu UDP port + pub tvu: Option, /// Tpu UDP port pub tpu: Option, /// Tpu QUIC port pub tpu_quic: Option, + /// Tpu UDP forwards port + pub tpu_forwards: Option, + /// Tpu QUIC 
forwards port + pub tpu_forwards_quic: Option, + /// Tpu UDP vote port + pub tpu_vote: Option, + /// Server repair UDP port + pub serve_repair: Option, /// JSON RPC port pub rpc: Option, /// WebSocket PubSub port @@ -459,14 +443,6 @@ pub enum StakeActivationState { Inactive, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct RpcStakeActivation { - pub state: StakeActivationState, - pub active: u64, - pub inactive: u64, -} - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(rename_all = "camelCase")] pub struct RpcTokenAccountBalance { diff --git a/rpc-client-api/src/version_req.rs b/rpc-client-api/src/version_req.rs deleted file mode 100644 index 8c8d57e35c2610..00000000000000 --- a/rpc-client-api/src/version_req.rs +++ /dev/null @@ -1,20 +0,0 @@ -pub(crate) struct VersionReq(Vec); - -impl VersionReq { - pub(crate) fn from_strs(versions: &[T]) -> Result - where - T: AsRef + std::fmt::Debug, - { - let mut version_reqs = vec![]; - for version in versions { - let version_req = semver::VersionReq::parse(version.as_ref()) - .map_err(|err| format!("Could not parse version {version:?}: {err:?}"))?; - version_reqs.push(version_req); - } - Ok(Self(version_reqs)) - } - - pub(crate) fn matches_any(&self, version: &semver::Version) -> bool { - self.0.iter().any(|r| r.matches(version)) - } -} diff --git a/rpc-client-nonce-utils/src/blockhash_query.rs b/rpc-client-nonce-utils/src/blockhash_query.rs index 20b3e0572ff9f6..7a6c1c1f8441b2 100644 --- a/rpc-client-nonce-utils/src/blockhash_query.rs +++ b/rpc-client-nonce-utils/src/blockhash_query.rs @@ -6,10 +6,7 @@ use { offline::*, }, solana_rpc_client::rpc_client::RpcClient, - solana_sdk::{ - commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash, - pubkey::Pubkey, - }, + solana_sdk::{commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey}, }; #[derive(Debug, PartialEq, Eq)] @@ -19,56 +16,6 @@ pub enum Source { } impl 
Source { - #[deprecated(since = "1.9.0", note = "Please use `get_blockhash` instead")] - pub fn get_blockhash_and_fee_calculator( - &self, - rpc_client: &RpcClient, - commitment: CommitmentConfig, - ) -> Result<(Hash, FeeCalculator), Box> { - match self { - Self::Cluster => { - #[allow(deprecated)] - let res = rpc_client - .get_recent_blockhash_with_commitment(commitment)? - .value; - Ok((res.0, res.1)) - } - Self::NonceAccount(ref pubkey) => { - let data = crate::get_account_with_commitment(rpc_client, pubkey, commitment) - .and_then(|ref a| crate::data_from_account(a))?; - Ok((data.blockhash(), data.fee_calculator)) - } - } - } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - pub fn get_fee_calculator( - &self, - rpc_client: &RpcClient, - blockhash: &Hash, - commitment: CommitmentConfig, - ) -> Result, Box> { - match self { - Self::Cluster => { - #[allow(deprecated)] - let res = rpc_client - .get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)? - .value; - Ok(res) - } - Self::NonceAccount(ref pubkey) => { - let res = crate::get_account_with_commitment(rpc_client, pubkey, commitment)?; - let res = crate::data_from_account(&res)?; - Ok(Some(res) - .filter(|d| d.blockhash() == *blockhash) - .map(|d| d.fee_calculator)) - } - } - } - pub fn get_blockhash( &self, rpc_client: &RpcClient, @@ -131,29 +78,6 @@ impl BlockhashQuery { BlockhashQuery::new(blockhash, sign_only, nonce_account) } - #[deprecated(since = "1.9.0", note = "Please use `get_blockhash` instead")] - pub fn get_blockhash_and_fee_calculator( - &self, - rpc_client: &RpcClient, - commitment: CommitmentConfig, - ) -> Result<(Hash, FeeCalculator), Box> { - match self { - BlockhashQuery::None(hash) => Ok((*hash, FeeCalculator::default())), - BlockhashQuery::FeeCalculator(source, hash) => { - #[allow(deprecated)] - let fee_calculator = source - .get_fee_calculator(rpc_client, hash, commitment)? 
- .ok_or(format!("Hash has expired {hash:?}"))?; - Ok((*hash, fee_calculator)) - } - BlockhashQuery::All(source) => - { - #[allow(deprecated)] - source.get_blockhash_and_fee_calculator(rpc_client, commitment) - } - } - } - pub fn get_blockhash( &self, rpc_client: &RpcClient, @@ -188,10 +112,11 @@ mod tests { solana_account_decoder::{UiAccount, UiAccountEncoding}, solana_rpc_client_api::{ request::RpcRequest, - response::{Response, RpcFeeCalculator, RpcFees, RpcResponseContext}, + response::{Response, RpcBlockhash, RpcResponseContext}, }, solana_sdk::{ account::Account, + fee_calculator::FeeCalculator, hash::hash, nonce::{self, state::DurableNonce}, system_program, @@ -350,65 +275,65 @@ mod tests { #[test] #[allow(deprecated)] - fn test_blockhash_query_get_blockhash_fee_calc() { + fn test_blockhash_query_get_blockhash() { let test_blockhash = hash(&[0u8]); let rpc_blockhash = hash(&[1u8]); - let rpc_fee_calc = FeeCalculator::new(42); - let get_recent_blockhash_response = json!(Response { + let get_latest_blockhash_response = json!(Response { context: RpcResponseContext { slot: 1, api_version: None }, - value: json!(RpcFees { + value: json!(RpcBlockhash { blockhash: rpc_blockhash.to_string(), - fee_calculator: rpc_fee_calc, - last_valid_slot: 42, last_valid_block_height: 42, }), }); - let get_fee_calculator_for_blockhash_response = json!(Response { + let is_blockhash_valid_response = json!(Response { context: RpcResponseContext { slot: 1, api_version: None }, - value: json!(RpcFeeCalculator { - fee_calculator: rpc_fee_calc - }), + value: true, }); let mut mocks = HashMap::new(); - mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response.clone()); + mocks.insert( + RpcRequest::GetLatestBlockhash, + get_latest_blockhash_response.clone(), + ); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::default() - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, 
CommitmentConfig::default()) .unwrap(), - (rpc_blockhash, rpc_fee_calc), + rpc_blockhash, ); let mut mocks = HashMap::new(); - mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response.clone()); mocks.insert( - RpcRequest::GetFeeCalculatorForBlockhash, - get_fee_calculator_for_blockhash_response, + RpcRequest::IsBlockhashValid, + is_blockhash_valid_response.clone(), ); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::FeeCalculator(Source::Cluster, test_blockhash) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .unwrap(), - (test_blockhash, rpc_fee_calc), + test_blockhash, ); let mut mocks = HashMap::new(); - mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response); + mocks.insert( + RpcRequest::GetLatestBlockhash, + get_latest_blockhash_response, + ); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::None(test_blockhash) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .unwrap(), - (test_blockhash, FeeCalculator::default()), + test_blockhash, ); let rpc_client = RpcClient::new_mock("fails".to_string()); assert!(BlockhashQuery::default() - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .is_err()); let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[2u8; 32])); @@ -447,40 +372,32 @@ mod tests { let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::All(Source::NonceAccount(nonce_pubkey)) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .unwrap(), - (nonce_blockhash, nonce_fee_calc), + nonce_blockhash, ); let mut mocks = HashMap::new(); 
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone()); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), nonce_blockhash) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .unwrap(), - (nonce_blockhash, nonce_fee_calc), - ); - let mut mocks = HashMap::new(); - mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone()); - let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); - assert!( - BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), test_blockhash) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) - .is_err() + nonce_blockhash, ); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetAccountInfo, get_account_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::None(nonce_blockhash) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .unwrap(), - (nonce_blockhash, FeeCalculator::default()), + nonce_blockhash, ); let rpc_client = RpcClient::new_mock("fails".to_string()); assert!(BlockhashQuery::All(Source::NonceAccount(nonce_pubkey)) - .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) + .get_blockhash(&rpc_client, CommitmentConfig::default()) .is_err()); } } diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs index ec093461c96909..9730a6ff24a983 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -12,18 +12,17 @@ use { request::RpcRequest, response::{ Response, RpcAccountBalance, RpcBlockProduction, RpcBlockProductionRange, RpcBlockhash, - RpcConfirmedTransactionStatusWithSignature, RpcContactInfo, RpcFees, RpcIdentity, + RpcConfirmedTransactionStatusWithSignature, 
RpcContactInfo, RpcIdentity, RpcInflationGovernor, RpcInflationRate, RpcInflationReward, RpcKeyedAccount, RpcPerfSample, RpcPrioritizationFee, RpcResponseContext, RpcSimulateTransactionResult, - RpcSnapshotSlotInfo, RpcStakeActivation, RpcSupply, RpcVersionInfo, RpcVoteAccountInfo, - RpcVoteAccountStatus, StakeActivationState, + RpcSnapshotSlotInfo, RpcSupply, RpcVersionInfo, RpcVoteAccountInfo, + RpcVoteAccountStatus, }, }, solana_sdk::{ account::Account, clock::{Slot, UnixTimestamp}, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, instruction::InstructionError, message::MessageHeader, pubkey::Pubkey, @@ -117,13 +116,6 @@ impl RpcSender for MockSender { context: RpcResponseContext { slot: 1, api_version: None }, value: Value::Number(Number::from(50)), })?, - "getRecentBlockhash" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: ( - Value::String(PUBKEY.to_string()), - serde_json::to_value(FeeCalculator::default()).unwrap(), - ), - })?, "getEpochInfo" => serde_json::to_value(EpochInfo { epoch: 1, slot_index: 2, @@ -132,31 +124,6 @@ impl RpcSender for MockSender { block_height: 34, transaction_count: Some(123), })?, - "getFeeCalculatorForBlockhash" => { - let value = if self.url == "blockhash_expired" { - Value::Null - } else { - serde_json::to_value(Some(FeeCalculator::default())).unwrap() - }; - serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value, - })? 
- } - "getFeeRateGovernor" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: serde_json::to_value(FeeRateGovernor::default()).unwrap(), - })?, - "getFees" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: serde_json::to_value(RpcFees { - blockhash: PUBKEY.to_string(), - fee_calculator: FeeCalculator::default(), - last_valid_slot: 42, - last_valid_block_height: 42, - }) - .unwrap(), - })?, "getSignatureStatuses" => { let status: transaction::Result<()> = if self.url == "account_in_use" { Err(TransactionError::AccountInUse) @@ -242,7 +209,6 @@ impl RpcSender for MockSender { "getSlot" => json![0], "getMaxShredInsertSlot" => json![0], "requestAirdrop" => Value::String(Signature::from([8; 64]).to_string()), - "getSnapshotSlot" => Value::Number(Number::from(0)), "getHighestSnapshotSlot" => json!(RpcSnapshotSlotInfo { full: 100, incremental: Some(110), @@ -287,11 +253,6 @@ impl RpcSender for MockSender { }) } } - "getStakeActivation" => json!(RpcStakeActivation { - state: StakeActivationState::Activating, - active: 123, - inactive: 12, - }), "getStakeMinimumDelegation" => json!(Response { context: RpcResponseContext { slot: 1, api_version: None }, value: 123_456_789, @@ -376,8 +337,13 @@ impl RpcSender for MockSender { "getClusterNodes" => serde_json::to_value(vec![RpcContactInfo { pubkey: PUBKEY.to_string(), gossip: Some(SocketAddr::from(([10, 239, 6, 48], 8899))), + tvu: Some(SocketAddr::from(([10, 239, 6, 48], 8865))), tpu: Some(SocketAddr::from(([10, 239, 6, 48], 8856))), tpu_quic: Some(SocketAddr::from(([10, 239, 6, 48], 8862))), + tpu_forwards: Some(SocketAddr::from(([10, 239, 6, 48], 8857))), + tpu_forwards_quic: Some(SocketAddr::from(([10, 239, 6, 48], 8863))), + tpu_vote: Some(SocketAddr::from(([10, 239, 6, 48], 8870))), + serve_repair: Some(SocketAddr::from(([10, 239, 6, 48], 8880))), rpc: Some(SocketAddr::from(([10, 239, 6, 48], 8899))), pubsub: 
Some(SocketAddr::from(([10, 239, 6, 48], 8900))), version: Some("1.0.0 c375ce1f".to_string()), @@ -401,6 +367,7 @@ impl RpcSender for MockSender { version: Some(TransactionVersion::LEGACY), }], rewards: Rewards::new(), + num_partitions: None, block_time: None, block_height: Some(428), })?, diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index fceb338e20756e..0ca5f76a49f829 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -7,11 +7,6 @@ //! [JSON-RPC]: https://www.jsonrpc.org/specification pub use crate::mock_sender::Mocks; -#[allow(deprecated)] -use solana_rpc_client_api::deprecated_config::{ - RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig, - RpcGetConfirmedSignaturesForAddress2Config, -}; #[cfg(feature = "spinner")] use {crate::spinner, solana_sdk::clock::MAX_HASH_AGE_IN_SECONDS, std::cmp::min}; use { @@ -37,17 +32,15 @@ use { Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult, }, config::{RpcAccountInfoConfig, *}, - filter::{self, RpcFilterType}, request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, response::*, }, solana_sdk::{ account::Account, clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT}, - commitment_config::{CommitmentConfig, CommitmentLevel}, + commitment_config::CommitmentConfig, epoch_info::EpochInfo, epoch_schedule::EpochSchedule, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, pubkey::Pubkey, signature::Signature, @@ -63,7 +56,7 @@ use { str::FromStr, time::{Duration, Instant}, }, - tokio::{sync::RwLock, time::sleep}, + tokio::time::sleep, }; /// A client of a remote Solana node. 
@@ -147,7 +140,6 @@ use { pub struct RpcClient { sender: Box, config: RpcClientConfig, - node_version: RwLock>, } impl RpcClient { @@ -163,7 +155,6 @@ impl RpcClient { ) -> Self { Self { sender: Box::new(sender), - node_version: RwLock::new(None), config, } } @@ -515,30 +506,11 @@ impl RpcClient { self.sender.url() } - pub async fn set_node_version(&self, version: semver::Version) -> Result<(), ()> { - let mut w_node_version = self.node_version.write().await; - *w_node_version = Some(version); + #[deprecated(since = "2.0.2", note = "RpcClient::node_version is no longer used")] + pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> { Ok(()) } - async fn get_node_version(&self) -> Result { - let r_node_version = self.node_version.read().await; - if let Some(version) = &*r_node_version { - Ok(version.clone()) - } else { - drop(r_node_version); - let mut w_node_version = self.node_version.write().await; - let node_version = self.get_version().await.map_err(|e| { - RpcError::RpcRequestError(format!("cluster version query failed: {e}")) - })?; - let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| { - RpcError::RpcRequestError(format!("failed to parse cluster version: {e}")) - })?; - *w_node_version = Some(node_version.clone()); - Ok(node_version) - } - } - /// Get the configured default [commitment level][cl]. /// /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment @@ -556,54 +528,6 @@ impl RpcClient { self.config.commitment_config } - async fn use_deprecated_commitment(&self) -> Result { - Ok(self.get_node_version().await? < semver::Version::new(1, 5, 5)) - } - - async fn maybe_map_commitment( - &self, - requested_commitment: CommitmentConfig, - ) -> Result { - if matches!( - requested_commitment.commitment, - CommitmentLevel::Finalized | CommitmentLevel::Confirmed | CommitmentLevel::Processed - ) && self.use_deprecated_commitment().await? 
- { - return Ok(CommitmentConfig::use_deprecated_commitment( - requested_commitment, - )); - } - Ok(requested_commitment) - } - - #[allow(deprecated)] - async fn maybe_map_request(&self, mut request: RpcRequest) -> Result { - if self.get_node_version().await? < semver::Version::new(1, 7, 0) { - request = match request { - RpcRequest::GetBlock => RpcRequest::GetConfirmedBlock, - RpcRequest::GetBlocks => RpcRequest::GetConfirmedBlocks, - RpcRequest::GetBlocksWithLimit => RpcRequest::GetConfirmedBlocksWithLimit, - RpcRequest::GetSignaturesForAddress => { - RpcRequest::GetConfirmedSignaturesForAddress2 - } - RpcRequest::GetTransaction => RpcRequest::GetConfirmedTransaction, - _ => request, - }; - } - Ok(request) - } - - #[allow(deprecated)] - async fn maybe_map_filters( - &self, - mut filters: Vec, - ) -> Result, RpcError> { - let node_version = self.get_node_version().await?; - filter::maybe_map_filters(Some(node_version), &mut filters) - .map_err(RpcError::RpcRequestError)?; - Ok(filters) - } - /// Submit a transaction and wait for confirmation. /// /// Once this function returns successfully, the given transaction is @@ -845,11 +769,7 @@ impl RpcClient { self.send_transaction_with_config( transaction, RpcSendTransactionConfig { - preflight_commitment: Some( - self.maybe_map_commitment(self.commitment()) - .await? - .commitment, - ), + preflight_commitment: Some(self.commitment().commitment), ..RpcSendTransactionConfig::default() }, ) @@ -942,15 +862,10 @@ impl RpcClient { transaction: &impl SerializableTransaction, config: RpcSendTransactionConfig, ) -> ClientResult { - let encoding = if let Some(encoding) = config.encoding { - encoding - } else { - self.default_cluster_transaction_encoding().await? 
- }; + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base64); let preflight_commitment = CommitmentConfig { commitment: config.preflight_commitment.unwrap_or_default(), }; - let preflight_commitment = self.maybe_map_commitment(preflight_commitment).await?; let config = RpcSendTransactionConfig { encoding: Some(encoding), preflight_commitment: Some(preflight_commitment.commitment), @@ -1233,16 +1148,6 @@ impl RpcClient { } } - async fn default_cluster_transaction_encoding( - &self, - ) -> Result { - if self.get_node_version().await? < semver::Version::new(1, 3, 16) { - Ok(UiTransactionEncoding::Base58) - } else { - Ok(UiTransactionEncoding::Base64) - } - } - /// Simulates sending a transaction. /// /// If the transaction fails, then the [`err`] field of the returned @@ -1392,13 +1297,8 @@ impl RpcClient { transaction: &impl SerializableTransaction, config: RpcSimulateTransactionConfig, ) -> RpcResult { - let encoding = if let Some(encoding) = config.encoding { - encoding - } else { - self.default_cluster_transaction_encoding().await? - }; + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base64); let commitment = config.commitment.unwrap_or_default(); - let commitment = self.maybe_map_commitment(commitment).await?; let config = RpcSimulateTransactionConfig { encoding: Some(encoding), commitment: Some(commitment), @@ -1436,27 +1336,8 @@ impl RpcClient { /// # Ok::<(), Error>(()) /// ``` pub async fn get_highest_snapshot_slot(&self) -> ClientResult { - if self.get_node_version().await? 
< semver::Version::new(1, 9, 0) { - #[allow(deprecated)] - self.get_snapshot_slot() - .await - .map(|full| RpcSnapshotSlotInfo { - full, - incremental: None, - }) - } else { - self.send(RpcRequest::GetHighestSnapshotSlot, Value::Null) - .await - } - } - - #[deprecated( - since = "1.8.0", - note = "Please use RpcClient::get_highest_snapshot_slot() instead" - )] - #[allow(deprecated)] - pub async fn get_snapshot_slot(&self) -> ClientResult { - self.send(RpcRequest::GetSnapshotSlot, Value::Null).await + self.send(RpcRequest::GetHighestSnapshotSlot, Value::Null) + .await } /// Check if a transaction has been processed with the default [commitment level][cl]. @@ -1886,11 +1767,8 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> ClientResult { - self.send( - RpcRequest::GetSlot, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await + self.send(RpcRequest::GetSlot, json!([commitment_config])) + .await } /// Returns the block height that has reached the configured [commitment level][cl]. @@ -1950,11 +1828,8 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> ClientResult { - self.send( - RpcRequest::GetBlockHeight, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await + self.send(RpcRequest::GetBlockHeight, json!([commitment_config])) + .await } /// Returns the slot leaders for a given slot range. @@ -2076,99 +1951,6 @@ impl RpcClient { .await } - /// Returns epoch activation information for a stake account. - /// - /// This method uses the configured [commitment level]. - /// - /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment - /// - /// # RPC Reference - /// - /// This method corresponds directly to the [`getStakeActivation`] RPC method. 
- /// - /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation - /// - /// # Examples - /// - /// ``` - /// # use solana_rpc_client_api::{ - /// # client_error::Error, - /// # response::StakeActivationState, - /// # }; - /// # use solana_rpc_client::nonblocking::rpc_client::RpcClient; - /// # use solana_sdk::{ - /// # signer::keypair::Keypair, - /// # signature::Signer, - /// # pubkey::Pubkey, - /// # stake, - /// # stake::state::{Authorized, Lockup}, - /// # transaction::Transaction - /// # }; - /// # use std::str::FromStr; - /// # futures::executor::block_on(async { - /// # let alice = Keypair::new(); - /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); - /// // Find some vote account to delegate to - /// let vote_accounts = rpc_client.get_vote_accounts().await?; - /// let vote_account = vote_accounts.current.get(0).unwrap_or_else(|| &vote_accounts.delinquent[0]); - /// let vote_account_pubkey = &vote_account.vote_pubkey; - /// let vote_account_pubkey = Pubkey::from_str(vote_account_pubkey).expect("pubkey"); - /// - /// // Create a stake account - /// let stake_account = Keypair::new(); - /// let stake_account_pubkey = stake_account.pubkey(); - /// - /// // Build the instructions to create new stake account, - /// // funded by alice, and delegate to a validator's vote account. 
- /// let instrs = stake::instruction::create_account_and_delegate_stake( - /// &alice.pubkey(), - /// &stake_account_pubkey, - /// &vote_account_pubkey, - /// &Authorized::auto(&stake_account_pubkey), - /// &Lockup::default(), - /// 1_000_000, - /// ); - /// - /// let latest_blockhash = rpc_client.get_latest_blockhash().await?; - /// let tx = Transaction::new_signed_with_payer( - /// &instrs, - /// Some(&alice.pubkey()), - /// &[&alice, &stake_account], - /// latest_blockhash, - /// ); - /// - /// rpc_client.send_and_confirm_transaction(&tx).await?; - /// - /// let epoch_info = rpc_client.get_epoch_info().await?; - /// let activation = rpc_client.get_stake_activation( - /// stake_account_pubkey, - /// Some(epoch_info.epoch), - /// ).await?; - /// - /// assert_eq!(activation.state, StakeActivationState::Activating); - /// # Ok::<(), Error>(()) - /// # })?; - /// # Ok::<(), Error>(()) - /// ``` - pub async fn get_stake_activation( - &self, - stake_account: Pubkey, - epoch: Option, - ) -> ClientResult { - self.send( - RpcRequest::GetStakeActivation, - json!([ - stake_account.to_string(), - RpcEpochConfig { - epoch, - commitment: Some(self.commitment()), - min_context_slot: None, - } - ]), - ) - .await - } - /// Returns information about the current supply. /// /// This method uses the configured [commitment level][cl]. @@ -2225,11 +2007,8 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> RpcResult { - self.send( - RpcRequest::GetSupply, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await + self.send(RpcRequest::GetSupply, json!([commitment_config])) + .await } /// Returns the 20 largest accounts, by lamport balance. 
@@ -2257,6 +2036,7 @@ impl RpcClient { /// let config = RpcLargestAccountsConfig { /// commitment: Some(commitment_config), /// filter: Some(RpcLargestAccountsFilter::Circulating), + /// sort_results: None, /// }; /// let accounts = rpc_client.get_largest_accounts_with_config( /// config, @@ -2270,7 +2050,6 @@ impl RpcClient { config: RpcLargestAccountsConfig, ) -> RpcResult> { let commitment = config.commitment.unwrap_or_default(); - let commitment = self.maybe_map_commitment(commitment).await?; let config = RpcLargestAccountsConfig { commitment: Some(commitment), ..config @@ -2340,7 +2119,7 @@ impl RpcClient { commitment_config: CommitmentConfig, ) -> ClientResult { self.get_vote_accounts_with_config(RpcGetVoteAccountsConfig { - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), ..RpcGetVoteAccountsConfig::default() }) .await @@ -2518,11 +2297,8 @@ impl RpcClient { slot: Slot, encoding: UiTransactionEncoding, ) -> ClientResult { - self.send( - self.maybe_map_request(RpcRequest::GetBlock).await?, - json!([slot, encoding]), - ) - .await + self.send(RpcRequest::GetBlock, json!([slot, encoding])) + .await } /// Returns identity and transaction information about a confirmed block in the ledger. 
@@ -2568,46 +2344,7 @@ impl RpcClient { slot: Slot, config: RpcBlockConfig, ) -> ClientResult { - self.send( - self.maybe_map_request(RpcRequest::GetBlock).await?, - json!([slot, config]), - ) - .await - } - - #[deprecated(since = "1.7.0", note = "Please use RpcClient::get_block() instead")] - #[allow(deprecated)] - pub async fn get_confirmed_block(&self, slot: Slot) -> ClientResult { - self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_block_with_encoding() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_block_with_encoding( - &self, - slot: Slot, - encoding: UiTransactionEncoding, - ) -> ClientResult { - self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding])) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_block_with_config() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_block_with_config( - &self, - slot: Slot, - config: RpcConfirmedBlockConfig, - ) -> ClientResult { - self.send(RpcRequest::GetConfirmedBlock, json!([slot, config])) - .await + self.send(RpcRequest::GetBlock, json!([slot, config])).await } /// Returns a list of finalized blocks between two slots. @@ -2634,12 +2371,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getBlocks`] RPC method, unless - /// the remote node version is less than 1.7, in which case it maps to the - /// [`getConfirmedBlocks`] RPC method. + /// This method corresponds directly to the [`getBlocks`] RPC method. 
/// /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks - /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2661,11 +2395,8 @@ impl RpcClient { start_slot: Slot, end_slot: Option, ) -> ClientResult> { - self.send( - self.maybe_map_request(RpcRequest::GetBlocks).await?, - json!([start_slot, end_slot]), - ) - .await + self.send(RpcRequest::GetBlocks, json!([start_slot, end_slot])) + .await } /// Returns a list of confirmed blocks between two slots. @@ -2696,12 +2427,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getBlocks`] RPC method, unless - /// the remote node version is less than 1.7, in which case it maps to the - /// [`getConfirmedBlocks`] RPC method. + /// This method corresponds directly to the [`getBlocks`] RPC method. /// /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks - /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2732,19 +2460,11 @@ impl RpcClient { commitment_config: CommitmentConfig, ) -> ClientResult> { let json = if end_slot.is_some() { - json!([ - start_slot, - end_slot, - self.maybe_map_commitment(commitment_config).await? - ]) + json!([start_slot, end_slot, commitment_config]) } else { - json!([ - start_slot, - self.maybe_map_commitment(commitment_config).await? - ]) + json!([start_slot, commitment_config]) }; - self.send(self.maybe_map_request(RpcRequest::GetBlocks).await?, json) - .await + self.send(RpcRequest::GetBlocks, json).await } /// Returns a list of finalized blocks starting at the given slot. @@ -2761,11 +2481,9 @@ impl RpcClient { /// # RPC Reference /// /// This method corresponds directly to the [`getBlocksWithLimit`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getConfirmedBlocksWithLimit`] RPC method. + /// method. 
/// /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2787,12 +2505,8 @@ impl RpcClient { start_slot: Slot, limit: usize, ) -> ClientResult> { - self.send( - self.maybe_map_request(RpcRequest::GetBlocksWithLimit) - .await?, - json!([start_slot, limit]), - ) - .await + self.send(RpcRequest::GetBlocksWithLimit, json!([start_slot, limit])) + .await } /// Returns a list of confirmed blocks starting at the given slot. @@ -2810,11 +2524,9 @@ impl RpcClient { /// # RPC Reference /// /// This method corresponds directly to the [`getBlocksWithLimit`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the `getConfirmedBlocksWithLimit` RPC method. + /// method. /// /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2844,92 +2556,8 @@ impl RpcClient { commitment_config: CommitmentConfig, ) -> ClientResult> { self.send( - self.maybe_map_request(RpcRequest::GetBlocksWithLimit) - .await?, - json!([ - start_slot, - limit, - self.maybe_map_commitment(commitment_config).await? 
- ]), - ) - .await - } - - #[deprecated(since = "1.7.0", note = "Please use RpcClient::get_blocks() instead")] - #[allow(deprecated)] - pub async fn get_confirmed_blocks( - &self, - start_slot: Slot, - end_slot: Option, - ) -> ClientResult> { - self.send( - RpcRequest::GetConfirmedBlocks, - json!([start_slot, end_slot]), - ) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_commitment() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_blocks_with_commitment( - &self, - start_slot: Slot, - end_slot: Option, - commitment_config: CommitmentConfig, - ) -> ClientResult> { - let json = if end_slot.is_some() { - json!([ - start_slot, - end_slot, - self.maybe_map_commitment(commitment_config).await? - ]) - } else { - json!([ - start_slot, - self.maybe_map_commitment(commitment_config).await? - ]) - }; - self.send(RpcRequest::GetConfirmedBlocks, json).await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_limit() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_blocks_with_limit( - &self, - start_slot: Slot, - limit: usize, - ) -> ClientResult> { - self.send( - RpcRequest::GetConfirmedBlocksWithLimit, - json!([start_slot, limit]), - ) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_limit_and_commitment() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_blocks_with_limit_and_commitment( - &self, - start_slot: Slot, - limit: usize, - commitment_config: CommitmentConfig, - ) -> ClientResult> { - self.send( - RpcRequest::GetConfirmedBlocksWithLimit, - json!([ - start_slot, - limit, - self.maybe_map_commitment(commitment_config).await? 
- ]), + RpcRequest::GetBlocksWithLimit, + json!([start_slot, limit, commitment_config]), ) .await } @@ -3049,51 +2677,7 @@ impl RpcClient { let result: Vec = self .send( - self.maybe_map_request(RpcRequest::GetSignaturesForAddress) - .await?, - json!([address.to_string(), config]), - ) - .await?; - - Ok(result) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_signatures_for_address() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_signatures_for_address2( - &self, - address: &Pubkey, - ) -> ClientResult> { - self.get_confirmed_signatures_for_address2_with_config( - address, - GetConfirmedSignaturesForAddress2Config::default(), - ) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_signatures_for_address_with_config() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_signatures_for_address2_with_config( - &self, - address: &Pubkey, - config: GetConfirmedSignaturesForAddress2Config, - ) -> ClientResult> { - let config = RpcGetConfirmedSignaturesForAddress2Config { - before: config.before.map(|signature| signature.to_string()), - until: config.until.map(|signature| signature.to_string()), - limit: config.limit, - commitment: config.commitment, - }; - - let result: Vec = self - .send( - RpcRequest::GetConfirmedSignaturesForAddress2, + RpcRequest::GetSignaturesForAddress, json!([address.to_string(), config]), ) .await?; @@ -3110,12 +2694,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getTransaction`] RPC method, - /// unless the remote node version is less than 1.7, in which case it maps - /// to the [`getConfirmedTransaction`] RPC method. + /// This method corresponds directly to the [`getTransaction`] RPC method. 
/// /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction - /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -3151,7 +2732,7 @@ impl RpcClient { encoding: UiTransactionEncoding, ) -> ClientResult { self.send( - self.maybe_map_request(RpcRequest::GetTransaction).await?, + RpcRequest::GetTransaction, json!([signature.to_string(), encoding]), ) .await @@ -3169,12 +2750,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getTransaction`] RPC method, - /// unless the remote node version is less than 1.7, in which case it maps - /// to the [`getConfirmedTransaction`] RPC method. + /// This method corresponds directly to the [`getTransaction`] RPC method. /// /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction - /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -3219,41 +2797,7 @@ impl RpcClient { config: RpcTransactionConfig, ) -> ClientResult { self.send( - self.maybe_map_request(RpcRequest::GetTransaction).await?, - json!([signature.to_string(), config]), - ) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_transaction() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_transaction( - &self, - signature: &Signature, - encoding: UiTransactionEncoding, - ) -> ClientResult { - self.send( - RpcRequest::GetConfirmedTransaction, - json!([signature.to_string(), encoding]), - ) - .await - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_transaction_with_config() instead" - )] - #[allow(deprecated)] - pub async fn get_confirmed_transaction_with_config( - &self, - signature: &Signature, - config: RpcConfirmedTransactionConfig, - ) -> ClientResult { - self.send( - RpcRequest::GetConfirmedTransaction, + RpcRequest::GetTransaction, json!([signature.to_string(), config]), ) .await 
@@ -3354,11 +2898,8 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> ClientResult { - self.send( - RpcRequest::GetEpochInfo, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await + self.send(RpcRequest::GetEpochInfo, json!([commitment_config])) + .await } /// Returns the leader schedule for an epoch. @@ -3431,7 +2972,7 @@ impl RpcClient { self.get_leader_schedule_with_config( slot, RpcLeaderScheduleConfig { - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), ..RpcLeaderScheduleConfig::default() }, ) @@ -3877,7 +3418,7 @@ impl RpcClient { ) -> RpcResult> { let config = RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::Base64Zstd), - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), data_slice: None, min_context_slot: None, }; @@ -4104,7 +3645,7 @@ impl RpcClient { pubkeys, RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::Base64Zstd), - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), data_slice: None, min_context_slot: None, }, @@ -4333,10 +3874,7 @@ impl RpcClient { ) -> RpcResult { self.send( RpcRequest::GetBalance, - json!([ - pubkey.to_string(), - self.maybe_map_commitment(commitment_config).await? - ]), + json!([pubkey.to_string(), commitment_config]), ) .await } @@ -4456,11 +3994,7 @@ impl RpcClient { .account_config .commitment .unwrap_or_else(|| self.commitment()); - let commitment = self.maybe_map_commitment(commitment).await?; config.account_config.commitment = Some(commitment); - if let Some(filters) = config.filters { - config.filters = Some(self.maybe_map_filters(filters).await?); - } let accounts = self .send::>>( @@ -4525,7 +4059,7 @@ impl RpcClient { Ok(self .send::>( RpcRequest::GetStakeMinimumDelegation, - json!([self.maybe_map_commitment(commitment_config).await?]), + json!([commitment_config]), ) .await? 
.value) @@ -4541,233 +4075,8 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> ClientResult { - self.send( - RpcRequest::GetTransactionCount, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await - } - - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub async fn get_fees(&self) -> ClientResult { - #[allow(deprecated)] - Ok(self - .get_fees_with_commitment(self.commitment()) - .await? - .value) - } - - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash_with_commitment` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub async fn get_fees_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> RpcResult { - let Response { - context, - value: fees, - } = self - .send::>( - RpcRequest::GetFees, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await?; - let blockhash = fees.blockhash.parse().map_err(|_| { - ClientError::new_with_request( - RpcError::ParseError("Hash".to_string()).into(), - RpcRequest::GetFees, - ) - })?; - Ok(Response { - context, - value: Fees { - blockhash, - fee_calculator: fees.fee_calculator, - last_valid_block_height: fees.last_valid_block_height, - }, - }) - } - - #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] - #[allow(deprecated)] - pub async fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> { - #[allow(deprecated)] - let (blockhash, fee_calculator, _last_valid_slot) = self - .get_recent_blockhash_with_commitment(self.commitment()) - .await? 
- .value; - Ok((blockhash, fee_calculator)) - } - - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash_with_commitment` instead" - )] - #[allow(deprecated)] - pub async fn get_recent_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> RpcResult<(Hash, FeeCalculator, Slot)> { - let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response { - context, - value: - RpcFees { - blockhash, - fee_calculator, - last_valid_slot, - .. - }, - }) = self - .send::>( - RpcRequest::GetFees, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await - { - (context, blockhash, fee_calculator, last_valid_slot) - } else if let Ok(Response { - context, - value: - DeprecatedRpcFees { - blockhash, - fee_calculator, - last_valid_slot, - }, - }) = self - .send::>( - RpcRequest::GetFees, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) - .await - { - (context, blockhash, fee_calculator, last_valid_slot) - } else if let Ok(Response { - context, - value: - RpcBlockhashFeeCalculator { - blockhash, - fee_calculator, - }, - }) = self - .send::>( - RpcRequest::GetRecentBlockhash, - json!([self.maybe_map_commitment(commitment_config).await?]), - ) + self.send(RpcRequest::GetTransactionCount, json!([commitment_config])) .await - { - (context, blockhash, fee_calculator, 0) - } else { - return Err(ClientError::new_with_request( - RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(), - RpcRequest::GetRecentBlockhash, - )); - }; - - let blockhash = blockhash.parse().map_err(|_| { - ClientError::new_with_request( - RpcError::ParseError("Hash".to_string()).into(), - RpcRequest::GetRecentBlockhash, - ) - })?; - Ok(Response { - context, - value: (blockhash, fee_calculator, last_valid_slot), - }) - } - - #[deprecated(since = "1.9.0", note = "Please `get_fee_for_message` instead")] - #[allow(deprecated)] - pub async fn get_fee_calculator_for_blockhash( - &self, - blockhash: 
&Hash, - ) -> ClientResult> { - #[allow(deprecated)] - Ok(self - .get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment()) - .await? - .value) - } - - #[deprecated( - since = "1.9.0", - note = "Please `get_latest_blockhash_with_commitment` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub async fn get_fee_calculator_for_blockhash_with_commitment( - &self, - blockhash: &Hash, - commitment_config: CommitmentConfig, - ) -> RpcResult> { - let Response { context, value } = self - .send::>>( - RpcRequest::GetFeeCalculatorForBlockhash, - json!([ - blockhash.to_string(), - self.maybe_map_commitment(commitment_config).await? - ]), - ) - .await?; - - Ok(Response { - context, - value: value.map(|rf| rf.fee_calculator), - }) - } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - #[allow(deprecated)] - pub async fn get_fee_rate_governor(&self) -> RpcResult { - let Response { - context, - value: RpcFeeRateGovernor { fee_rate_governor }, - } = self - .send::>(RpcRequest::GetFeeRateGovernor, Value::Null) - .await?; - - Ok(Response { - context, - value: fee_rate_governor, - }) - } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - #[allow(deprecated)] - pub async fn get_new_blockhash(&self, blockhash: &Hash) -> ClientResult<(Hash, FeeCalculator)> { - let mut num_retries = 0; - let start = Instant::now(); - while start.elapsed().as_secs() < 5 { - #[allow(deprecated)] - if let Ok((new_blockhash, fee_calculator)) = self.get_recent_blockhash().await { - if new_blockhash != *blockhash { - return Ok((new_blockhash, fee_calculator)); - } - } - debug!("Got same blockhash ({:?}), will retry...", blockhash); - - // Retry ~twice during a slot - sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)).await; - num_retries += 1; - } - Err(RpcError::ForUser(format!( - "Unable to get new blockhash after {}ms (retried {} times), 
stuck at {}", - start.elapsed().as_millis(), - num_retries, - blockhash - )) - .into()) } pub async fn get_first_available_block(&self) -> ClientResult { @@ -4806,7 +4115,7 @@ impl RpcClient { ) -> RpcResult> { let config = RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::JsonParsed), - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), data_slice: None, min_context_slot: None, }; @@ -4869,10 +4178,7 @@ impl RpcClient { ) -> RpcResult { self.send( RpcRequest::GetTokenAccountBalance, - json!([ - pubkey.to_string(), - self.maybe_map_commitment(commitment_config).await? - ]), + json!([pubkey.to_string(), commitment_config]), ) .await } @@ -4907,7 +4213,7 @@ impl RpcClient { let config = RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::JsonParsed), - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), data_slice: None, min_context_slot: None, }; @@ -4949,7 +4255,7 @@ impl RpcClient { let config = RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::JsonParsed), - commitment: Some(self.maybe_map_commitment(commitment_config).await?), + commitment: Some(commitment_config), data_slice: None, min_context_slot: None, }; @@ -4978,10 +4284,7 @@ impl RpcClient { ) -> RpcResult> { self.send( RpcRequest::GetTokenLargestAccounts, - json!([ - mint.to_string(), - self.maybe_map_commitment(commitment_config).await? - ]), + json!([mint.to_string(), commitment_config]), ) .await } @@ -5000,10 +4303,7 @@ impl RpcClient { ) -> RpcResult { self.send( RpcRequest::GetTokenSupply, - json!([ - mint.to_string(), - self.maybe_map_commitment(commitment_config).await? 
- ]), + json!([mint.to_string(), commitment_config]), ) .await } @@ -5044,7 +4344,6 @@ impl RpcClient { config: RpcRequestAirdropConfig, ) -> ClientResult { let commitment = config.commitment.unwrap_or_default(); - let commitment = self.maybe_map_commitment(commitment).await?; let config = RpcRequestAirdropConfig { commitment: Some(commitment), ..config @@ -5259,64 +4558,40 @@ impl RpcClient { Ok(blockhash) } - #[allow(deprecated)] pub async fn get_latest_blockhash_with_commitment( &self, commitment: CommitmentConfig, ) -> ClientResult<(Hash, u64)> { - let (blockhash, last_valid_block_height) = - if self.get_node_version().await? < semver::Version::new(1, 9, 0) { - let Fees { - blockhash, - last_valid_block_height, - .. - } = self.get_fees_with_commitment(commitment).await?.value; - (blockhash, last_valid_block_height) - } else { - let RpcBlockhash { - blockhash, - last_valid_block_height, - } = self - .send::>( - RpcRequest::GetLatestBlockhash, - json!([self.maybe_map_commitment(commitment).await?]), - ) - .await? - .value; - let blockhash = blockhash.parse().map_err(|_| { - ClientError::new_with_request( - RpcError::ParseError("Hash".to_string()).into(), - RpcRequest::GetLatestBlockhash, - ) - })?; - (blockhash, last_valid_block_height) - }; + let RpcBlockhash { + blockhash, + last_valid_block_height, + } = self + .send::>(RpcRequest::GetLatestBlockhash, json!([commitment])) + .await? + .value; + let blockhash = blockhash.parse().map_err(|_| { + ClientError::new_with_request( + RpcError::ParseError("Hash".to_string()).into(), + RpcRequest::GetLatestBlockhash, + ) + })?; Ok((blockhash, last_valid_block_height)) } - #[allow(deprecated)] pub async fn is_blockhash_valid( &self, blockhash: &Hash, commitment: CommitmentConfig, ) -> ClientResult { - let result = if self.get_node_version().await? < semver::Version::new(1, 9, 0) { - self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment) - .await? 
- .value - .is_some() - } else { - self.send::>( + Ok(self + .send::>( RpcRequest::IsBlockhashValid, json!([blockhash.to_string(), commitment,]), ) .await? - .value - }; - Ok(result) + .value) } - #[allow(deprecated)] pub async fn get_fee_for_message( &self, message: &impl SerializableMessage, diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs index 119039269a7bac..32bd08cef49f03 100644 --- a/rpc-client/src/rpc_client.rs +++ b/rpc-client/src/rpc_client.rs @@ -10,10 +10,6 @@ //! in [`crate::nonblocking::rpc_client`]. pub use crate::mock_sender::Mocks; -#[allow(deprecated)] -use solana_rpc_client_api::deprecated_config::{ - RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig, -}; use { crate::{ http_sender::HttpSender, @@ -40,7 +36,6 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, feature::Feature, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, message::{v0, Message as LegacyMessage}, pubkey::Pubkey, @@ -1175,15 +1170,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_highest_snapshot_slot()) } - #[deprecated( - since = "1.8.0", - note = "Please use RpcClient::get_highest_snapshot_slot() instead" - )] - #[allow(deprecated)] - pub fn get_snapshot_slot(&self) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_snapshot_slot()) - } - /// Check if a transaction has been processed with the default [commitment level][cl]. /// /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment @@ -1717,85 +1703,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_block_production_with_config(config)) } - /// Returns epoch activation information for a stake account. - /// - /// This method uses the configured [commitment level]. - /// - /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment - /// - /// # RPC Reference - /// - /// This method corresponds directly to the [`getStakeActivation`] RPC method. 
- /// - /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation - /// - /// # Examples - /// - /// ``` - /// # use solana_rpc_client_api::{ - /// # client_error::Error, - /// # response::StakeActivationState, - /// # }; - /// # use solana_rpc_client::rpc_client::RpcClient; - /// # use solana_sdk::{ - /// # signer::keypair::Keypair, - /// # signature::Signer, - /// # pubkey::Pubkey, - /// # stake, - /// # stake::state::{Authorized, Lockup}, - /// # transaction::Transaction - /// # }; - /// # use std::str::FromStr; - /// # let alice = Keypair::new(); - /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); - /// // Find some vote account to delegate to - /// let vote_accounts = rpc_client.get_vote_accounts()?; - /// let vote_account = vote_accounts.current.get(0).unwrap_or_else(|| &vote_accounts.delinquent[0]); - /// let vote_account_pubkey = &vote_account.vote_pubkey; - /// let vote_account_pubkey = Pubkey::from_str(vote_account_pubkey).expect("pubkey"); - /// - /// // Create a stake account - /// let stake_account = Keypair::new(); - /// let stake_account_pubkey = stake_account.pubkey(); - /// - /// // Build the instructions to create new stake account, - /// // funded by alice, and delegate to a validator's vote account. 
- /// let instrs = stake::instruction::create_account_and_delegate_stake( - /// &alice.pubkey(), - /// &stake_account_pubkey, - /// &vote_account_pubkey, - /// &Authorized::auto(&stake_account_pubkey), - /// &Lockup::default(), - /// 1_000_000, - /// ); - /// - /// let latest_blockhash = rpc_client.get_latest_blockhash()?; - /// let tx = Transaction::new_signed_with_payer( - /// &instrs, - /// Some(&alice.pubkey()), - /// &[&alice, &stake_account], - /// latest_blockhash, - /// ); - /// - /// rpc_client.send_and_confirm_transaction(&tx)?; - /// - /// let epoch_info = rpc_client.get_epoch_info()?; - /// let activation = rpc_client.get_stake_activation( - /// stake_account_pubkey, - /// Some(epoch_info.epoch), - /// )?; - /// - /// assert_eq!(activation.state, StakeActivationState::Activating); - /// # Ok::<(), Error>(()) - /// ``` - pub fn get_stake_activation( - &self, - stake_account: Pubkey, - epoch: Option, - ) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_stake_activation(stake_account, epoch)) - } - /// Returns information about the current supply. /// /// This method uses the configured [commitment level][cl]. 
@@ -1872,6 +1779,7 @@ impl RpcClient { /// let config = RpcLargestAccountsConfig { /// commitment: Some(commitment_config), /// filter: Some(RpcLargestAccountsFilter::Circulating), + /// sort_results: None, /// }; /// let accounts = rpc_client.get_largest_accounts_with_config( /// config, @@ -2121,38 +2029,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_block_with_config(slot, config)) } - #[deprecated(since = "1.7.0", note = "Please use RpcClient::get_block() instead")] - #[allow(deprecated)] - pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_confirmed_block(slot)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_block_with_encoding() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_block_with_encoding( - &self, - slot: Slot, - encoding: UiTransactionEncoding, - ) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_confirmed_block_with_encoding(slot, encoding)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_block_with_config() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_block_with_config( - &self, - slot: Slot, - config: RpcConfirmedBlockConfig, - ) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_confirmed_block_with_config(slot, config)) - } - /// Returns a list of finalized blocks between two slots. /// /// The range is inclusive, with results including the block for both @@ -2177,12 +2053,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getBlocks`] RPC method, unless - /// the remote node version is less than 1.7, in which case it maps to the - /// [`getConfirmedBlocks`] RPC method. + /// This method corresponds directly to the [`getBlocks`] RPC method. 
/// /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks - /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2228,12 +2101,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getBlocks`] RPC method, unless - /// the remote node version is less than 1.7, in which case it maps to the - /// [`getConfirmedBlocks`] RPC method. + /// This method corresponds directly to the [`getBlocks`] RPC method. /// /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks - /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2281,11 +2151,9 @@ impl RpcClient { /// # RPC Reference /// /// This method corresponds directly to the [`getBlocksWithLimit`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getConfirmedBlocksWithLimit`] RPC method. + /// method. /// /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2318,11 +2186,9 @@ impl RpcClient { /// # RPC Reference /// /// This method corresponds directly to the [`getBlocksWithLimit`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the `getConfirmedBlocksWithLimit` RPC method. + /// method. 
/// /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2357,69 +2223,6 @@ impl RpcClient { ) } - #[deprecated(since = "1.7.0", note = "Please use RpcClient::get_blocks() instead")] - #[allow(deprecated)] - pub fn get_confirmed_blocks( - &self, - start_slot: Slot, - end_slot: Option, - ) -> ClientResult> { - self.invoke((self.rpc_client.as_ref()).get_confirmed_blocks(start_slot, end_slot)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_commitment() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_blocks_with_commitment( - &self, - start_slot: Slot, - end_slot: Option, - commitment_config: CommitmentConfig, - ) -> ClientResult> { - self.invoke( - (self.rpc_client.as_ref()).get_confirmed_blocks_with_commitment( - start_slot, - end_slot, - commitment_config, - ), - ) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_limit() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_blocks_with_limit( - &self, - start_slot: Slot, - limit: usize, - ) -> ClientResult> { - self.invoke((self.rpc_client.as_ref()).get_confirmed_blocks_with_limit(start_slot, limit)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_blocks_with_limit_and_commitment() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_blocks_with_limit_and_commitment( - &self, - start_slot: Slot, - limit: usize, - commitment_config: CommitmentConfig, - ) -> ClientResult> { - self.invoke( - (self.rpc_client.as_ref()).get_confirmed_blocks_with_limit_and_commitment( - start_slot, - limit, - commitment_config, - ), - ) - } - /// Get confirmed signatures for transactions involving an address. /// /// Returns up to 1000 signatures, ordered from newest to oldest. 
@@ -2517,34 +2320,6 @@ impl RpcClient { ) } - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_signatures_for_address() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_signatures_for_address2( - &self, - address: &Pubkey, - ) -> ClientResult> { - self.invoke((self.rpc_client.as_ref()).get_confirmed_signatures_for_address2(address)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_signatures_for_address_with_config() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_signatures_for_address2_with_config( - &self, - address: &Pubkey, - config: GetConfirmedSignaturesForAddress2Config, - ) -> ClientResult> { - self.invoke( - (self.rpc_client.as_ref()) - .get_confirmed_signatures_for_address2_with_config(address, config), - ) - } - /// Returns transaction details for a confirmed transaction. /// /// This method uses the [`Finalized`] [commitment level][cl]. @@ -2554,12 +2329,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getTransaction`] RPC method, - /// unless the remote node version is less than 1.7, in which case it maps - /// to the [`getConfirmedTransaction`] RPC method. + /// This method corresponds directly to the [`getTransaction`] RPC method. /// /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction - /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -2606,12 +2378,9 @@ impl RpcClient { /// /// # RPC Reference /// - /// This method corresponds directly to the [`getTransaction`] RPC method, - /// unless the remote node version is less than 1.7, in which case it maps - /// to the [`getConfirmedTransaction`] RPC method. + /// This method corresponds directly to the [`getTransaction`] RPC method. 
/// /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction - /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -2655,34 +2424,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_transaction_with_config(signature, config)) } - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_transaction() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_transaction( - &self, - signature: &Signature, - encoding: UiTransactionEncoding, - ) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_confirmed_transaction(signature, encoding)) - } - - #[deprecated( - since = "1.7.0", - note = "Please use RpcClient::get_transaction_with_config() instead" - )] - #[allow(deprecated)] - pub fn get_confirmed_transaction_with_config( - &self, - signature: &Signature, - config: RpcConfirmedTransactionConfig, - ) -> ClientResult { - self.invoke( - (self.rpc_client.as_ref()).get_confirmed_transaction_with_config(signature, config), - ) - } - /// Returns the estimated production time of a block. 
/// /// # RPC Reference @@ -3696,87 +3437,6 @@ impl RpcClient { ) } - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub fn get_fees(&self) -> ClientResult { - self.invoke((self.rpc_client.as_ref()).get_fees()) - } - - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash_with_commitment` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub fn get_fees_with_commitment(&self, commitment_config: CommitmentConfig) -> RpcResult { - self.invoke((self.rpc_client.as_ref()).get_fees_with_commitment(commitment_config)) - } - - #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] - #[allow(deprecated)] - pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> { - self.invoke((self.rpc_client.as_ref()).get_recent_blockhash()) - } - - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash_with_commitment` instead" - )] - #[allow(deprecated)] - pub fn get_recent_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> RpcResult<(Hash, FeeCalculator, Slot)> { - self.invoke( - (self.rpc_client.as_ref()).get_recent_blockhash_with_commitment(commitment_config), - ) - } - - #[deprecated(since = "1.9.0", note = "Please `get_fee_for_message` instead")] - #[allow(deprecated)] - pub fn get_fee_calculator_for_blockhash( - &self, - blockhash: &Hash, - ) -> ClientResult> { - self.invoke((self.rpc_client.as_ref()).get_fee_calculator_for_blockhash(blockhash)) - } - - #[deprecated( - since = "1.9.0", - note = "Please `get_latest_blockhash_with_commitment` and `get_fee_for_message` instead" - )] - #[allow(deprecated)] - pub fn get_fee_calculator_for_blockhash_with_commitment( - &self, - blockhash: &Hash, - commitment_config: CommitmentConfig, - ) -> RpcResult> { - self.invoke( - (self.rpc_client.as_ref()) - .get_fee_calculator_for_blockhash_with_commitment(blockhash, 
commitment_config), - ) - } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - #[allow(deprecated)] - pub fn get_fee_rate_governor(&self) -> RpcResult { - self.invoke((self.rpc_client.as_ref()).get_fee_rate_governor()) - } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - #[allow(deprecated)] - pub fn get_new_blockhash(&self, blockhash: &Hash) -> ClientResult<(Hash, FeeCalculator)> { - self.invoke((self.rpc_client.as_ref()).get_new_blockhash(blockhash)) - } - pub fn get_first_available_block(&self) -> ClientResult { self.invoke((self.rpc_client.as_ref()).get_first_available_block()) } @@ -3995,7 +3655,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_latest_blockhash()) } - #[allow(deprecated)] pub fn get_latest_blockhash_with_commitment( &self, commitment: CommitmentConfig, @@ -4003,7 +3662,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_latest_blockhash_with_commitment(commitment)) } - #[allow(deprecated)] pub fn is_blockhash_valid( &self, blockhash: &Hash, @@ -4012,7 +3670,6 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).is_blockhash_valid(blockhash, commitment)) } - #[allow(deprecated)] pub fn get_fee_for_message(&self, message: &impl SerializableMessage) -> ClientResult { self.invoke((self.rpc_client.as_ref()).get_fee_for_message(message)) } @@ -4136,7 +3793,7 @@ mod tests { future::ok(Value::Number(Number::from(50))) }); // Failed request - io.add_method("getRecentBlockhash", |params: Params| { + io.add_method("getLatestBlockhash", |params: Params| { if params != Params::None { future::err(Error::invalid_request()) } else { @@ -4168,16 +3825,14 @@ mod tests { .unwrap(); assert_eq!(balance, 50); - #[allow(deprecated)] let blockhash: String = rpc_client - .send(RpcRequest::GetRecentBlockhash, Value::Null) + .send(RpcRequest::GetLatestBlockhash, Value::Null) .unwrap(); assert_eq!(blockhash, 
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"); // Send erroneous parameter - #[allow(deprecated)] let blockhash: ClientResult = - rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"])); + rpc_client.send(RpcRequest::GetLatestBlockhash, json!(["parameter"])); assert!(blockhash.is_err()); } @@ -4204,22 +3859,6 @@ mod tests { assert!(signature.is_err()); } - #[test] - fn test_get_recent_blockhash() { - let rpc_client = RpcClient::new_mock("succeeds".to_string()); - - let expected_blockhash: Hash = PUBKEY.parse().unwrap(); - - let blockhash = rpc_client.get_latest_blockhash().expect("blockhash ok"); - assert_eq!(blockhash, expected_blockhash); - - let rpc_client = RpcClient::new_mock("fails".to_string()); - - #[allow(deprecated)] - let result = rpc_client.get_recent_blockhash(); - assert!(result.is_err()); - } - #[test] fn test_custom_request() { let rpc_client = RpcClient::new_mock("succeeds".to_string()); @@ -4319,7 +3958,6 @@ mod tests { let rpc_client = RpcClient::new_mock("fails".to_string()); - #[allow(deprecated)] let is_err = rpc_client.get_latest_blockhash().is_err(); assert!(is_err); } diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 19bf50d2e4341b..463e2046867346 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -77,7 +77,7 @@ fn test_rpc_send_tx() { let bob_pubkey = solana_sdk::pubkey::new_rand(); - let req = json_req!("getRecentBlockhash", json!([])); + let req = json_req!("getLatestBlockhash", json!([])); let json = post_rpc(req, &rpc_url); let blockhash: Hash = json["result"]["value"]["blockhash"] diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 7ac36cef70b103..d62a61ec81fe00 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -39,13 +39,12 @@ use { solana_rpc_client_api::{ config::*, custom_error::RpcCustomError, - deprecated_config::*, - filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, + filter::{Memcmp, RpcFilterType}, request::{ TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, 
MAX_GET_CONFIRMED_BLOCKS_RANGE, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT, - MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_PROGRAM_ACCOUNT_FILTERS, - MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS, + MAX_GET_PROGRAM_ACCOUNT_FILTERS, MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, + MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS, MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY, NUM_LARGEST_ACCOUNTS, }, response::{Response as RpcResponse, *}, @@ -62,22 +61,18 @@ use { }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, - account_utils::StateMut, clock::{Slot, UnixTimestamp, MAX_PROCESSING_AGE}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, + epoch_rewards_hasher::EpochRewardsHasher, epoch_schedule::EpochSchedule, exit::Exit, feature_set, - fee_calculator::FeeCalculator, hash::Hash, message::SanitizedMessage, pubkey::{Pubkey, PUBKEY_BYTES}, signature::{Keypair, Signature, Signer}, - stake::state::{StakeActivationStatus, StakeStateV2}, - stake_history::StakeHistory, system_instruction, - sysvar::stake_history, transaction::{ self, AddressLoader, MessageHash, SanitizedTransaction, TransactionError, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, @@ -93,8 +88,9 @@ use { solana_transaction_status::{ map_inner_instructions, BlockEncodingOptions, ConfirmedBlock, ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, - EncodedConfirmedTransactionWithStatusMeta, Reward, RewardType, TransactionBinaryEncoding, - TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, + EncodedConfirmedTransactionWithStatusMeta, Reward, RewardType, Rewards, + TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus, + UiConfirmedBlock, UiTransactionEncoding, }, solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}, spl_token_2022::{ @@ -156,7 +152,6 @@ pub struct JsonRpcConfig { pub rpc_threads: usize, pub 
rpc_niceness_adj: i8, pub full_api: bool, - pub obsolete_v1_7_api: bool, pub rpc_scan_and_fix_roots: bool, pub max_request_body_size: Option, /// Disable the health check, used for tests and TestValidator @@ -270,23 +265,13 @@ impl JsonRpcRequestProcessor { .slot_with_commitment(commitment.commitment); match commitment.commitment { - // Recent variant is deprecated - CommitmentLevel::Recent | CommitmentLevel::Processed => { + CommitmentLevel::Processed => { debug!("RPC using the heaviest slot: {:?}", slot); } - // Root variant is deprecated - CommitmentLevel::Root => { - debug!("RPC using node root: {:?}", slot); - } - // Single variant is deprecated - CommitmentLevel::Single => { - debug!("RPC using confirmed slot: {:?}", slot); - } - // Max variant is deprecated - CommitmentLevel::Max | CommitmentLevel::Finalized => { + CommitmentLevel::Finalized => { debug!("RPC using block: {:?}", slot); } - CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated + CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated }; let r_bank_forks = self.bank_forks.read().unwrap(); @@ -548,6 +533,34 @@ impl JsonRpcRequestProcessor { }) } + fn filter_map_rewards<'a, F>( + rewards: &'a Option, + slot: Slot, + addresses: &'a [String], + reward_type_filter: &'a F, + ) -> HashMap + where + F: Fn(RewardType) -> bool, + { + Self::filter_rewards(rewards, reward_type_filter) + .filter(|reward| addresses.contains(&reward.pubkey)) + .map(|reward| (reward.pubkey.clone(), (reward.clone(), slot))) + .collect() + } + + fn filter_rewards<'a, F>( + rewards: &'a Option, + reward_type_filter: &'a F, + ) -> impl Iterator + where + F: Fn(RewardType) -> bool, + { + rewards + .iter() + .flatten() + .filter(move |reward| reward.reward_type.is_some_and(reward_type_filter)) + } + pub async fn get_inflation_reward( &self, addresses: Vec, @@ -592,7 +605,22 @@ impl JsonRpcRequestProcessor { slot: first_slot_in_epoch, })?; - let 
Ok(Some(first_confirmed_block)) = self + // Determine if partitioned epoch rewards were enabled for the desired + // epoch + let bank = self.get_bank_with_config(context_config)?; + + // DO NOT CLEAN UP with feature_set::enable_partitioned_epoch_reward + // This logic needs to be retained indefinitely to support historical + // rewards before and after feature activation. + let partitioned_epoch_reward_enabled_slot = bank + .feature_set + .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); + let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot + .map(|slot| slot <= first_confirmed_block_in_epoch) + .unwrap_or(false); + + // Get first block in the epoch + let Ok(Some(epoch_boundary_block)) = self .get_block( first_confirmed_block_in_epoch, Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), @@ -605,30 +633,109 @@ impl JsonRpcRequestProcessor { .into()); }; - let addresses: Vec = addresses - .into_iter() - .map(|pubkey| pubkey.to_string()) - .collect(); + // Collect rewards from first block in the epoch if partitioned epoch + // rewards not enabled, or address is a vote account + let mut reward_map: HashMap = { + let addresses: Vec = + addresses.iter().map(|pubkey| pubkey.to_string()).collect(); + Self::filter_map_rewards( + &epoch_boundary_block.rewards, + first_confirmed_block_in_epoch, + &addresses, + &|reward_type| -> bool { + reward_type == RewardType::Voting + || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) + }, + ) + }; - let reward_hash: HashMap = first_confirmed_block - .rewards - .unwrap_or_default() - .into_iter() - .filter_map(|reward| match reward.reward_type? 
{ - RewardType::Staking | RewardType::Voting => addresses - .contains(&reward.pubkey) - .then(|| (reward.clone().pubkey, reward)), - _ => None, - }) - .collect(); + // Append stake account rewards from partitions if partitions epoch + // rewards is enabled + if partitioned_epoch_reward_enabled { + let num_partitions = epoch_boundary_block.num_reward_partitions.expect( + "epoch-boundary block should have num_reward_partitions after partitioned epoch \ + rewards enabled", + ); + + let num_partitions = usize::try_from(num_partitions) + .expect("num_partitions should never exceed usize::MAX"); + let hasher = EpochRewardsHasher::new( + num_partitions, + &Hash::from_str(&epoch_boundary_block.previous_blockhash) + .expect("UiConfirmedBlock::previous_blockhash should be properly formed"), + ); + let mut partition_index_addresses: HashMap> = HashMap::new(); + for address in addresses.iter() { + let address_string = address.to_string(); + // Skip this address if (Voting) rewards were already found in + // the first block of the epoch + if !reward_map.contains_key(&address_string) { + let partition_index = hasher.clone().hash_address_to_partition(address); + partition_index_addresses + .entry(partition_index) + .and_modify(|list| list.push(address_string.clone())) + .or_insert(vec![address_string]); + } + } + + let block_list = self + .get_blocks_with_limit( + first_confirmed_block_in_epoch + 1, + num_partitions, + Some(context_config), + ) + .await?; + + for (partition_index, addresses) in partition_index_addresses.iter() { + let slot = *block_list.get(*partition_index).ok_or_else(|| { + // If block_list.len() too short to contain + // partition_index, the epoch rewards period must be + // currently active. 
+ let rewards_complete_block_height = epoch_boundary_block + .block_height + .map(|block_height| { + block_height + .saturating_add(num_partitions as u64) + .saturating_add(1) + }) + .expect( + "every block after partitioned_epoch_reward_enabled should have a \ + populated block_height", + ); + RpcCustomError::EpochRewardsPeriodActive { + slot: bank.slot(), + current_block_height: bank.block_height(), + rewards_complete_block_height, + } + })?; + + let Ok(Some(block)) = self + .get_block( + slot, + Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), + ) + .await + else { + return Err(RpcCustomError::BlockNotAvailable { slot }.into()); + }; + + let index_reward_map = Self::filter_map_rewards( + &block.rewards, + slot, + addresses, + &|reward_type| -> bool { reward_type == RewardType::Staking }, + ); + reward_map.extend(index_reward_map); + } + } let rewards = addresses .iter() .map(|address| { - if let Some(reward) = reward_hash.get(address) { + if let Some((reward, slot)) = reward_map.get(&address.to_string()) { return Some(RpcInflationReward { epoch, - effective_slot: first_confirmed_block_in_epoch, + effective_slot: *slot, amount: reward.lamports.unsigned_abs(), post_balance: reward.post_balance, commission: reward.commission, @@ -678,75 +785,6 @@ impl JsonRpcRequestProcessor { Ok(new_response(&bank, bank.get_balance(pubkey))) } - fn get_recent_blockhash( - &self, - commitment: Option, - ) -> Result> { - let bank = self.bank(commitment); - let blockhash = bank.confirmed_last_blockhash(); - let lamports_per_signature = bank - .get_lamports_per_signature_for_blockhash(&blockhash) - .unwrap(); - Ok(new_response( - &bank, - RpcBlockhashFeeCalculator { - blockhash: blockhash.to_string(), - fee_calculator: FeeCalculator::new(lamports_per_signature), - }, - )) - } - - fn get_fees(&self, commitment: Option) -> Result> { - let bank = self.bank(commitment); - let blockhash = bank.confirmed_last_blockhash(); - let lamports_per_signature = bank - 
.get_lamports_per_signature_for_blockhash(&blockhash) - .unwrap(); - #[allow(deprecated)] - let last_valid_slot = bank - .get_blockhash_last_valid_slot(&blockhash) - .expect("bank blockhash queue should contain blockhash"); - let last_valid_block_height = bank - .get_blockhash_last_valid_block_height(&blockhash) - .expect("bank blockhash queue should contain blockhash"); - Ok(new_response( - &bank, - RpcFees { - blockhash: blockhash.to_string(), - fee_calculator: FeeCalculator::new(lamports_per_signature), - last_valid_slot, - last_valid_block_height, - }, - )) - } - - fn get_fee_calculator_for_blockhash( - &self, - blockhash: &Hash, - commitment: Option, - ) -> Result>> { - let bank = self.bank(commitment); - let lamports_per_signature = bank.get_lamports_per_signature_for_blockhash(blockhash); - Ok(new_response( - &bank, - lamports_per_signature.map(|lamports_per_signature| RpcFeeCalculator { - fee_calculator: FeeCalculator::new(lamports_per_signature), - }), - )) - } - - fn get_fee_rate_governor(&self) -> RpcResponse { - let bank = self.bank(None); - #[allow(deprecated)] - let fee_rate_governor = bank.get_fee_rate_governor(); - new_response( - &bank, - RpcFeeRateGovernor { - fee_rate_governor: fee_rate_governor.clone(), - }, - ) - } - pub fn confirm_transaction( &self, signature: &Signature, @@ -847,11 +885,6 @@ impl JsonRpcRequestProcessor { Ok(bank.transaction_count()) } - fn get_total_supply(&self, commitment: Option) -> Result { - let bank = self.bank(commitment); - Ok(bank.capitalization()) - } - fn get_cached_largest_accounts( &self, filter: &Option, @@ -876,6 +909,7 @@ impl JsonRpcRequestProcessor { ) -> RpcCustomResult>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); + let sort_results = config.sort_results.unwrap_or(true); if let Some((slot, accounts)) = self.get_cached_largest_accounts(&config.filter) { Ok(RpcResponse { @@ -900,7 +934,12 @@ impl JsonRpcRequestProcessor { (HashSet::new(), 
AccountAddressFilter::Exclude) }; let accounts = bank - .get_largest_accounts(NUM_LARGEST_ACCOUNTS, &addresses, address_filter) + .get_largest_accounts( + NUM_LARGEST_ACCOUNTS, + &addresses, + address_filter, + sort_results, + ) .map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })? @@ -1770,87 +1809,6 @@ impl JsonRpcRequestProcessor { slot } - pub fn get_stake_activation( - &self, - pubkey: &Pubkey, - config: Option, - ) -> Result { - let config = config.unwrap_or_default(); - let bank = self.get_bank_with_config(RpcContextConfig { - commitment: config.commitment, - min_context_slot: config.min_context_slot, - })?; - let epoch = config.epoch.unwrap_or_else(|| bank.epoch()); - if epoch != bank.epoch() { - return Err(Error::invalid_params(format!( - "Invalid param: epoch {epoch:?}. Only the current epoch ({:?}) is supported", - bank.epoch() - ))); - } - - let stake_account = bank - .get_account(pubkey) - .ok_or_else(|| Error::invalid_params("Invalid param: account not found".to_string()))?; - let stake_state: StakeStateV2 = stake_account - .state() - .map_err(|_| Error::invalid_params("Invalid param: not a stake account".to_string()))?; - let delegation = stake_state.delegation(); - - let rent_exempt_reserve = stake_state - .meta() - .ok_or_else(|| { - Error::invalid_params("Invalid param: stake account not initialized".to_string()) - })? 
- .rent_exempt_reserve; - - let delegation = match delegation { - None => { - return Ok(RpcStakeActivation { - state: StakeActivationState::Inactive, - active: 0, - inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve), - }) - } - Some(delegation) => delegation, - }; - - let stake_history_account = bank - .get_account(&stake_history::id()) - .ok_or_else(Error::internal_error)?; - let stake_history = - solana_sdk::account::from_account::(&stake_history_account) - .ok_or_else(Error::internal_error)?; - let new_rate_activation_epoch = bank.new_warmup_cooldown_rate_epoch(); - - let StakeActivationStatus { - effective, - activating, - deactivating, - } = delegation.stake_activating_and_deactivating( - epoch, - &stake_history, - new_rate_activation_epoch, - ); - let stake_activation_state = if deactivating > 0 { - StakeActivationState::Deactivating - } else if activating > 0 { - StakeActivationState::Activating - } else if effective > 0 { - StakeActivationState::Active - } else { - StakeActivationState::Inactive - }; - let inactive_stake = stake_account - .lamports() - .saturating_sub(effective) - .saturating_sub(rent_exempt_reserve); - Ok(RpcStakeActivation { - state: stake_activation_state, - active: effective, - inactive: inactive_stake, - }) - } - pub fn get_token_account_balance( &self, pubkey: &Pubkey, @@ -2421,7 +2379,7 @@ fn encode_account( /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by /// owner. /// NOTE: `optimize_filters()` should almost always be called before using this method because of -/// the strict match on `MemcmpEncodedBytes::Bytes`. +/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`. 
fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option { if !is_known_spl_token_id(program_id) { return None; @@ -2435,28 +2393,21 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), - #[allow(deprecated)] - RpcFilterType::Memcmp(Memcmp { - offset, - bytes: MemcmpEncodedBytes::Bytes(bytes), - .. - }) if *offset == account_packed_len && *program_id == token_2022::id() => { - memcmp_filter = Some(bytes) - } - #[allow(deprecated)] - RpcFilterType::Memcmp(Memcmp { - offset, - bytes: MemcmpEncodedBytes::Bytes(bytes), - .. - }) if *offset == SPL_TOKEN_ACCOUNT_OWNER_OFFSET => { - if bytes.len() == PUBKEY_BYTES { - owner_key = Pubkey::try_from(&bytes[..]).ok(); - } else { - incorrect_owner_len = Some(bytes.len()); + RpcFilterType::Memcmp(memcmp) => { + let offset = memcmp.offset(); + if let Some(bytes) = memcmp.raw_bytes_as_ref() { + if offset == account_packed_len && *program_id == token_2022::id() { + memcmp_filter = Some(bytes); + } else if offset == SPL_TOKEN_ACCOUNT_OWNER_OFFSET { + if bytes.len() == PUBKEY_BYTES { + owner_key = Pubkey::try_from(bytes).ok(); + } else { + incorrect_owner_len = Some(bytes.len()); + } + } } } RpcFilterType::TokenAccountState => token_account_state_filter = true, - _ => {} } } if data_size_filter == Some(account_packed_len as u64) @@ -2479,7 +2430,7 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by /// mint. /// NOTE: `optimize_filters()` should almost always be called before using this method because of -/// the strict match on `MemcmpEncodedBytes::Bytes`. +/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`. 
fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option { if !is_known_spl_token_id(program_id) { return None; @@ -2493,28 +2444,21 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), - #[allow(deprecated)] - RpcFilterType::Memcmp(Memcmp { - offset, - bytes: MemcmpEncodedBytes::Bytes(bytes), - .. - }) if *offset == account_packed_len && *program_id == token_2022::id() => { - memcmp_filter = Some(bytes) - } - #[allow(deprecated)] - RpcFilterType::Memcmp(Memcmp { - offset, - bytes: MemcmpEncodedBytes::Bytes(bytes), - .. - }) if *offset == SPL_TOKEN_ACCOUNT_MINT_OFFSET => { - if bytes.len() == PUBKEY_BYTES { - mint = Pubkey::try_from(&bytes[..]).ok(); - } else { - incorrect_mint_len = Some(bytes.len()); + RpcFilterType::Memcmp(memcmp) => { + let offset = memcmp.offset(); + if let Some(bytes) = memcmp.raw_bytes_as_ref() { + if offset == account_packed_len && *program_id == token_2022::id() { + memcmp_filter = Some(bytes); + } else if offset == SPL_TOKEN_ACCOUNT_MINT_OFFSET { + if bytes.len() == PUBKEY_BYTES { + mint = Pubkey::try_from(bytes).ok(); + } else { + incorrect_mint_len = Some(bytes.len()); + } + } } } RpcFilterType::TokenAccountState => token_account_state_filter = true, - _ => {} } } if data_size_filter == Some(account_packed_len as u64) @@ -3552,6 +3496,10 @@ pub mod rpc_full { Some(RpcContactInfo { pubkey: contact_info.pubkey().to_string(), gossip: contact_info.gossip().ok(), + tvu: contact_info + .tvu(Protocol::UDP) + .ok() + .filter(|addr| socket_addr_space.check(addr)), tpu: contact_info .tpu(Protocol::UDP) .ok() @@ -3560,6 +3508,22 @@ pub mod rpc_full { .tpu(Protocol::QUIC) .ok() .filter(|addr| socket_addr_space.check(addr)), + tpu_forwards: contact_info + .tpu_forwards(Protocol::UDP) + .ok() + .filter(|addr| socket_addr_space.check(addr)), + tpu_forwards_quic: contact_info + 
.tpu_forwards(Protocol::QUIC) + .ok() + .filter(|addr| socket_addr_space.check(addr)), + tpu_vote: contact_info + .tpu_vote() + .ok() + .filter(|addr| socket_addr_space.check(addr)), + serve_repair: contact_info + .serve_repair(Protocol::UDP) + .ok() + .filter(|addr| socket_addr_space.check(addr)), rpc: contact_info .rpc() .ok() @@ -4167,452 +4131,6 @@ fn rpc_perf_sample_from_perf_sample(slot: u64, sample: PerfSample) -> RpcPerfSam } } -pub mod rpc_deprecated_v1_18 { - use super::*; - #[rpc] - pub trait DeprecatedV1_18 { - type Metadata; - - // DEPRECATED - #[rpc(meta, name = "getStakeActivation")] - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result; - } - - pub struct DeprecatedV1_18Impl; - impl DeprecatedV1_18 for DeprecatedV1_18Impl { - type Metadata = JsonRpcRequestProcessor; - - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result { - debug!( - "get_stake_activation rpc request received: {:?}", - pubkey_str - ); - let pubkey = verify_pubkey(&pubkey_str)?; - meta.get_stake_activation(&pubkey, config) - } - } -} - -// RPC methods deprecated in v1.9 -pub mod rpc_deprecated_v1_9 { - #![allow(deprecated)] - use super::*; - #[rpc] - pub trait DeprecatedV1_9 { - type Metadata; - - #[rpc(meta, name = "getRecentBlockhash")] - fn get_recent_blockhash( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getFees")] - fn get_fees( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getFeeCalculatorForBlockhash")] - fn get_fee_calculator_for_blockhash( - &self, - meta: Self::Metadata, - blockhash: String, - commitment: Option, - ) -> Result>>; - - #[rpc(meta, name = "getFeeRateGovernor")] - fn get_fee_rate_governor( - &self, - meta: Self::Metadata, - ) -> Result>; - - #[rpc(meta, name = "getSnapshotSlot")] - fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result; - 
} - - pub struct DeprecatedV1_9Impl; - impl DeprecatedV1_9 for DeprecatedV1_9Impl { - type Metadata = JsonRpcRequestProcessor; - - fn get_recent_blockhash( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result> { - debug!("get_recent_blockhash rpc request received"); - meta.get_recent_blockhash(commitment) - } - - fn get_fees( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result> { - debug!("get_fees rpc request received"); - meta.get_fees(commitment) - } - - fn get_fee_calculator_for_blockhash( - &self, - meta: Self::Metadata, - blockhash: String, - commitment: Option, - ) -> Result>> { - debug!("get_fee_calculator_for_blockhash rpc request received"); - let blockhash = - Hash::from_str(&blockhash).map_err(|e| Error::invalid_params(format!("{e:?}")))?; - meta.get_fee_calculator_for_blockhash(&blockhash, commitment) - } - - fn get_fee_rate_governor( - &self, - meta: Self::Metadata, - ) -> Result> { - debug!("get_fee_rate_governor rpc request received"); - Ok(meta.get_fee_rate_governor()) - } - - fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result { - debug!("get_snapshot_slot rpc request received"); - - meta.snapshot_config - .and_then(|snapshot_config| { - snapshot_utils::get_highest_full_snapshot_archive_slot( - snapshot_config.full_snapshot_archives_dir, - ) - }) - .ok_or_else(|| RpcCustomError::NoSnapshot.into()) - } - } -} - -// RPC methods deprecated in v1.7 -pub mod rpc_deprecated_v1_7 { - #![allow(deprecated)] - use super::*; - #[rpc] - pub trait DeprecatedV1_7 { - type Metadata; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedBlock")] - fn get_confirmed_block( - &self, - meta: Self::Metadata, - slot: Slot, - config: Option>, - ) -> BoxFuture>>; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedBlocks")] - fn get_confirmed_blocks( - &self, - meta: Self::Metadata, - start_slot: Slot, - config: Option, - commitment: Option, - ) -> BoxFuture>>; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedBlocksWithLimit")] - 
fn get_confirmed_blocks_with_limit( - &self, - meta: Self::Metadata, - start_slot: Slot, - limit: usize, - commitment: Option, - ) -> BoxFuture>>; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedTransaction")] - fn get_confirmed_transaction( - &self, - meta: Self::Metadata, - signature_str: String, - config: Option>, - ) -> BoxFuture>>; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedSignaturesForAddress2")] - fn get_confirmed_signatures_for_address2( - &self, - meta: Self::Metadata, - address: String, - config: Option, - ) -> BoxFuture>>; - } - - pub struct DeprecatedV1_7Impl; - impl DeprecatedV1_7 for DeprecatedV1_7Impl { - type Metadata = JsonRpcRequestProcessor; - - fn get_confirmed_block( - &self, - meta: Self::Metadata, - slot: Slot, - config: Option>, - ) -> BoxFuture>> { - debug!("get_confirmed_block rpc request received: {:?}", slot); - Box::pin(async move { - meta.get_block(slot, config.map(|config| config.convert())) - .await - }) - } - - fn get_confirmed_blocks( - &self, - meta: Self::Metadata, - start_slot: Slot, - config: Option, - commitment: Option, - ) -> BoxFuture>> { - let (end_slot, maybe_commitment) = - config.map(|config| config.unzip()).unwrap_or_default(); - debug!( - "get_confirmed_blocks rpc request received: {}-{:?}", - start_slot, end_slot - ); - Box::pin(async move { - meta.get_blocks( - start_slot, - end_slot, - Some(RpcContextConfig { - commitment: commitment.or(maybe_commitment), - min_context_slot: None, - }), - ) - .await - }) - } - - fn get_confirmed_blocks_with_limit( - &self, - meta: Self::Metadata, - start_slot: Slot, - limit: usize, - commitment: Option, - ) -> BoxFuture>> { - debug!( - "get_confirmed_blocks_with_limit rpc request received: {}-{}", - start_slot, limit, - ); - Box::pin(async move { - meta.get_blocks_with_limit( - start_slot, - limit, - Some(RpcContextConfig { - commitment, - min_context_slot: None, - }), - ) - .await - }) - } - - fn get_confirmed_transaction( - &self, - meta: Self::Metadata, - 
signature_str: String, - config: Option>, - ) -> BoxFuture>> { - debug!( - "get_confirmed_transaction rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str); - if let Err(err) = signature { - return Box::pin(future::err(err)); - } - Box::pin(async move { - meta.get_transaction(signature.unwrap(), config.map(|config| config.convert())) - .await - }) - } - - fn get_confirmed_signatures_for_address2( - &self, - meta: Self::Metadata, - address: String, - config: Option, - ) -> BoxFuture>> { - let config = config.unwrap_or_default(); - let commitment = config.commitment; - let verification = verify_and_parse_signatures_for_address_params( - address, - config.before, - config.until, - config.limit, - ); - - match verification { - Err(err) => Box::pin(future::err(err)), - Ok((address, before, until, limit)) => Box::pin(async move { - meta.get_signatures_for_address( - address, - before, - until, - limit, - RpcContextConfig { - commitment, - min_context_slot: None, - }, - ) - .await - }), - } - } - } -} - -// Obsolete RPC methods, collected for easy deactivation and removal -pub mod rpc_obsolete_v1_7 { - use super::*; - #[rpc] - pub trait ObsoleteV1_7 { - type Metadata; - - // DEPRECATED - #[rpc(meta, name = "confirmTransaction")] - fn confirm_transaction( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>; - - // DEPRECATED - #[rpc(meta, name = "getSignatureStatus")] - fn get_signature_status( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>>; - - // DEPRECATED (used by Trust Wallet) - #[rpc(meta, name = "getSignatureConfirmation")] - fn get_signature_confirmation( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>; - - // DEPRECATED - #[rpc(meta, name = "getTotalSupply")] - fn get_total_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; - - // DEPRECATED - #[rpc(meta, 
name = "getConfirmedSignaturesForAddress")] - fn get_confirmed_signatures_for_address( - &self, - meta: Self::Metadata, - pubkey_str: String, - start_slot: Slot, - end_slot: Slot, - ) -> Result>; - } - - pub struct ObsoleteV1_7Impl; - impl ObsoleteV1_7 for ObsoleteV1_7Impl { - type Metadata = JsonRpcRequestProcessor; - - fn confirm_transaction( - &self, - meta: Self::Metadata, - id: String, - commitment: Option, - ) -> Result> { - debug!("confirm_transaction rpc request received: {:?}", id); - let signature = verify_signature(&id)?; - meta.confirm_transaction(&signature, commitment) - } - - fn get_signature_status( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>> { - debug!( - "get_signature_status rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str)?; - meta.get_signature_status(signature, commitment) - } - - fn get_signature_confirmation( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result> { - debug!( - "get_signature_confirmation rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str)?; - meta.get_signature_confirmation_status(signature, commitment) - } - - fn get_total_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_total_supply rpc request received"); - meta.get_total_supply(commitment) - } - - fn get_confirmed_signatures_for_address( - &self, - meta: Self::Metadata, - pubkey_str: String, - start_slot: Slot, - end_slot: Slot, - ) -> Result> { - debug!( - "get_confirmed_signatures_for_address rpc request received: {:?} {:?}-{:?}", - pubkey_str, start_slot, end_slot - ); - let pubkey = verify_pubkey(&pubkey_str)?; - if end_slot < start_slot { - return Err(Error::invalid_params(format!( - "start_slot {start_slot} must be less than or equal to end_slot {end_slot}" - ))); - } - if end_slot - start_slot > 
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE { - return Err(Error::invalid_params(format!( - "Slot range too large; max {MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE}" - ))); - } - Ok(meta - .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) - .iter() - .map(|signature| signature.to_string()) - .collect()) - } - } -} - const MAX_BASE58_SIZE: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes const MAX_BASE64_SIZE: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes fn decode_and_deserialize( @@ -4787,8 +4305,7 @@ pub fn populate_blockstore_for_tests( pub mod tests { use { super::{ - rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_9::*, - rpc_full::*, rpc_minimal::*, *, + rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_full::*, rpc_minimal::*, *, }, crate::{ optimistically_confirmed_bank_tracker::{ @@ -4813,7 +4330,7 @@ pub mod tests { JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE, JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION, }, - filter::{Memcmp, MemcmpEncodedBytes}, + filter::MemcmpEncodedBytes, }, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::BankTestConfig, @@ -4825,9 +4342,8 @@ pub mod tests { self, state::{AddressLookupTable, LookupTableMeta}, }, - clock::MAX_PROCESSING_AGE, compute_budget::ComputeBudgetInstruction, - fee_calculator::{FeeRateGovernor, DEFAULT_BURN_PERCENT}, + fee_calculator::FeeRateGovernor, hash::{hash, Hash}, instruction::InstructionError, message::{ @@ -4989,7 +4505,6 @@ pub mod tests { io.extend_with(rpc_accounts::AccountsDataImpl.to_delegate()); io.extend_with(rpc_accounts_scan::AccountsScanImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); - io.extend_with(rpc_deprecated_v1_9::DeprecatedV1_9Impl.to_delegate()); Self { io, meta, @@ -5321,8 +4836,13 @@ pub mod tests { "pubkey": rpc.identity.to_string(), "gossip": "127.0.0.1:8000", "shredVersion": 0u16, + "tvu": "127.0.0.1:8001", "tpu": "127.0.0.1:8003", 
"tpuQuic": "127.0.0.1:8009", + "tpuForwards": "127.0.0.1:8004", + "tpuForwardsQuic": "127.0.0.1:8010", + "tpuVote": "127.0.0.1:8005", + "serveRepair": "127.0.0.1:8008", "rpc": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PORT), "pubsub": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT), "version": format!("{version}"), @@ -5331,8 +4851,13 @@ pub mod tests { "pubkey": rpc.leader_pubkey().to_string(), "gossip": "127.0.0.1:1235", "shredVersion": 0u16, + "tvu": "127.0.0.1:1236", "tpu": "127.0.0.1:1234", "tpuQuic": "127.0.0.1:1240", + "tpuForwards": "127.0.0.1:1239", + "tpuForwardsQuic": "127.0.0.1:1245", + "tpuVote": "127.0.0.1:1241", + "serveRepair": "127.0.0.1:1242", "rpc": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PORT), "pubsub": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT), "version": format!("{version}"), @@ -6847,146 +6372,6 @@ pub mod tests { ); } - #[test] - fn test_rpc_get_recent_blockhash() { - let rpc = RpcHandler::start(); - let bank = rpc.working_bank(); - let recent_blockhash = bank.confirmed_last_blockhash(); - let RpcHandler { meta, io, .. } = rpc; - - let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}"#; - let res = io.handle_request_sync(req, meta); - let expected = json!({ - "jsonrpc": "2.0", - "result": { - "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, - "value":{ - "blockhash": recent_blockhash.to_string(), - "feeCalculator": { - "lamportsPerSignature": TEST_SIGNATURE_FEE, - } - }, - }, - "id": 1 - }); - let expected: Response = - serde_json::from_value(expected).expect("expected response deserialization"); - let result: Response = serde_json::from_str(&res.expect("actual response")) - .expect("actual response deserialization"); - assert_eq!(result, expected); - } - - #[test] - fn test_rpc_get_fees() { - let rpc = RpcHandler::start(); - let bank = rpc.working_bank(); - let recent_blockhash = bank.confirmed_last_blockhash(); - let RpcHandler { meta, io, .. 
} = rpc; - - let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFees"}"#; - let res = io.handle_request_sync(req, meta); - let expected = json!({ - "jsonrpc": "2.0", - "result": { - "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, - "value": { - "blockhash": recent_blockhash.to_string(), - "feeCalculator": { - "lamportsPerSignature": TEST_SIGNATURE_FEE, - }, - "lastValidSlot": MAX_PROCESSING_AGE, - "lastValidBlockHeight": MAX_PROCESSING_AGE, - }, - }, - "id": 1 - }); - let expected: Response = - serde_json::from_value(expected).expect("expected response deserialization"); - let result: Response = serde_json::from_str(&res.expect("actual response")) - .expect("actual response deserialization"); - assert_eq!(result, expected); - } - - #[test] - fn test_rpc_get_fee_calculator_for_blockhash() { - let rpc = RpcHandler::start(); - let bank = rpc.working_bank(); - let recent_blockhash = bank.confirmed_last_blockhash(); - let RpcHandler { meta, io, .. } = rpc; - - let lamports_per_signature = bank.get_lamports_per_signature(); - let fee_calculator = RpcFeeCalculator { - fee_calculator: FeeCalculator::new(lamports_per_signature), - }; - - let req = format!( - r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{recent_blockhash:?}"]}}"# - ); - let res = io.handle_request_sync(&req, meta.clone()); - let expected = json!({ - "jsonrpc": "2.0", - "result": { - "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, - "value":fee_calculator, - }, - "id": 1 - }); - let expected: Response = - serde_json::from_value(expected).expect("expected response deserialization"); - let result: Response = serde_json::from_str(&res.expect("actual response")) - .expect("actual response deserialization"); - assert_eq!(result, expected); - - // Expired (non-existent) blockhash - let req = format!( - r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#, - Hash::default() - ); - let res = 
io.handle_request_sync(&req, meta); - let expected = json!({ - "jsonrpc": "2.0", - "result": { - "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, - "value":Value::Null, - }, - "id": 1 - }); - let expected: Response = - serde_json::from_value(expected).expect("expected response deserialization"); - let result: Response = serde_json::from_str(&res.expect("actual response")) - .expect("actual response deserialization"); - assert_eq!(result, expected); - } - - #[test] - fn test_rpc_get_fee_rate_governor() { - let RpcHandler { meta, io, .. } = RpcHandler::start(); - - let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#; - let res = io.handle_request_sync(req, meta); - let expected = json!({ - "jsonrpc": "2.0", - "result": { - "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, - "value":{ - "feeRateGovernor": { - "burnPercent": DEFAULT_BURN_PERCENT, - "maxLamportsPerSignature": TEST_SIGNATURE_FEE, - "minLamportsPerSignature": TEST_SIGNATURE_FEE, - "targetLamportsPerSignature": TEST_SIGNATURE_FEE, - "targetSignaturesPerSlot": 0 - } - }, - }, - "id": 1 - }); - let expected: Response = - serde_json::from_value(expected).expect("expected response deserialization"); - let result: Response = serde_json::from_str(&res.expect("actual response")) - .expect("actual response deserialization"); - assert_eq!(result, expected); - } - #[test] fn test_rpc_fail_request_airdrop() { let RpcHandler { meta, io, .. 
} = RpcHandler::start(); diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 00f319f1c27b6f..fed748d709472b 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -5,11 +5,7 @@ use { cluster_tpu_info::ClusterTpuInfo, max_slots::MaxSlots, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, - rpc::{ - rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_18::*, - rpc_deprecated_v1_7::*, rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, - rpc_obsolete_v1_7::*, *, - }, + rpc::{rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_full::*, rpc_minimal::*, *}, rpc_cache::LargestAccountsCache, rpc_health::*, }, @@ -453,7 +449,6 @@ impl JsonRpcService { }; let full_api = config.full_api; - let obsolete_v1_7_api = config.obsolete_v1_7_api; let max_request_body_size = config .max_request_body_size .unwrap_or(MAX_REQUEST_BODY_SIZE); @@ -508,12 +503,6 @@ impl JsonRpcService { io.extend_with(rpc_accounts::AccountsDataImpl.to_delegate()); io.extend_with(rpc_accounts_scan::AccountsScanImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); - io.extend_with(rpc_deprecated_v1_7::DeprecatedV1_7Impl.to_delegate()); - io.extend_with(rpc_deprecated_v1_9::DeprecatedV1_9Impl.to_delegate()); - io.extend_with(rpc_deprecated_v1_18::DeprecatedV1_18Impl.to_delegate()); - } - if obsolete_v1_7_api { - io.extend_with(rpc_obsolete_v1_7::ObsoleteV1_7Impl.to_delegate()); } let request_middleware = RpcRequestMiddleware::new( diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index aef87f84eb4dea..f8356296e970a6 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -330,7 +330,6 @@ pub(crate) mod tests { log_messages: None, inner_instructions: None, fee_details: FeeDetails::default(), - is_nonce: true, return_data: None, executed_units: 0, accounts_data_len_delta: 0, diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 
ec0ca35fdfc6ca..54a54e9905b201 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -70,6 +70,8 @@ solana-transaction-status = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } +solana-zk-elgamal-proof-program = { workspace = true } +solana-zk-sdk = { workspace = true } solana-zk-token-proof-program = { workspace = true } solana-zk-token-sdk = { workspace = true } static_assertions = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1156b5c3338b1b..fdfb140c98779d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -33,9 +33,6 @@ //! It offers a high-level API that signs transactions //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. -#[allow(deprecated)] -use solana_sdk::recent_blockhashes_account; -pub use solana_sdk::reward_type::RewardType; use { crate::{ bank::{ @@ -99,10 +96,7 @@ use { // solana_perf::perf_libs, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, - loaded_programs::{ - ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, - ProgramCacheMatchCriteria, - }, + loaded_programs::{ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType}, timings::{ExecuteTimingType, ExecuteTimings}, }, solana_sdk::{ @@ -110,8 +104,7 @@ use { create_account_shared_data_with_fields as create_account, from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, }, - account_utils::StateMut, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + bpf_loader_upgradeable, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, @@ -134,7 +127,6 @@ use { incinerator, inflation::Inflation, inner_instruction::InnerInstructions, - loader_v4, message::{AccountKeys, SanitizedMessage}, native_loader, 
native_token::LAMPORTS_PER_SOL, @@ -183,7 +175,6 @@ use { TransactionLoadedAccountsStats, TransactionResults, }, }, - solana_system_program::{get_system_account_kind, SystemAccountKind}, solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, std::{ @@ -205,6 +196,9 @@ use { time::{Duration, Instant}, }, }; +pub use { + partitioned_epoch_rewards::KeyedRewardsAndNumPartitions, solana_sdk::reward_type::RewardType, +}; #[cfg(feature = "dev-context-only-utils")] use { solana_accounts_db::accounts_db::{ @@ -212,6 +206,7 @@ use { }, solana_program_runtime::{loaded_programs::ProgramCacheForTxBatch, sysvar_cache::SysvarCache}, solana_svm::program_loader::load_program_with_pubkey, + solana_system_program::{get_system_account_kind, SystemAccountKind}, }; /// params to `verify_accounts_hash` @@ -231,6 +226,7 @@ pub mod epoch_accounts_hash_utils; mod fee_distribution; mod metrics; pub(crate) mod partitioned_epoch_rewards; +mod recent_blockhashes_account; mod serde_snapshot; mod sysvar_cache; pub(crate) mod tests; @@ -602,6 +598,7 @@ impl PartialEq for Bank { collector_fee_details: _, compute_budget: _, transaction_account_lock_limit: _, + fee_structure: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. @@ -887,6 +884,9 @@ pub struct Bank { /// The max number of accounts that a transaction may lock. transaction_account_lock_limit: Option, + + /// Fee structure to use for assessing transaction fees. 
+ fee_structure: FeeStructure, } struct VoteWithStakeDelegations { @@ -933,10 +933,10 @@ struct PrevEpochInflationRewards { foundation_rate: f64, } -pub struct CommitTransactionCounts { - pub committed_transactions_count: u64, - pub committed_non_vote_transactions_count: u64, - pub committed_with_failure_result_count: u64, +pub struct ExecutedTransactionCounts { + pub executed_transactions_count: u64, + pub executed_non_vote_transactions_count: u64, + pub executed_with_failure_result_count: u64, pub signature_count: u64, } @@ -1004,14 +1004,11 @@ impl Bank { collector_fee_details: RwLock::new(CollectorFeeDetails::default()), compute_budget: None, transaction_account_lock_limit: None, + fee_structure: FeeStructure::default(), }; - bank.transaction_processor = TransactionBatchProcessor::new( - bank.slot, - bank.epoch, - bank.epoch_schedule.clone(), - HashSet::default(), - ); + bank.transaction_processor = + TransactionBatchProcessor::new(bank.slot, bank.epoch, HashSet::default()); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; @@ -1253,6 +1250,7 @@ impl Bank { collector_fee_details: RwLock::new(CollectorFeeDetails::default()), compute_budget: parent.compute_budget, transaction_account_lock_limit: parent.transaction_account_lock_limit, + fee_structure: parent.fee_structure.clone(), }; let (_, ancestors_time_us) = measure_us!({ @@ -1283,12 +1281,17 @@ impl Bank { } }); + let (_epoch, slot_index) = new.epoch_schedule.get_epoch_and_slot_index(new.slot); + let slots_in_epoch = new.epoch_schedule.get_slots_in_epoch(new.epoch); + let (_, cache_preparation_time_us) = measure_us!(new .transaction_processor .prepare_program_cache_for_upcoming_feature_set( &new, &new.compute_active_feature_set(true).0, &new.compute_budget.unwrap_or_default(), + slot_index, + slots_in_epoch, )); // Update sysvars before processing transactions @@ -1296,7 +1299,6 @@ impl Bank { 
new.update_slot_hashes(); new.update_stake_history(Some(parent.epoch())); new.update_clock(Some(parent.epoch())); - new.update_fees(); new.update_last_restart_slot() }); @@ -1639,14 +1641,11 @@ impl Bank { collector_fee_details: RwLock::new(CollectorFeeDetails::default()), compute_budget: runtime_config.compute_budget, transaction_account_lock_limit: runtime_config.transaction_account_lock_limit, + fee_structure: FeeStructure::default(), }; - bank.transaction_processor = TransactionBatchProcessor::new( - bank.slot, - bank.epoch, - bank.epoch_schedule.clone(), - HashSet::default(), - ); + bank.transaction_processor = + TransactionBatchProcessor::new(bank.slot, bank.epoch, HashSet::default()); let thread_pool = ThreadPoolBuilder::new() .thread_name(|i| format!("solBnkNewFlds{i:02}")) @@ -2034,21 +2033,6 @@ impl Bank { } } - #[allow(deprecated)] - fn update_fees(&self) { - if !self - .feature_set - .is_active(&feature_set::disable_fees_sysvar::id()) - { - self.update_sysvar_account(&sysvar::fees::id(), |account| { - create_account( - &sysvar::fees::Fees::new(&self.fee_rate_governor.create_fee_calculator()), - self.inherit_specially_retained_account_fields(account), - ) - }); - } - } - fn update_rent(&self) { self.update_sysvar_account(&sysvar::rent::id(), |account| { create_account( @@ -2935,9 +2919,6 @@ impl Bank { self.capitalization.fetch_add(account.lamports(), Relaxed); self.accounts_data_size_initial += account.data().len() as u64; } - // updating sysvars (the fees sysvar in this case) now depends on feature activations in - // genesis_config.accounts above - self.update_fees(); for (pubkey, account) in genesis_config.rewards_pools.iter() { assert!( @@ -2971,7 +2952,6 @@ impl Bank { self.slots_per_year = genesis_config.slots_per_year(); self.epoch_schedule = genesis_config.epoch_schedule.clone(); - self.transaction_processor.epoch_schedule = genesis_config.epoch_schedule.clone(); self.inflation = Arc::new(RwLock::new(genesis_config.inflation)); @@ -3074,11 
+3054,6 @@ impl Bank { blockhash_queue.get_lamports_per_signature(hash) } - #[deprecated(since = "1.9.0", note = "Please use `get_fee_for_message` instead")] - pub fn get_fee_rate_governor(&self) -> &FeeRateGovernor { - &self.fee_rate_governor - } - pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option { let lamports_per_signature = { let blockhash_queue = self.blockhash_queue.read().unwrap(); @@ -3132,19 +3107,6 @@ impl Bank { ) } - #[deprecated( - since = "1.6.11", - note = "Please use `get_blockhash_last_valid_block_height`" - )] - pub fn get_blockhash_last_valid_slot(&self, blockhash: &Hash) -> Option { - let blockhash_queue = self.blockhash_queue.read().unwrap(); - // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue - // length is made variable by epoch - blockhash_queue - .get_hash_age(blockhash) - .map(|age| self.slot + MAX_PROCESSING_AGE as u64 - age) - } - pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option { let blockhash_queue = self.blockhash_queue.read().unwrap(); // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue @@ -3404,6 +3366,7 @@ impl Bank { &mut timings, TransactionProcessingConfig { account_overrides: Some(&account_overrides), + check_program_modification_slot: self.check_program_modification_slot, compute_budget: self.compute_budget(), log_messages_bytes_limit: None, limit_to_load_programs: true, @@ -3720,6 +3683,7 @@ impl Bank { epoch_total_stake: self.epoch_total_stake(self.epoch()), epoch_vote_accounts: self.epoch_vote_accounts(self.epoch()), feature_set: Arc::clone(&self.feature_set), + fee_structure: Some(&self.fee_structure), lamports_per_signature, rent_collector: Some(&self.rent_collector), }; @@ -3935,31 +3899,18 @@ impl Bank { fn filter_program_errors_and_collect_fee( &self, - txs: &[SanitizedTransaction], execution_results: &[TransactionExecutionResult], ) -> Vec> { let mut fees = 0; - let results = txs + let 
results = execution_results .iter() - .zip(execution_results) - .map(|(tx, execution_result)| { - let message = tx.message(); - let details = match &execution_result { - TransactionExecutionResult::Executed { details, .. } => details, - TransactionExecutionResult::NotExecuted(err) => return Err(err.clone()), - }; - - let fee = details.fee_details.total_fee(); - self.check_execution_status_and_charge_fee( - message, - &details.status, - details.is_nonce, - fee, - )?; - - fees += fee; - Ok(()) + .map(|execution_result| match execution_result { + TransactionExecutionResult::Executed { details, .. } => { + fees += details.fee_details.total_fee(); + Ok(()) + } + TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), }) .collect(); @@ -3970,31 +3921,18 @@ impl Bank { // Note: this function is not yet used; next PR will call it behind a feature gate fn filter_program_errors_and_collect_fee_details( &self, - txs: &[SanitizedTransaction], execution_results: &[TransactionExecutionResult], ) -> Vec> { let mut accumulated_fee_details = FeeDetails::default(); - let results = txs + let results = execution_results .iter() - .zip(execution_results) - .map(|(tx, execution_result)| { - let message = tx.message(); - let details = match &execution_result { - TransactionExecutionResult::Executed { details, .. } => details, - TransactionExecutionResult::NotExecuted(err) => return Err(err.clone()), - }; - - self.check_execution_status_and_charge_fee( - message, - &details.status, - details.is_nonce, - details.fee_details.total_fee(), - )?; - - accumulated_fee_details.accumulate(&details.fee_details); - - Ok(()) + .map(|execution_result| match execution_result { + TransactionExecutionResult::Executed { details, .. 
} => { + accumulated_fee_details.accumulate(&details.fee_details); + Ok(()) + } + TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), }) .collect(); @@ -4005,31 +3943,6 @@ impl Bank { results } - fn check_execution_status_and_charge_fee( - &self, - message: &SanitizedMessage, - execution_status: &transaction::Result<()>, - is_nonce: bool, - fee: u64, - ) -> Result<()> { - // In case of instruction error, even though no accounts - // were stored we still need to charge the payer the - // fee. - // - //...except nonce accounts, which already have their - // post-load, fee deducted, pre-execute account state - // stored - if execution_status.is_err() && !is_nonce { - self.withdraw(message.fee_payer(), fee)?; - } - - Ok(()) - } - - /// `committed_transactions_count` is the number of transactions out of `sanitized_txs` - /// that was executed. Of those, `committed_transactions_count`, - /// `committed_with_failure_result_count` is the number of executed transactions that returned - /// a failure result. pub fn commit_transactions( &self, sanitized_txs: &[SanitizedTransaction], @@ -4037,7 +3950,7 @@ impl Bank { execution_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, - counts: CommitTransactionCounts, + counts: ExecutedTransactionCounts, timings: &mut ExecuteTimings, ) -> TransactionResults { assert!( @@ -4045,30 +3958,30 @@ impl Bank { "commit_transactions() working on a bank that is already frozen or is undergoing freezing!" 
); - let CommitTransactionCounts { - committed_transactions_count, - committed_non_vote_transactions_count, - committed_with_failure_result_count, + let ExecutedTransactionCounts { + executed_transactions_count, + executed_non_vote_transactions_count, + executed_with_failure_result_count, signature_count, } = counts; - self.increment_transaction_count(committed_transactions_count); + self.increment_transaction_count(executed_transactions_count); self.increment_non_vote_transaction_count_since_restart( - committed_non_vote_transactions_count, + executed_non_vote_transactions_count, ); self.increment_signature_count(signature_count); - if committed_with_failure_result_count > 0 { + if executed_with_failure_result_count > 0 { self.transaction_error_count - .fetch_add(committed_with_failure_result_count, Relaxed); + .fetch_add(executed_with_failure_result_count, Relaxed); } - // Should be equivalent to checking `committed_transactions_count > 0` + // Should be equivalent to checking `executed_transactions_count > 0` if execution_results.iter().any(|result| result.was_executed()) { self.is_delta.store(true, Relaxed); self.transaction_entries_count.fetch_add(1, Relaxed); self.transactions_per_entry_max - .fetch_max(committed_transactions_count, Relaxed); + .fetch_max(executed_transactions_count, Relaxed); } let mut write_time = Measure::start("write_time"); @@ -4144,9 +4057,9 @@ impl Bank { self.update_transaction_statuses(sanitized_txs, &execution_results); let fee_collection_results = if self.feature_set.is_active(&reward_full_priority_fee::id()) { - self.filter_program_errors_and_collect_fee_details(sanitized_txs, &execution_results) + self.filter_program_errors_and_collect_fee_details(&execution_results) } else { - self.filter_program_errors_and_collect_fee(sanitized_txs, &execution_results) + self.filter_program_errors_and_collect_fee(&execution_results) }; update_transaction_statuses_time.stop(); timings.saturating_add_in_place( @@ -4887,6 +4800,7 @@ impl Bank { 
timings, TransactionProcessingConfig { account_overrides: None, + check_program_modification_slot: self.check_program_modification_slot, compute_budget: self.compute_budget(), log_messages_bytes_limit, limit_to_load_programs: false, @@ -4903,10 +4817,10 @@ impl Bank { execution_results, last_blockhash, lamports_per_signature, - CommitTransactionCounts { - committed_transactions_count: executed_transactions_count as u64, - committed_non_vote_transactions_count: executed_non_vote_transactions_count as u64, - committed_with_failure_result_count: executed_transactions_count + ExecutedTransactionCounts { + executed_transactions_count: executed_transactions_count as u64, + executed_non_vote_transactions_count: executed_non_vote_transactions_count as u64, + executed_with_failure_result_count: executed_transactions_count .saturating_sub(executed_with_successful_result_count) as u64, signature_count, @@ -5134,32 +5048,6 @@ impl Bank { ); } - fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> { - match self.get_account_with_fixed_root_no_cache(pubkey) { - Some(mut account) => { - let min_balance = match get_system_account_kind(&account) { - Some(SystemAccountKind::Nonce) => self - .rent_collector - .rent - .minimum_balance(nonce::State::size()), - _ => 0, - }; - - lamports - .checked_add(min_balance) - .filter(|required_balance| *required_balance <= account.lamports()) - .ok_or(TransactionError::InsufficientFundsForFee)?; - account - .checked_sub_lamports(lamports) - .map_err(|_| TransactionError::InsufficientFundsForFee)?; - self.store_account(pubkey, &account); - - Ok(()) - } - None => Err(TransactionError::AccountNotFound), - } - } - pub fn accounts(&self) -> Arc { self.rc.accounts.clone() } @@ -5377,18 +5265,20 @@ impl Bank { } /// Returns all the accounts this bank can load - pub fn get_all_accounts(&self) -> ScanResult> { - self.rc.accounts.load_all(&self.ancestors, self.bank_id) + pub fn get_all_accounts(&self, sort_results: bool) -> ScanResult> { + 
self.rc + .accounts + .load_all(&self.ancestors, self.bank_id, sort_results) } // Scans all the accounts this bank can load, applying `scan_func` - pub fn scan_all_accounts(&self, scan_func: F) -> ScanResult<()> + pub fn scan_all_accounts(&self, scan_func: F, sort_results: bool) -> ScanResult<()> where F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>), { self.rc .accounts - .scan_all(&self.ancestors, self.bank_id, scan_func) + .scan_all(&self.ancestors, self.bank_id, scan_func, sort_results) } pub fn get_program_accounts_modified_since_parent( @@ -5434,6 +5324,7 @@ impl Bank { num: usize, filter_by_address: &HashSet, filter: AccountAddressFilter, + sort_results: bool, ) -> ScanResult> { self.rc.accounts.load_largest_accounts( &self.ancestors, @@ -5441,6 +5332,7 @@ impl Bank { num, filter_by_address, filter, + sort_results, ) } @@ -6723,7 +6615,7 @@ impl Bank { /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { - let accounts = self.get_all_accounts()?; + let accounts = self.get_all_accounts(false)?; Ok(self.calculate_total_accounts_stats( accounts .iter() @@ -6814,21 +6706,23 @@ impl Bank { pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool { if slot < &self.slot { - if let Ok(sysvar_cache) = self.transaction_processor.sysvar_cache.read() { - if let Ok(slot_hashes) = sysvar_cache.get_slot_hashes() { - return slot_hashes.get(slot).is_some(); - } + if let Ok(slot_hashes) = self.transaction_processor.sysvar_cache().get_slot_hashes() { + return slot_hashes.get(slot).is_some(); } } false } - pub fn check_program_modification_slot(&mut self) { - self.check_program_modification_slot = true; + pub fn check_program_modification_slot(&self) -> bool { + self.check_program_modification_slot + } + + pub fn set_check_program_modification_slot(&mut self, check: bool) { + self.check_program_modification_slot = check; } pub fn fee_structure(&self) -> &FeeStructure { - 
&self.transaction_processor.fee_structure + &self.fee_structure } pub fn compute_budget(&self) -> Option { @@ -6839,40 +6733,6 @@ impl Bank { self.transaction_processor .add_builtin(self, program_id, name, builtin) } - - /// Find the slot in which the program was most recently modified. - /// Returns slot 0 for programs deployed with v1/v2 loaders, since programs deployed - /// with those loaders do not retain deployment slot information. - /// Returns an error if the program's account state can not be found or parsed. - fn program_modification_slot(&self, pubkey: &Pubkey) -> transaction::Result { - let program = self - .get_account(pubkey) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if bpf_loader_upgradeable::check_id(program.owner()) { - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = program.state() - { - let programdata = self - .get_account(&programdata_address) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if let Ok(UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: _, - }) = programdata.state() - { - return Ok(slot); - } - } - Err(TransactionError::ProgramAccountNotFound) - } else if loader_v4::check_id(program.owner()) { - let state = solana_loader_v4_program::get_state(program.data()) - .map_err(|_| TransactionError::ProgramAccountNotFound)?; - Ok(state.slot) - } else { - Ok(0) - } - } } impl TransactionProcessingCallback for Bank { @@ -6892,17 +6752,6 @@ impl TransactionProcessingCallback for Bank { .map(|(acc, _)| acc) } - fn get_program_match_criteria(&self, program: &Pubkey) -> ProgramCacheMatchCriteria { - if self.check_program_modification_slot { - self.program_modification_slot(program) - .map_or(ProgramCacheMatchCriteria::Tombstone, |slot| { - ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) - }) - } else { - ProgramCacheMatchCriteria::NoCriteria - } - } - // NOTE: must hold idempotent for the same set of arguments /// Add a builtin program account fn 
add_builtin_account(&self, name: &str, program_id: &Pubkey) { @@ -7150,7 +6999,7 @@ impl Bank { } pub fn set_fee_structure(&mut self, fee_structure: &FeeStructure) { - self.transaction_processor.fee_structure = fee_structure.clone(); + self.fee_structure = fee_structure.clone(); } pub fn load_program( @@ -7162,14 +7011,33 @@ impl Bank { let environments = self .transaction_processor .get_environments_for_epoch(effective_epoch)?; - load_program_with_pubkey( - self, - &environments, - pubkey, - self.slot(), - self.epoch_schedule(), - reload, - ) + load_program_with_pubkey(self, &environments, pubkey, self.slot(), reload) + } + + pub fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> { + match self.get_account_with_fixed_root(pubkey) { + Some(mut account) => { + let min_balance = match get_system_account_kind(&account) { + Some(SystemAccountKind::Nonce) => self + .rent_collector + .rent + .minimum_balance(nonce::State::size()), + _ => 0, + }; + + lamports + .checked_add(min_balance) + .filter(|required_balance| *required_balance <= account.lamports()) + .ok_or(TransactionError::InsufficientFundsForFee)?; + account + .checked_sub_lamports(lamports) + .map_err(|_| TransactionError::InsufficientFundsForFee)?; + self.store_account(pubkey, &account); + + Ok(()) + } + None => Err(TransactionError::AccountNotFound), + } } } diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 51eee794803e14..344f1e8bdf09aa 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -28,9 +28,7 @@ impl AddressLoader for &Bank { ) -> Result { let slot_hashes = self .transaction_processor - .sysvar_cache - .read() - .unwrap() + .sysvar_cache() .get_slot_hashes() .map_err(|_| AddressLoaderError::SlotHashesSysvarNotFound)?; diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 4b1a5618bdb4c1..4c820185ca3b83 100644 --- 
a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -169,17 +169,11 @@ impl Bank { let elf = &programdata[progradata_metadata_size..]; // Set up the two `LoadedProgramsForTxBatch` instances, as if // processing a new transaction batch. - let program_cache_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( + let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( self.slot, self.epoch, &self.transaction_processor.program_cache.read().unwrap(), ); - let mut programs_modified = ProgramCacheForTxBatch::new( - self.slot, - program_cache_for_tx_batch.environments.clone(), - program_cache_for_tx_batch.upcoming_environments.clone(), - program_cache_for_tx_batch.latest_root_epoch, - ); // Configure a dummy `InvokeContext` from the runtime's current // environment, as well as the two `ProgramCacheForTxBatch` @@ -196,13 +190,13 @@ impl Bank { let mut dummy_transaction_context = TransactionContext::new( vec![], self.rent_collector.rent.clone(), - compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); let mut dummy_invoke_context = InvokeContext::new( &mut dummy_transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, EnvironmentConfig::new( Hash::default(), None, @@ -213,7 +207,6 @@ impl Bank { ), None, compute_budget, - &mut programs_modified, ); solana_bpf_loader_program::direct_deploy_program( @@ -232,7 +225,7 @@ impl Bank { .program_cache .write() .unwrap() - .merge(programs_modified.entries()); + .merge(&program_cache_for_tx_batch.drain_modified_entries()); Ok(()) } diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs index fdd8c3279fd54f..b89a951ffd2cdf 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs +++ 
b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs @@ -120,6 +120,10 @@ mod tests { solana_zk_token_sdk::zk_token_proof_program::id(), Some(feature_set::zk_token_sdk_enabled::id()) )] + #[test_case( + solana_zk_sdk::zk_elgamal_proof_program::id(), + Some(feature_set::zk_elgamal_proof_program_enabled::id()) + )] fn test_target_program_builtin(program_address: Pubkey, activation_feature: Option) { let migration_target = CoreBpfMigrationTargetType::Builtin; let mut bank = create_simple_test_bank(0); diff --git a/runtime/src/bank/builtins/mod.rs b/runtime/src/bank/builtins/mod.rs index 8a7760850dc64c..6e1797be11a8cb 100644 --- a/runtime/src/bank/builtins/mod.rs +++ b/runtime/src/bank/builtins/mod.rs @@ -121,6 +121,13 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ program_id: solana_sdk::loader_v4::id(), entrypoint: solana_loader_v4_program::Entrypoint::vm, }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: zk_elgamal_proof_program, + enable_feature_id: Some(feature_set::zk_elgamal_proof_program_enabled::id()), + program_id: solana_zk_sdk::zk_elgamal_proof_program::id(), + entrypoint: solana_zk_elgamal_proof_program::Entrypoint::vm, + }), ]; pub static STATELESS_BUILTINS: &[StatelessBuiltinPrototype] = &[StatelessBuiltinPrototype { @@ -328,6 +335,25 @@ mod test_only { datapoint_name: "migrate_builtin_to_core_bpf_loader_v4_program", }; } + + pub mod zk_elgamal_proof_program { + pub mod feature { + solana_sdk::declare_id!("EYtuxScWqGWmcPEDmeUsEt3iPkvWE26EWLfSxUvWP2WN"); + } + pub mod source_buffer { + solana_sdk::declare_id!("AaVrLPurAUmjw6XRNGr6ezQfHaJWpBGHhcRSJmNjoVpQ"); + } + pub mod upgrade_authority { + solana_sdk::declare_id!("EyGkQYHgynUdvdNPNiWbJQk9roFCexgdJQMNcWbuvp78"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_buffer_address: source_buffer::id(), + upgrade_authority_address: Some(upgrade_authority::id()), + feature_id: feature::id(), + migration_target: 
super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_zk_elgamal_proof_program", + }; + } } #[cfg(test)] @@ -377,6 +403,10 @@ mod tests { &super::BUILTINS[10].core_bpf_migration_config, &Some(super::test_only::loader_v4::CONFIG) ); + assert_eq!( + &super::BUILTINS[11].core_bpf_migration_config, + &Some(super::test_only::zk_elgamal_proof_program::CONFIG) + ); // Feature Gate has a live migration config, so it has no test-only // configs to test here. } diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs index 1784fb139d2fa3..ee5c716b2bb091 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -50,7 +50,7 @@ impl Bank { distribution_starting_block_height + status.stake_rewards_by_partition.len() as u64; assert!( self.epoch_schedule.get_slots_in_epoch(self.epoch) - > distribution_end_exclusive.saturating_sub(distribution_starting_block_height) + > status.stake_rewards_by_partition.len() as u64 ); if height >= distribution_starting_block_height && height < distribution_end_exclusive { @@ -150,7 +150,10 @@ impl Bank { let (mut account, stake_state): (AccountSharedData, StakeStateV2) = stake_account.into(); let StakeStateV2::Stake(meta, stake, flags) = stake_state else { // StakesCache only stores accounts where StakeStateV2::delegation().is_some() - unreachable!() + unreachable!( + "StakesCache entry {:?} failed StakeStateV2 deserialization", + partitioned_stake_reward.stake_pubkey + ) }; account .checked_add_lamports(partitioned_stake_reward.stake_reward_info.lamports as u64) @@ -207,8 +210,8 @@ impl Bank { } Err(err) => { error!( - "bank::distribution::store_stake_accounts_in_partition() failed for {}: {:?}", - stake_pubkey, err + "bank::distribution::store_stake_accounts_in_partition() failed for \ + {stake_pubkey}, {reward_amount} lamports burned: {err:?}" ); 
lamports_burned += reward_amount; } diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index bb965933f571ab..09343976211c9d 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -151,7 +151,36 @@ pub(super) struct CalculateRewardsAndDistributeVoteRewardsResult { pub(crate) type StakeRewards = Vec; +#[derive(Debug, PartialEq)] +pub struct KeyedRewardsAndNumPartitions { + pub keyed_rewards: Vec<(Pubkey, RewardInfo)>, + pub num_partitions: Option, +} + +impl KeyedRewardsAndNumPartitions { + pub fn should_record(&self) -> bool { + !self.keyed_rewards.is_empty() || self.num_partitions.is_some() + } +} + impl Bank { + pub fn get_rewards_and_num_partitions(&self) -> KeyedRewardsAndNumPartitions { + let keyed_rewards = self.rewards.read().unwrap().clone(); + let epoch_rewards_sysvar = self.get_epoch_rewards_sysvar(); + // If partitioned epoch rewards are active and this Bank is the + // epoch-boundary block, populate num_partitions + let epoch_schedule = self.epoch_schedule(); + let parent_epoch = epoch_schedule.get_epoch(self.parent_slot()); + let is_first_block_in_epoch = self.epoch() > parent_epoch; + + let num_partitions = (epoch_rewards_sysvar.active && is_first_block_in_epoch) + .then_some(epoch_rewards_sysvar.num_partitions); + KeyedRewardsAndNumPartitions { + keyed_rewards, + num_partitions, + } + } + pub(super) fn is_partitioned_rewards_feature_enabled(&self) -> bool { self.feature_set .is_active(&feature_set::enable_partitioned_epoch_reward::id()) @@ -249,6 +278,7 @@ mod tests { account::Account, epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL, + reward_type::RewardType, signature::Signer, signer::keypair::Keypair, stake::instruction::StakeError, @@ -685,7 +715,7 @@ mod tests { /// Test that program execution that attempts to mutate a stake account /// incorrectly should fail during reward period. 
A credit should succeed, - /// but a withdrawal shoudl fail. + /// but a withdrawal should fail. #[test] fn test_program_execution_restricted_for_stake_account_in_reward_period() { use solana_sdk::transaction::TransactionError::InstructionError; @@ -801,4 +831,242 @@ mod tests { previous_bank = bank; } } + + #[test] + fn test_get_rewards_and_partitions() { + let starting_slot = SLOTS_PER_EPOCH - 1; + let num_rewards = 100; + let stake_account_stores_per_block = 50; + let RewardBank { bank, .. } = + create_reward_bank(num_rewards, stake_account_stores_per_block, starting_slot); + + assert!(bank.is_partitioned_rewards_feature_enabled()); + // Slot before the epoch boundary contains empty rewards (since fees are + // off), and no partitions because not at the epoch boundary + assert_eq!( + bank.get_rewards_and_num_partitions(), + KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: None, + } + ); + + let epoch_boundary_bank = Arc::new(Bank::new_from_parent( + bank, + &Pubkey::default(), + SLOTS_PER_EPOCH, + )); + assert!(epoch_boundary_bank.is_partitioned_rewards_feature_enabled()); + // Slot at the epoch boundary contains voting rewards only, as well as partition data + let KeyedRewardsAndNumPartitions { + keyed_rewards, + num_partitions, + } = epoch_boundary_bank.get_rewards_and_num_partitions(); + for (_pubkey, reward) in keyed_rewards.iter() { + assert_eq!(reward.reward_type, RewardType::Voting); + } + assert_eq!(keyed_rewards.len(), num_rewards); + assert_eq!( + num_partitions, + Some(num_rewards as u64 / stake_account_stores_per_block) + ); + + let mut total_staking_rewards = 0; + + let partition0_bank = Arc::new(Bank::new_from_parent( + epoch_boundary_bank, + &Pubkey::default(), + SLOTS_PER_EPOCH + 1, + )); + assert!(partition0_bank.is_partitioned_rewards_feature_enabled()); + // Slot after the epoch boundary contains first partition of staking + // rewards, and no partitions because not at the epoch boundary + let 
KeyedRewardsAndNumPartitions { + keyed_rewards, + num_partitions, + } = partition0_bank.get_rewards_and_num_partitions(); + for (_pubkey, reward) in keyed_rewards.iter() { + assert_eq!(reward.reward_type, RewardType::Staking); + } + total_staking_rewards += keyed_rewards.len(); + assert_eq!(num_partitions, None); + + let partition1_bank = Arc::new(Bank::new_from_parent( + partition0_bank, + &Pubkey::default(), + SLOTS_PER_EPOCH + 2, + )); + assert!(partition1_bank.is_partitioned_rewards_feature_enabled()); + // Slot 2 after the epoch boundary contains second partition of staking + // rewards, and no partitions because not at the epoch boundary + let KeyedRewardsAndNumPartitions { + keyed_rewards, + num_partitions, + } = partition1_bank.get_rewards_and_num_partitions(); + for (_pubkey, reward) in keyed_rewards.iter() { + assert_eq!(reward.reward_type, RewardType::Staking); + } + total_staking_rewards += keyed_rewards.len(); + assert_eq!(num_partitions, None); + + // All rewards are recorded + assert_eq!(total_staking_rewards, num_rewards); + + let bank = Bank::new_from_parent(partition1_bank, &Pubkey::default(), SLOTS_PER_EPOCH + 3); + assert!(bank.is_partitioned_rewards_feature_enabled()); + // Next slot contains empty rewards (since fees are off), and no + // partitions because not at the epoch boundary + assert_eq!( + bank.get_rewards_and_num_partitions(), + KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: None, + } + ); + } + + #[test] + fn test_get_rewards_and_partitions_before_feature() { + let starting_slot = SLOTS_PER_EPOCH - 1; + let num_rewards = 100; + + let validator_keypairs = (0..num_rewards) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::>(); + + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![2_000_000_000; num_rewards], + ); + genesis_config.epoch_schedule = EpochSchedule::new(SLOTS_PER_EPOCH); + + // Set feature to inactive + genesis_config + .accounts + .remove(&feature_set::enable_partitioned_epoch_reward::id()); + + let bank = Bank::new_for_tests(&genesis_config); + + for validator_vote_keypairs in &validator_keypairs { + let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); + let mut vote_account = bank.get_account(&vote_id).unwrap(); + // generate some rewards + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); + for i in 0..MAX_LOCKOUT_HISTORY + 42 { + if let Some(v) = vote_state.as_mut() { + vote_state::process_slot_vote_unchecked(v, i as u64) + } + let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + vote_state::to(&versioned, &mut vote_account).unwrap(); + match versioned { + VoteStateVersions::Current(v) => { + vote_state = Some(*v); + } + _ => panic!("Has to be of type Current"), + }; + } + bank.store_account_and_update_capitalization(&vote_id, &vote_account); + } + + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let bank = new_bank_from_parent_with_bank_forks( + &bank_forks, + bank, + &Pubkey::default(), + starting_slot, + ); + + assert!(!bank.is_partitioned_rewards_feature_enabled()); + // Slot before the epoch boundary contains empty rewards (since fees are + // off), and no partitions because feature is inactive + assert_eq!( + bank.get_rewards_and_num_partitions(), + KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: None, + } + ); + + let epoch_boundary_bank = Arc::new(Bank::new_from_parent( + bank, + &Pubkey::default(), + SLOTS_PER_EPOCH, + )); + assert!(!epoch_boundary_bank.is_partitioned_rewards_feature_enabled()); + // Slot at the epoch boundary contains voting rewards and staking rewards; still no partitions + let 
KeyedRewardsAndNumPartitions { + keyed_rewards, + num_partitions, + } = epoch_boundary_bank.get_rewards_and_num_partitions(); + let mut voting_rewards_count = 0; + let mut staking_rewards_count = 0; + for (_pubkey, reward) in keyed_rewards.iter() { + match reward.reward_type { + RewardType::Voting => { + voting_rewards_count += 1; + } + RewardType::Staking => { + staking_rewards_count += 1; + } + _ => {} + } + } + assert_eq!( + keyed_rewards.len(), + voting_rewards_count + staking_rewards_count + ); + assert_eq!(voting_rewards_count, num_rewards); + assert_eq!(staking_rewards_count, num_rewards); + assert!(num_partitions.is_none()); + + let bank = + Bank::new_from_parent(epoch_boundary_bank, &Pubkey::default(), SLOTS_PER_EPOCH + 1); + assert!(!bank.is_partitioned_rewards_feature_enabled()); + // Slot after the epoch boundary contains empty rewards (since fees are + // off), and no partitions because feature is inactive + assert_eq!( + bank.get_rewards_and_num_partitions(), + KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: None, + } + ); + } + + #[test] + fn test_rewards_and_partitions_should_record() { + let reward = RewardInfo { + reward_type: RewardType::Voting, + lamports: 55, + post_balance: 5555, + commission: Some(5), + }; + + let rewards_and_partitions = KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: None, + }; + assert!(!rewards_and_partitions.should_record()); + + let rewards_and_partitions = KeyedRewardsAndNumPartitions { + keyed_rewards: vec![(Pubkey::new_unique(), reward)], + num_partitions: None, + }; + assert!(rewards_and_partitions.should_record()); + + let rewards_and_partitions = KeyedRewardsAndNumPartitions { + keyed_rewards: vec![], + num_partitions: Some(42), + }; + assert!(rewards_and_partitions.should_record()); + + let rewards_and_partitions = KeyedRewardsAndNumPartitions { + keyed_rewards: vec![(Pubkey::new_unique(), reward)], + num_partitions: Some(42), + }; + 
assert!(rewards_and_partitions.should_record()); + } } diff --git a/sdk/src/recent_blockhashes_account.rs b/runtime/src/bank/recent_blockhashes_account.rs similarity index 72% rename from sdk/src/recent_blockhashes_account.rs rename to runtime/src/bank/recent_blockhashes_account.rs index 4235fc798a0b87..71815ef00f4e02 100644 --- a/sdk/src/recent_blockhashes_account.rs +++ b/runtime/src/bank/recent_blockhashes_account.rs @@ -1,29 +1,19 @@ //! Helpers for the recent blockhashes sysvar. #[allow(deprecated)] -use solana_program::sysvar::recent_blockhashes::{ +use solana_sdk::sysvar::recent_blockhashes::{ IntoIterSorted, IterItem, RecentBlockhashes, MAX_ENTRIES, }; use { - crate::{ - account::{ - create_account_shared_data_with_fields, to_account, AccountSharedData, - InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, - }, - clock::INITIAL_RENT_EPOCH, + solana_sdk::account::{ + create_account_shared_data_with_fields, to_account, AccountSharedData, + InheritableAccountFields, }, std::{collections::BinaryHeap, iter::FromIterator}, }; -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] #[allow(deprecated)] -pub fn update_account<'a, I>( - account: &mut AccountSharedData, - recent_blockhash_iter: I, -) -> Option<()> +fn update_account<'a, I>(account: &mut AccountSharedData, recent_blockhash_iter: I) -> Option<()> where I: IntoIterator>, { @@ -37,25 +27,8 @@ where to_account(&recent_blockhashes, account) } -#[deprecated( - since = "1.5.17", - note = "Please use `create_account_with_data_for_test` instead" -)] -#[allow(deprecated)] -pub fn create_account_with_data<'a, I>(lamports: u64, recent_blockhash_iter: I) -> AccountSharedData -where - I: IntoIterator>, -{ - #[allow(deprecated)] - create_account_with_data_and_fields(recent_blockhash_iter, (lamports, INITIAL_RENT_EPOCH)) -} - -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] #[allow(deprecated)] 
-pub fn create_account_with_data_and_fields<'a, I>( +pub(in crate::bank) fn create_account_with_data_and_fields<'a, I>( recent_blockhash_iter: I, fields: InheritableAccountFields, ) -> AccountSharedData @@ -70,31 +43,26 @@ where account } -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] -#[allow(deprecated)] -pub fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> AccountSharedData -where - I: IntoIterator>, -{ - create_account_with_data_and_fields(recent_blockhash_iter, DUMMY_INHERITABLE_ACCOUNT_FIELDS) -} - #[cfg(test)] mod tests { #![allow(deprecated)] use { super::*, - crate::account::from_account, rand::{seq::SliceRandom, thread_rng}, - solana_program::{ + solana_sdk::{ + account::{from_account, DUMMY_INHERITABLE_ACCOUNT_FIELDS}, hash::{Hash, HASH_BYTES}, sysvar::recent_blockhashes::Entry, }, }; + fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> AccountSharedData + where + I: IntoIterator>, + { + create_account_with_data_and_fields(recent_blockhash_iter, DUMMY_INHERITABLE_ACCOUNT_FIELDS) + } + #[test] fn test_create_account_empty() { let account = create_account_with_data_for_test(vec![]); diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index ccf1436905bac4..b350b6f37c018f 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -18,15 +18,13 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(100_000); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank0_sysvar_cache = bank0.transaction_processor.sysvar_cache.read().unwrap(); + let bank0_sysvar_cache = bank0.transaction_processor.sysvar_cache(); let bank0_cached_clock = bank0_sysvar_cache.get_clock(); let bank0_cached_epoch_schedule = bank0_sysvar_cache.get_epoch_schedule(); - let bank0_cached_fees = bank0_sysvar_cache.get_fees(); let bank0_cached_rent = bank0_sysvar_cache.get_rent(); 
assert!(bank0_cached_clock.is_ok()); assert!(bank0_cached_epoch_schedule.is_ok()); - assert!(bank0_cached_fees.is_ok()); assert!(bank0_cached_rent.is_ok()); assert!(bank0_sysvar_cache.get_slot_hashes().is_err()); assert!(bank0_sysvar_cache.get_epoch_rewards().is_err()); // partitioned epoch reward feature is not enabled @@ -38,43 +36,37 @@ mod tests { bank1_slot, )); - let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); - let bank1_cached_fees = bank1_sysvar_cache.get_fees(); let bank1_cached_rent = bank1_sysvar_cache.get_rent(); assert!(bank1_cached_clock.is_ok()); assert!(bank1_cached_epoch_schedule.is_ok()); - assert!(bank1_cached_fees.is_ok()); assert!(bank1_cached_rent.is_ok()); assert!(bank1_sysvar_cache.get_slot_hashes().is_ok()); assert!(bank1_sysvar_cache.get_epoch_rewards().is_err()); assert_ne!(bank0_cached_clock, bank1_cached_clock); assert_eq!(bank0_cached_epoch_schedule, bank1_cached_epoch_schedule); - assert_ne!(bank0_cached_fees, bank1_cached_fees); assert_eq!(bank0_cached_rent, bank1_cached_rent); let bank2_slot = bank1.slot() + 1; let bank2 = Bank::new_from_parent(bank1.clone(), &Pubkey::default(), bank2_slot); - let bank2_sysvar_cache = bank2.transaction_processor.sysvar_cache.read().unwrap(); + let bank2_sysvar_cache = bank2.transaction_processor.sysvar_cache(); let bank2_cached_clock = bank2_sysvar_cache.get_clock(); let bank2_cached_epoch_schedule = bank2_sysvar_cache.get_epoch_schedule(); - let bank2_cached_fees = bank2_sysvar_cache.get_fees(); let bank2_cached_rent = bank2_sysvar_cache.get_rent(); assert!(bank2_cached_clock.is_ok()); assert!(bank2_cached_epoch_schedule.is_ok()); - assert!(bank2_cached_fees.is_ok()); assert!(bank2_cached_rent.is_ok()); assert!(bank2_sysvar_cache.get_slot_hashes().is_ok()); 
assert!(bank2_sysvar_cache.get_epoch_rewards().is_err()); // partitioned epoch reward feature is not enabled assert_ne!(bank1_cached_clock, bank2_cached_clock); assert_eq!(bank1_cached_epoch_schedule, bank2_cached_epoch_schedule); - assert_eq!(bank1_cached_fees, bank2_cached_fees); assert_eq!(bank1_cached_rent, bank2_cached_rent); assert_ne!( bank1_sysvar_cache.get_slot_hashes(), @@ -90,7 +82,7 @@ mod tests { let bank1_slot = bank0.slot() + 1; let mut bank1 = Bank::new_from_parent(bank0, &Pubkey::default(), bank1_slot); - let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); let bank1_cached_fees = bank1_sysvar_cache.get_fees(); @@ -100,7 +92,6 @@ mod tests { assert!(bank1_cached_clock.is_ok()); assert!(bank1_cached_epoch_schedule.is_ok()); - assert!(bank1_cached_fees.is_ok()); assert!(bank1_cached_rent.is_ok()); assert!(bank1_cached_slot_hashes.is_ok()); assert!(bank1_cached_epoch_rewards.is_err()); @@ -108,10 +99,9 @@ mod tests { drop(bank1_sysvar_cache); bank1.transaction_processor.reset_sysvar_cache(); - let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); assert!(bank1_sysvar_cache.get_clock().is_err()); assert!(bank1_sysvar_cache.get_epoch_schedule().is_err()); - assert!(bank1_sysvar_cache.get_fees().is_err()); assert!(bank1_sysvar_cache.get_rent().is_err()); assert!(bank1_sysvar_cache.get_slot_hashes().is_err()); assert!(bank1_sysvar_cache.get_epoch_rewards().is_err()); @@ -143,7 +133,7 @@ mod tests { .transaction_processor .fill_missing_sysvar_cache_entries(&bank1); - let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); 
assert_eq!(bank1_sysvar_cache.get_clock(), bank1_cached_clock); assert_eq!( bank1_sysvar_cache.get_epoch_schedule(), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 54d23ba203e17e..a5a6022875defe 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1,6 +1,4 @@ #![cfg(test)] -#[allow(deprecated)] -use solana_sdk::sysvar::fees::Fees; use { super::{ test_utils::{goto_end_of_slot, update_vote_account_timestamp}, @@ -73,7 +71,6 @@ use { incinerator, instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError}, loader_upgradeable_instruction::UpgradeableLoaderInstruction, - loader_v4::{LoaderV4State, LoaderV4Status}, message::{Message, MessageHeader, SanitizedMessage}, native_loader, native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, @@ -104,7 +101,7 @@ use { transaction_context::TransactionAccount, }, solana_stake_program::stake_state::{self, StakeStateV2}, - solana_svm::nonce_info::NonceFull, + solana_svm::nonce_info::NoncePartial, solana_vote_program::{ vote_instruction, vote_state::{ @@ -231,18 +228,13 @@ fn test_race_register_tick_freeze() { } } -fn new_execution_result( - status: Result<()>, - nonce: Option<&NonceFull>, - fee_details: FeeDetails, -) -> TransactionExecutionResult { +fn new_execution_result(status: Result<()>, fee_details: FeeDetails) -> TransactionExecutionResult { TransactionExecutionResult::Executed { details: TransactionExecutionDetails { status, log_messages: None, inner_instructions: None, fee_details, - is_nonce: nonce.is_some(), return_data: None, executed_units: 0, accounts_data_len_delta: 0, @@ -2867,45 +2859,27 @@ fn test_bank_blockhash_compute_unit_fee_structure() { #[test] fn test_filter_program_errors_and_collect_fee() { let leader = solana_sdk::pubkey::new_rand(); - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. - } = create_genesis_config_with_leader(100_000, &leader, 3); + let GenesisConfigInfo { genesis_config, .. 
} = + create_genesis_config_with_leader(100_000, &leader, 3); let mut bank = Bank::new_for_tests(&genesis_config); // this test is only for when `feature_set::reward_full_priority_fee` inactivated bank.deactivate_feature(&feature_set::reward_full_priority_fee::id()); - let key = solana_sdk::pubkey::new_rand(); - let tx1 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &key, - 2, - genesis_config.hash(), - )); - let tx2 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &key, - 5, - genesis_config.hash(), - )); - let tx_fee = 42; let fee_details = FeeDetails::new_for_tests(tx_fee, 0, false); let results = vec![ - new_execution_result(Ok(()), None, fee_details), + new_execution_result(Ok(()), fee_details), new_execution_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), )), - None, fee_details, ), ]; let initial_balance = bank.get_balance(&leader); - let results = bank.filter_program_errors_and_collect_fee(&[tx1, tx2], &results); + let results = bank.filter_program_errors_and_collect_fee(&results); bank.freeze(); assert_eq!( bank.get_balance(&leader), @@ -2918,45 +2892,27 @@ fn test_filter_program_errors_and_collect_fee() { #[test] fn test_filter_program_errors_and_collect_priority_fee() { let leader = solana_sdk::pubkey::new_rand(); - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. - } = create_genesis_config_with_leader(1000000, &leader, 3); + let GenesisConfigInfo { genesis_config, .. 
} = + create_genesis_config_with_leader(1000000, &leader, 3); let mut bank = Bank::new_for_tests(&genesis_config); // this test is only for when `feature_set::reward_full_priority_fee` inactivated bank.deactivate_feature(&feature_set::reward_full_priority_fee::id()); - let key = solana_sdk::pubkey::new_rand(); - let tx1 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &key, - 2, - genesis_config.hash(), - )); - let tx2 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &key, - 5, - genesis_config.hash(), - )); - let priority_fee = 42; let fee_details: FeeDetails = FeeDetails::new_for_tests(0, priority_fee, false); let results = vec![ - new_execution_result(Ok(()), None, fee_details), + new_execution_result(Ok(()), fee_details), new_execution_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), )), - None, fee_details, ), ]; let initial_balance = bank.get_balance(&leader); - let results = bank.filter_program_errors_and_collect_fee(&[tx1, tx2], &results); + let results = bank.filter_program_errors_and_collect_fee(&results); bank.freeze(); assert_eq!( bank.get_balance(&leader), @@ -4295,22 +4251,6 @@ fn test_bank_cloned_stake_delegations() { assert!(stake_delegations.get(&stake_keypair.pubkey()).is_some()); } -#[allow(deprecated)] -#[test] -fn test_bank_fees_account() { - let (mut genesis_config, _) = create_genesis_config(500); - genesis_config.fee_rate_governor = FeeRateGovernor::new(12345, 0); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let fees_account = bank.get_account(&sysvar::fees::id()).unwrap(); - let fees = from_account::(&fees_account).unwrap(); - assert_eq!( - bank.fee_rate_governor.lamports_per_signature, - fees.fee_calculator.lamports_per_signature - ); - assert_eq!(fees.fee_calculator.lamports_per_signature, 12345); -} - #[test] fn test_is_delta_with_no_committables() { let 
(genesis_config, mint_keypair) = create_genesis_config(8000); @@ -4352,7 +4292,7 @@ fn test_bank_get_program_accounts() { let parent = Arc::new(Bank::new_for_tests(&genesis_config)); parent.restore_old_behavior_for_fragile_tests(); - let genesis_accounts: Vec<_> = parent.get_all_accounts().unwrap(); + let genesis_accounts: Vec<_> = parent.get_all_accounts(false).unwrap(); assert!( genesis_accounts .iter() @@ -6455,26 +6395,26 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "i5hGiQ3WtEehNrvhbfPFkUdm267t18fSpujcYtkBioW", + "Hn2FoJuoFWXVFVnwcQ6peuT24mUPmhDtXHXVjKD7M4yP", ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "7NmBtNvbhoqzatJv8NgBs84qWrm4ZhpuC75DCpbqwiS" + "7FPfwBut4b7bXtKPsobQS1cuFgF47SZHDb4teQcJRomv" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "A1jjuUaENeDcsSvwejFGaZ5zWmnJ77doSzqdKtfzpoFk" + "28CWiEuA3izdt5xe4LyS4Q1DTALmYgrVctSTazFiPVcW" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "ApnMkFt5Bs4yDJ8S2CCPsQRL1He6vWXw6vMzAyc5i811" + "AdCmEvRXWKpvXb9fG6AFQhzGgB5ciAXnDajvaNK7YUg8" ); break; } @@ -6697,7 +6637,7 @@ fn test_shrink_candidate_slots_cached() { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![15, 1, 7]); + assert_eq!(alive_counts, vec![15, 1, 6]); } #[test] @@ -7985,7 +7925,7 @@ fn test_reserved_account_keys() { assert_eq!( bank.get_reserved_account_keys().len(), - 29, + 30, "after activating the new feature, bank should have new active reserved keys" ); } @@ -9116,10 +9056,7 @@ fn test_epoch_schedule_from_genesis_config() { Arc::default(), )); - assert_eq!( - &bank.transaction_processor.epoch_schedule, - &genesis_config.epoch_schedule - ); + assert_eq!(bank.epoch_schedule(), &genesis_config.epoch_schedule); } fn check_stake_vote_account_validity(check_owner_change: bool, load_vote_and_stake_accounts: F) 
@@ -9481,24 +9418,24 @@ fn test_get_largest_accounts() { // Return only one largest account assert_eq!( - bank.get_largest_accounts(1, &pubkeys_hashset, AccountAddressFilter::Include) + bank.get_largest_accounts(1, &pubkeys_hashset, AccountAddressFilter::Include, false) .unwrap(), vec![(pubkeys[4], sol_to_lamports(5.0))] ); assert_eq!( - bank.get_largest_accounts(1, &HashSet::new(), AccountAddressFilter::Exclude) + bank.get_largest_accounts(1, &HashSet::new(), AccountAddressFilter::Exclude, false) .unwrap(), vec![(pubkeys[4], sol_to_lamports(5.0))] ); assert_eq!( - bank.get_largest_accounts(1, &exclude4, AccountAddressFilter::Exclude) + bank.get_largest_accounts(1, &exclude4, AccountAddressFilter::Exclude, false) .unwrap(), vec![(pubkeys[3], sol_to_lamports(4.0))] ); // Return all added accounts let results = bank - .get_largest_accounts(10, &pubkeys_hashset, AccountAddressFilter::Include) + .get_largest_accounts(10, &pubkeys_hashset, AccountAddressFilter::Include, false) .unwrap(); assert_eq!(results.len(), sorted_accounts.len()); for pubkey_balance in sorted_accounts.iter() { @@ -9510,7 +9447,7 @@ fn test_get_largest_accounts() { let expected_accounts = sorted_accounts[1..].to_vec(); let results = bank - .get_largest_accounts(10, &exclude4, AccountAddressFilter::Exclude) + .get_largest_accounts(10, &exclude4, AccountAddressFilter::Exclude, false) .unwrap(); // results include 5 Bank builtins assert_eq!(results.len(), 10); @@ -9524,7 +9461,7 @@ fn test_get_largest_accounts() { // Return 3 added accounts let expected_accounts = sorted_accounts[0..4].to_vec(); let results = bank - .get_largest_accounts(4, &pubkeys_hashset, AccountAddressFilter::Include) + .get_largest_accounts(4, &pubkeys_hashset, AccountAddressFilter::Include, false) .unwrap(); assert_eq!(results.len(), expected_accounts.len()); for pubkey_balance in expected_accounts.iter() { @@ -9533,7 +9470,7 @@ fn test_get_largest_accounts() { let expected_accounts = expected_accounts[1..4].to_vec(); let 
results = bank - .get_largest_accounts(3, &exclude4, AccountAddressFilter::Exclude) + .get_largest_accounts(3, &exclude4, AccountAddressFilter::Exclude, false) .unwrap(); assert_eq!(results.len(), expected_accounts.len()); for pubkey_balance in expected_accounts.iter() { @@ -9546,7 +9483,7 @@ fn test_get_largest_accounts() { .cloned() .collect(); assert_eq!( - bank.get_largest_accounts(2, &exclude, AccountAddressFilter::Exclude) + bank.get_largest_accounts(2, &exclude, AccountAddressFilter::Exclude, false) .unwrap(), vec![pubkeys_balances[3], pubkeys_balances[1]] ); @@ -12938,33 +12875,23 @@ fn test_failed_simulation_compute_units() { #[test] fn test_filter_program_errors_and_collect_fee_details() { - // TX | EXECUTION RESULT | is nonce | COLLECT | ADDITIONAL | COLLECT - // | | | (TX_FEE, PRIO_FEE) | WITHDRAW FROM PAYER | RESULT - // ------------------------------------------------------------------------------------------------------ - // tx1 | not executed | n/a | (0 , 0) | 0 | Original Err - // tx2 | executed and no error | n/a | (5_000, 1_000) | 0 | Ok - // tx3 | executed has error | true | (5_000, 1_000) | 0 | Ok - // tx4 | executed has error | false | (5_000, 1_000) | 6_000 | Ok - // tx5 | executed error, - // payer insufficient fund | false | (0 , 0) | 0 | InsufficientFundsForFee + // TX | EXECUTION RESULT | COLLECT | COLLECT + // | | (TX_FEE, PRIO_FEE) | RESULT + // --------------------------------------------------------------------------------- + // tx1 | not executed | (0 , 0) | Original Err + // tx2 | executed and no error | (5_000, 1_000) | Ok + // tx3 | executed has error | (5_000, 1_000) | Ok // let initial_payer_balance = 7_000; - let additional_payer_withdraw = 6_000; let tx_fee = 5000; let priority_fee = 1000; let tx_fee_details = FeeDetails::new_for_tests(tx_fee, priority_fee, false); let expected_collected_fee_details = CollectorFeeDetails { - transaction_fee: 3 * tx_fee, - priority_fee: 3 * priority_fee, + transaction_fee: 2 * tx_fee, + 
priority_fee: 2 * priority_fee, }; - let expected_collect_results = vec![ - Err(TransactionError::AccountNotFound), - Ok(()), - Ok(()), - Ok(()), - Err(TransactionError::InsufficientFundsForFee), - ]; + let expected_collect_results = vec![Err(TransactionError::AccountNotFound), Ok(()), Ok(())]; let GenesisConfigInfo { genesis_config, @@ -12973,106 +12900,31 @@ fn test_filter_program_errors_and_collect_fee_details() { } = create_genesis_config_with_leader(initial_payer_balance, &Pubkey::new_unique(), 3); let bank = Bank::new_for_tests(&genesis_config); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[system_instruction::transfer( - &mint_keypair.pubkey(), - &Pubkey::new_unique(), - 2, - )], - Some(&mint_keypair.pubkey()), - &[&mint_keypair], - genesis_config.hash(), - )); - let txs = vec![tx.clone(), tx.clone(), tx.clone(), tx.clone(), tx]; - let results = vec![ TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), - new_execution_result(Ok(()), None, tx_fee_details), + new_execution_result(Ok(()), tx_fee_details), new_execution_result( Err(TransactionError::InstructionError( 0, SystemError::ResultWithNegativeLamports.into(), )), - Some(&NonceFull::new( - Pubkey::new_unique(), - AccountSharedData::default(), - None, - )), tx_fee_details, ), - new_execution_result( - Err(TransactionError::InstructionError( - 0, - SystemError::ResultWithNegativeLamports.into(), - )), - None, - tx_fee_details, - ), - new_execution_result(Err(TransactionError::AccountNotFound), None, tx_fee_details), ]; - let results = bank.filter_program_errors_and_collect_fee_details(&txs, &results); + let results = bank.filter_program_errors_and_collect_fee_details(&results); assert_eq!( expected_collected_fee_details, *bank.collector_fee_details.read().unwrap() ); assert_eq!( - initial_payer_balance - additional_payer_withdraw, + initial_payer_balance, bank.get_balance(&mint_keypair.pubkey()) ); 
assert_eq!(expected_collect_results, results); } -#[test] -fn test_check_execution_status_and_charge_fee() { - let fee = 5000; - let initial_balance = fee - 1000; - let tx_error = - TransactionError::InstructionError(0, InstructionError::MissingRequiredSignature); - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config_with_leader(initial_balance, &Pubkey::new_unique(), 3); - genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); - let bank = Bank::new_for_tests(&genesis_config); - let message = new_sanitized_message(Message::new( - &[system_instruction::transfer( - &mint_keypair.pubkey(), - &Pubkey::new_unique(), - 1, - )], - Some(&mint_keypair.pubkey()), - )); - - [Ok(()), Err(tx_error)] - .iter() - .flat_map(|result| [true, false].iter().map(move |is_nonce| (result, is_nonce))) - .for_each(|(result, is_nonce)| { - if result.is_err() && !is_nonce { - assert_eq!( - Err(TransactionError::InsufficientFundsForFee), - bank.check_execution_status_and_charge_fee(&message, result, *is_nonce, fee) - ); - assert_eq!(initial_balance, bank.get_balance(&mint_keypair.pubkey())); - - let small_fee = 1; - assert!(bank - .check_execution_status_and_charge_fee(&message, result, *is_nonce, small_fee) - .is_ok()); - assert_eq!( - initial_balance - small_fee, - bank.get_balance(&mint_keypair.pubkey()) - ); - } else { - assert!(bank - .check_execution_status_and_charge_fee(&message, result, *is_nonce, fee) - .is_ok()); - assert_eq!(initial_balance, bank.get_balance(&mint_keypair.pubkey())); - } - }); -} #[test] fn test_deploy_last_epoch_slot() { solana_logger::setup(); @@ -13186,91 +13038,6 @@ fn test_deploy_last_epoch_slot() { assert_eq!(result_with_feature_enabled, Ok(())); } -#[test] -fn test_program_modification_slot_account_not_found() { - let genesis_config = GenesisConfig::default(); - let bank = Bank::new_for_tests(&genesis_config); - let key = Pubkey::new_unique(); - - let result = bank.program_modification_slot(&key); - 
assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); - - let mut account_data = AccountSharedData::new(100, 100, &bpf_loader_upgradeable::id()); - bank.store_account(&key, &account_data); - - let result = bank.program_modification_slot(&key); - assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); - - let state = UpgradeableLoaderState::Program { - programdata_address: Pubkey::new_unique(), - }; - account_data.set_data(bincode::serialize(&state).unwrap()); - bank.store_account(&key, &account_data); - - let result = bank.program_modification_slot(&key); - assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); - - account_data.set_owner(loader_v4::id()); - bank.store_account(&key, &account_data); - - let result = bank.program_modification_slot(&key); - assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); -} - -#[test] -fn test_program_modification_slot_success() { - let genesis_config = GenesisConfig::default(); - let bank = Bank::new_for_tests(&genesis_config); - - let key1 = Pubkey::new_unique(); - let key2 = Pubkey::new_unique(); - - let account_data = AccountSharedData::new_data( - 100, - &UpgradeableLoaderState::Program { - programdata_address: key2, - }, - &bpf_loader_upgradeable::id(), - ) - .unwrap(); - bank.store_account(&key1, &account_data); - - let account_data = AccountSharedData::new_data( - 100, - &UpgradeableLoaderState::ProgramData { - slot: 77, - upgrade_authority_address: None, - }, - &bpf_loader_upgradeable::id(), - ) - .unwrap(); - bank.store_account(&key2, &account_data); - - let result = bank.program_modification_slot(&key1); - assert_eq!(result.unwrap(), 77); - - let state = LoaderV4State { - slot: 58, - authority_address: Pubkey::new_unique(), - status: LoaderV4Status::Deployed, - }; - let encoded = unsafe { - std::mem::transmute::<&LoaderV4State, &[u8; LoaderV4State::program_data_offset()]>(&state) - }; - let mut account_data = AccountSharedData::new(100, 
encoded.len(), &loader_v4::id()); - account_data.set_data(encoded.to_vec()); - bank.store_account(&key1, &account_data); - - let result = bank.program_modification_slot(&key1); - assert_eq!(result.unwrap(), 58); - - account_data.set_owner(Pubkey::new_unique()); - bank.store_account(&key2, &account_data); - - let result = bank.program_modification_slot(&key2); - assert_eq!(result.unwrap(), 0); -} - #[test] fn test_blockhash_last_valid_block_height() { let genesis_config = GenesisConfig::default(); diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index 634cb0e28b09ab..e8c8f7774ec66e 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -6,7 +6,6 @@ use { client::{AsyncClient, Client, SyncClient}, commitment_config::CommitmentConfig, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, instruction::Instruction, message::{Message, SanitizedMessage}, @@ -120,42 +119,6 @@ impl SyncClient for BankClient { Ok(self.bank.get_minimum_balance_for_rent_exemption(data_len)) } - fn get_recent_blockhash(&self) -> Result<(Hash, FeeCalculator)> { - Ok(( - self.bank.last_blockhash(), - FeeCalculator::new(self.bank.get_lamports_per_signature()), - )) - } - - fn get_recent_blockhash_with_commitment( - &self, - _commitment_config: CommitmentConfig, - ) -> Result<(Hash, FeeCalculator, u64)> { - let blockhash = self.bank.last_blockhash(); - #[allow(deprecated)] - let last_valid_slot = self - .bank - .get_blockhash_last_valid_slot(&blockhash) - .expect("bank blockhash queue should contain blockhash"); - Ok(( - blockhash, - FeeCalculator::new(self.bank.get_lamports_per_signature()), - last_valid_slot, - )) - } - - fn get_fee_calculator_for_blockhash(&self, blockhash: &Hash) -> Result> { - Ok(self - .bank - .get_lamports_per_signature_for_blockhash(blockhash) - .map(FeeCalculator::new)) - } - - fn get_fee_rate_governor(&self) -> Result { - #[allow(deprecated)] - Ok(self.bank.get_fee_rate_governor().clone()) - } - fn 
get_signature_status( &self, signature: &Signature, @@ -241,21 +204,6 @@ impl SyncClient for BankClient { Ok(()) } - fn get_new_blockhash(&self, blockhash: &Hash) -> Result<(Hash, FeeCalculator)> { - let recent_blockhash = self.get_latest_blockhash()?; - if recent_blockhash != *blockhash { - Ok(( - recent_blockhash, - FeeCalculator::new(self.bank.get_lamports_per_signature()), - )) - } else { - Err(TransportError::IoError(io::Error::new( - io::ErrorKind::Other, - "Unable to get new blockhash", - ))) - } - } - fn get_epoch_info(&self) -> Result { Ok(self.bank.get_epoch_info()) } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index faa4413ede4d09..426908b288db6c 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -227,7 +227,7 @@ impl BankForks { pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { if self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup { - bank.check_program_modification_slot(); + bank.set_check_program_modification_slot(true); } let bank = Arc::new(bank); diff --git a/runtime/src/commitment.rs b/runtime/src/commitment.rs index 3f600a9401ae9b..632e4bda3d8505 100644 --- a/runtime/src/commitment.rs +++ b/runtime/src/commitment.rs @@ -111,16 +111,11 @@ impl BlockCommitmentCache { self.highest_confirmed_slot() } - #[allow(deprecated)] pub fn slot_with_commitment(&self, commitment_level: CommitmentLevel) -> Slot { match commitment_level { - CommitmentLevel::Recent | CommitmentLevel::Processed => self.slot(), - CommitmentLevel::Root => self.root(), - CommitmentLevel::Single => self.highest_confirmed_slot(), - CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => { - self.highest_gossip_confirmed_slot() - } - CommitmentLevel::Max | CommitmentLevel::Finalized => self.highest_super_majority_root(), + CommitmentLevel::Processed => self.slot(), + CommitmentLevel::Confirmed => self.highest_gossip_confirmed_slot(), + CommitmentLevel::Finalized => self.highest_super_majority_root(), } } diff 
--git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 379750b1743381..f257c19033847b 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -29,15 +29,13 @@ pub fn bootstrap_validator_stake_lamports() -> u64 { pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { const NUM_BUILTIN_PROGRAMS: u64 = 9; const NUM_PRECOMPILES: u64 = 2; - const FEES_SYSVAR_MIN_BALANCE: u64 = 946_560; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; const RENT_SYSVAR_MIN_BALANCE: u64 = 1_009_200; const EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE: u64 = 1_120_560; const RECENT_BLOCKHASHES_SYSVAR_MIN_BALANCE: u64 = 42_706_560; - FEES_SYSVAR_MIN_BALANCE - + STAKE_HISTORY_MIN_BALANCE + STAKE_HISTORY_MIN_BALANCE + CLOCK_SYSVAR_MIN_BALANCE + RENT_SYSVAR_MIN_BALANCE + EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index d7326da99ef43b..aeededab8ee784 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -25,8 +25,8 @@ use { log::*, solana_program_runtime::timings::ExecuteTimings, solana_sdk::{ + clock::Slot, hash::Hash, - slot_history::Slot, transaction::{Result, SanitizedTransaction, TransactionError}, }, std::{ @@ -34,6 +34,7 @@ use { mem, ops::Deref, sync::{Arc, RwLock}, + thread, }, }; #[cfg(feature = "dev-context-only-utils")] @@ -292,6 +293,8 @@ impl WaitReason { pub enum SchedulerStatus { /// Unified scheduler is disabled or installed scheduler is consumed by wait_for_termination(). /// Note that transition to Unavailable from {Active, Stale} is one-way (i.e. one-time). + /// Also, this variant is transiently used as a placeholder internally when transitioning + /// scheduler statuses, which isn't observable unless panic is happening. Unavailable, /// Scheduler is installed into a bank; could be running or just be idling. 
/// This will be transitioned to Stale after certain time has passed if its bank hasn't been @@ -329,7 +332,7 @@ impl SchedulerStatus { return; } let Self::Active(scheduler) = mem::replace(self, Self::Unavailable) else { - unreachable!("not active: {:?}", self); + unreachable!("not active: {self:?}"); }; let (pool, result_with_timings) = f(scheduler); *self = Self::Stale(pool, result_with_timings); @@ -491,7 +494,8 @@ impl BankWithScheduler { ); assert!( maybe_result_with_timings.is_none(), - "Premature result was returned from scheduler after paused" + "Premature result was returned from scheduler after paused (slot: {})", + bank.slot(), ); } @@ -548,7 +552,8 @@ impl BankWithSchedulerInner { let scheduler = self.scheduler.read().unwrap(); // Re-register a new timeout listener only after acquiring the read lock; // Otherwise, the listener would again put scheduler into Stale before the read - // lock under an extremely-rare race condition, causing panic below. + // lock under an extremely-rare race condition, causing panic below in + // active_scheduler(). pool.register_timeout_listener(self.do_create_timeout_listener()); f(scheduler.active_scheduler()) } @@ -619,7 +624,7 @@ impl BankWithSchedulerInner { "wait_for_scheduler_termination(slot: {}, reason: {:?}): started at {:?}...", bank.slot(), reason, - std::thread::current(), + thread::current(), ); let mut scheduler = scheduler.write().unwrap(); @@ -635,6 +640,11 @@ impl BankWithSchedulerInner { uninstalled_scheduler.return_to_pool(); (false, Some(result_with_timings)) } + SchedulerStatus::Stale(_pool, _result_with_timings) if reason.is_paused() => { + // Do nothing for pauses because the scheduler termination is guaranteed to be + // called later. 
+ (true, None) + } SchedulerStatus::Stale(_pool, _result_with_timings) => { let result_with_timings = scheduler.transition_from_stale_to_unavailable(); (true, Some(result_with_timings)) @@ -647,7 +657,7 @@ impl BankWithSchedulerInner { reason, was_noop, result_with_timings.as_ref().map(|(result, _)| result), - std::thread::current(), + thread::current(), ); trace!( "wait_for_scheduler_termination(result_with_timings: {:?})", @@ -658,7 +668,7 @@ impl BankWithSchedulerInner { } fn drop_scheduler(&self) { - if std::thread::panicking() { + if thread::panicking() { error!( "BankWithSchedulerInner::drop_scheduler(): slot: {} skipping due to already panicking...", self.bank.slot(), diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 7265641e900bc1..436a7242d93fd7 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -28,9 +28,18 @@ const CHUNK_SIZE: usize = 512; // Size of chunk just needs to fit into tx pub fn load_program_from_file(name: &str) -> Vec { let mut pathbuf = { let current_exe = env::current_exe().unwrap(); - PathBuf::from(current_exe.parent().unwrap().parent().unwrap()) + PathBuf::from( + current_exe + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap(), + ) }; - pathbuf.push("sbf/"); + pathbuf.push("sbf-solana-solana"); + pathbuf.push("release"); pathbuf.push(name); pathbuf.set_extension("so"); let mut file = File::open(&pathbuf).unwrap_or_else(|err| { diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 0bb1317dc7f965..1f553f66a52272 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -49,7 +49,8 @@ bincode = { workspace = true } bitflags = { workspace = true, features = ["serde"] } borsh = { workspace = true, optional = true } bs58 = { workspace = true } -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } byteorder = { workspace = true, optional = true } chrono = { workspace = true, features = ["alloc"], optional = 
true } curve25519-dalek = { workspace = true, optional = true } @@ -85,11 +86,11 @@ solana-program = { workspace = true } solana-sdk-macro = { workspace = true } thiserror = { workspace = true } uriparse = { workspace = true } -wasm-bindgen = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.1", features = ["wasm-bindgen"] } js-sys = { workspace = true } +wasm-bindgen = { workspace = true } [dev-dependencies] anyhow = { workspace = true } @@ -97,6 +98,7 @@ assert_matches = { workspace = true } curve25519-dalek = { workspace = true } hex = { workspace = true } solana-logger = { workspace = true } +solana-program = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { path = ".", features = ["dev-context-only-utils"] } static_assertions = { workspace = true } tiny-bip39 = { workspace = true } diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index bcb610f7e38c47..08d34024383d90 100644 --- a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "2.0.0" +version = "2.0.2" description = "Solana SBF test program written in Rust" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=2.0.0" } +solana-program = { path = "../../../../program", version = "=2.0.2" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index 0dad397a2d7b00..ccc5824fbe7c77 100644 --- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "2.0.0" +version = "2.0.2" description = "Solana SBF test program written in Rust" 
authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=2.0.0" } +solana-program = { path = "../../../../program", version = "=2.0.2" } [lib] crate-type = ["cdylib"] diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index 73121b568003cd..9808fad0ccd08a 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -378,34 +378,6 @@ pub fn pubkeys(input: TokenStream) -> TokenStream { TokenStream::from(quote! {#pubkeys}) } -// The normal `wasm_bindgen` macro generates a .bss section which causes the resulting -// SBF program to fail to load, so for now this stub should be used when building for SBF -#[proc_macro_attribute] -pub fn wasm_bindgen_stub(_attr: TokenStream, item: TokenStream) -> TokenStream { - match parse_macro_input!(item as syn::Item) { - syn::Item::Struct(mut item_struct) => { - if let syn::Fields::Named(fields) = &mut item_struct.fields { - // Strip out any `#[wasm_bindgen]` added to struct fields. This is custom - // syntax supplied by the normal `wasm_bindgen` macro. - for field in fields.named.iter_mut() { - field.attrs.retain(|attr| { - !attr - .path() - .segments - .iter() - .any(|segment| segment.ident == "wasm_bindgen") - }); - } - } - quote! { #item_struct } - } - item => { - quote!(#item) - } - } - .into() -} - // Sets padding in structures to zero explicitly. // Otherwise padding could be inconsistent across the network and lead to divergence / consensus failures. 
#[proc_macro_derive(CloneZeroed)] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 7cc2ed2ecc9573..c37cd182bbb869 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -18,13 +18,14 @@ borsh = { workspace = true, optional = true } borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } bs58 = { workspace = true } bv = { workspace = true, features = ["serde"] } -bytemuck = { workspace = true, features = ["derive"] } -itertools = { workspace = true } +bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } memoffset = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true, features = ["i128"] } +qualifier_attr = { workspace = true, optional = true } rustversion = { workspace = true } serde = { workspace = true } serde_bytes = { workspace = true } @@ -53,11 +54,9 @@ ark-serialize = { workspace = true } base64 = { workspace = true, features = ["alloc", "std"] } bitflags = { workspace = true } curve25519-dalek = { workspace = true } -itertools = { workspace = true } libsecp256k1 = { workspace = true } num-bigint = { workspace = true } rand = { workspace = true } -wasm-bindgen = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -69,6 +68,7 @@ console_error_panic_hook = { workspace = true } console_log = { workspace = true } getrandom = { workspace = true, features = ["js", "wasm-bindgen"] } js-sys = { workspace = true } +wasm-bindgen = { workspace = true } [target.'cfg(not(target_pointer_width = "64"))'.dependencies] parking_lot = { workspace = true } @@ -77,15 +77,13 @@ parking_lot = { workspace = true } anyhow = { workspace = true } array-bytes = { workspace = true } assert_matches = { workspace = true } +itertools = { workspace = true } serde_json = { workspace = true } static_assertions = { workspace = true } 
[build-dependencies] rustc_version = { workspace = true } -[target.'cfg(any(unix, windows))'.build-dependencies] -cc = { workspace = true, features = ["jobserver", "parallel"] } - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -95,4 +93,5 @@ crate-type = ["cdylib", "rlib"] [features] default = ["borsh"] borsh = ["dep:borsh", "dep:borsh0-10"] +dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] diff --git a/sdk/program/src/alt_bn128/mod.rs b/sdk/program/src/alt_bn128/mod.rs index 4919df3ac845ad..c7e1d8e5d28250 100644 --- a/sdk/program/src/alt_bn128/mod.rs +++ b/sdk/program/src/alt_bn128/mod.rs @@ -4,7 +4,7 @@ pub mod prelude { } use { - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, consts::*, thiserror::Error, }; diff --git a/sdk/program/src/borsh0_9.rs b/sdk/program/src/borsh0_9.rs deleted file mode 100644 index d7d1e97013f898..00000000000000 --- a/sdk/program/src/borsh0_9.rs +++ /dev/null @@ -1,44 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -//! Utilities for the [borsh] serialization format, version 0.9. -//! -//! This file is provided for backwards compatibility with types that still use -//! borsh 0.9, even though this crate canonically uses borsh 0.10. -//! -//! 
[borsh]: https://borsh.io/ -use { - crate::borsh::{ - impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked, - }, - borsh0_9::maybestd::io, -}; - -impl_get_packed_len_v0!( - borsh0_9, - #[deprecated( - since = "1.17.0", - note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead" - )] -); -impl_try_from_slice_unchecked!( - borsh0_9, - io, - #[deprecated( - since = "1.17.0", - note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead" - )] -); -impl_get_instance_packed_len!( - borsh0_9, - io, - #[deprecated( - since = "1.17.0", - note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead" - )] -); - -#[cfg(test)] -#[allow(deprecated)] -mod tests { - use {crate::borsh::impl_tests, borsh0_9::maybestd::io}; - impl_tests!(borsh0_9, io); -} diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs index d0f95ffe166db5..82e9292fde2429 100644 --- a/sdk/program/src/bpf_loader_upgradeable.rs +++ b/sdk/program/src/bpf_loader_upgradeable.rs @@ -82,42 +82,6 @@ impl UpgradeableLoaderState { pub const fn size_of_programdata(program_len: usize) -> usize { Self::size_of_programdata_metadata().saturating_add(program_len) } - - /// Length of a Buffer account's data. - #[deprecated(since = "1.11.0", note = "Please use `size_of_buffer` instead")] - pub fn buffer_len(program_len: usize) -> Result { - Ok(Self::size_of_buffer(program_len)) - } - - /// Offset into the Buffer account's data of the program bits. - #[deprecated( - since = "1.11.0", - note = "Please use `size_of_buffer_metadata` instead" - )] - pub fn buffer_data_offset() -> Result { - Ok(Self::size_of_buffer_metadata()) - } - - /// Length of a Program account's data. - #[deprecated(since = "1.11.0", note = "Please use `size_of_program` instead")] - pub fn program_len() -> Result { - Ok(Self::size_of_program()) - } - - /// Length of a ProgramData account's data. 
- #[deprecated(since = "1.11.0", note = "Please use `size_of_programdata` instead")] - pub fn programdata_len(program_len: usize) -> Result { - Ok(Self::size_of_programdata(program_len)) - } - - /// Offset into the ProgramData account's data of the program bits. - #[deprecated( - since = "1.11.0", - note = "Please use `size_of_programdata_metadata` instead" - )] - pub fn programdata_data_offset() -> Result { - Ok(Self::size_of_programdata_metadata()) - } } /// Returns the program data address for a program ID @@ -425,24 +389,6 @@ mod tests { assert_eq!(UpgradeableLoaderState::size_of_program() as u64, size); } - #[test] - #[allow(deprecated)] - fn test_account_lengths() { - assert_eq!( - 4, - serialized_size(&UpgradeableLoaderState::Uninitialized).unwrap() - ); - assert_eq!(36, UpgradeableLoaderState::program_len().unwrap()); - assert_eq!( - 45, - UpgradeableLoaderState::programdata_data_offset().unwrap() - ); - assert_eq!( - 45 + 42, - UpgradeableLoaderState::programdata_len(42).unwrap() - ); - } - fn assert_is_instruction( is_instruction_fn: F, expected_instruction: UpgradeableLoaderInstruction, diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index e19c4c84486ced..5cf609d3000c26 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -33,10 +33,6 @@ static_assertions::const_assert_eq!(MS_PER_TICK, 6); /// The number of milliseconds per tick (6). pub const MS_PER_TICK: u64 = 1000 / DEFAULT_TICKS_PER_SECOND; -#[deprecated(since = "1.15.0", note = "Please use DEFAULT_MS_PER_SLOT instead")] -/// The expected duration of a slot (400 milliseconds). -pub const SLOT_MS: u64 = DEFAULT_MS_PER_SLOT; - // At 160 ticks/s, 64 ticks per slot implies that leader rotation and voting will happen // every 400 ms. 
A fast voting cadence ensures faster finality and convergence pub const DEFAULT_TICKS_PER_SLOT: u64 = 64; diff --git a/sdk/program/src/example_mocks.rs b/sdk/program/src/example_mocks.rs index ebde291ced11bd..b528812e36f6b3 100644 --- a/sdk/program/src/example_mocks.rs +++ b/sdk/program/src/example_mocks.rs @@ -274,44 +274,5 @@ pub mod solana_sdk { } } - #[deprecated( - since = "1.17.0", - note = "Please use `solana_sdk::address_lookup_table` instead" - )] - pub use crate::address_lookup_table as address_lookup_table_account; -} - -#[deprecated( - since = "1.17.0", - note = "Please use `solana_sdk::address_lookup_table` instead" -)] -pub mod solana_address_lookup_table_program { - pub use crate::address_lookup_table::program::{check_id, id, ID}; - - pub mod state { - use { - crate::{instruction::InstructionError, pubkey::Pubkey}, - std::borrow::Cow, - }; - - pub struct AddressLookupTable<'a> { - pub addresses: Cow<'a, [Pubkey]>, - } - - impl<'a> AddressLookupTable<'a> { - pub fn serialize_for_tests(self) -> Result, InstructionError> { - let mut data = vec![]; - self.addresses.iter().for_each(|address| { - data.extend_from_slice(address.as_ref()); - }); - Ok(data) - } - - pub fn deserialize(data: &'a [u8]) -> Result, InstructionError> { - Ok(Self { - addresses: Cow::Borrowed(bytemuck::try_cast_slice(data).unwrap()), - }) - } - } - } + pub use crate::address_lookup_table; } diff --git a/sdk/program/src/fee_calculator.rs b/sdk/program/src/fee_calculator.rs index 361e00c98b6b47..5d753e4acaed3a 100644 --- a/sdk/program/src/fee_calculator.rs +++ b/sdk/program/src/fee_calculator.rs @@ -1,10 +1,7 @@ //! Calculation of transaction fees. 
#![allow(clippy::arithmetic_side_effects)] -use { - crate::{clock::DEFAULT_MS_PER_SLOT, ed25519_program, message::Message, secp256k1_program}, - log::*, -}; +use {crate::clock::DEFAULT_MS_PER_SLOT, log::*}; #[repr(C)] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -24,29 +21,6 @@ impl FeeCalculator { lamports_per_signature, } } - - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - pub fn calculate_fee(&self, message: &Message) -> u64 { - let mut num_signatures: u64 = 0; - for instruction in &message.instructions { - let program_index = instruction.program_id_index as usize; - // Message may not be sanitized here - if program_index < message.account_keys.len() { - let id = message.account_keys[program_index]; - if (secp256k1_program::check_id(&id) || ed25519_program::check_id(&id)) - && !instruction.data.is_empty() - { - num_signatures += instruction.data[0] as u64; - } - } - } - - self.lamports_per_signature - * (u64::from(message.header.num_required_signatures) + num_signatures) - } } #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -188,10 +162,7 @@ impl FeeRateGovernor { #[cfg(test)] mod tests { - use { - super::*, - crate::{pubkey::Pubkey, system_instruction}, - }; + use super::*; #[test] fn test_fee_rate_governor_burn() { @@ -205,64 +176,6 @@ mod tests { assert_eq!(fee_rate_governor.burn(2), (0, 2)); } - #[test] - #[allow(deprecated)] - fn test_fee_calculator_calculate_fee() { - // Default: no fee. - let message = Message::default(); - assert_eq!(FeeCalculator::default().calculate_fee(&message), 0); - - // No signature, no fee. - assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 0); - - // One signature, a fee. 
- let pubkey0 = Pubkey::from([0; 32]); - let pubkey1 = Pubkey::from([1; 32]); - let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); - let message = Message::new(&[ix0], Some(&pubkey0)); - assert_eq!(FeeCalculator::new(2).calculate_fee(&message), 2); - - // Two signatures, double the fee. - let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); - let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1); - let message = Message::new(&[ix0, ix1], Some(&pubkey0)); - assert_eq!(FeeCalculator::new(2).calculate_fee(&message), 4); - } - - #[test] - #[allow(deprecated)] - fn test_fee_calculator_calculate_fee_secp256k1() { - use crate::instruction::Instruction; - let pubkey0 = Pubkey::from([0; 32]); - let pubkey1 = Pubkey::from([1; 32]); - let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); - let mut secp_instruction = Instruction { - program_id: crate::secp256k1_program::id(), - accounts: vec![], - data: vec![], - }; - let mut secp_instruction2 = Instruction { - program_id: crate::secp256k1_program::id(), - accounts: vec![], - data: vec![1], - }; - - let message = Message::new( - &[ - ix0.clone(), - secp_instruction.clone(), - secp_instruction2.clone(), - ], - Some(&pubkey0), - ); - assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 2); - - secp_instruction.data = vec![0]; - secp_instruction2.data = vec![10]; - let message = Message::new(&[ix0, secp_instruction, secp_instruction2], Some(&pubkey0)); - assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 11); - } - #[test] fn test_fee_rate_governor_derived_default() { solana_logger::setup(); diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index 6652c37001cb1a..eb6b467de6935e 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -3,11 +3,13 @@ //! [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 //! 
[`Hash`]: struct@Hash +#[cfg(target_arch = "wasm32")] +use crate::wasm_bindgen; #[cfg(feature = "borsh")] use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use { - crate::{sanitize::Sanitize, wasm_bindgen}, - bytemuck::{Pod, Zeroable}, + crate::sanitize::Sanitize, + bytemuck_derive::{Pod, Zeroable}, sha2::{Digest, Sha256}, std::{convert::TryFrom, fmt, mem, str::FromStr}, thiserror::Error, @@ -28,7 +30,7 @@ const MAX_BASE58_LEN: usize = 44; /// [blake3]: https://github.com/BLAKE3-team/BLAKE3 /// [`blake3`]: crate::blake3 /// [`Message::hash`]: crate::message::Message::hash -#[wasm_bindgen] +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[cfg_attr( feature = "borsh", diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 19be50d9b693b9..9603431d6d2146 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -13,10 +13,12 @@ #![allow(clippy::arithmetic_side_effects)] +#[cfg(target_arch = "wasm32")] +use crate::wasm_bindgen; #[cfg(feature = "borsh")] use borsh::BorshSerialize; use { - crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec, wasm_bindgen}, + crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec}, bincode::serialize, serde::Serialize, thiserror::Error, @@ -325,16 +327,27 @@ pub enum InstructionError { /// Programs may require signatures from some accounts, in which case they /// should be specified as signers during `Instruction` construction. The /// program must still validate during execution that the account is a signer. -#[wasm_bindgen] +#[cfg(not(target_arch = "wasm32"))] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct Instruction { /// Pubkey of the program that executes this instruction. - #[wasm_bindgen(skip)] pub program_id: Pubkey, /// Metadata describing accounts that should be passed to the program. 
- #[wasm_bindgen(skip)] pub accounts: Vec, /// Opaque data passed to the program for its own interpretation. + pub data: Vec, +} + +/// wasm-bindgen version of the Instruction struct. +/// This duplication is required until https://github.com/rustwasm/wasm-bindgen/issues/3671 +/// is fixed. This must not diverge from the regular non-wasm Instruction struct. +#[cfg(target_arch = "wasm32")] +#[wasm_bindgen] +pub struct Instruction { + #[wasm_bindgen(skip)] + pub program_id: Pubkey, + #[wasm_bindgen(skip)] + pub accounts: Vec, #[wasm_bindgen(skip)] pub data: Vec, } @@ -504,14 +517,6 @@ impl Instruction { data: data.to_vec(), } } - - #[deprecated( - since = "1.6.0", - note = "Please use another Instruction constructor instead, such as `Instruction::new_with_borsh`" - )] - pub fn new(program_id: Pubkey, data: &T, accounts: Vec) -> Self { - Self::new_with_bincode(program_id, data, accounts) - } } /// Addition that returns [`InstructionError::InsufficientFunds`] on overflow. diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 017ac3a067744d..fc6a89976b37df 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -537,20 +537,7 @@ pub mod sysvar; pub mod vote; pub mod wasm; -#[deprecated( - since = "1.17.0", - note = "Please use `solana_sdk::address_lookup_table::AddressLookupTableAccount` instead" -)] -pub mod address_lookup_table_account { - pub use crate::address_lookup_table::AddressLookupTableAccount; -} - -#[cfg(target_os = "solana")] -pub use solana_sdk_macro::wasm_bindgen_stub as wasm_bindgen; -/// Re-export of [wasm-bindgen]. -/// -/// [wasm-bindgen]: https://rustwasm.github.io/docs/wasm-bindgen/ -#[cfg(not(target_os = "solana"))] +#[cfg(target_arch = "wasm32")] pub use wasm_bindgen::prelude::wasm_bindgen; /// The [config native program][np]. 
diff --git a/sdk/program/src/log.rs b/sdk/program/src/log.rs index 4f3463f8dc1201..5febb4dbc5ad64 100644 --- a/sdk/program/src/log.rs +++ b/sdk/program/src/log.rs @@ -35,24 +35,6 @@ use crate::account_info::AccountInfo; -/// Print a message to the log. -#[macro_export] -#[deprecated(since = "1.4.14", note = "Please use `msg` macro instead")] -macro_rules! info { - ($msg:expr) => { - $crate::log::sol_log($msg) - }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; -} - /// Print a message to the log. /// /// Supports simple strings as well as Rust [format strings][fs]. When passed a diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs index 7e9b19a10591e1..a9964c33448be2 100644 --- a/sdk/program/src/message/compiled_keys.rs +++ b/sdk/program/src/message/compiled_keys.rs @@ -1,6 +1,6 @@ #[cfg(not(target_os = "solana"))] use crate::{ - address_lookup_table_account::AddressLookupTableAccount, + address_lookup_table::AddressLookupTableAccount, message::v0::{LoadedAddresses, MessageAddressTableLookup}, }; use { diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index b9dc518e028ed0..3296af72c699ba 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -11,6 +11,8 @@ #![allow(clippy::arithmetic_side_effects)] +#[cfg(target_arch = "wasm32")] +use crate::wasm_bindgen; #[allow(deprecated)] pub use builtins::{BUILTIN_PROGRAMS_KEYS, MAYBE_BUILTIN_KEY_OR_SYSVAR}; use { @@ -21,7 +23,7 @@ use { message::{compiled_keys::CompiledKeys, MessageHeader}, pubkey::Pubkey, sanitize::{Sanitize, SanitizeError}, - short_vec, system_instruction, system_program, sysvar, wasm_bindgen, + short_vec, system_instruction, system_program, sysvar, }, std::{collections::HashSet, convert::TryFrom, str::FromStr}, }; @@ -117,7 +119,7 @@ fn compile_instructions(ixs: 
&[Instruction], keys: &[Pubkey]) -> Vec Vec, @@ -141,6 +141,33 @@ pub struct Message { /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. + #[serde(with = "short_vec")] + pub instructions: Vec, +} + +/// wasm-bindgen version of the Message struct. +/// This duplication is required until https://github.com/rustwasm/wasm-bindgen/issues/3671 +/// is fixed. This must not diverge from the regular non-wasm Message struct. +#[cfg(target_arch = "wasm32")] +#[wasm_bindgen] +#[cfg_attr( + feature = "frozen-abi", + frozen_abi(digest = "2KnLEqfLcTBQqitE22Pp8JYkaqVVbAkGbCfdeHoyxcAU"), + derive(AbiExample) +)] +#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Message { + #[wasm_bindgen(skip)] + pub header: MessageHeader, + + #[wasm_bindgen(skip)] + #[serde(with = "short_vec")] + pub account_keys: Vec, + + /// The id of a recent ledger entry. + pub recent_blockhash: Hash, + #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub instructions: Vec, @@ -635,29 +662,6 @@ impl Message { i < self.header.num_required_signatures as usize } - #[deprecated] - pub fn get_account_keys_by_lock_type(&self) -> (Vec<&Pubkey>, Vec<&Pubkey>) { - let mut writable_keys = vec![]; - let mut readonly_keys = vec![]; - for (i, key) in self.account_keys.iter().enumerate() { - if self.is_maybe_writable(i, None) { - writable_keys.push(key); - } else { - readonly_keys.push(key); - } - } - (writable_keys, readonly_keys) - } - - #[deprecated] - pub fn deserialize_instruction( - index: usize, - data: &[u8], - ) -> Result { - #[allow(deprecated)] - sysvar::instructions::load_instruction_at(index, data) - } - pub fn signer_keys(&self) -> Vec<&Pubkey> { // Clamp in case we're working on un-`sanitize()`ed input let last_key = self @@ -880,36 +884,6 @@ mod tests { assert!(!message.is_account_maybe_reserved(2, None)); } - #[test] - fn test_get_account_keys_by_lock_type() { - let program_id = 
Pubkey::default(); - let id0 = Pubkey::new_unique(); - let id1 = Pubkey::new_unique(); - let id2 = Pubkey::new_unique(); - let id3 = Pubkey::new_unique(); - let message = Message::new( - &[ - Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, true)]), - Instruction::new_with_bincode( - program_id, - &0, - vec![AccountMeta::new_readonly(id2, false)], - ), - Instruction::new_with_bincode( - program_id, - &0, - vec![AccountMeta::new_readonly(id3, true)], - ), - ], - Some(&id1), - ); - assert_eq!( - message.get_account_keys_by_lock_type(), - (vec![&id1, &id0], vec![&id3, &program_id, &id2]) - ); - } - #[test] fn test_program_ids() { let key0 = Pubkey::new_unique(); diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs index 41e8ec34494c71..a7cb6ce41d3a33 100644 --- a/sdk/program/src/message/versions/v0/mod.rs +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -12,7 +12,7 @@ pub use loaded::*; use { crate::{ - address_lookup_table_account::AddressLookupTableAccount, + address_lookup_table::AddressLookupTableAccount, bpf_loader_upgradeable, hash::Hash, instruction::{CompiledInstruction, Instruction}, @@ -200,7 +200,7 @@ impl Message { /// use solana_rpc_client::rpc_client::RpcClient; /// use solana_program::address_lookup_table::{self, state::{AddressLookupTable, LookupTableMeta}}; /// use solana_sdk::{ - /// address_lookup_table_account::AddressLookupTableAccount, + /// address_lookup_table::AddressLookupTableAccount, /// instruction::{AccountMeta, Instruction}, /// message::{VersionedMessage, v0}, /// pubkey::Pubkey, diff --git a/sdk/program/src/program_stubs.rs b/sdk/program/src/program_stubs.rs index 7fd31358090118..e06f6b1d201a42 100644 --- a/sdk/program/src/program_stubs.rs +++ b/sdk/program/src/program_stubs.rs @@ -8,7 +8,6 @@ use { program_error::UNSUPPORTED_SYSVAR, pubkey::Pubkey, }, 
base64::{prelude::BASE64_STANDARD, Engine}, - itertools::Itertools, std::sync::{Arc, RwLock}, }; @@ -114,7 +113,11 @@ pub trait SyscallStubs: Sync + Send { fn sol_log_data(&self, fields: &[&[u8]]) { println!( "data: {}", - fields.iter().map(|v| BASE64_STANDARD.encode(v)).join(" ") + fields + .iter() + .map(|v| BASE64_STANDARD.encode(v)) + .collect::>() + .join(" ") ); } fn sol_get_processed_sibling_instruction(&self, _index: usize) -> Option { diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 508ec483bbe647..3ade0b1a4b2f69 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -2,13 +2,15 @@ #![allow(clippy::arithmetic_side_effects)] +#[cfg(target_arch = "wasm32")] +use crate::wasm_bindgen; #[cfg(test)] use arbitrary::Arbitrary; #[cfg(feature = "borsh")] use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use { - crate::{decode_error::DecodeError, hash::hashv, wasm_bindgen}, - bytemuck::{Pod, Zeroable}, + crate::{decode_error::DecodeError, hash::hashv}, + bytemuck_derive::{Pod, Zeroable}, num_derive::{FromPrimitive, ToPrimitive}, std::{ convert::{Infallible, TryFrom}, @@ -68,7 +70,7 @@ impl From for PubkeyError { /// [ed25519]: https://ed25519.cr.yp.to/ /// [pdas]: https://solana.com/docs/core/cpi#program-derived-addresses /// [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html -#[wasm_bindgen] +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[repr(transparent)] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[cfg_attr( @@ -178,25 +180,10 @@ pub fn bytes_are_curve_point>(_bytes: T) -> bool { } impl Pubkey { - #[deprecated( - since = "1.14.14", - note = "Please use 'Pubkey::from' or 'Pubkey::try_from' instead" - )] - pub fn new(pubkey_vec: &[u8]) -> Self { - Self::try_from(pubkey_vec).expect("Slice must be the same length as a Pubkey") - } - pub const fn new_from_array(pubkey_array: [u8; 32]) -> Self { Self(pubkey_array) } - #[deprecated(since = "1.3.9", note = 
"Please use 'Pubkey::new_unique' instead")] - #[cfg(not(target_os = "solana"))] - pub fn new_rand() -> Self { - // Consider removing Pubkey::new_rand() entirely in the v1.5 or v1.6 timeframe - Pubkey::from(rand::random::<[u8; 32]>()) - } - /// unique Pubkey for tests and benchmarks. pub fn new_unique() -> Self { use crate::atomic_u64::AtomicU64; diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs index 249b11b5f9452f..cf74f3552e59de 100644 --- a/sdk/program/src/sysvar/instructions.rs +++ b/sdk/program/src/sysvar/instructions.rs @@ -37,6 +37,8 @@ use crate::{ sanitize::SanitizeError, serialize_utils::{read_pubkey, read_slice, read_u16, read_u8}, }; +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; #[cfg(not(target_os = "solana"))] use { crate::serialize_utils::{append_slice, append_u16, append_u8}, @@ -147,11 +149,10 @@ fn serialize_instructions(instructions: &[BorrowedInstruction]) -> Vec { /// `Transaction`. /// /// `data` is the instructions sysvar account data. -#[deprecated( - since = "1.8.0", - note = "Unsafe because the sysvar accounts address is not checked, please use `load_current_index_checked` instead" -)] -pub fn load_current_index(data: &[u8]) -> u16 { +/// +/// Unsafe because the sysvar accounts address is not checked; only used +/// internally after such a check. +fn load_current_index(data: &[u8]) -> u16 { let mut instr_fixed_data = [0u8; 2]; let len = data.len(); instr_fixed_data.copy_from_slice(&data[len - 2..len]); @@ -172,10 +173,8 @@ pub fn load_current_index_checked( } let instruction_sysvar = instruction_sysvar_account_info.try_borrow_data()?; - let mut instr_fixed_data = [0u8; 2]; - let len = instruction_sysvar.len(); - instr_fixed_data.copy_from_slice(&instruction_sysvar[len - 2..len]); - Ok(u16::from_le_bytes(instr_fixed_data)) + let index = load_current_index(&instruction_sysvar); + Ok(index) } /// Store the current `Instruction`'s index in the instructions sysvar data. 
@@ -232,11 +231,11 @@ fn deserialize_instruction(index: usize, data: &[u8]) -> Result Result { +/// +/// Unsafe because the sysvar accounts address is not checked; only used +/// internally after such a check. +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] +fn load_instruction_at(index: usize, data: &[u8]) -> Result { deserialize_instruction(index, data) } @@ -255,7 +254,7 @@ pub fn load_instruction_at_checked( } let instruction_sysvar = instruction_sysvar_account_info.try_borrow_data()?; - deserialize_instruction(index, &instruction_sysvar).map_err(|err| match err { + load_instruction_at(index, &instruction_sysvar).map_err(|err| match err { SanitizeError::IndexOutOfBounds => ProgramError::InvalidArgument, _ => ProgramError::InvalidInstructionData, }) @@ -276,13 +275,11 @@ pub fn get_instruction_relative( } let instruction_sysvar = instruction_sysvar_account_info.data.borrow(); - #[allow(deprecated)] let current_index = load_current_index(&instruction_sysvar) as i64; let index = current_index.saturating_add(index_relative_to_current); if index < 0 { return Err(ProgramError::InvalidArgument); } - #[allow(deprecated)] load_instruction_at( current_index.saturating_add(index_relative_to_current) as usize, &instruction_sysvar, diff --git a/sdk/program/src/sysvar/slot_hashes.rs b/sdk/program/src/sysvar/slot_hashes.rs index 9d66fab235b585..4a1904039d7d8e 100644 --- a/sdk/program/src/sysvar/slot_hashes.rs +++ b/sdk/program/src/sysvar/slot_hashes.rs @@ -55,7 +55,7 @@ use { slot_hashes::MAX_ENTRIES, sysvar::{get_sysvar, Sysvar, SysvarId}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; crate::declare_sysvar_id!("SysvarS1otHashes111111111111111111111111111", SlotHashes); diff --git a/sdk/program/src/vote/instruction.rs b/sdk/program/src/vote/instruction.rs index 2c4cb4157f5721..b5d43b5c24c602 100644 --- a/sdk/program/src/vote/instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -257,49 +257,6 @@ impl<'a> Default for 
CreateVoteAccountConfig<'a> { } } -#[deprecated( - since = "1.16.0", - note = "Please use `create_account_with_config()` instead." -)] -pub fn create_account( - from_pubkey: &Pubkey, - vote_pubkey: &Pubkey, - vote_init: &VoteInit, - lamports: u64, -) -> Vec { - create_account_with_config( - from_pubkey, - vote_pubkey, - vote_init, - lamports, - CreateVoteAccountConfig::default(), - ) -} - -#[deprecated( - since = "1.16.0", - note = "Please use `create_account_with_config()` instead." -)] -pub fn create_account_with_seed( - from_pubkey: &Pubkey, - vote_pubkey: &Pubkey, - base: &Pubkey, - seed: &str, - vote_init: &VoteInit, - lamports: u64, -) -> Vec { - create_account_with_config( - from_pubkey, - vote_pubkey, - vote_init, - lamports, - CreateVoteAccountConfig { - with_seed: Some((base, seed)), - ..CreateVoteAccountConfig::default() - }, - ) -} - pub fn create_account_with_config( from_pubkey: &Pubkey, vote_pubkey: &Pubkey, diff --git a/sdk/src/account.rs b/sdk/src/account.rs index c0f3d783c14038..0541e1f8d9ce44 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -675,15 +675,6 @@ impl AccountSharedData { pub type InheritableAccountFields = (u64, Epoch); pub const DUMMY_INHERITABLE_ACCOUNT_FIELDS: InheritableAccountFields = (1, INITIAL_RENT_EPOCH); -/// Create an `Account` from a `Sysvar`. -#[deprecated( - since = "1.5.17", - note = "Please use `create_account_for_test` instead" -)] -pub fn create_account(sysvar: &S, lamports: u64) -> Account { - create_account_with_fields(sysvar, (lamports, INITIAL_RENT_EPOCH)) -} - pub fn create_account_with_fields( sysvar: &S, (lamports, rent_epoch): InheritableAccountFields, @@ -700,17 +691,6 @@ pub fn create_account_for_test(sysvar: &S) -> Account { } /// Create an `Account` from a `Sysvar`. 
-#[deprecated( - since = "1.5.17", - note = "Please use `create_account_shared_data_for_test` instead" -)] -pub fn create_account_shared_data(sysvar: &S, lamports: u64) -> AccountSharedData { - AccountSharedData::from(create_account_with_fields( - sysvar, - (lamports, INITIAL_RENT_EPOCH), - )) -} - pub fn create_account_shared_data_with_fields( sysvar: &S, fields: InheritableAccountFields, diff --git a/sdk/src/client.rs b/sdk/src/client.rs index f9e435c81d0b66..185b9aeeb0d40b 100644 --- a/sdk/src/client.rs +++ b/sdk/src/client.rs @@ -14,7 +14,6 @@ use crate::{ clock::Slot, commitment_config::CommitmentConfig, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, instruction::Instruction, message::Message, @@ -82,35 +81,6 @@ pub trait SyncClient { fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result; - /// Get recent blockhash - #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] - fn get_recent_blockhash(&self) -> Result<(Hash, FeeCalculator)>; - - /// Get recent blockhash. Uses explicit commitment configuration. - #[deprecated( - since = "1.9.0", - note = "Please use `get_latest_blockhash_with_commitment` and `get_latest_blockhash_with_commitment` instead" - )] - fn get_recent_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> Result<(Hash, FeeCalculator, Slot)>; - - /// Get `Some(FeeCalculator)` associated with `blockhash` if it is still in - /// the BlockhashQueue`, otherwise `None` - #[deprecated( - since = "1.9.0", - note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead" - )] - fn get_fee_calculator_for_blockhash(&self, blockhash: &Hash) -> Result>; - - /// Get recent fee rate governor - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - fn get_fee_rate_governor(&self) -> Result; - /// Get signature status. 
fn get_signature_status( &self, @@ -151,12 +121,6 @@ pub trait SyncClient { /// Poll to confirm a transaction. fn poll_for_signature(&self, signature: &Signature) -> Result<()>; - #[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" - )] - fn get_new_blockhash(&self, blockhash: &Hash) -> Result<(Hash, FeeCalculator)>; - /// Get last known blockhash fn get_latest_blockhash(&self) -> Result; diff --git a/sdk/src/commitment_config.rs b/sdk/src/commitment_config.rs index f0068659f4d7b4..7aca56b8947dbd 100644 --- a/sdk/src/commitment_config.rs +++ b/sdk/src/commitment_config.rs @@ -1,6 +1,5 @@ //! Definitions of commitment levels. -#![allow(deprecated)] #![cfg(feature = "full")] use {std::str::FromStr, thiserror::Error}; @@ -12,56 +11,6 @@ pub struct CommitmentConfig { } impl CommitmentConfig { - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentConfig::processed() instead" - )] - pub fn recent() -> Self { - Self { - commitment: CommitmentLevel::Recent, - } - } - - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentConfig::finalized() instead" - )] - pub fn max() -> Self { - Self { - commitment: CommitmentLevel::Max, - } - } - - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentConfig::finalized() instead" - )] - pub fn root() -> Self { - Self { - commitment: CommitmentLevel::Root, - } - } - - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentConfig::confirmed() instead" - )] - pub fn single() -> Self { - Self { - commitment: CommitmentLevel::Single, - } - } - - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentConfig::confirmed() instead" - )] - pub fn single_gossip() -> Self { - Self { - commitment: CommitmentLevel::SingleGossip, - } - } - pub const fn finalized() -> Self { Self { commitment: CommitmentLevel::Finalized, @@ -89,37 +38,27 @@ impl CommitmentConfig { } pub fn is_finalized(&self) -> bool { - matches!( - &self.commitment, - 
CommitmentLevel::Finalized | CommitmentLevel::Max | CommitmentLevel::Root - ) + self.commitment == CommitmentLevel::Finalized } pub fn is_confirmed(&self) -> bool { - matches!( - &self.commitment, - CommitmentLevel::Confirmed | CommitmentLevel::SingleGossip | CommitmentLevel::Single - ) + self.commitment == CommitmentLevel::Confirmed } pub fn is_processed(&self) -> bool { - matches!( - &self.commitment, - CommitmentLevel::Processed | CommitmentLevel::Recent - ) + self.commitment == CommitmentLevel::Processed } pub fn is_at_least_confirmed(&self) -> bool { self.is_confirmed() || self.is_finalized() } + #[deprecated( + since = "2.0.2", + note = "Returns self. Please do not use. Will be removed in the future." + )] pub fn use_deprecated_commitment(commitment: CommitmentConfig) -> Self { - match commitment.commitment { - CommitmentLevel::Finalized => CommitmentConfig::max(), - CommitmentLevel::Confirmed => CommitmentConfig::single_gossip(), - CommitmentLevel::Processed => CommitmentConfig::recent(), - _ => commitment, - } + commitment } } @@ -138,48 +77,6 @@ impl FromStr for CommitmentConfig { /// finalized. When querying the ledger state, use lower levels of commitment to report progress and higher /// levels to ensure state changes will not be rolled back. pub enum CommitmentLevel { - /// (DEPRECATED) The highest slot having reached max vote lockout, as recognized by a supermajority of the cluster. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Finalized instead" - )] - Max, - - /// (DEPRECATED) The highest slot of the heaviest fork. Ledger state at this slot is not derived from a finalized - /// block, but if multiple forks are present, is from the fork the validator believes is most likely - /// to finalize. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Processed instead" - )] - Recent, - - /// (DEPRECATED) The highest slot having reached max vote lockout. 
- #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Finalized instead" - )] - Root, - - /// (DEPRECATED) The highest slot having reached 1 confirmation by supermajority of the cluster. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Confirmed instead" - )] - Single, - - /// (DEPRECATED) The highest slot that has been voted on by supermajority of the cluster - /// This differs from `single` in that: - /// 1) It incorporates votes from gossip and replay. - /// 2) It does not count votes on descendants of a block, only direct votes on that block. - /// 3) This confirmation level also upholds "optimistic confirmation" guarantees in - /// release 1.3 and onwards. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Confirmed instead" - )] - SingleGossip, - /// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is /// not derived from a confirmed or finalized block, but if multiple forks are present, is from /// the fork the validator believes is most likely to finalize. 
@@ -207,11 +104,6 @@ impl FromStr for CommitmentLevel { fn from_str(s: &str) -> Result { match s { - "max" => Ok(CommitmentLevel::Max), - "recent" => Ok(CommitmentLevel::Recent), - "root" => Ok(CommitmentLevel::Root), - "single" => Ok(CommitmentLevel::Single), - "singleGossip" => Ok(CommitmentLevel::SingleGossip), "processed" => Ok(CommitmentLevel::Processed), "confirmed" => Ok(CommitmentLevel::Confirmed), "finalized" => Ok(CommitmentLevel::Finalized), @@ -223,11 +115,6 @@ impl FromStr for CommitmentLevel { impl std::fmt::Display for CommitmentLevel { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let s = match self { - CommitmentLevel::Max => "max", - CommitmentLevel::Recent => "recent", - CommitmentLevel::Root => "root", - CommitmentLevel::Single => "single", - CommitmentLevel::SingleGossip => "singleGossip", CommitmentLevel::Processed => "processed", CommitmentLevel::Confirmed => "confirmed", CommitmentLevel::Finalized => "finalized", diff --git a/sdk/src/ed25519_instruction.rs b/sdk/src/ed25519_instruction.rs index 64c3de083fc0e5..10ae533f478171 100644 --- a/sdk/src/ed25519_instruction.rs +++ b/sdk/src/ed25519_instruction.rs @@ -6,7 +6,8 @@ use { crate::{feature_set::FeatureSet, instruction::Instruction, precompiles::PrecompileError}, - bytemuck::{bytes_of, Pod, Zeroable}, + bytemuck::bytes_of, + bytemuck_derive::{Pod, Zeroable}, ed25519_dalek::{ed25519::signature::Signature, Signer, Verifier}, }; diff --git a/sdk/src/entrypoint.rs b/sdk/src/entrypoint.rs index e83bdf0575b1f0..38c85e00af958a 100644 --- a/sdk/src/entrypoint.rs +++ b/sdk/src/entrypoint.rs @@ -5,30 +5,3 @@ //! [`bpf_loader`]: crate::bpf_loader pub use solana_program::entrypoint::*; - -#[macro_export] -#[deprecated( - since = "1.4.3", - note = "use solana_program::entrypoint::entrypoint instead" -)] -macro_rules! 
entrypoint { - ($process_instruction:ident) => { - #[cfg(all(not(feature = "custom-heap"), not(test)))] - #[global_allocator] - static A: $crate::entrypoint::BumpAllocator = $crate::entrypoint::BumpAllocator { - start: $crate::entrypoint::HEAP_START_ADDRESS, - len: $crate::entrypoint::HEAP_LENGTH, - }; - - /// # Safety - #[no_mangle] - pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 { - let (program_id, accounts, instruction_data) = - unsafe { $crate::entrypoint::deserialize(input) }; - match $process_instruction(&program_id, &accounts, &instruction_data) { - Ok(()) => $crate::entrypoint::SUCCESS, - Err(error) => error.into(), - } - } - }; -} diff --git a/sdk/src/entrypoint_deprecated.rs b/sdk/src/entrypoint_deprecated.rs index c75b9c47a57a49..443a9bc0893f12 100644 --- a/sdk/src/entrypoint_deprecated.rs +++ b/sdk/src/entrypoint_deprecated.rs @@ -8,23 +8,3 @@ //! [`bpf_loader_deprecated`]: crate::bpf_loader_deprecated pub use solana_program::entrypoint_deprecated::*; - -#[macro_export] -#[deprecated( - since = "1.4.3", - note = "use solana_program::entrypoint::entrypoint instead" -)] -macro_rules! 
entrypoint_deprecated { - ($process_instruction:ident) => { - /// # Safety - #[no_mangle] - pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 { - let (program_id, accounts, instruction_data) = - unsafe { $crate::entrypoint_deprecated::deserialize(input) }; - match $process_instruction(&program_id, &accounts, &instruction_data) { - Ok(()) => $crate::entrypoint_deprecated::SUCCESS, - Err(error) => error.into(), - } - } - }; -} diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index e44f83d0db7a4a..e4970cf8cda0a3 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -824,6 +824,10 @@ pub mod migrate_address_lookup_table_program_to_core_bpf { solana_sdk::declare_id!("C97eKZygrkU4JxJsZdjgbUY7iQR7rKTr4NyDWo2E5pRm"); } +pub mod zk_elgamal_proof_program_enabled { + solana_sdk::declare_id!("zkhiy5oLowR7HY4zogXjCjeMXyruLqBwSWH21qcFtnv"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -1025,6 +1029,7 @@ lazy_static! { (migrate_config_program_to_core_bpf::id(), "Migrate Config program to Core BPF #1378"), (enable_get_epoch_stake_syscall::id(), "Enable syscall: sol_get_epoch_stake #884"), (migrate_address_lookup_table_program_to_core_bpf::id(), "Migrate Address Lookup Table program to Core BPF #1651"), + (zk_elgamal_proof_program_enabled::id(), "Enable ZkElGamalProof program SIMD-0153"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index e6a1f66415c25e..f5b0a5682f17a4 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -41,6 +41,10 @@ pub use signer::signers; pub use solana_program::program_stubs; // These solana_program imports could be *-imported, but that causes a bunch of // confusing duplication in the docs due to a rustdoc bug. 
#26211 +#[allow(deprecated)] +pub use solana_program::sdk_ids; +#[cfg(target_arch = "wasm32")] +pub use solana_program::wasm_bindgen; pub use solana_program::{ account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, @@ -51,10 +55,8 @@ pub use solana_program::{ program_memory, program_option, program_pack, rent, sanitize, secp256k1_program, secp256k1_recover, serde_varint, serialize_utils, short_vec, slot_hashes, slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, system_program, sysvar, - unchecked_div_by_const, vote, wasm_bindgen, + unchecked_div_by_const, vote, }; -#[allow(deprecated)] -pub use solana_program::{address_lookup_table_account, sdk_ids}; #[cfg(feature = "borsh")] pub use solana_program::{borsh, borsh0_10, borsh1}; @@ -91,7 +93,6 @@ pub mod precompiles; pub mod program_utils; pub mod pubkey; pub mod quic; -pub mod recent_blockhashes_account; pub mod rent_collector; pub mod rent_debits; pub mod reserved_account_keys; @@ -155,16 +156,6 @@ pub use solana_sdk_macro::pubkeys; #[rustversion::since(1.46.0)] pub use solana_sdk_macro::respan; -// Unused `solana_sdk::program_stubs!()` macro retained for source backwards compatibility with older programs -#[macro_export] -#[deprecated( - since = "1.4.3", - note = "program_stubs macro is obsolete and can be safely removed" -)] -macro_rules! program_stubs { - () => {}; -} - /// Convenience macro for `AddAssign` with saturating arithmetic. /// Replace by `std::num::Saturating` once stable #[macro_export] diff --git a/sdk/src/log.rs b/sdk/src/log.rs index 78a45afaf4a1e8..748b241c02b77f 100644 --- a/sdk/src/log.rs +++ b/sdk/src/log.rs @@ -1,14 +1,3 @@ #![cfg(feature = "program")] pub use solana_program::log::*; - -#[macro_export] -#[deprecated( - since = "1.4.3", - note = "Please use `solana_program::log::info` instead" -)] -macro_rules! 
info { - ($msg:expr) => { - $crate::log::sol_log($msg) - }; -} diff --git a/sdk/src/native_loader.rs b/sdk/src/native_loader.rs index 3f10fc527ad8a6..53a7ded4b61f54 100644 --- a/sdk/src/native_loader.rs +++ b/sdk/src/native_loader.rs @@ -1,23 +1,12 @@ //! The native loader native program. -use crate::{ - account::{ - Account, AccountSharedData, InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, - }, - clock::INITIAL_RENT_EPOCH, +use crate::account::{ + Account, AccountSharedData, InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, }; crate::declare_id!("NativeLoader1111111111111111111111111111111"); /// Create an executable account with the given shared object name. -#[deprecated( - since = "1.5.17", - note = "Please use `create_loadable_account_for_test` instead" -)] -pub fn create_loadable_account(name: &str, lamports: u64) -> AccountSharedData { - create_loadable_account_with_fields(name, (lamports, INITIAL_RENT_EPOCH)) -} - pub fn create_loadable_account_with_fields( name: &str, (lamports, rent_epoch): InheritableAccountFields, diff --git a/sdk/src/reserved_account_keys.rs b/sdk/src/reserved_account_keys.rs index a1a1d367d18ffd..e3c6bed5c973be 100644 --- a/sdk/src/reserved_account_keys.rs +++ b/sdk/src/reserved_account_keys.rs @@ -22,6 +22,11 @@ mod zk_token_proof_program { solana_sdk::declare_id!("ZkTokenProof1111111111111111111111111111111"); } +// Inline zk-elgamal-proof program id since it isn't available in the sdk +mod zk_elgamal_proof_program { + solana_sdk::declare_id!("ZkE1Gama1Proof11111111111111111111111111111"); +} + // ReservedAccountKeys is not serialized into or deserialized from bank // snapshots but the bank requires this trait to be implemented anyways. #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] @@ -162,6 +167,7 @@ lazy_static! 
{ ReservedAccount::new_active(stake::program::id()), ReservedAccount::new_active(system_program::id()), ReservedAccount::new_active(vote::program::id()), + ReservedAccount::new_pending(zk_elgamal_proof_program::id(), feature_set::add_new_reserved_account_keys::id()), ReservedAccount::new_pending(zk_token_proof_program::id(), feature_set::add_new_reserved_account_keys::id()), // sysvars diff --git a/sdk/src/signature.rs b/sdk/src/signature.rs index 158fa5ceeaf136..3509e96522a657 100644 --- a/sdk/src/signature.rs +++ b/sdk/src/signature.rs @@ -28,14 +28,6 @@ pub struct Signature(GenericArray); impl crate::sanitize::Sanitize for Signature {} impl Signature { - #[deprecated( - since = "1.16.4", - note = "Please use 'Signature::from' or 'Signature::try_from' instead" - )] - pub fn new(signature_slice: &[u8]) -> Self { - Self(GenericArray::clone_from_slice(signature_slice)) - } - pub fn new_unique() -> Self { Self::from(std::array::from_fn(|_| rand::random())) } diff --git a/sdk/src/signer/keypair.rs b/sdk/src/signer/keypair.rs index 1873996a399391..9e6088c1b5444f 100644 --- a/sdk/src/signer/keypair.rs +++ b/sdk/src/signer/keypair.rs @@ -1,5 +1,7 @@ #![cfg(feature = "full")] +#[cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; use { crate::{ derivation_path::DerivationPath, @@ -16,11 +18,10 @@ use { io::{Read, Write}, path::Path, }, - wasm_bindgen::prelude::*, }; /// A vanilla Ed25519 key pair -#[wasm_bindgen] +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[derive(Debug)] pub struct Keypair(ed25519_dalek::Keypair); diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 520a80d0289684..d0eb01a3af96e0 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -111,6 +111,8 @@ #![cfg(feature = "full")] +#[cfg(target_arch = "wasm32")] +use crate::wasm_bindgen; use { crate::{ hash::Hash, @@ -124,7 +126,6 @@ use { short_vec, signature::{Signature, SignerError}, signers::Signers, - wasm_bindgen, }, serde::Serialize, 
solana_program::{system_instruction::SystemInstruction, system_program}, @@ -167,7 +168,7 @@ pub type Result = result::Result; /// if the caller has knowledge that the first account of the constructed /// transaction's `Message` is both a signer and the expected fee-payer, then /// redundantly specifying the fee-payer is not strictly required. -#[wasm_bindgen] +#[cfg(not(target_arch = "wasm32"))] #[cfg_attr( feature = "frozen-abi", derive(AbiExample), @@ -184,11 +185,29 @@ pub struct Transaction { /// [`MessageHeader`]: crate::message::MessageHeader /// [`num_required_signatures`]: crate::message::MessageHeader::num_required_signatures // NOTE: Serialization-related changes must be paired with the direct read at sigverify. - #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub signatures: Vec, /// The message to sign. + pub message: Message, +} + +/// wasm-bindgen version of the Transaction struct. +/// This duplication is required until https://github.com/rustwasm/wasm-bindgen/issues/3671 +/// is fixed. This must not diverge from the regular non-wasm Transaction struct. 
+#[cfg(target_arch = "wasm32")] +#[wasm_bindgen] +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc") +)] +#[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] +pub struct Transaction { + #[wasm_bindgen(skip)] + #[serde(with = "short_vec")] + pub signatures: Vec, + #[wasm_bindgen(skip)] pub message: Message, } @@ -1106,17 +1125,6 @@ pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { }) } -#[deprecated] -pub fn get_nonce_pubkey_from_instruction<'a>( - ix: &CompiledInstruction, - tx: &'a Transaction, -) -> Option<&'a Pubkey> { - ix.accounts.first().and_then(|idx| { - let idx = *idx as usize; - tx.message().account_keys.get(idx) - }) -} - #[cfg(test)] mod tests { #![allow(deprecated)] @@ -1611,34 +1619,6 @@ mod tests { assert!(uses_durable_nonce(&tx).is_none()); } - #[test] - fn get_nonce_pub_from_ix_ok() { - let (_, nonce_pubkey, tx) = nonced_transfer_tx(); - let nonce_ix = uses_durable_nonce(&tx).unwrap(); - assert_eq!( - get_nonce_pubkey_from_instruction(nonce_ix, &tx), - Some(&nonce_pubkey), - ); - } - - #[test] - fn get_nonce_pub_from_ix_no_accounts_fail() { - let (_, _, tx) = nonced_transfer_tx(); - let nonce_ix = uses_durable_nonce(&tx).unwrap(); - let mut nonce_ix = nonce_ix.clone(); - nonce_ix.accounts.clear(); - assert_eq!(get_nonce_pubkey_from_instruction(&nonce_ix, &tx), None,); - } - - #[test] - fn get_nonce_pub_from_ix_bad_acc_idx_fail() { - let (_, _, tx) = nonced_transfer_tx(); - let nonce_ix = uses_durable_nonce(&tx).unwrap(); - let mut nonce_ix = nonce_ix.clone(); - nonce_ix.accounts[0] = 255u8; - assert_eq!(get_nonce_pubkey_from_instruction(&nonce_ix, &tx), None,); - } - #[test] fn tx_keypair_pubkey_mismatch() { let from_keypair = Keypair::new(); diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index 35e76524d9017a..a69c366a358fdc 100644 --- a/send-transaction-service/Cargo.toml +++ 
b/send-transaction-service/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } crossbeam-channel = { workspace = true } log = { workspace = true } solana-client = { workspace = true } +solana-connection-cache = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-runtime = { workspace = true } diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index abe53b236d2e75..8cc21b12359639 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -2,10 +2,8 @@ use { crate::tpu_info::TpuInfo, crossbeam_channel::{Receiver, RecvTimeoutError}, log::*, - solana_client::{ - connection_cache::{ConnectionCache, Protocol}, - tpu_connection::TpuConnection, - }, + solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_measure::measure::Measure, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index fdebb5ab8d0214..b4bfe040a30963 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -985,6 +985,7 @@ mod tests { parent_slot, transactions, rewards, + num_partitions, block_time, block_height, } = confirmed_block; @@ -995,6 +996,8 @@ mod tests { parent_slot, transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|r| r.into()).collect(), + num_partitions: num_partitions + .map(|num_partitions| generated::NumPartitions { num_partitions }), block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }), block_height: block_height.map(|block_height| generated::BlockHeight { block_height }), } @@ -1028,6 +1031,7 @@ mod tests { blockhash: Hash::default().to_string(), previous_blockhash: 
Hash::default().to_string(), rewards: vec![], + num_partitions: None, block_time: Some(1_234_567_890), block_height: Some(1), }; diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index 240ae44c3d07fe..3af928a626d834 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -141,6 +141,7 @@ impl From for StoredConfirmedBlock { parent_slot, transactions, rewards, + num_partitions: _num_partitions, block_time, block_height, } = confirmed_block; @@ -175,6 +176,7 @@ impl From for ConfirmedBlock { parent_slot, transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|reward| reward.into()).collect(), + num_partitions: None, block_time, block_height, } diff --git a/storage-proto/proto/confirmed_block.proto b/storage-proto/proto/confirmed_block.proto index 47548ea13bc6a4..6d26ce7bce2cad 100644 --- a/storage-proto/proto/confirmed_block.proto +++ b/storage-proto/proto/confirmed_block.proto @@ -10,6 +10,7 @@ message ConfirmedBlock { repeated Reward rewards = 5; UnixTimestamp block_time = 6; BlockHeight block_height = 7; + NumPartitions num_partitions = 8; } message ConfirmedTransaction { @@ -130,6 +131,7 @@ message Reward { message Rewards { repeated Reward rewards = 1; + NumPartitions num_partitions = 2; } message UnixTimestamp { @@ -139,3 +141,7 @@ message UnixTimestamp { message BlockHeight { uint64 block_height = 1; } + +message NumPartitions { + uint64 num_partitions = 1; +} diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 8d6669e44b43f1..8315bcf99a4dac 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -16,8 +16,9 @@ use { }, solana_transaction_status::{ ConfirmedBlock, EntrySummary, InnerInstruction, InnerInstructions, Reward, RewardType, - TransactionByAddrInfo, TransactionStatusMeta, TransactionTokenBalance, - TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta, + RewardsAndNumPartitions, 
TransactionByAddrInfo, TransactionStatusMeta, + TransactionTokenBalance, TransactionWithStatusMeta, VersionedConfirmedBlock, + VersionedTransactionWithStatusMeta, }, std::{ convert::{TryFrom, TryInto}, @@ -47,6 +48,16 @@ impl From> for generated::Rewards { fn from(rewards: Vec) -> Self { Self { rewards: rewards.into_iter().map(|r| r.into()).collect(), + num_partitions: None, + } + } +} + +impl From for generated::Rewards { + fn from(input: RewardsAndNumPartitions) -> Self { + Self { + rewards: input.rewards.into_iter().map(|r| r.into()).collect(), + num_partitions: input.num_partitions.map(|n| n.into()), } } } @@ -57,6 +68,17 @@ impl From for Vec { } } +impl From for (Vec, Option) { + fn from(rewards: generated::Rewards) -> Self { + ( + rewards.rewards.into_iter().map(|r| r.into()).collect(), + rewards + .num_partitions + .map(|generated::NumPartitions { num_partitions }| num_partitions), + ) + } +} + impl From for generated::Rewards { fn from(rewards: StoredExtendedRewards) -> Self { Self { @@ -67,6 +89,7 @@ impl From for generated::Rewards { r.into() }) .collect(), + num_partitions: None, } } } @@ -121,6 +144,12 @@ impl From for Reward { } } +impl From for generated::NumPartitions { + fn from(num_partitions: u64) -> Self { + Self { num_partitions } + } +} + impl From for generated::ConfirmedBlock { fn from(confirmed_block: VersionedConfirmedBlock) -> Self { let VersionedConfirmedBlock { @@ -129,6 +158,7 @@ impl From for generated::ConfirmedBlock { parent_slot, transactions, rewards, + num_partitions, block_time, block_height, } = confirmed_block; @@ -139,6 +169,7 @@ impl From for generated::ConfirmedBlock { parent_slot, transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|r| r.into()).collect(), + num_partitions: num_partitions.map(Into::into), block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }), block_height: block_height.map(|block_height| generated::BlockHeight { block_height }), } 
@@ -156,6 +187,7 @@ impl TryFrom for ConfirmedBlock { parent_slot, transactions, rewards, + num_partitions, block_time, block_height, } = confirmed_block; @@ -169,6 +201,8 @@ impl TryFrom for ConfirmedBlock { .map(|tx| tx.try_into()) .collect::, Self::Error>>()?, rewards: rewards.into_iter().map(|r| r.into()).collect(), + num_partitions: num_partitions + .map(|generated::NumPartitions { num_partitions }| num_partitions), block_time: block_time.map(|generated::UnixTimestamp { timestamp }| timestamp), block_height: block_height.map(|generated::BlockHeight { block_height }| block_height), }) diff --git a/svm/Cargo.toml b/svm/Cargo.toml index c6ae6db41f13fb..41918ca261f013 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -26,6 +26,7 @@ solana-measure = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } solana-system-program = { workspace = true } +solana-type-overrides = { workspace = true } solana-vote = { workspace = true } [lib] @@ -62,3 +63,9 @@ frozen-abi = [ "solana-program-runtime/frozen-abi", "solana-sdk/frozen-abi", ] +shuttle-test = [ + "solana-type-overrides/shuttle-test", + "solana-program-runtime/shuttle-test", + "solana-bpf-loader-program/shuttle-test", + "solana-loader-v4-program/shuttle-test", +] diff --git a/svm/doc/spec.md b/svm/doc/spec.md index c88b6b83f6b268..8f148b557debf0 100644 --- a/svm/doc/spec.md +++ b/svm/doc/spec.md @@ -81,8 +81,7 @@ Validator and in third-party applications. The interface to SVM is represented by the `transaction_processor::TransactionBatchProcessor` struct. To create a `TransactionBatchProcessor` object the client need to specify the -`slot`, `epoch`, `epoch_schedule`, `fee_structure`, `runtime_config`, -and `program_cache`. +`slot`, `epoch`, and `program_cache`. - `slot: Slot` is a u64 value representing the ordinal number of a particular blockchain state in context of which the transactions @@ -92,22 +91,6 @@ and `program_cache`. 
a Solana epoch, in which the slot was created. This is another index used to locate the onchain programs used in the execution of transactions in the batch. -- `epoch_schedule: EpochSchedule` is a struct that contains - information about epoch configuration, such as number of slots per - epoch, etc. TransactionBatchProcessor needs an instance of - EpochSchedule to obtain the first slot in the epoch in which the - transactions batch is being executed. This slot is sometimes - required for updating the information about the slot when a program - account has been accessed most recently. This is needed for - program cache bookkeeping. -- `fee_structure: FeeStructure` an instance of `FeeStructure` is - needed to check the validity of every transaction in a batch when - the transaction accounts are being loaded and checked for - compliance with required fees for transaction execution. -- `runtime_config: Arc` is a reference to a - RuntimeConfig struct instance. The `RuntimeConfig` is a collection - of fields that control parameters of runtime, such as compute - budget, the maximal size of log messages in bytes, etc. - `program_cache: Arc>>` is a reference to a ProgramCache instance. All on chain programs used in transaction batch execution are loaded from the program cache. @@ -119,45 +102,116 @@ The main entry point to the SVM is the method `load_and_execute_sanitized_transactions`. 
The method `load_and_execute_sanitized_transactions` takes the -following arguments - - `callbacks` is a `TransactionProcessingCallback` trait instance - that enables access to data available from accounts-db and from - Bank, - - `sanitized_txs` a slice of `SanitizedTransaction` - - `SanitizedTransaction` contains - - `SanitizedMessage` is an enum with two kinds of messages - - `LegacyMessage` and `LoadedMessage` - Both `LegacyMessage` and `LoadedMessage` consist of - - `MessageHeader` - - vector of `Pubkey` of accounts used in the transaction - - `Hash` of recent block - - vector of `CompiledInstruction` - In addition `LoadedMessage` contains a vector of - `MessageAddressTableLookup` -- list of address table lookups to - load additional accounts for this transaction. - - a Hash of the message - - a boolean flag `is_simple_vote_tx` -- explain - - a vector of `Signature` -- explain which signatures are in this vector - - `check_results` is a mutable slice of `TransactionCheckResult` - - `error_counters` is a mutable reference to `TransactionErrorMetrics` - - `recording_config` is a value of `ExecutionRecordingConfig` configuration parameters - - `timings` is a mutable reference to `ExecuteTimings` - - `account_overrides` is an optional reference to `AccountOverrides` - - `builtin_programs` is an iterator of `Pubkey` that represents builtin programs - - `log_messages_bytes_limit` is an optional `usize` limit on the size of log messages in bytes - - `limit_to_load_programs` is a boolean flag that instruct the function to only load the - programs and do not execute the transactions. - -The method returns a value of -`LoadAndExecuteSanitizedTransactionsOutput` which consists of two -vectors - - a vector of `TransactionLoadResult`, and - - a vector of `TransactionExecutionResult`. 
+following arguments: + +- `callbacks`: A `TransactionProcessingCallback` trait instance which allows + the transaction processor to summon information about accounts, most + importantly loading them for transaction execution. +- `sanitized_txs`: A slice of sanitized transactions. +- `check_results`: A mutable slice of transaction check results. +- `environment`: The runtime environment for transaction batch processing. +- `config`: Configurations for customizing transaction processing behavior. + +The method returns a `LoadAndExecuteSanitizedTransactionsOutput`, which is +defined below in more detail. An integration test `svm_integration` contains an example of instantiating `TransactionBatchProcessor` and calling its method `load_and_execute_sanitized_transactions`. +### `TransactionProcessingCallback` + +Downstream consumers of the SVM must implement the +`TransactionProcessingCallback` trait in order to provide the transaction +processor with the ability to load accounts and retrieve other account-related +information. + +```rust +pub trait TransactionProcessingCallback { + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; + + fn add_builtin_account(&self, _name: &str, _program_id: &Pubkey) {} +} +``` + +Consumers can customize this plug-in to use their own Solana account source, +caching, and more. + +### `SanitizedTransaction` + +A "sanitized" Solana transaction is a transaction that has undergone the +various checks required to evaluate a transaction against the Solana protocol +ruleset. Some of these rules include signature verification and validation +of account indices (`num_readonly_signers`, etc.). + +A `SanitizedTransaction` contains: + +- `SanitizedMessage`: Enum with two kinds of messages - `LegacyMessage` and + `LoadedMessage` - both of which contain: + - `MessageHeader`: Vector of `Pubkey` of accounts used in the transaction. + - `Hash` of recent block. 
+ - Vector of `CompiledInstruction`. + - In addition, `LoadedMessage` contains a vector of + `MessageAddressTableLookup` - list of address table lookups to + load additional accounts for this transaction. +- A Hash of the message +- A boolean flag `is_simple_vote_tx` - shortcut for determining if the + transaction is merely a simple vote transaction produced by a validator. +- A vector of `Signature` - the hash of the transaction message encrypted using + the signing key (for each signer in the transaction). + +### `TransactionCheckResult` + +Simply stores details about a transaction, including whether or not it contains +a nonce, the nonce it contains (if applicable), and the lamports per signature +to charge for fees. + +### `TransactionProcessingEnvironment` + +The transaction processor requires consumers to provide values describing +the runtime environment to use for processing transactions. + +- `blockhash`: The blockhash to use for the transaction batch. +- `epoch_total_stake`: The total stake for the current epoch. +- `epoch_vote_accounts`: The vote accounts for the current epoch. +- `feature_set`: Runtime feature set to use for the transaction batch. +- `fee_structure`: Fee structure to use for assessing transaction fees. +- `lamports_per_signature`: Lamports per signature to charge per transaction. +- `rent_collector`: Rent collector to use for the transaction batch. + +### `TransactionProcessingConfig` + +Consumers can provide various configurations to adjust the default behavior of +the transaction processor. + +- `account_overrides`: Encapsulates overridden accounts, typically used for + transaction simulation. +- `compute_budget`: The compute budget to use for transaction execution. +- `check_program_modification_slot`: Whether or not to check a program's + modification slot when replenishing a program cache instance. +- `log_messages_bytes_limit`: The maximum number of bytes that log messages can + consume. 
+- `limit_to_load_programs`: Whether to limit the number of programs loaded for + the transaction batch. +- `recording_config`: Recording capabilities for transaction execution. +- `transaction_account_lock_limit`: The max number of accounts that a + transaction may lock. + +### `LoadAndExecuteSanitizedTransactionsOutput` + +The output of the transaction batch processor's +`load_and_execute_sanitized_transactions` method. + +- `error_metrics`: Error metrics for transactions that were processed. +- `execute_timings`: Timings for transaction batch execution. +- `execution_results`: Vector of results indicating whether a transaction was + executed or could not be executed. Note executed transactions can still have + failed! +- `loaded_transactions`: Vector of loaded transactions from transactions that + were processed. + # Functional Model In this section, we describe the functionality (logic) of the SVM in @@ -209,11 +263,10 @@ Steps of `load_and_execute_sanitized_transactions` - Validate the fee payer and the loaded accounts - Validate the programs accounts that have been loaded and checks if they are builtin programs. - Return `struct LoadedTransaction` containing the accounts (pubkey and data), - indices to the excutabe accounts in `TransactionContext` (or `InstructionContext`), + indices to the executable accounts in `TransactionContext` (or `InstructionContext`), the transaction rent, and the `struct RentDebit`. - - Generate a `NonceFull` struct (holds fee subtracted nonce info) when possible, `None` otherwise. - - Returns `TransactionLoadedResult`, a tuple containing the `LoadTransaction` we obtained from `loaded_transaction_accounts`, - and a `Option`. + - Generate a `RollbackAccounts` struct which holds fee-subtracted fee payer account and pre-execution nonce state used for rolling back account state on execution failure. + - Returns `TransactionLoadedResult`, containing the `LoadTransaction` we obtained from `loaded_transaction_accounts` 3. 
Execute each loaded transactions 1. Compute the sum of transaction accounts' balances. This sum is diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 8eb2f9fa3e81f3..c62963c09e0934 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,13 +1,14 @@ use { crate::{ - account_overrides::AccountOverrides, - account_rent_state::RentState, - nonce_info::{NonceFull, NoncePartial}, + account_overrides::AccountOverrides, account_rent_state::RentState, + nonce_info::NoncePartial, rollback_accounts::RollbackAccounts, transaction_error_metrics::TransactionErrorMetrics, transaction_processing_callback::TransactionProcessingCallback, }, itertools::Itertools, - solana_compute_budget::compute_budget_processor::process_compute_budget_instructions, + solana_compute_budget::compute_budget_processor::{ + process_compute_budget_instructions, ComputeBudgetLimits, + }, solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch}, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, @@ -45,7 +46,8 @@ pub struct CheckedTransactionDetails { #[derive(PartialEq, Eq, Debug, Clone)] #[cfg_attr(feature = "dev-context-only-utils", derive(Default))] pub struct ValidatedTransactionDetails { - pub nonce: Option, + pub rollback_accounts: RollbackAccounts, + pub compute_budget_limits: ComputeBudgetLimits, pub fee_details: FeeDetails, pub fee_payer_account: AccountSharedData, pub fee_payer_rent_debit: u64, @@ -55,19 +57,14 @@ pub struct ValidatedTransactionDetails { pub struct LoadedTransaction { pub accounts: Vec, pub program_indices: TransactionProgramIndices, - pub nonce: Option, pub fee_details: FeeDetails, + pub rollback_accounts: RollbackAccounts, + pub compute_budget_limits: ComputeBudgetLimits, pub rent: TransactionRent, pub rent_debits: RentDebits, pub loaded_accounts_data_size: usize, } -impl LoadedTransaction { - pub fn fee_payer_account(&self) -> Option<&TransactionAccount> { - 
self.accounts.first() - } -} - /// Collect rent from an account if rent is still enabled and regardless of /// whether rent is enabled, set the rent epoch to u64::MAX if the account is /// rent exempt. @@ -363,8 +360,9 @@ fn load_transaction_accounts( Ok(LoadedTransaction { accounts, program_indices, - nonce: tx_details.nonce, fee_details: tx_details.fee_details, + rollback_accounts: tx_details.rollback_accounts, + compute_budget_limits: tx_details.compute_budget_limits, rent: tx_rent, rent_debits, loaded_accounts_data_size: accumulated_accounts_data_size, @@ -439,7 +437,7 @@ mod tests { use { super::*, crate::{ - nonce_info::NonceFull, transaction_account_state_info::TransactionAccountStateInfo, + transaction_account_state_info::TransactionAccountStateInfo, transaction_processing_callback::TransactionProcessingCallback, }, nonce::state::Versions as NonceVersions, @@ -1114,6 +1112,70 @@ mod tests { assert_eq!(shared_data, expected); } + #[test] + fn test_load_transaction_accounts_fee_payer() { + let fee_payer_address = Pubkey::new_unique(); + let message = Message { + account_keys: vec![fee_payer_address], + header: MessageHeader::default(), + instructions: vec![], + recent_blockhash: Hash::default(), + }; + + let sanitized_message = new_unchecked_sanitized_message(message); + let mut mock_bank = TestCallbacks::default(); + + let fee_payer_balance = 200; + let mut fee_payer_account_data = AccountSharedData::default(); + fee_payer_account_data.set_lamports(fee_payer_balance); + mock_bank + .accounts_map + .insert(fee_payer_address, fee_payer_account_data.clone()); + let fee_payer_rent_debit = 42; + + let mut error_metrics = TransactionErrorMetrics::default(); + let loaded_programs = ProgramCacheForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + sanitized_transaction.message(), + 
ValidatedTransactionDetails { + fee_payer_account: fee_payer_account_data.clone(), + fee_payer_rent_debit, + ..ValidatedTransactionDetails::default() + }, + &mut error_metrics, + None, + &FeatureSet::default(), + &RentCollector::default(), + &loaded_programs, + ); + + let expected_rent_debits = { + let mut rent_debits = RentDebits::default(); + rent_debits.insert(&fee_payer_address, fee_payer_rent_debit, fee_payer_balance); + rent_debits + }; + assert_eq!( + result.unwrap(), + LoadedTransaction { + accounts: vec![(fee_payer_address, fee_payer_account_data),], + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), + rent: fee_payer_rent_debit, + rent_debits: expected_rent_debits, + loaded_accounts_data_size: 0, + } + ); + } + #[test] fn test_load_transaction_accounts_native_loader() { let key1 = Keypair::new(); @@ -1147,15 +1209,12 @@ mod tests { vec![Signature::new_unique()], false, ); - let fee_details = FeeDetails::new_for_tests(32, 0, false); let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), ValidatedTransactionDetails { - nonce: None, - fee_details, - fee_payer_account: fee_payer_account_data, - fee_payer_rent_debit: 0, + fee_payer_account: fee_payer_account_data.clone(), + ..ValidatedTransactionDetails::default() }, &mut error_metrics, None, @@ -1168,18 +1227,16 @@ mod tests { result.unwrap(), LoadedTransaction { accounts: vec![ - ( - key1.pubkey(), - mock_bank.accounts_map[&key1.pubkey()].clone() - ), + (key1.pubkey(), fee_payer_account_data), ( native_loader::id(), mock_bank.accounts_map[&native_loader::id()].clone() ) ], program_indices: vec![vec![]], - nonce: None, - fee_details, + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1359,15 
+1416,12 @@ mod tests { vec![Signature::new_unique()], false, ); - let fee_details = FeeDetails::new_for_tests(32, 0, false); let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), ValidatedTransactionDetails { - nonce: None, - fee_details, - fee_payer_account: fee_payer_account_data, - fee_payer_rent_debit: 0, + fee_payer_account: fee_payer_account_data.clone(), + ..ValidatedTransactionDetails::default() }, &mut error_metrics, None, @@ -1380,17 +1434,15 @@ mod tests { result.unwrap(), LoadedTransaction { accounts: vec![ - ( - key2.pubkey(), - mock_bank.accounts_map[&key2.pubkey()].clone() - ), + (key2.pubkey(), fee_payer_account_data), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() ), ], - nonce: None, - fee_details, + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), program_indices: vec![vec![1]], rent: 0, rent_debits: RentDebits::default(), @@ -1545,15 +1597,12 @@ mod tests { vec![Signature::new_unique()], false, ); - let fee_details = FeeDetails::new_for_tests(32, 0, false); let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), ValidatedTransactionDetails { - nonce: None, - fee_details, - fee_payer_account: fee_payer_account_data, - fee_payer_rent_debit: 0, + fee_payer_account: fee_payer_account_data.clone(), + ..ValidatedTransactionDetails::default() }, &mut error_metrics, None, @@ -1566,10 +1615,7 @@ mod tests { result.unwrap(), LoadedTransaction { accounts: vec![ - ( - key2.pubkey(), - mock_bank.accounts_map[&key2.pubkey()].clone() - ), + (key2.pubkey(), fee_payer_account_data), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() @@ -1580,8 +1626,9 @@ mod tests { ), ], program_indices: vec![vec![2, 1]], - nonce: None, - fee_details, + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), 
rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1640,15 +1687,12 @@ mod tests { vec![Signature::new_unique()], false, ); - let fee_details = FeeDetails::new_for_tests(32, 0, false); let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), ValidatedTransactionDetails { - nonce: None, - fee_details, - fee_payer_account: fee_payer_account_data, - fee_payer_rent_debit: 0, + fee_payer_account: fee_payer_account_data.clone(), + ..ValidatedTransactionDetails::default() }, &mut error_metrics, None, @@ -1663,10 +1707,7 @@ mod tests { result.unwrap(), LoadedTransaction { accounts: vec![ - ( - key2.pubkey(), - mock_bank.accounts_map[&key2.pubkey()].clone() - ), + (key2.pubkey(), fee_payer_account_data), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() @@ -1678,8 +1719,9 @@ mod tests { ), ], program_indices: vec![vec![3, 1], vec![3, 1]], - nonce: None, - fee_details, + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1733,7 +1775,7 @@ mod tests { let transaction_context = TransactionContext::new( loaded_txs[0].as_ref().unwrap().accounts.clone(), Rent::default(), - compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); @@ -1800,10 +1842,8 @@ mod tests { false, ); let validation_result = Ok(ValidatedTransactionDetails { - nonce: None, - fee_details: FeeDetails::default(), fee_payer_account: fee_payer_account_data, - fee_payer_rent_debit: 0, + ..ValidatedTransactionDetails::default() }); let results = load_accounts( @@ -1841,8 +1881,9 @@ mod tests { ), ], program_indices: vec![vec![3, 1], vec![3, 1]], - nonce: None, fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), 
rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1874,13 +1915,7 @@ mod tests { false, ); - let validation_result = Ok(ValidatedTransactionDetails { - nonce: Some(NonceFull::default()), - fee_details: FeeDetails::default(), - fee_payer_account: AccountSharedData::default(), - fee_payer_rent_debit: 0, - }); - + let validation_result = Ok(ValidatedTransactionDetails::default()); let result = load_accounts( &mock_bank, &[sanitized_transaction.clone()], diff --git a/svm/src/lib.rs b/svm/src/lib.rs index b3c6a51cb1a0eb..4fcf894f6fe8ff 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -7,6 +7,7 @@ pub mod account_rent_state; pub mod message_processor; pub mod nonce_info; pub mod program_loader; +pub mod rollback_accounts; pub mod runtime_config; pub mod transaction_account_state_info; pub mod transaction_error_metrics; diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs index eb442a23266064..95e5223b3ce53c 100644 --- a/svm/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -275,7 +275,6 @@ mod tests { ]), )); let sysvar_cache = SysvarCache::default(); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -286,11 +285,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -331,7 +329,6 @@ mod tests { ), ]), )); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -342,11 +339,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, 
ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -377,7 +373,6 @@ mod tests { ), ]), )); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -388,11 +383,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -514,7 +508,6 @@ mod tests { Some(transaction_context.get_key_of_account_at_index(0).unwrap()), )); let sysvar_cache = SysvarCache::default(); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -525,11 +518,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -555,7 +547,6 @@ mod tests { )], Some(transaction_context.get_key_of_account_at_index(0).unwrap()), )); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -566,11 +557,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -593,7 +583,6 @@ mod tests { )], Some(transaction_context.get_key_of_account_at_index(0).unwrap()), )); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let 
environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -604,11 +593,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, @@ -669,7 +657,7 @@ mod tests { // copies the `random` implementation at: // https://docs.rs/libsecp256k1/latest/src/libsecp256k1/lib.rs.html#430 let secret_key = { - use rand::RngCore; + use solana_type_overrides::rand::RngCore; let mut rng = rand::thread_rng(); loop { let mut ret = [0u8; libsecp256k1::util::SECRET_KEY_SIZE]; @@ -692,7 +680,6 @@ mod tests { mock_program_id, Arc::new(ProgramCacheEntry::new_builtin(0, 0, MockBuiltin::vm)), ); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let environment_config = EnvironmentConfig::new( Hash::default(), None, @@ -703,11 +690,10 @@ mod tests { ); let mut invoke_context = InvokeContext::new( &mut transaction_context, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, environment_config, None, ComputeBudget::default(), - &mut programs_modified_by_tx, ); let result = MessageProcessor::process_message( &message, diff --git a/svm/src/nonce_info.rs b/svm/src/nonce_info.rs index 5088adb5b5965f..062b8fc221f87f 100644 --- a/svm/src/nonce_info.rs +++ b/svm/src/nonce_info.rs @@ -1,8 +1,4 @@ -use solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - nonce_account, - pubkey::Pubkey, -}; +use solana_sdk::{account::AccountSharedData, nonce_account, pubkey::Pubkey}; pub trait NonceInfo { fn address(&self) -> &Pubkey; @@ -39,107 +35,27 @@ impl NonceInfo for NoncePartial { } } -/// Holds fee subtracted nonce info -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct NonceFull { - address: Pubkey, - account: AccountSharedData, - fee_payer_account: Option, -} - -impl NonceFull { - pub 
fn new( - address: Pubkey, - account: AccountSharedData, - fee_payer_account: Option, - ) -> Self { - Self { - address, - account, - fee_payer_account, - } - } - - pub fn from_partial( - partial: NoncePartial, - fee_payer_address: &Pubkey, - mut fee_payer_account: AccountSharedData, - fee_payer_rent_debit: u64, - ) -> Self { - fee_payer_account.set_lamports( - fee_payer_account - .lamports() - .saturating_add(fee_payer_rent_debit), - ); - - let NoncePartial { - address: nonce_address, - account: nonce_account, - } = partial; - - if *fee_payer_address == nonce_address { - Self::new(nonce_address, fee_payer_account, None) - } else { - Self::new(nonce_address, nonce_account, Some(fee_payer_account)) - } - } -} - -impl NonceInfo for NonceFull { - fn address(&self) -> &Pubkey { - &self.address - } - fn account(&self) -> &AccountSharedData { - &self.account - } - fn lamports_per_signature(&self) -> Option { - nonce_account::lamports_per_signature_of(&self.account) - } - fn fee_payer_account(&self) -> Option<&AccountSharedData> { - self.fee_payer_account.as_ref() - } -} - #[cfg(test)] mod tests { use { super::*, solana_sdk::{ hash::Hash, - instruction::Instruction, - message::{Message, SanitizedMessage}, - nonce::{self, state::DurableNonce}, - reserved_account_keys::ReservedAccountKeys, - signature::{keypair_from_seed, Signer}, - system_instruction, system_program, + nonce::state::{ + Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, + }, + system_program, }, }; - fn new_sanitized_message( - instructions: &[Instruction], - payer: Option<&Pubkey>, - ) -> SanitizedMessage { - SanitizedMessage::try_from_legacy_message( - Message::new(instructions, payer), - &ReservedAccountKeys::empty_key_set(), - ) - .unwrap() - } - #[test] fn test_nonce_info() { - let lamports_per_signature = 42; - - let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); - let nonce_address = nonce_authority.pubkey(); - let from = keypair_from_seed(&[1; 32]).unwrap(); - 
let from_address = from.pubkey(); - let to_address = Pubkey::new_unique(); - + let nonce_address = Pubkey::new_unique(); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let lamports_per_signature = 42; let nonce_account = AccountSharedData::new_data( 43, - &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::new( + &NonceVersions::new(NonceState::Initialized(NonceData::new( Pubkey::default(), durable_nonce, lamports_per_signature, @@ -147,71 +63,15 @@ mod tests { &system_program::id(), ) .unwrap(); - let from_account = AccountSharedData::new(44, 0, &Pubkey::default()); - - const TEST_RENT_DEBIT: u64 = 1; - let rent_collected_nonce_account = { - let mut account = nonce_account.clone(); - account.set_lamports(nonce_account.lamports() - TEST_RENT_DEBIT); - account - }; - let rent_collected_from_account = { - let mut account = from_account.clone(); - account.set_lamports(from_account.lamports() - TEST_RENT_DEBIT); - account - }; - - let instructions = vec![ - system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), - system_instruction::transfer(&from_address, &to_address, 42), - ]; // NoncePartial create + NonceInfo impl - let partial = NoncePartial::new(nonce_address, rent_collected_nonce_account.clone()); + let partial = NoncePartial::new(nonce_address, nonce_account.clone()); assert_eq!(*partial.address(), nonce_address); - assert_eq!(*partial.account(), rent_collected_nonce_account); + assert_eq!(*partial.account(), nonce_account); assert_eq!( partial.lamports_per_signature(), Some(lamports_per_signature) ); assert_eq!(partial.fee_payer_account(), None); - - // NonceFull create + NonceInfo impl - { - let message = new_sanitized_message(&instructions, Some(&from_address)); - let fee_payer_address = message.account_keys().get(0).unwrap(); - let fee_payer_account = rent_collected_from_account.clone(); - let full = NonceFull::from_partial( - partial.clone(), - fee_payer_address, - 
fee_payer_account, - TEST_RENT_DEBIT, - ); - assert_eq!(*full.address(), nonce_address); - assert_eq!(*full.account(), rent_collected_nonce_account); - assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature)); - assert_eq!( - full.fee_payer_account(), - Some(&from_account), - "rent debit should be refunded in captured fee account" - ); - } - - // Nonce account is fee-payer - { - let message = new_sanitized_message(&instructions, Some(&nonce_address)); - let fee_payer_address = message.account_keys().get(0).unwrap(); - let fee_payer_account = rent_collected_nonce_account; - let full = NonceFull::from_partial( - partial, - fee_payer_address, - fee_payer_account, - TEST_RENT_DEBIT, - ); - assert_eq!(*full.address(), nonce_address); - assert_eq!(*full.account(), nonce_account); - assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature)); - assert_eq!(full.fee_payer_account(), None); - } } } diff --git a/svm/src/program_loader.rs b/svm/src/program_loader.rs index 6060e8bb3db453..f77780fc9fecfa 100644 --- a/svm/src/program_loader.rs +++ b/svm/src/program_loader.rs @@ -11,14 +11,14 @@ use { account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, - bpf_loader_upgradeable::UpgradeableLoaderState, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::Slot, - epoch_schedule::EpochSchedule, instruction::InstructionError, loader_v4::{self, LoaderV4State, LoaderV4Status}, pubkey::Pubkey, + transaction::{self, TransactionError}, }, - std::sync::Arc, + solana_type_overrides::sync::Arc, }; #[derive(Debug)] @@ -126,7 +126,6 @@ pub fn load_program_with_pubkey( environments: &ProgramRuntimeEnvironments, pubkey: &Pubkey, slot: Slot, - _epoch_schedule: &EpochSchedule, reload: bool, ) -> Option> { let mut load_program_metrics = LoadProgramMetrics { @@ -219,6 +218,43 @@ pub fn load_program_with_pubkey( Some(Arc::new(loaded_program)) } +/// Find the slot in which the program was most recently 
modified. +/// Returns slot 0 for programs deployed with v1/v2 loaders, since programs deployed +/// with those loaders do not retain deployment slot information. +/// Returns an error if the program's account state can not be found or parsed. +pub(crate) fn get_program_modification_slot( + callbacks: &CB, + pubkey: &Pubkey, +) -> transaction::Result { + let program = callbacks + .get_account_shared_data(pubkey) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if bpf_loader_upgradeable::check_id(program.owner()) { + if let Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = program.state() + { + let programdata = callbacks + .get_account_shared_data(&programdata_address) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if let Ok(UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: _, + }) = programdata.state() + { + return Ok(slot); + } + } + Err(TransactionError::ProgramAccountNotFound) + } else if loader_v4::check_id(program.owner()) { + let state = solana_loader_v4_program::get_state(program.data()) + .map_err(|_| TransactionError::ProgramAccountNotFound)?; + Ok(state.slot) + } else { + Ok(0) + } +} + #[cfg(test)] mod tests { use { @@ -494,7 +530,6 @@ mod tests { &batch_processor.get_environments_for_epoch(50).unwrap(), &key, 500, - &batch_processor.epoch_schedule, false, ); assert!(result.is_none()); @@ -517,7 +552,6 @@ mod tests { &batch_processor.get_environments_for_epoch(20).unwrap(), &key, 0, // Slot 0 - &batch_processor.epoch_schedule, false, ); @@ -552,7 +586,6 @@ mod tests { &batch_processor.get_environments_for_epoch(20).unwrap(), &key, 200, - &batch_processor.epoch_schedule, false, ); let loaded_program = ProgramCacheEntry::new_tombstone( @@ -580,7 +613,6 @@ mod tests { &batch_processor.get_environments_for_epoch(20).unwrap(), &key, 200, - &batch_processor.epoch_schedule, false, ); @@ -634,7 +666,6 @@ mod tests { &batch_processor.get_environments_for_epoch(0).unwrap(), &key1, 0, - 
&batch_processor.epoch_schedule, false, ); let loaded_program = ProgramCacheEntry::new_tombstone( @@ -672,7 +703,6 @@ mod tests { &batch_processor.get_environments_for_epoch(20).unwrap(), &key1, 200, - &batch_processor.epoch_schedule, false, ); @@ -722,7 +752,6 @@ mod tests { &batch_processor.get_environments_for_epoch(0).unwrap(), &key, 0, - &batch_processor.epoch_schedule, false, ); let loaded_program = ProgramCacheEntry::new_tombstone( @@ -756,7 +785,6 @@ mod tests { &batch_processor.get_environments_for_epoch(20).unwrap(), &key, 200, - &batch_processor.epoch_schedule, false, ); @@ -807,7 +835,6 @@ mod tests { .unwrap(), &key, 200, - &batch_processor.epoch_schedule, false, ) .unwrap(); @@ -827,4 +854,111 @@ mod tests { ); } } + + #[test] + fn test_program_modification_slot_account_not_found() { + let mock_bank = MockBankCallback::default(); + + let key = Pubkey::new_unique(); + + let result = get_program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + let mut account_data = AccountSharedData::new(100, 100, &bpf_loader_upgradeable::id()); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key, account_data.clone()); + + let result = get_program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + let state = UpgradeableLoaderState::Program { + programdata_address: Pubkey::new_unique(), + }; + account_data.set_data(bincode::serialize(&state).unwrap()); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key, account_data.clone()); + + let result = get_program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + account_data.set_owner(loader_v4::id()); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key, account_data); + + let result = get_program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), 
Some(TransactionError::ProgramAccountNotFound)); + } + + #[test] + fn test_program_modification_slot_success() { + let mock_bank = MockBankCallback::default(); + + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + + let account_data = AccountSharedData::new_data( + 100, + &UpgradeableLoaderState::Program { + programdata_address: key2, + }, + &bpf_loader_upgradeable::id(), + ) + .unwrap(); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key1, account_data); + + let account_data = AccountSharedData::new_data( + 100, + &UpgradeableLoaderState::ProgramData { + slot: 77, + upgrade_authority_address: None, + }, + &bpf_loader_upgradeable::id(), + ) + .unwrap(); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key2, account_data); + + let result = get_program_modification_slot(&mock_bank, &key1); + assert_eq!(result.unwrap(), 77); + + let state = LoaderV4State { + slot: 58, + authority_address: Pubkey::new_unique(), + status: LoaderV4Status::Deployed, + }; + let encoded = unsafe { + std::mem::transmute::<&LoaderV4State, &[u8; LoaderV4State::program_data_offset()]>( + &state, + ) + }; + let mut account_data = AccountSharedData::new(100, encoded.len(), &loader_v4::id()); + account_data.set_data(encoded.to_vec()); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key1, account_data.clone()); + + let result = get_program_modification_slot(&mock_bank, &key1); + assert_eq!(result.unwrap(), 58); + + account_data.set_owner(Pubkey::new_unique()); + mock_bank + .account_shared_data + .borrow_mut() + .insert(key2, account_data); + + let result = get_program_modification_slot(&mock_bank, &key2); + assert_eq!(result.unwrap(), 0); + } } diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs new file mode 100644 index 00000000000000..6fbd3a9c2e91e8 --- /dev/null +++ b/svm/src/rollback_accounts.rs @@ -0,0 +1,229 @@ +use { + crate::nonce_info::{NonceInfo, NoncePartial}, + solana_sdk::{ + account::{AccountSharedData, 
ReadableAccount, WritableAccount}, + clock::Epoch, + pubkey::Pubkey, + }, +}; + +/// Captured account state used to rollback account state for nonce and fee +/// payer accounts after a failed executed transaction. +#[derive(PartialEq, Eq, Debug, Clone)] +pub enum RollbackAccounts { + FeePayerOnly { + fee_payer_account: AccountSharedData, + }, + SameNonceAndFeePayer { + nonce: NoncePartial, + }, + SeparateNonceAndFeePayer { + nonce: NoncePartial, + fee_payer_account: AccountSharedData, + }, +} + +#[cfg(feature = "dev-context-only-utils")] +impl Default for RollbackAccounts { + fn default() -> Self { + Self::FeePayerOnly { + fee_payer_account: AccountSharedData::default(), + } + } +} + +impl RollbackAccounts { + pub fn new( + nonce: Option, + fee_payer_address: Pubkey, + mut fee_payer_account: AccountSharedData, + fee_payer_rent_debit: u64, + fee_payer_loaded_rent_epoch: Epoch, + ) -> Self { + // When the fee payer account is rolled back due to transaction failure, + // rent should not be charged so credit the previously debited rent + // amount. + fee_payer_account.set_lamports( + fee_payer_account + .lamports() + .saturating_add(fee_payer_rent_debit), + ); + + if let Some(nonce) = nonce { + if &fee_payer_address == nonce.address() { + RollbackAccounts::SameNonceAndFeePayer { + nonce: NoncePartial::new(fee_payer_address, fee_payer_account), + } + } else { + RollbackAccounts::SeparateNonceAndFeePayer { + nonce, + fee_payer_account, + } + } + } else { + // When rolling back failed transactions which don't use nonces, the + // runtime should not update the fee payer's rent epoch so reset the + // rollback fee payer acocunt's rent epoch to its originally loaded + // rent epoch value. In the future, a feature gate could be used to + // alter this behavior such that rent epoch updates are handled the + // same for both nonce and non-nonce failed transactions. 
+ fee_payer_account.set_rent_epoch(fee_payer_loaded_rent_epoch); + RollbackAccounts::FeePayerOnly { fee_payer_account } + } + } + + pub fn nonce(&self) -> Option<&NoncePartial> { + match self { + Self::FeePayerOnly { .. } => None, + Self::SameNonceAndFeePayer { nonce } | Self::SeparateNonceAndFeePayer { nonce, .. } => { + Some(nonce) + } + } + } + + pub fn fee_payer_account(&self) -> &AccountSharedData { + match self { + Self::FeePayerOnly { fee_payer_account } + | Self::SeparateNonceAndFeePayer { + fee_payer_account, .. + } => fee_payer_account, + Self::SameNonceAndFeePayer { nonce } => nonce.account(), + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + hash::Hash, + nonce::state::{ + Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, + }, + system_program, + }, + }; + + #[test] + fn test_new_fee_payer_only() { + let fee_payer_address = Pubkey::new_unique(); + let fee_payer_account = AccountSharedData::new(100, 0, &Pubkey::default()); + let fee_payer_rent_epoch = fee_payer_account.rent_epoch(); + + const TEST_RENT_DEBIT: u64 = 1; + let rent_collected_fee_payer_account = { + let mut account = fee_payer_account.clone(); + account.set_lamports(fee_payer_account.lamports() - TEST_RENT_DEBIT); + account.set_rent_epoch(fee_payer_rent_epoch + 1); + account + }; + + let rollback_accounts = RollbackAccounts::new( + None, + fee_payer_address, + rent_collected_fee_payer_account, + TEST_RENT_DEBIT, + fee_payer_rent_epoch, + ); + + let expected_fee_payer_account = fee_payer_account; + match rollback_accounts { + RollbackAccounts::FeePayerOnly { fee_payer_account } => { + assert_eq!(expected_fee_payer_account, fee_payer_account); + } + _ => panic!("Expected FeePayerOnly variant"), + } + } + + #[test] + fn test_new_same_nonce_and_fee_payer() { + let nonce_address = Pubkey::new_unique(); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let 
lamports_per_signature = 42; + let nonce_account = AccountSharedData::new_data( + 43, + &NonceVersions::new(NonceState::Initialized(NonceData::new( + Pubkey::default(), + durable_nonce, + lamports_per_signature, + ))), + &system_program::id(), + ) + .unwrap(); + + const TEST_RENT_DEBIT: u64 = 1; + let rent_collected_nonce_account = { + let mut account = nonce_account.clone(); + account.set_lamports(nonce_account.lamports() - TEST_RENT_DEBIT); + account + }; + + let nonce = NoncePartial::new(nonce_address, rent_collected_nonce_account.clone()); + let rollback_accounts = RollbackAccounts::new( + Some(nonce), + nonce_address, + rent_collected_nonce_account, + TEST_RENT_DEBIT, + u64::MAX, // ignored + ); + + match rollback_accounts { + RollbackAccounts::SameNonceAndFeePayer { nonce } => { + assert_eq!(nonce.address(), &nonce_address); + assert_eq!(nonce.account(), &nonce_account); + } + _ => panic!("Expected SameNonceAndFeePayer variant"), + } + } + + #[test] + fn test_separate_nonce_and_fee_payer() { + let nonce_address = Pubkey::new_unique(); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let lamports_per_signature = 42; + let nonce_account = AccountSharedData::new_data( + 43, + &NonceVersions::new(NonceState::Initialized(NonceData::new( + Pubkey::default(), + durable_nonce, + lamports_per_signature, + ))), + &system_program::id(), + ) + .unwrap(); + + let fee_payer_address = Pubkey::new_unique(); + let fee_payer_account = AccountSharedData::new(44, 0, &Pubkey::default()); + + const TEST_RENT_DEBIT: u64 = 1; + let rent_collected_fee_payer_account = { + let mut account = fee_payer_account.clone(); + account.set_lamports(fee_payer_account.lamports() - TEST_RENT_DEBIT); + account + }; + + let nonce = NoncePartial::new(nonce_address, nonce_account.clone()); + let rollback_accounts = RollbackAccounts::new( + Some(nonce), + fee_payer_address, + rent_collected_fee_payer_account.clone(), + TEST_RENT_DEBIT, + u64::MAX, // ignored + ); + + let 
expected_fee_payer_account = fee_payer_account; + match rollback_accounts { + RollbackAccounts::SeparateNonceAndFeePayer { + nonce, + fee_payer_account, + } => { + assert_eq!(nonce.address(), &nonce_address); + assert_eq!(nonce.account(), &nonce_account); + assert_eq!(expected_fee_payer_account, fee_payer_account); + } + _ => panic!("Expected SeparateNonceAndFeePayer variant"), + } + } +} diff --git a/svm/src/transaction_error_metrics.rs b/svm/src/transaction_error_metrics.rs index 8cb24e36189e06..ad572073ac5ee3 100644 --- a/svm/src/transaction_error_metrics.rs +++ b/svm/src/transaction_error_metrics.rs @@ -16,6 +16,7 @@ pub struct TransactionErrorMetrics { pub invalid_account_for_fee: usize, pub invalid_account_index: usize, pub invalid_program_for_execution: usize, + pub invalid_compute_budget: usize, pub not_allowed_during_cluster_maintenance: usize, pub invalid_writable_account: usize, pub invalid_rent_paying_account: usize, @@ -50,6 +51,7 @@ impl TransactionErrorMetrics { self.invalid_program_for_execution, other.invalid_program_for_execution ); + saturating_add_assign!(self.invalid_compute_budget, other.invalid_compute_budget); saturating_add_assign!( self.not_allowed_during_cluster_maintenance, other.not_allowed_during_cluster_maintenance @@ -128,6 +130,11 @@ impl TransactionErrorMetrics { // i64 // ), // ( + // "invalid_compute_budget", + // self.invalid_compute_budget as i64, + // i64 + // ), + // ( // "not_allowed_during_cluster_maintenance", // self.not_allowed_during_cluster_maintenance as i64, // i64 diff --git a/svm/src/transaction_processing_callback.rs b/svm/src/transaction_processing_callback.rs index bca549b12013cc..760a6606568798 100644 --- a/svm/src/transaction_processing_callback.rs +++ b/svm/src/transaction_processing_callback.rs @@ -1,7 +1,4 @@ -use { - solana_program_runtime::loaded_programs::ProgramCacheMatchCriteria, - solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, -}; +use solana_sdk::{account::AccountSharedData, 
pubkey::Pubkey}; /// Runtime callbacks for transaction processing. pub trait TransactionProcessingCallback { @@ -9,9 +6,5 @@ pub trait TransactionProcessingCallback { fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; - fn get_program_match_criteria(&self, _program: &Pubkey) -> ProgramCacheMatchCriteria { - ProgramCacheMatchCriteria::NoCriteria - } - fn add_builtin_account(&self, _name: &str, _program_id: &Pubkey) {} } diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index cc0cbcdac5775e..2037489c0477f0 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -9,8 +9,8 @@ use { }, account_overrides::AccountOverrides, message_processor::MessageProcessor, - nonce_info::NonceFull, - program_loader::load_program_with_pubkey, + program_loader::{get_program_modification_slot, load_program_with_pubkey}, + rollback_accounts::RollbackAccounts, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, transaction_processing_callback::TransactionProcessingCallback, @@ -38,12 +38,11 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, clock::{Epoch, Slot}, - epoch_schedule::EpochSchedule, feature_set::{ include_loaded_accounts_data_size_in_fee_calculation, remove_rounding_in_fee_calculation, FeatureSet, }, - fee::{FeeDetails, FeeStructure}, + fee::{FeeBudgetLimits, FeeStructure}, hash::Hash, inner_instruction::{InnerInstruction, InnerInstructionsList}, instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, @@ -54,16 +53,13 @@ use { transaction::{self, SanitizedTransaction, TransactionError}, transaction_context::{ExecutionRecord, TransactionContext}, }, + solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, solana_vote::vote_account::VoteAccountsHashMap, std::{ cell::RefCell, collections::{hash_map::Entry, HashMap, HashSet}, fmt::{Debug, Formatter}, rc::Rc, - sync::{ - 
atomic::Ordering::{self, Relaxed}, - Arc, RwLock, - }, }, }; @@ -108,6 +104,9 @@ pub struct TransactionProcessingConfig<'a> { /// Encapsulates overridden accounts, typically used for transaction /// simulation. pub account_overrides: Option<&'a AccountOverrides>, + /// Whether or not to check a program's modification slot when replenishing + /// a program cache instance. + pub check_program_modification_slot: bool, /// The compute budget to use for transaction execution. pub compute_budget: Option, /// The maximum number of bytes that log messages can consume. @@ -132,6 +131,8 @@ pub struct TransactionProcessingEnvironment<'a> { pub epoch_vote_accounts: Option<&'a VoteAccountsHashMap>, /// Runtime feature set to use for the transaction batch. pub feature_set: Arc, + /// Fee structure to use for assessing transaction fees. + pub fee_structure: Option<&'a FeeStructure>, /// Lamports per signature to charge per transaction. pub lamports_per_signature: u64, /// Rent collector to use for the transaction batch. @@ -146,16 +147,10 @@ pub struct TransactionBatchProcessor { /// Bank epoch epoch: Epoch, - /// initialized from genesis - pub epoch_schedule: EpochSchedule, - - /// Transaction fee structure - pub fee_structure: FeeStructure, - /// SysvarCache is a collection of system variables that are /// accessible from on chain programs. It is passed to SVM from /// client code (e.g. Bank) and forwarded to the MessageProcessor. 
- pub sysvar_cache: RwLock, + sysvar_cache: RwLock, /// Programs required for transaction batch processing pub program_cache: Arc>>, @@ -169,8 +164,6 @@ impl Debug for TransactionBatchProcessor { f.debug_struct("TransactionBatchProcessor") .field("slot", &self.slot) .field("epoch", &self.epoch) - .field("epoch_schedule", &self.epoch_schedule) - .field("fee_structure", &self.fee_structure) .field("sysvar_cache", &self.sysvar_cache) .field("program_cache", &self.program_cache) .finish() @@ -182,8 +175,6 @@ impl Default for TransactionBatchProcessor { Self { slot: Slot::default(), epoch: Epoch::default(), - epoch_schedule: EpochSchedule::default(), - fee_structure: FeeStructure::default(), sysvar_cache: RwLock::::default(), program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), @@ -195,17 +186,10 @@ impl Default for TransactionBatchProcessor { } impl TransactionBatchProcessor { - pub fn new( - slot: Slot, - epoch: Epoch, - epoch_schedule: EpochSchedule, - builtin_program_ids: HashSet, - ) -> Self { + pub fn new(slot: Slot, epoch: Epoch, builtin_program_ids: HashSet) -> Self { Self { slot, epoch, - epoch_schedule, - fee_structure: FeeStructure::default(), sysvar_cache: RwLock::::default(), program_cache: Arc::new(RwLock::new(ProgramCache::new(slot, epoch))), builtin_program_ids: RwLock::new(builtin_program_ids), @@ -216,8 +200,6 @@ impl TransactionBatchProcessor { Self { slot, epoch, - epoch_schedule: self.epoch_schedule.clone(), - fee_structure: self.fee_structure.clone(), sysvar_cache: RwLock::::default(), program_cache: self.program_cache.clone(), builtin_program_ids: RwLock::new(self.builtin_program_ids.read().unwrap().clone()), @@ -234,6 +216,10 @@ impl TransactionBatchProcessor { .map(|cache| cache.get_environments_for_epoch(epoch)) } + pub fn sysvar_cache(&self) -> RwLockReadGuard { + self.sysvar_cache.read().unwrap() + } + /// Main entrypoint to the SVM. 
pub fn load_and_execute_sanitized_transactions( &self, @@ -252,6 +238,9 @@ impl TransactionBatchProcessor { sanitized_txs, check_results, &environment.feature_set, + environment + .fee_structure + .unwrap_or(&FeeStructure::default()), environment .rent_collector .unwrap_or(&RentCollector::default()), @@ -272,6 +261,7 @@ impl TransactionBatchProcessor { let program_cache_for_tx_batch = Rc::new(RefCell::new(self.replenish_program_cache( callbacks, &program_accounts_map, + config.check_program_modification_slot, config.limit_to_load_programs, ))); @@ -312,34 +302,12 @@ impl TransactionBatchProcessor { .map(|(load_result, tx)| match load_result { Err(e) => TransactionExecutionResult::NotExecuted(e.clone()), Ok(loaded_transaction) => { - let compute_budget = if let Some(compute_budget) = config.compute_budget { - compute_budget - } else { - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let maybe_compute_budget = ComputeBudget::try_from_instructions( - tx.message().program_instructions_iter(), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - execute_timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = maybe_compute_budget { - return TransactionExecutionResult::NotExecuted(err); - } - maybe_compute_budget.unwrap() - }; - let result = self.execute_loaded_transaction( tx, loaded_transaction, - compute_budget, &mut execute_timings, &mut error_metrics, - &program_cache_for_tx_batch.borrow(), + &mut program_cache_for_tx_batch.borrow_mut(), environment, config, ); @@ -415,6 +383,7 @@ impl TransactionBatchProcessor { sanitized_txs: &[impl core::borrow::Borrow], check_results: Vec, feature_set: &FeatureSet, + fee_structure: &FeeStructure, rent_collector: &RentCollector, error_counters: &mut TransactionErrorMetrics, ) -> Vec { @@ -422,41 +391,18 @@ impl TransactionBatchProcessor { .iter() 
.zip(check_results) .map(|(sanitized_tx, check_result)| { - check_result.and_then( - |CheckedTransactionDetails { - nonce, - lamports_per_signature, - }| { - let message = sanitized_tx.borrow().message(); - let (fee_details, fee_payer_account, fee_payer_rent_debit) = self - .validate_transaction_fee_payer( - callbacks, - message, - feature_set, - lamports_per_signature, - rent_collector, - error_counters, - )?; - - // Update nonce with fee-subtracted accounts - let fee_payer_address = message.fee_payer(); - let nonce = nonce.map(|nonce| { - NonceFull::from_partial( - nonce, - fee_payer_address, - fee_payer_account.clone(), - fee_payer_rent_debit, - ) - }); - - Ok(ValidatedTransactionDetails { - nonce, - fee_details, - fee_payer_account, - fee_payer_rent_debit, - }) - }, - ) + check_result.and_then(|checked_details| { + let message = sanitized_tx.borrow().message(); + self.validate_transaction_fee_payer( + callbacks, + message, + checked_details, + feature_set, + fee_structure, + rent_collector, + error_counters, + ) + }) }) .collect() } @@ -468,11 +414,20 @@ impl TransactionBatchProcessor { &self, callbacks: &CB, message: &SanitizedMessage, + checked_details: CheckedTransactionDetails, feature_set: &FeatureSet, - lamports_per_signature: u64, + fee_structure: &FeeStructure, rent_collector: &RentCollector, error_counters: &mut TransactionErrorMetrics, - ) -> transaction::Result<(FeeDetails, AccountSharedData, u64)> { + ) -> transaction::Result { + let compute_budget_limits = process_compute_budget_instructions( + message.program_instructions_iter(), + ) + .map_err(|err| { + error_counters.invalid_compute_budget += 1; + err + })?; + let fee_payer_address = message.fee_payer(); let Some(mut fee_payer_account) = callbacks.get_account_shared_data(fee_payer_address) else { @@ -480,6 +435,7 @@ impl TransactionBatchProcessor { return Err(TransactionError::AccountNotFound); }; + let fee_payer_loaded_rent_epoch = fee_payer_account.rent_epoch(); let fee_payer_rent_debit = 
collect_rent_from_account( feature_set, rent_collector, @@ -488,12 +444,16 @@ impl TransactionBatchProcessor { ) .rent_amount; - let fee_details = self.fee_structure.calculate_fee_details( + let CheckedTransactionDetails { + nonce, + lamports_per_signature, + } = checked_details; + + let fee_budget_limits = FeeBudgetLimits::from(compute_budget_limits); + let fee_details = fee_structure.calculate_fee_details( message, lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter()) - .unwrap_or_default() - .into(), + &fee_budget_limits, feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), feature_set.is_active(&remove_rounding_in_fee_calculation::id()), ); @@ -508,7 +468,23 @@ impl TransactionBatchProcessor { fee_details.total_fee(), )?; - Ok((fee_details, fee_payer_account, fee_payer_rent_debit)) + // Capture fee-subtracted fee payer account and original nonce account state + // to rollback to if transaction execution fails. 
+ let rollback_accounts = RollbackAccounts::new( + nonce, + *fee_payer_address, + fee_payer_account.clone(), + fee_payer_rent_debit, + fee_payer_loaded_rent_epoch, + ); + + Ok(ValidatedTransactionDetails { + fee_details, + fee_payer_account, + fee_payer_rent_debit, + rollback_accounts, + compute_budget_limits, + }) } /// Returns a map from executable program accounts (all accounts owned by any loader) @@ -548,16 +524,22 @@ impl TransactionBatchProcessor { &self, callback: &CB, program_accounts_map: &HashMap, + check_program_modification_slot: bool, limit_to_load_programs: bool, ) -> ProgramCacheForTxBatch { let mut missing_programs: Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> = program_accounts_map .iter() .map(|(pubkey, count)| { - ( - *pubkey, - (callback.get_program_match_criteria(pubkey), *count), - ) + let match_criteria = if check_program_modification_slot { + get_program_modification_slot(callback, pubkey) + .map_or(ProgramCacheMatchCriteria::Tombstone, |slot| { + ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) + }) + } else { + ProgramCacheMatchCriteria::NoCriteria + }; + (*pubkey, (match_criteria, *count)) }) .collect(); @@ -589,7 +571,6 @@ impl TransactionBatchProcessor { &program_cache.get_environments_for_epoch(self.epoch), &key, self.slot, - &self.epoch_schedule, false, ) .expect("called load_program_with_pubkey() with nonexistent account"); @@ -637,10 +618,10 @@ impl TransactionBatchProcessor { callbacks: &CB, upcoming_feature_set: &FeatureSet, compute_budget: &ComputeBudget, + slot_index: u64, + slots_in_epoch: u64, ) { // Recompile loaded programs one at a time before the next epoch hits - let (_epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(self.slot); - let slots_in_epoch = self.epoch_schedule.get_slots_in_epoch(self.epoch); let slots_in_recompilation_phase = (solana_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT as u64) .min(slots_in_epoch) @@ -661,15 +642,20 @@ impl TransactionBatchProcessor { 
&environments_for_epoch, &key, self.slot, - &self.epoch_schedule, false, ) { - recompiled - .tx_usage_counter - .fetch_add(program_to_recompile.tx_usage_counter.load(Relaxed), Relaxed); - recompiled - .ix_usage_counter - .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed); + recompiled.tx_usage_counter.fetch_add( + program_to_recompile + .tx_usage_counter + .load(Ordering::Relaxed), + Ordering::Relaxed, + ); + recompiled.ix_usage_counter.fetch_add( + program_to_recompile + .ix_usage_counter + .load(Ordering::Relaxed), + Ordering::Relaxed, + ); let mut program_cache = self.program_cache.write().unwrap(); program_cache.assign_program(key, recompiled); } @@ -719,10 +705,9 @@ impl TransactionBatchProcessor { &self, tx: &SanitizedTransaction, loaded_transaction: &mut LoadedTransaction, - compute_budget: ComputeBudget, execute_timings: &mut ExecuteTimings, error_metrics: &mut TransactionErrorMetrics, - program_cache_for_tx_batch: &ProgramCacheForTxBatch, + program_cache_for_tx_batch: &mut ProgramCacheForTxBatch, environment: &TransactionProcessingEnvironment, config: &TransactionProcessingConfig, ) -> TransactionExecutionResult { @@ -748,10 +733,14 @@ impl TransactionBatchProcessor { let lamports_before_tx = transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); + let compute_budget = config + .compute_budget + .unwrap_or_else(|| ComputeBudget::from(loaded_transaction.compute_budget_limits)); + let mut transaction_context = TransactionContext::new( transaction_accounts, rent.clone(), - compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); #[cfg(debug_assertions)] @@ -775,12 +764,6 @@ impl TransactionBatchProcessor { let lamports_per_signature = environment.lamports_per_signature; let mut executed_units = 0u64; - let mut programs_modified_by_tx = ProgramCacheForTxBatch::new( - self.slot, - program_cache_for_tx_batch.environments.clone(), - 
program_cache_for_tx_batch.upcoming_environments.clone(), - program_cache_for_tx_batch.latest_root_epoch, - ); let sysvar_cache = &self.sysvar_cache.read().unwrap(); let mut invoke_context = InvokeContext::new( @@ -796,7 +779,6 @@ impl TransactionBatchProcessor { ), log_collector.clone(), compute_budget, - &mut programs_modified_by_tx, ); let mut process_message_time = Measure::start("process_message_time"); @@ -898,12 +880,11 @@ impl TransactionBatchProcessor { log_messages, inner_instructions, fee_details: loaded_transaction.fee_details, - is_nonce: loaded_transaction.nonce.is_some(), return_data, executed_units, accounts_data_len_delta, }, - programs_modified_by_tx: programs_modified_by_tx.take_entries(), + programs_modified_by_tx: program_cache_for_tx_batch.drain_modified_entries(), } } @@ -1003,13 +984,18 @@ impl TransactionBatchProcessor { mod tests { use { super::*, - crate::account_loader::ValidatedTransactionDetails, + crate::{ + account_loader::ValidatedTransactionDetails, nonce_info::NoncePartial, + rollback_accounts::RollbackAccounts, + }, + solana_compute_budget::compute_budget_processor::ComputeBudgetLimits, solana_program_runtime::loaded_programs::{BlockRelation, ProgramCacheEntryType}, solana_sdk::{ account::{create_account_shared_data_for_test, WritableAccount}, bpf_loader, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, compute_budget::ComputeBudgetInstruction, + epoch_schedule::EpochSchedule, feature_set::FeatureSet, fee::FeeDetails, fee_calculator::FeeCalculator, @@ -1149,7 +1135,7 @@ mod tests { }; let sanitized_message = new_unchecked_sanitized_message(message); - let program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); + let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); let batch_processor = TransactionBatchProcessor::::default(); let sanitized_transaction = SanitizedTransaction::new_for_tests( @@ -1161,8 +1147,9 @@ mod tests { let mut loaded_transaction = LoadedTransaction { accounts: 
vec![(Pubkey::new_unique(), AccountSharedData::default())], program_indices: vec![vec![0]], - nonce: None, fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 32, @@ -1176,10 +1163,9 @@ mod tests { let result = batch_processor.execute_loaded_transaction( &sanitized_transaction, &mut loaded_transaction, - ComputeBudget::default(), &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, &processing_environment, &processing_config, ); @@ -1198,10 +1184,9 @@ mod tests { let result = batch_processor.execute_loaded_transaction( &sanitized_transaction, &mut loaded_transaction, - ComputeBudget::default(), &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, &processing_environment, &processing_config, ); @@ -1228,10 +1213,9 @@ mod tests { let result = batch_processor.execute_loaded_transaction( &sanitized_transaction, &mut loaded_transaction, - ComputeBudget::default(), &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, &processing_environment, &processing_config, ); @@ -1271,7 +1255,7 @@ mod tests { }; let sanitized_message = new_unchecked_sanitized_message(message); - let program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); + let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); let batch_processor = TransactionBatchProcessor::::default(); let sanitized_transaction = SanitizedTransaction::new_for_tests( @@ -1288,8 +1272,9 @@ mod tests { (key2, AccountSharedData::default()), ], program_indices: vec![vec![0]], - nonce: None, fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + 
compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1304,10 +1289,9 @@ mod tests { let _ = batch_processor.execute_loaded_transaction( &sanitized_transaction, &mut loaded_transaction, - ComputeBudget::default(), &mut ExecuteTimings::default(), &mut error_metrics, - &program_cache_for_tx_batch, + &mut program_cache_for_tx_batch, &TransactionProcessingEnvironment::default(), &processing_config, ); @@ -1327,7 +1311,7 @@ mod tests { let mut account_maps: HashMap = HashMap::new(); account_maps.insert(key, 4); - batch_processor.replenish_program_cache(&mock_bank, &account_maps, true); + batch_processor.replenish_program_cache(&mock_bank, &account_maps, false, true); } #[test] @@ -1353,6 +1337,7 @@ mod tests { let result = batch_processor.replenish_program_cache( &mock_bank, &account_maps, + false, limit_to_load_programs, ); assert!(!result.hit_max_limit); @@ -1856,12 +1841,7 @@ mod tests { #[test] fn fast_concur_test() { let mut mock_bank = MockBankCallback::default(); - let batch_processor = TransactionBatchProcessor::::new( - 5, - 5, - EpochSchedule::default(), - HashSet::new(), - ); + let batch_processor = TransactionBatchProcessor::::new(5, 5, HashSet::new()); batch_processor.program_cache.write().unwrap().fork_graph = Some(Arc::new(RwLock::new(TestForkGraph {}))); @@ -1889,7 +1869,8 @@ mod tests { let maps = account_maps.clone(); let programs = programs.clone(); thread::spawn(move || { - let result = processor.replenish_program_cache(&local_bank, &maps, true); + let result = + processor.replenish_program_cache(&local_bank, &maps, false, true); for key in &programs { let cache_entry = result.find(key); assert!(matches!( @@ -1973,8 +1954,14 @@ mod tests { Some(&Pubkey::new_unique()), &Hash::new_unique(), )); + let compute_budget_limits = + process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); let fee_payer_address = message.fee_payer(); - let rent_collector = 
RentCollector::default(); + let current_epoch = 42; + let rent_collector = RentCollector { + epoch: current_epoch, + ..RentCollector::default() + }; let min_balance = rent_collector.rent.minimum_balance(nonce::State::size()); let transaction_fee = lamports_per_signature; let priority_fee = 2_000_000u64; @@ -1985,7 +1972,14 @@ mod tests { so ensure that the starting balance is more than the min balance" ); - let fee_payer_account = AccountSharedData::new(starting_balance, 0, &Pubkey::default()); + let fee_payer_rent_epoch = current_epoch; + let fee_payer_rent_debit = 0; + let fee_payer_account = AccountSharedData::new_rent_epoch( + starting_balance, + 0, + &Pubkey::default(), + fee_payer_rent_epoch, + ); let mut mock_accounts = HashMap::new(); mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { @@ -1997,8 +1991,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &rent_collector, &mut error_counters, ); @@ -2012,11 +2010,19 @@ mod tests { assert_eq!( result, - Ok(( - FeeDetails::new_for_tests(transaction_fee, priority_fee, false), - post_validation_fee_payer_account, - 0 // rent due - )) + Ok(ValidatedTransactionDetails { + rollback_accounts: RollbackAccounts::new( + None, // nonce + *fee_payer_address, + post_validation_fee_payer_account.clone(), + fee_payer_rent_debit, + fee_payer_rent_epoch + ), + compute_budget_limits, + fee_details: FeeDetails::new_for_tests(transaction_fee, priority_fee, false), + fee_payer_rent_debit, + fee_payer_account: post_validation_fee_payer_account, + }) ); } @@ -2028,6 +2034,8 @@ mod tests { Some(&Pubkey::new_unique()), &Hash::new_unique(), )); + let compute_budget_limits = + process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); let fee_payer_address = 
message.fee_payer(); let mut rent_collector = RentCollector::default(); rent_collector.rent.lamports_per_byte_year = 1_000_000; @@ -2035,14 +2043,14 @@ mod tests { let transaction_fee = lamports_per_signature; let starting_balance = min_balance - 1; let fee_payer_account = AccountSharedData::new(starting_balance, 0, &Pubkey::default()); - let rent_due = rent_collector + let fee_payer_rent_debit = rent_collector .get_rent_due( fee_payer_account.lamports(), fee_payer_account.data().len(), fee_payer_account.rent_epoch(), ) .lamports(); - assert!(rent_due > 0); + assert!(fee_payer_rent_debit > 0); let mut mock_accounts = HashMap::new(); mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); @@ -2055,8 +2063,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &rent_collector, &mut error_counters, ); @@ -2064,17 +2076,25 @@ mod tests { let post_validation_fee_payer_account = { let mut account = fee_payer_account.clone(); account.set_rent_epoch(1); - account.set_lamports(starting_balance - transaction_fee - rent_due); + account.set_lamports(starting_balance - transaction_fee - fee_payer_rent_debit); account }; assert_eq!( result, - Ok(( - FeeDetails::new_for_tests(transaction_fee, 0, false), - post_validation_fee_payer_account, - rent_due, - )) + Ok(ValidatedTransactionDetails { + rollback_accounts: RollbackAccounts::new( + None, // nonce + *fee_payer_address, + post_validation_fee_payer_account.clone(), + fee_payer_rent_debit, + 0, // rent epoch + ), + compute_budget_limits, + fee_details: FeeDetails::new_for_tests(transaction_fee, 0, false), + fee_payer_rent_debit, + fee_payer_account: post_validation_fee_payer_account, + }) ); } @@ -2090,8 +2110,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + 
CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &RentCollector::default(), &mut error_counters, ); @@ -2118,8 +2142,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &RentCollector::default(), &mut error_counters, ); @@ -2150,8 +2178,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &rent_collector, &mut error_counters, ); @@ -2180,8 +2212,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, &FeatureSet::default(), - lamports_per_signature, + &FeeStructure::default(), &RentCollector::default(), &mut error_counters, ); @@ -2190,6 +2226,37 @@ mod tests { assert_eq!(result, Err(TransactionError::InvalidAccountForFee)); } + #[test] + fn test_validate_transaction_fee_payer_invalid_compute_budget() { + let lamports_per_signature = 5000; + let message = new_unchecked_sanitized_message(Message::new( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(2000u32), + ComputeBudgetInstruction::set_compute_unit_limit(42u32), + ], + Some(&Pubkey::new_unique()), + )); + + let mock_bank = MockBankCallback::default(); + let mut error_counters = TransactionErrorMetrics::default(); + let batch_processor = TransactionBatchProcessor::::default(); + let result = batch_processor.validate_transaction_fee_payer( + &mock_bank, + &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, + &FeatureSet::default(), + 
&FeeStructure::default(), + &RentCollector::default(), + &mut error_counters, + ); + + assert_eq!(error_counters.invalid_compute_budget, 1); + assert_eq!(result, Err(TransactionError::DuplicateInstruction(1u8))); + } + #[test] fn test_validate_transaction_fee_payer_is_nonce() { let feature_set = FeatureSet::default(); @@ -2204,6 +2271,8 @@ mod tests { Some(&Pubkey::new_unique()), &Hash::new_unique(), )); + let compute_budget_limits = + process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); let fee_payer_address = message.fee_payer(); let min_balance = Rent::default().minimum_balance(nonce::State::size()); let transaction_fee = lamports_per_signature; @@ -2228,11 +2297,19 @@ mod tests { let mut error_counters = TransactionErrorMetrics::default(); let batch_processor = TransactionBatchProcessor::::default(); + let nonce = Some(NoncePartial::new( + *fee_payer_address, + fee_payer_account.clone(), + )); let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: nonce.clone(), + lamports_per_signature, + }, &feature_set, - lamports_per_signature, + &FeeStructure::default(), &rent_collector, &mut error_counters, ); @@ -2246,11 +2323,19 @@ mod tests { assert_eq!( result, - Ok(( - FeeDetails::new_for_tests(transaction_fee, priority_fee, false), - post_validation_fee_payer_account, - 0 // rent due - )) + Ok(ValidatedTransactionDetails { + rollback_accounts: RollbackAccounts::new( + nonce, + *fee_payer_address, + post_validation_fee_payer_account.clone(), + 0, // fee_payer_rent_debit + 0, // fee_payer_rent_epoch + ), + compute_budget_limits, + fee_details: FeeDetails::new_for_tests(transaction_fee, priority_fee, false), + fee_payer_rent_debit: 0, // rent due + fee_payer_account: post_validation_fee_payer_account, + }) ); } @@ -2276,8 +2361,12 @@ mod tests { let result = batch_processor.validate_transaction_fee_payer( &mock_bank, &message, + CheckedTransactionDetails { + nonce: None, + 
lamports_per_signature, + }, &feature_set, - lamports_per_signature, + &FeeStructure::default(), &rent_collector, &mut error_counters, ); diff --git a/svm/src/transaction_results.rs b/svm/src/transaction_results.rs index 1fcc9f7ce4b7c5..9f829a675267ed 100644 --- a/svm/src/transaction_results.rs +++ b/svm/src/transaction_results.rs @@ -85,7 +85,6 @@ pub struct TransactionExecutionDetails { pub log_messages: Option>, pub inner_instructions: Option, pub fee_details: FeeDetails, - pub is_nonce: bool, pub return_data: Option, pub executed_units: u64, /// The change in accounts data len for this transaction. diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs index ec5bbe0c9fd529..865cf325cb2596 100644 --- a/svm/tests/conformance.rs +++ b/svm/tests/conformance.rs @@ -21,7 +21,6 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable, - epoch_schedule::EpochSchedule, feature_set::{FeatureSet, FEATURE_NAMES}, hash::Hash, instruction::AccountMeta, @@ -247,12 +246,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool create_program_runtime_environment_v1(&feature_set, &compute_budget, false, false).unwrap(); mock_bank.override_feature_set(feature_set); - let batch_processor = TransactionBatchProcessor::::new( - 42, - 2, - EpochSchedule::default(), - HashSet::new(), - ); + let batch_processor = TransactionBatchProcessor::::new(42, 2, HashSet::new()); { let mut program_cache = batch_processor.program_cache.write().unwrap(); @@ -271,9 +265,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool #[allow(deprecated)] let (blockhash, lamports_per_signature) = batch_processor - .sysvar_cache - .read() - .unwrap() + .sysvar_cache() .get_recent_blockhashes() .ok() .and_then(|x| (*x).last().cloned()) @@ -287,6 +279,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool }; let processor_config = TransactionProcessingConfig { 
account_overrides: None, + check_program_modification_slot: false, compute_budget: None, log_messages_bytes_limit: None, limit_to_load_programs: true, @@ -406,7 +399,7 @@ fn execute_fixture_as_instr( filename: OsString, cu_avail: u64, ) { - let rent = if let Ok(rent) = batch_processor.sysvar_cache.read().unwrap().get_rent() { + let rent = if let Ok(rent) = batch_processor.sysvar_cache().get_rent() { (*rent).clone() } else { Rent::default() @@ -421,7 +414,7 @@ fn execute_fixture_as_instr( let mut transaction_context = TransactionContext::new( transaction_accounts, rent, - compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); @@ -445,7 +438,6 @@ fn execute_fixture_as_instr( &batch_processor.get_environments_for_epoch(2).unwrap(), &program_id, 42, - &batch_processor.epoch_schedule, false, ) .unwrap(); @@ -460,10 +452,9 @@ fn execute_fixture_as_instr( )), ); - let mut programs_modified_by_tx = ProgramCacheForTxBatch::default(); let log_collector = LogCollector::new_ref(); - let sysvar_cache = &batch_processor.sysvar_cache.read().unwrap(); + let sysvar_cache = &batch_processor.sysvar_cache(); let env_config = EnvironmentConfig::new( Hash::default(), None, @@ -475,11 +466,10 @@ fn execute_fixture_as_instr( let mut invoke_context = InvokeContext::new( &mut transaction_context, - &loaded_programs, + &mut loaded_programs, env_config, Some(log_collector.clone()), compute_budget, - &mut programs_modified_by_tx, ); let mut instruction_accounts: Vec = diff --git a/svm/tests/example-programs/clock-sysvar/Cargo.toml b/svm/tests/example-programs/clock-sysvar/Cargo.toml index 082c29bbfe34fd..485b780f533484 100644 --- a/svm/tests/example-programs/clock-sysvar/Cargo.toml +++ b/svm/tests/example-programs/clock-sysvar/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "clock-sysvar-program" -version = "2.0.0" +version = "2.0.2" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", 
version = "=2.0.0" } +solana-program = { path = "../../../../sdk/program", version = "=2.0.2" } [lib] crate-type = ["cdylib", "rlib"] diff --git a/svm/tests/example-programs/hello-solana/Cargo.toml b/svm/tests/example-programs/hello-solana/Cargo.toml index 09995d8c6d8d2c..afc99d9b1a3fbb 100644 --- a/svm/tests/example-programs/hello-solana/Cargo.toml +++ b/svm/tests/example-programs/hello-solana/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "hello-solana-program" -version = "2.0.0" +version = "2.0.2" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=2.0.0" } +solana-program = { path = "../../../../sdk/program", version = "=2.0.2" } [lib] crate-type = ["cdylib", "rlib"] diff --git a/svm/tests/example-programs/simple-transfer/Cargo.toml b/svm/tests/example-programs/simple-transfer/Cargo.toml index 9ccbf60aa8b8f7..d25bbbc4ffd5f8 100644 --- a/svm/tests/example-programs/simple-transfer/Cargo.toml +++ b/svm/tests/example-programs/simple-transfer/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "simple-transfer-program" -version = "2.0.0" +version = "2.0.2" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=2.0.0" } +solana-program = { path = "../../../../sdk/program", version = "=2.0.2" } [lib] crate-type = ["cdylib", "rlib"] diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 9816d41cbc4dae..c47ce03af9b5a1 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -21,7 +21,6 @@ use { account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{Clock, Epoch, Slot, UnixTimestamp}, - epoch_schedule::EpochSchedule, hash::Hash, instruction::AccountMeta, pubkey::Pubkey, @@ -445,7 +444,6 @@ fn svm_integration() { let batch_processor = TransactionBatchProcessor::::new( EXECUTION_SLOT, EXECUTION_EPOCH, - EpochSchedule::default(), HashSet::new(), ); diff --git 
a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 364e527542a8e8..da0d669b76393a 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -44,8 +44,7 @@ use { epoch_schedule::EpochSchedule, exit::Exit, feature_set::FEATURE_NAMES, - fee_calculator::{FeeCalculator, FeeRateGovernor}, - hash::Hash, + fee_calculator::FeeRateGovernor, instruction::{AccountMeta, Instruction}, message::Message, native_token::sol_to_lamports, @@ -78,14 +77,6 @@ pub struct AccountInfo<'a> { pub filename: &'a str, } -#[deprecated(since = "1.16.0", note = "Please use `UpgradeableProgramInfo` instead")] -#[derive(Clone)] -pub struct ProgramInfo { - pub program_id: Pubkey, - pub loader: Pubkey, - pub program_path: PathBuf, -} - #[derive(Clone)] pub struct UpgradeableProgramInfo { pub program_id: Pubkey, @@ -127,8 +118,6 @@ pub struct TestValidatorGenesis { rpc_ports: Option<(u16, u16)>, // (JsonRpc, JsonRpcPubSub), None == random ports warp_slot: Option, accounts: HashMap, - #[allow(deprecated)] - programs: Vec, upgradeable_programs: Vec, ticks_per_slot: Option, epoch_schedule: Option, @@ -161,8 +150,6 @@ impl Default for TestValidatorGenesis { rpc_ports: Option::<(u16, u16)>::default(), warp_slot: Option::::default(), accounts: HashMap::::default(), - #[allow(deprecated)] - programs: Vec::::default(), upgradeable_programs: Vec::::default(), ticks_per_slot: Option::::default(), epoch_schedule: Option::::default(), @@ -320,11 +307,6 @@ impl TestValidatorGenesis { self } - #[deprecated(note = "Please use `compute_unit_limit` instead")] - pub fn max_compute_units(&mut self, max_compute_units: u64) -> &mut Self { - self.compute_unit_limit(max_compute_units) - } - /// Add an account to the test environment pub fn add_account(&mut self, address: Pubkey, account: AccountSharedData) -> &mut Self { self.accounts.insert(address, account); @@ -582,19 +564,6 @@ impl TestValidatorGenesis { self } - /// Add a list of programs to the test environment. 
- #[deprecated( - since = "1.16.0", - note = "Please use `add_upgradeable_programs_with_path()` instead" - )] - #[allow(deprecated)] - pub fn add_programs_with_path(&mut self, programs: &[ProgramInfo]) -> &mut Self { - for program in programs { - self.programs.push(program.clone()); - } - self - } - /// Add a list of upgradeable programs to the test environment. pub fn add_upgradeable_programs_with_path( &mut self, @@ -796,20 +765,6 @@ impl TestValidator { for (address, account) in solana_program_test::programs::spl_programs(&config.rent) { accounts.entry(address).or_insert(account); } - #[allow(deprecated)] - for program in &config.programs { - let data = solana_program_test::read_file(&program.program_path); - accounts.insert( - program.program_id, - AccountSharedData::from(Account { - lamports: Rent::default().minimum_balance(data.len()).max(1), - data, - owner: program.loader, - executable: true, - rent_epoch: 0, - }), - ); - } for upgradeable_program in &config.upgradeable_programs { let data = solana_program_test::read_file(&upgradeable_program.program_path); let (programdata_address, _) = Pubkey::find_program_address( @@ -1163,20 +1118,6 @@ impl TestValidator { self.vote_account_address } - /// Return an RpcClient for the validator. As a convenience, also return a recent blockhash and - /// associated fee calculator - #[deprecated(since = "1.9.0", note = "Please use `get_rpc_client` instead")] - pub fn rpc_client(&self) -> (RpcClient, Hash, FeeCalculator) { - let rpc_client = - RpcClient::new_with_commitment(self.rpc_url.clone(), CommitmentConfig::processed()); - #[allow(deprecated)] - let (recent_blockhash, fee_calculator) = rpc_client - .get_recent_blockhash() - .expect("get_recent_blockhash"); - - (rpc_client, recent_blockhash, fee_calculator) - } - /// Return an RpcClient for the validator. 
pub fn get_rpc_client(&self) -> RpcClient { RpcClient::new_with_commitment(self.rpc_url.clone(), CommitmentConfig::processed()) diff --git a/thin-client/README.md b/thin-client/README.md index 147b287b2d62b2..43ca0825a4ab5d 100644 --- a/thin-client/README.md +++ b/thin-client/README.md @@ -1,4 +1,4 @@ # thin-client -This crate for `thin-client` is deprecated as of v1.19.0. It will receive no bugfixes or updates. +This crate for `thin-client` is deprecated as of v2.0.0. It will receive no bugfixes or updates. Please use `tpu-client` or `rpc-client`. \ No newline at end of file diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs index 9e226c254c7a20..f53ae499a8b68f 100644 --- a/thin-client/src/thin_client.rs +++ b/thin-client/src/thin_client.rs @@ -13,14 +13,13 @@ use { }, }, solana_rpc_client::rpc_client::RpcClient, - solana_rpc_client_api::{config::RpcProgramAccountsConfig, response::Response}, + solana_rpc_client_api::config::RpcProgramAccountsConfig, solana_sdk::{ account::Account, client::{AsyncClient, Client, SyncClient}, - clock::{Slot, MAX_PROCESSING_AGE}, + clock::MAX_PROCESSING_AGE, commitment_config::CommitmentConfig, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, instruction::Instruction, message::Message, @@ -111,7 +110,7 @@ impl ClientOptimizer { } /// An object for querying and sending transactions to the network. 
-#[deprecated(since = "1.19.0", note = "Use [RpcClient] or [TpuClient] instead.")] +#[deprecated(since = "2.0.0", note = "Use [RpcClient] or [TpuClient] instead.")] pub struct ThinClient< P, // ConnectionPool M, // ConnectionManager @@ -419,52 +418,6 @@ where .map_err(|e| e.into()) } - fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> { - #[allow(deprecated)] - let (blockhash, fee_calculator, _last_valid_slot) = - self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?; - Ok((blockhash, fee_calculator)) - } - - fn get_recent_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> TransportResult<(Hash, FeeCalculator, Slot)> { - let index = self.optimizer.experiment(); - let now = Instant::now(); - #[allow(deprecated)] - let recent_blockhash = - self.rpc_clients[index].get_recent_blockhash_with_commitment(commitment_config); - match recent_blockhash { - Ok(Response { value, .. }) => { - self.optimizer.report(index, duration_as_ms(&now.elapsed())); - Ok((value.0, value.1, value.2)) - } - Err(e) => { - self.optimizer.report(index, u64::MAX); - Err(e.into()) - } - } - } - - fn get_fee_calculator_for_blockhash( - &self, - blockhash: &Hash, - ) -> TransportResult> { - #[allow(deprecated)] - self.rpc_client() - .get_fee_calculator_for_blockhash(blockhash) - .map_err(|e| e.into()) - } - - fn get_fee_rate_governor(&self) -> TransportResult { - #[allow(deprecated)] - self.rpc_client() - .get_fee_rate_governor() - .map_err(|e| e.into()) - .map(|r| r.value) - } - fn get_signature_status( &self, signature: &Signature, @@ -575,13 +528,6 @@ where .map_err(|e| e.into()) } - fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> { - #[allow(deprecated)] - self.rpc_client() - .get_new_blockhash(blockhash) - .map_err(|e| e.into()) - } - fn get_latest_blockhash(&self) -> TransportResult { let (blockhash, _) = self.get_latest_blockhash_with_commitment(CommitmentConfig::default())?; diff 
--git a/tps-client/Cargo.toml b/tps-client/Cargo.toml index 1bd4ab96ee05b3..c9bcf76325b5f7 100644 --- a/tps-client/Cargo.toml +++ b/tps-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-tps-client" -publish = false +description = "Blockchain, Rebuilt for Scale" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index d006972dca5649..72deea1422f161 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -251,6 +251,7 @@ fn run_transactions_dos( use_rpc: false, skip_fee_check: true, // skip_fee_check auto_extend: true, + skip_feature_verification: true, }); process_command(&config).expect("deploy didn't pass"); diff --git a/transaction-metrics-tracker/Cargo.toml b/transaction-metrics-tracker/Cargo.toml index 9bd82702a3ebb4..c4882603174422 100644 --- a/transaction-metrics-tracker/Cargo.toml +++ b/transaction-metrics-tracker/Cargo.toml @@ -8,7 +8,6 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -publish = false [dependencies] Inflector = { workspace = true } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 7779cfc5ae9353..489a213c525a94 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -628,6 +628,11 @@ pub struct Reward { pub type Rewards = Vec; +pub struct RewardsAndNumPartitions { + pub rewards: Rewards, + pub num_partitions: Option, +} + #[derive(Debug, Error)] pub enum ConvertBlockError { #[error("transactions missing after converted, before: {0}, after: {1}")] @@ -641,6 +646,7 @@ pub struct ConfirmedBlock { pub parent_slot: Slot, pub transactions: Vec, pub rewards: Rewards, + pub num_partitions: Option, pub block_time: Option, pub block_height: Option, } @@ -654,6 +660,7 @@ pub struct VersionedConfirmedBlock { pub parent_slot: Slot, pub transactions: Vec, pub rewards: 
Rewards, + pub num_partitions: Option, pub block_time: Option, pub block_height: Option, } @@ -670,6 +677,7 @@ impl From for ConfirmedBlock { .map(TransactionWithStatusMeta::Complete) .collect(), rewards: block.rewards, + num_partitions: block.num_partitions, block_time: block.block_time, block_height: block.block_height, } @@ -704,6 +712,7 @@ impl TryFrom for VersionedConfirmedBlock { parent_slot: block.parent_slot, transactions: txs, rewards: block.rewards, + num_partitions: block.num_partitions, block_time: block.block_time, block_height: block.block_height, }) @@ -768,6 +777,7 @@ impl ConfirmedBlock { } else { None }, + num_reward_partitions: self.num_partitions, block_time: self.block_time, block_height: self.block_height, }) @@ -782,6 +792,7 @@ pub struct EncodedConfirmedBlock { pub parent_slot: Slot, pub transactions: Vec, pub rewards: Rewards, + pub num_partitions: Option, pub block_time: Option, pub block_height: Option, } @@ -794,6 +805,7 @@ impl From for EncodedConfirmedBlock { parent_slot: block.parent_slot, transactions: block.transactions.unwrap_or_default(), rewards: block.rewards.unwrap_or_default(), + num_partitions: block.num_reward_partitions, block_time: block.block_time, block_height: block.block_height, } @@ -812,6 +824,8 @@ pub struct UiConfirmedBlock { pub signatures: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub rewards: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub num_reward_partitions: Option, pub block_time: Option, pub block_height: Option, } diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index d445ba51101607..bc4e57fc467af7 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -14,17 +14,11 @@ use { parse_account_data::SplTokenAdditionalData, parse_token::{token_amount_to_ui_amount_v2, UiAccountState}, }, - solana_sdk::{ - instruction::{AccountMeta, CompiledInstruction, Instruction}, - 
message::AccountKeys, - }, + solana_sdk::{instruction::CompiledInstruction, message::AccountKeys}, spl_token_2022::{ extension::ExtensionType, instruction::{AuthorityType, TokenInstruction}, - solana_program::{ - instruction::Instruction as SplTokenInstruction, program_option::COption, - pubkey::Pubkey, - }, + solana_program::{program_option::COption, pubkey::Pubkey}, }, spl_token_group_interface::instruction::TokenGroupInstruction, spl_token_metadata_interface::instruction::TokenMetadataInstruction, @@ -498,7 +492,7 @@ pub fn parse_token( instruction_type: "amountToUiAmount".to_string(), info: json!({ "mint": account_keys[instruction.accounts[0] as usize].to_string(), - "amount": amount, + "amount": amount.to_string(), }), }) } @@ -851,23 +845,6 @@ fn check_num_token_accounts(accounts: &[u8], num: usize) -> Result<(), ParseInst check_num_accounts(accounts, num, ParsableProgram::SplToken) } -#[deprecated(since = "1.16.0", note = "Instruction conversions no longer needed")] -pub fn spl_token_instruction(instruction: SplTokenInstruction) -> Instruction { - Instruction { - program_id: instruction.program_id, - accounts: instruction - .accounts - .iter() - .map(|meta| AccountMeta { - pubkey: meta.pubkey, - is_signer: meta.is_signer, - is_writable: meta.is_writable, - }) - .collect(), - data: instruction.data, - } -} - fn map_coption_pubkey(pubkey: COption) -> Option { match pubkey { COption::Some(pubkey) => Some(pubkey.to_string()), @@ -1737,7 +1714,7 @@ mod test { instruction_type: "amountToUiAmount".to_string(), info: json!({ "mint": mint_pubkey.to_string(), - "amount": 4242, + "amount": "4242", }) } ); diff --git a/type-overrides/Cargo.toml b/type-overrides/Cargo.toml new file mode 100644 index 00000000000000..07b69542e53e6f --- /dev/null +++ b/type-overrides/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-type-overrides" +description = "Type overrides for specialized testing" +version = { workspace = true } +authors = { workspace = true } +repository = { 
workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +futures = { workspace = true, optional = true } +lazy_static = { workspace = true } +rand = { workspace = true } +shuttle = { workspace = true, optional = true } + +[features] +shuttle-test = ["dep:shuttle"] +executor = ["dep:futures"] diff --git a/type-overrides/src/lib.rs b/type-overrides/src/lib.rs new file mode 100644 index 00000000000000..4d9a2ac1a63922 --- /dev/null +++ b/type-overrides/src/lib.rs @@ -0,0 +1,50 @@ +/// +/// This lib contains both standard imports and imports shuttle. +/// Shuttle is a Rust crate that facilitates multithreaded testing. It has its own scheduler +/// and can efficiently detect bugs in concurrent code. The downside is that we need to replace +/// all imports by those from Shuttle. +/// +/// Instead of importing from std, rand, and so on, import the following from solana-type-override, +/// and include the 'shuttle-test' feature in your crate to use shuttle. 
+ +#[cfg(feature = "executor")] +pub mod executor { + #[cfg(not(feature = "shuttle-test"))] + pub use futures::executor::*; + #[cfg(feature = "shuttle-test")] + pub use shuttle::future::*; +} + +pub mod hint { + #[cfg(feature = "shuttle-test")] + pub use shuttle::hint::*; + #[cfg(not(feature = "shuttle-test"))] + pub use std::hint::*; +} + +pub mod lazy_static { + #[cfg(not(feature = "shuttle-test"))] + pub use lazy_static::*; + #[cfg(feature = "shuttle-test")] + pub use shuttle::lazy_static::*; +} + +pub mod rand { + pub use rand::*; + #[cfg(feature = "shuttle-test")] + pub use shuttle::rand::{thread_rng, Rng, RngCore}; +} + +pub mod sync { + #[cfg(feature = "shuttle-test")] + pub use shuttle::sync::*; + #[cfg(not(feature = "shuttle-test"))] + pub use std::sync::*; +} + +pub mod thread { + #[cfg(feature = "shuttle-test")] + pub use shuttle::thread::*; + #[cfg(not(feature = "shuttle-test"))] + pub use std::thread::*; +} diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index e1309a6d963694..c57217285f4762 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -1,8 +1,3 @@ -//! NOTE: While the unified scheduler is fully functional and moderately performant even with -//! mainnet-beta, it has known resource-exhaustion related security issues for replaying -//! specially-crafted blocks produced by malicious leaders. Thus, this experimental and -//! nondefault functionality is exempt from the bug bounty program for now. -//! //! Transaction scheduling code. //! //! 
This crate implements 3 solana-runtime traits (`InstalledScheduler`, `UninstalledScheduler` and @@ -340,6 +335,8 @@ where context: SchedulingContext, result_with_timings: ResultWithTimings, ) -> S { + assert_matches!(result_with_timings, (Ok(_), _)); + // pop is intentional for filo, expecting relatively warmed-up scheduler due to having been // returned recently if let Some((inner, _pooled_at)) = self.scheduler_inners.lock().expect("not poisoned").pop() @@ -764,23 +761,6 @@ struct ThreadManager, TH: TaskHandler> { handler_threads: Vec>, } -impl PooledScheduler { - fn do_spawn( - pool: Arc>, - initial_context: SchedulingContext, - result_with_timings: ResultWithTimings, - ) -> Self { - Self::from_inner( - PooledSchedulerInner:: { - thread_manager: ThreadManager::new(pool), - usage_queue_loader: UsageQueueLoader::default(), - }, - initial_context, - result_with_timings, - ) - } -} - struct HandlerPanicked; type HandlerResult = std::result::Result, HandlerPanicked>; @@ -850,7 +830,15 @@ impl, TH: TaskHandler> ThreadManager { ); } - fn start_threads(&mut self, context: &SchedulingContext) { + // This method must take same set of session-related arguments as start_session() to avoid + // unneeded channel operations to minimize overhead. Starting threads incurs a very high cost + // already... Also, pre-creating threads isn't desirable as well to avoid `Option`-ed types + // for type safety. + fn start_threads( + &mut self, + context: SchedulingContext, + mut result_with_timings: ResultWithTimings, + ) { // Firstly, setup bi-directional messaging between the scheduler and handlers to pass // around tasks, by creating 2 channels (one for to-be-handled tasks from the scheduler to // the handlers and the other for finished tasks from the handlers to the scheduler). @@ -928,7 +916,7 @@ impl, TH: TaskHandler> ThreadManager { // prioritization further. 
Consequently, this also contributes to alleviate the known // heuristic's caveat for the first task of linearized runs, which is described above. let (mut runnable_task_sender, runnable_task_receiver) = - chained_channel::unbounded::(context.clone()); + chained_channel::unbounded::(context); // Create two handler-to-scheduler channels to prioritize the finishing of blocked tasks, // because it is more likely that a blocked task will have more blocked tasks behind it, // which should be scheduled while minimizing the delay to clear buffered linearized runs @@ -947,7 +935,7 @@ impl, TH: TaskHandler> ThreadManager { // 4. the handler thread processes the dispatched task. // 5. the handler thread reply back to the scheduler thread as an executed task. // 6. the scheduler thread post-processes the executed task. - let scheduler_main_loop = || { + let scheduler_main_loop = { let handler_count = self.pool.handler_count; let session_result_sender = self.session_result_sender.clone(); // Taking new_task_receiver here is important to ensure there's a single receiver. In @@ -1011,29 +999,14 @@ impl, TH: TaskHandler> ThreadManager { let mut state_machine = unsafe { SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() }; - let mut result_with_timings = initialized_result_with_timings(); + // The following loop maintains and updates ResultWithTimings as its + // externally-provided mutable state for each session in this way: + // + // 1. Initial result_with_timing is propagated implicitly by the moved variable. + // 2. Subsequent result_with_timings are propagated explicitly from + // the new_task_receiver.recv() invocation located at the end of loop. 
'nonaborted_main_loop: loop { - match new_task_receiver.recv() { - Ok(NewTaskPayload::OpenSubchannel(( - new_context, - new_result_with_timings, - ))) => { - // signal about new SchedulingContext to handler threads - runnable_task_sender - .send_chained_channel(new_context, handler_count) - .unwrap(); - result_with_timings = new_result_with_timings; - } - Ok(_) => { - unreachable!(); - } - Err(_) => { - // This unusual condition must be triggered by ThreadManager::drop(); - break 'nonaborted_main_loop; - } - } - let mut is_finished = false; while !is_finished { // ALL recv selectors are eager-evaluated ALWAYS by current crossbeam impl, @@ -1084,9 +1057,8 @@ impl, TH: TaskHandler> ThreadManager { Ok(NewTaskPayload::CloseSubchannel) => { session_ending = true; } - Ok(NewTaskPayload::OpenSubchannel(_context_and_result_with_timings)) => { - unreachable!(); - } + Ok(NewTaskPayload::OpenSubchannel(_context_and_result_with_timings)) => + unreachable!(), Err(RecvError) => { // Mostly likely is that this scheduler is dropped for pruned blocks of // abandoned forks... @@ -1109,15 +1081,36 @@ impl, TH: TaskHandler> ThreadManager { is_finished = session_ending && state_machine.has_no_active_task(); } - if session_ending { - state_machine.reinitialize(); - session_result_sender - .send(std::mem::replace( - &mut result_with_timings, - initialized_result_with_timings(), - )) - .expect("always outlived receiver"); - session_ending = false; + // Finalize the current session after asserting it's explicitly requested so. + assert!(session_ending); + // Send result first because this is blocking the replay code-path. + session_result_sender + .send(result_with_timings) + .expect("always outlived receiver"); + state_machine.reinitialize(); + session_ending = false; + + // Prepare for the new session. 
+ match new_task_receiver.recv() { + Ok(NewTaskPayload::OpenSubchannel(( + new_context, + new_result_with_timings, + ))) => { + // We just received subsequent (= not initial) session and about to + // enter into the preceding `while(!is_finished) {...}` loop again. + // Before that, propagate new SchedulingContext to handler threads + runnable_task_sender + .send_chained_channel(new_context, handler_count) + .unwrap(); + result_with_timings = new_result_with_timings; + } + Err(_) => { + // This unusual condition must be triggered by ThreadManager::drop(). + // Initialize result_with_timings with a harmless value... + result_with_timings = initialized_result_with_timings(); + break 'nonaborted_main_loop; + } + Ok(_) => unreachable!(), } } @@ -1150,6 +1143,14 @@ impl, TH: TaskHandler> ThreadManager { let finished_blocked_task_sender = finished_blocked_task_sender.clone(); let finished_idle_task_sender = finished_idle_task_sender.clone(); + // The following loop maintains and updates SchedulingContext as its + // externally-provided state for each session in this way: + // + // 1. Initial context is propagated implicitly by the moved runnable_task_receiver, + // which is clone()-d just above for this particular thread. + // 2. Subsequent contexts are propagated explicitly inside `.after_select()` as part of + // `select_biased!`, which are sent from `.send_chained_channel()` in the scheduler + // thread for all-but-initial sessions. move || loop { let (task, sender) = select_biased! 
{ recv(runnable_task_receiver.for_select()) -> message => { @@ -1204,7 +1205,7 @@ impl, TH: TaskHandler> ThreadManager { self.scheduler_thread = Some( thread::Builder::new() .name("solScheduler".to_owned()) - .spawn_tracked(scheduler_main_loop()) + .spawn_tracked(scheduler_main_loop) .unwrap(), ); @@ -1325,13 +1326,14 @@ impl, TH: TaskHandler> ThreadManager { fn start_session( &mut self, - context: &SchedulingContext, + context: SchedulingContext, result_with_timings: ResultWithTimings, ) { + assert!(!self.are_threads_joined()); assert_matches!(self.session_result_with_timings, None); self.new_task_sender .send(NewTaskPayload::OpenSubchannel(( - context.clone(), + context, result_with_timings, ))) .expect("no new session after aborted"); @@ -1351,7 +1353,7 @@ pub trait SpawnableScheduler: InstalledScheduler { fn spawn( pool: Arc>, - initial_context: SchedulingContext, + context: SchedulingContext, result_with_timings: ResultWithTimings, ) -> Self where @@ -1377,21 +1379,23 @@ impl SpawnableScheduler for PooledScheduler { ) -> Self { inner .thread_manager - .start_session(&context, result_with_timings); + .start_session(context.clone(), result_with_timings); Self { inner, context } } fn spawn( pool: Arc>, - initial_context: SchedulingContext, + context: SchedulingContext, result_with_timings: ResultWithTimings, ) -> Self { - let mut scheduler = Self::do_spawn(pool, initial_context, result_with_timings); - scheduler - .inner + let mut inner = Self::Inner { + thread_manager: ThreadManager::new(pool), + usage_queue_loader: UsageQueueLoader::default(), + }; + inner .thread_manager - .start_threads(&scheduler.context); - scheduler + .start_threads(context.clone(), result_with_timings); + Self { inner, context } } } @@ -1711,6 +1715,10 @@ mod tests { &CheckPoint::TimeoutListenerTriggered(0), &CheckPoint::TimeoutListenerTriggered(1), &TestCheckPoint::AfterTimeoutListenerTriggered, + &TestCheckPoint::BeforeTimeoutListenerTriggered, + 
&CheckPoint::TimeoutListenerTriggered(0), + &CheckPoint::TimeoutListenerTriggered(1), + &TestCheckPoint::AfterTimeoutListenerTriggered, ]); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); @@ -1778,12 +1786,62 @@ mod tests { bank.schedule_transaction_executions([(tx_after_stale, &1)].into_iter()) .unwrap(); + // Observe second occurrence of TimeoutListenerTriggered(1), which indicates a new timeout + // lister is registered correctly again for reactivated scheduler. + sleepless_testing::at(TestCheckPoint::BeforeTimeoutListenerTriggered); + sleepless_testing::at(TestCheckPoint::AfterTimeoutListenerTriggered); + let (result, timings) = bank.wait_for_completed_scheduler().unwrap(); assert_matches!(result, Ok(())); // ResultWithTimings should be carried over across active=>stale=>active transitions. assert_eq!(timings.metrics[ExecuteTimingType::CheckUs], 246); } + #[test] + fn test_scheduler_pause_after_stale() { + solana_logger::setup(); + + let _progress = sleepless_testing::setup(&[ + &TestCheckPoint::BeforeTimeoutListenerTriggered, + &CheckPoint::TimeoutListenerTriggered(0), + &CheckPoint::TimeoutListenerTriggered(1), + &TestCheckPoint::AfterTimeoutListenerTriggered, + ]); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool_raw = DefaultSchedulerPool::do_new( + None, + None, + None, + None, + ignored_prioritization_fee_cache, + SHORTENED_POOL_CLEANER_INTERVAL, + DEFAULT_MAX_POOLING_DURATION, + DEFAULT_MAX_USAGE_QUEUE_COUNT, + SHORTENED_TIMEOUT_DURATION, + ); + let pool = pool_raw.clone(); + + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank = setup_dummy_fork_graph(bank); + + let context = SchedulingContext::new(bank.clone()); + + let scheduler = pool.take_scheduler(context); + let bank = BankWithScheduler::new(bank, Some(scheduler)); + pool.register_timeout_listener(bank.create_timeout_listener()); + + sleepless_testing::at(TestCheckPoint::BeforeTimeoutListenerTriggered); + sleepless_testing::at(TestCheckPoint::AfterTimeoutListenerTriggered); + + // This calls register_recent_blockhash() internally, which in turn calls + // BankWithScheduler::wait_for_paused_scheduler(). + bank.fill_bank_with_ticks_for_tests(); + let (result, _timings) = bank.wait_for_completed_scheduler().unwrap(); + assert_matches!(result, Ok(())); + } + #[test] fn test_scheduler_remain_stale_after_error() { solana_logger::setup(); @@ -2719,13 +2777,13 @@ mod tests { fn spawn( pool: Arc>, - initial_context: SchedulingContext, + context: SchedulingContext, _result_with_timings: ResultWithTimings, ) -> Self { AsyncScheduler::( Mutex::new(initialized_result_with_timings()), Mutex::new(vec![]), - initial_context, + context, pool, ) } diff --git a/unified-scheduler-pool/src/sleepless_testing.rs b/unified-scheduler-pool/src/sleepless_testing.rs index 9c2213f657e86a..901a2f7c4fded7 100644 --- a/unified-scheduler-pool/src/sleepless_testing.rs +++ b/unified-scheduler-pool/src/sleepless_testing.rs @@ -26,20 +26,21 @@ pub(crate) trait BuilderTracked: Sized { } #[cfg(not(test))] -pub(crate) use sleepless_testing_dummy::*; +pub(crate) use dummy::*; #[cfg(test)] -pub(crate) use sleepless_testing_real::*; +pub(crate) use real::*; #[cfg(test)] -mod sleepless_testing_real { +mod real { use { lazy_static::lazy_static, + log::trace, std::{ cmp::Ordering::{Equal, Greater, Less}, - collections::{HashMap, HashSet}, + collections::HashMap, fmt::Debug, sync::{Arc, Condvar, Mutex}, - thread::{current, JoinHandle, ThreadId}, + thread::{current, panicking, 
JoinHandle, ThreadId}, }, }; @@ -47,7 +48,7 @@ mod sleepless_testing_real { struct Progress { _name: String, check_points: Vec, - current_check_point: Mutex, + current_index: Mutex, condvar: Condvar, } @@ -61,61 +62,88 @@ mod sleepless_testing_real { .into_iter() .chain(check_points) .collect::>(); - let check_points_set = check_points.iter().collect::>(); - assert_eq!(check_points.len(), check_points_set.len()); - Self { _name: name, check_points, - current_check_point: Mutex::new(initial_check_point), + current_index: Mutex::new(0), condvar: Condvar::new(), } } fn change_current_check_point(&self, anchored_check_point: String) { - let Some(anchored_index) = self - .check_points - .iter() - .position(|check_point| check_point == &anchored_check_point) + let mut current_index = self.current_index.lock().unwrap(); + + let Some(anchored_index) = self.anchored_index(*current_index, &anchored_check_point) else { - // Ignore unrecognizable checkpoints... + trace!("Ignore {} at {:?}", anchored_check_point, current()); return; }; - let mut current_check_point = self.current_check_point.lock().unwrap(); - - let should_change = - match anchored_index.cmp(&self.expected_next_index(¤t_check_point)) { - Equal => true, - Greater => { - // anchor is one of future check points; block the current thread until - // that happens - current_check_point = self - .condvar - .wait_while(current_check_point, |current_check_point| { - anchored_index != self.expected_next_index(current_check_point) - }) - .unwrap(); - true - } - // anchor is already observed. 
- Less => false, - }; + let next_index = self.expected_next_index(*current_index); + let should_change = match anchored_index.cmp(&next_index) { + Equal => true, + Greater => { + trace!("Blocked on {} at {:?}", anchored_check_point, current()); + // anchor is one of future check points; block the current thread until + // that happens + current_index = self + .condvar + .wait_while(current_index, |&mut current_index| { + let Some(anchored_index) = + self.anchored_index(current_index, &anchored_check_point) + else { + // don't wait. seems the progress is made by other threads + // anchored to the same checkpoint. + return false; + }; + let next_index = self.expected_next_index(current_index); + + // determine we should wait further or not + match anchored_index.cmp(&next_index) { + Equal => false, + Greater => { + trace!( + "Re-blocked on {} ({} != {}) at {:?}", + anchored_check_point, + anchored_index, + next_index, + current() + ); + true + } + Less => unreachable!(), + } + }) + .unwrap(); + true + } + Less => unreachable!(), + }; if should_change { - *current_check_point = anchored_check_point; + if *current_index != anchored_index { + trace!("Progressed to: {} at {:?}", anchored_check_point, current()); + *current_index = anchored_index; + } + self.condvar.notify_all(); } } - fn expected_next_index(&self, current_check_point: &String) -> usize { - let current_index = self - .check_points - .iter() - .position(|check_point| check_point == current_check_point) - .unwrap(); + fn expected_next_index(&self, current_index: usize) -> usize { current_index.checked_add(1).unwrap() } + + fn anchored_index( + &self, + current_index: usize, + anchored_check_point: &String, + ) -> Option { + self.check_points[current_index..] + .iter() + .position(|check_point| check_point == anchored_check_point) + .map(|subslice_index| subslice_index.checked_add(current_index).unwrap()) + } } lazy_static! 
{ @@ -142,11 +170,13 @@ mod sleepless_testing_real { } fn deactivate(&self) { - assert_eq!( - *self.0.check_points.last().unwrap(), - *self.0.current_check_point.lock().unwrap(), - "unfinished progress" - ); + if !panicking() { + assert_eq!( + self.0.check_points.len().checked_sub(1).unwrap(), + *self.0.current_index.lock().unwrap(), + "unfinished progress" + ); + } THREAD_REGISTRY.lock().unwrap().remove(&self.1).unwrap(); } } @@ -299,7 +329,7 @@ mod sleepless_testing_real { } #[cfg(not(test))] -mod sleepless_testing_dummy { +mod dummy { use std::fmt::Debug; #[inline] diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 580a6a14027518..7f7865d8ac18e8 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -23,7 +23,6 @@ use { account::AccountSharedData, clock::Slot, epoch_schedule::EpochSchedule, - feature_set, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, @@ -352,9 +351,7 @@ fn main() { exit(1); }); - let mut features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); - // Remove this when client support is ready for the enable_partitioned_epoch_reward feature - features_to_deactivate.push(feature_set::enable_partitioned_epoch_reward::id()); + let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); if TestValidatorGenesis::ledger_exists(&ledger_path) { for (name, long) in &[ diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 978d650a2ef1de..2046651652ea58 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -211,12 +211,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(false) .help("Expose RPC methods for querying chain state and transaction history"), ) - .arg( - Arg::with_name("obsolete_v1_7_rpc_api") - .long("enable-rpc-obsolete_v1_7") - .takes_value(false) - .help("Enable the obsolete RPC methods removed in 
v1.7"), - ) .arg( Arg::with_name("private_rpc") .long("private-rpc") @@ -1522,7 +1516,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name("block_verification_method") .long("block-verification-method") - .hidden(hidden_unless_forced()) .value_name("METHOD") .takes_value(true) .possible_values(BlockVerificationMethod::cli_names()) @@ -1539,7 +1532,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name("unified_scheduler_handler_threads") .long("unified-scheduler-handler-threads") - .hidden(hidden_unless_forced()) .value_name("COUNT") .takes_value(true) .validator(|s| is_within_range(s, 1..)) @@ -1966,16 +1958,6 @@ fn deprecated_arguments() -> Vec { (@into-option $v:expr) => { Some($v) }; } - add_arg!(Arg::with_name("accounts_db_caching_enabled").long("accounts-db-caching-enabled")); - add_arg!( - Arg::with_name("accounts_db_index_hashing") - .long("accounts-db-index-hashing") - .help( - "Enables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.", - ), - usage_warning: "The accounts hash is only calculated without using the index.", - ); add_arg!( Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") @@ -2070,16 +2052,6 @@ fn deprecated_arguments() -> Vec { .long("minimal-rpc-api") .takes_value(false) .help("Only expose the RPC methods required to serve snapshots to other nodes")); - add_arg!( - Arg::with_name("no_accounts_db_index_hashing") - .long("no-accounts-db-index-hashing") - .help( - "This is obsolete. See --accounts-db-index-hashing. 
\ - Disables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.", - ), - usage_warning: "The accounts hash is only calculated without using the index.", - ); add_arg!( Arg::with_name("no_check_vote_account") .long("no-check-vote-account") diff --git a/validator/src/main.rs b/validator/src/main.rs index a87f6b3f488d9c..321f31fe13d917 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1402,7 +1402,6 @@ pub fn main() { solana_net_utils::parse_host_port(address).expect("failed to parse faucet address") }), full_api, - obsolete_v1_7_api: matches.is_present("obsolete_v1_7_rpc_api"), max_multiple_accounts: Some(value_t_or_exit!( matches, "rpc_max_multiple_accounts", @@ -1772,16 +1771,26 @@ pub fn main() { None => ShredStorageType::default(), Some(shred_compaction_string) => match shred_compaction_string { "level" => ShredStorageType::RocksLevel, - "fifo" => match matches.value_of("rocksdb_fifo_shred_storage_size") { - None => ShredStorageType::rocks_fifo(default_fifo_shred_storage_size( - &validator_config, - )), - Some(_) => ShredStorageType::rocks_fifo(Some(value_t_or_exit!( - matches, - "rocksdb_fifo_shred_storage_size", - u64 - ))), - }, + "fifo" => { + warn!( + "The value \"fifo\" for --rocksdb-shred-compaction has been deprecated. \ + Use of \"fifo\" will still work for now, but is planned for full removal \ + in v2.1. To update, use \"level\" for --rocksdb-shred-compaction, or \ + remove the --rocksdb-shred-compaction argument altogether. Note that the \ + entire \"rocksdb_fifo\" subdirectory within the ledger directory will \ + need to be manually removed once the validator is running with \"level\"." 
+ ); + match matches.value_of("rocksdb_fifo_shred_storage_size") { + None => ShredStorageType::rocks_fifo(default_fifo_shred_storage_size( + &validator_config, + )), + Some(_) => ShredStorageType::rocks_fifo(Some(value_t_or_exit!( + matches, + "rocksdb_fifo_shred_storage_size", + u64 + ))), + } + } _ => panic!("Unrecognized rocksdb-shred-compaction: {shred_compaction_string}"), }, }, diff --git a/wen-restart/src/heaviest_fork_aggregate.rs b/wen-restart/src/heaviest_fork_aggregate.rs index ce1c19283876ce..0b43b800d18573 100644 --- a/wen-restart/src/heaviest_fork_aggregate.rs +++ b/wen-restart/src/heaviest_fork_aggregate.rs @@ -118,6 +118,9 @@ impl HeaviestForkAggregate { ); return None; } + if from == &self.my_pubkey { + return None; + } if received_heaviest_fork.shred_version != self.my_shred_version { warn!( "Gossip should not accept RestartLastVotedFork with different shred version {} from {:?}", @@ -445,6 +448,23 @@ mod tests { .total_active_stake_seen_supermajority(), 1500 ); + + // test that message from my pubkey is ignored. + assert_eq!( + test_state + .heaviest_fork_aggregate + .aggregate(RestartHeaviestFork { + from: test_state.validator_voting_keypairs[MY_INDEX] + .node_keypair + .pubkey(), + wallclock: timestamp(), + last_slot: test_state.heaviest_slot, + last_slot_hash: test_state.heaviest_hash, + observed_stake: 100, + shred_version: SHRED_VERSION, + },), + None, + ); } #[test] @@ -535,6 +555,27 @@ mod tests { ); // percentage doesn't change since the previous aggregate is ignored. assert_eq!(test_state.heaviest_fork_aggregate.total_active_stake(), 200); + + // Record from my pubkey should be ignored. 
+ assert_eq!( + test_state + .heaviest_fork_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[MY_INDEX] + .node_keypair + .pubkey() + .to_string(), + &HeaviestForkRecord { + wallclock: timestamp(), + slot: test_state.heaviest_slot, + bankhash: test_state.heaviest_hash.to_string(), + shred_version: SHRED_VERSION as u32, + total_active_stake: 100, + } + ) + .unwrap(), + None, + ); } #[test] diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs index b042ba80d9bb51..67cd7c1c77ea87 100644 --- a/wen-restart/src/last_voted_fork_slots_aggregate.rs +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -20,6 +20,7 @@ pub(crate) struct LastVotedForkSlotsAggregate { slots_stake_map: HashMap, active_peers: HashSet, slots_to_repair: HashSet, + my_pubkey: Pubkey, } #[derive(Clone, Debug, PartialEq)] @@ -53,6 +54,7 @@ impl LastVotedForkSlotsAggregate { slots_stake_map, active_peers, slots_to_repair: HashSet::new(), + my_pubkey: *my_pubkey, } } @@ -70,6 +72,9 @@ impl LastVotedForkSlotsAggregate { record: &LastVotedForkSlotsRecord, ) -> Result> { let from = Pubkey::from_str(key_string)?; + if from == self.my_pubkey { + return Ok(None); + } let last_voted_hash = Hash::from_str(&record.last_vote_bankhash)?; let converted_record = RestartLastVotedForkSlots::new( from, @@ -88,6 +93,9 @@ impl LastVotedForkSlotsAggregate { let total_stake = self.epoch_stakes.total_stake(); let threshold_stake = (total_stake as f64 * self.repair_threshold) as u64; let from = &new_slots.from; + if from == &self.my_pubkey { + return None; + } let sender_stake = Self::validator_stake(&self.epoch_stakes, from); if sender_stake == 0 { warn!( @@ -354,6 +362,23 @@ mod tests { Vec::from_iter(test_state.slots_aggregate.slots_to_repair_iter().cloned()); actual_slots.sort(); assert_eq!(actual_slots, vec![root_slot + 1]); + + // test that message from my pubkey is ignored. 
+ assert_eq!( + test_state.slots_aggregate.aggregate( + RestartLastVotedForkSlots::new( + test_state.validator_voting_keypairs[MY_INDEX] + .node_keypair + .pubkey(), + timestamp(), + &[root_slot + 1, root_slot + 4, root_slot + 5], + Hash::default(), + SHRED_VERSION, + ) + .unwrap(), + ), + None, + ); } #[test] @@ -446,6 +471,26 @@ mod tests { ); // percentage doesn't change since the previous aggregate is ignored. assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); + + // Record from my pubkey should be ignored. + assert_eq!( + test_state + .slots_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[MY_INDEX] + .node_keypair + .pubkey() + .to_string(), + &LastVotedForkSlotsRecord { + wallclock: timestamp(), + last_voted_fork_slots: vec![root_slot + 10, root_slot + 300], + last_vote_bankhash: Hash::new_unique().to_string(), + shred_version: SHRED_VERSION as u32, + } + ) + .unwrap(), + None, + ); } #[test] diff --git a/zk-sdk/Cargo.toml b/zk-sdk/Cargo.toml index 8b67d12c693987..a57b994e017d2f 100644 --- a/zk-sdk/Cargo.toml +++ b/zk-sdk/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] base64 = { workspace = true } bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } merlin = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } diff --git a/zk-sdk/src/encryption/pod/elgamal.rs b/zk-sdk/src/encryption/pod/elgamal.rs index 8ec72c6f5837bd..9c70724307d43c 100644 --- a/zk-sdk/src/encryption/pod/elgamal.rs +++ b/zk-sdk/src/encryption/pod/elgamal.rs @@ -5,7 +5,7 @@ use { pod::impl_from_str, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_PUBKEY_LEN, }, base64::{prelude::BASE64_STANDARD, Engine}, - bytemuck::{Pod, Zeroable}, + bytemuck::Zeroable, std::fmt, }; #[cfg(not(target_os = "solana"))] @@ -24,7 +24,7 @@ const ELGAMAL_PUBKEY_MAX_BASE64_LEN: usize = 44; const ELGAMAL_CIPHERTEXT_MAX_BASE64_LEN: usize = 88; /// The `ElGamalCiphertext` type as a `Pod`. 
-#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PodElGamalCiphertext(pub(crate) [u8; ELGAMAL_CIPHERTEXT_LEN]); @@ -69,7 +69,7 @@ impl TryFrom for ElGamalCiphertext { } /// The `ElGamalPubkey` type as a `Pod`. -#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PodElGamalPubkey(pub(crate) [u8; ELGAMAL_PUBKEY_LEN]); @@ -108,7 +108,7 @@ impl TryFrom for ElGamalPubkey { } /// The `DecryptHandle` type as a `Pod`. -#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PodDecryptHandle(pub(crate) [u8; DECRYPT_HANDLE_LEN]); diff --git a/zk-sdk/src/encryption/pod/grouped_elgamal.rs b/zk-sdk/src/encryption/pod/grouped_elgamal.rs index 9202f23098858d..25825bbb474a6d 100644 --- a/zk-sdk/src/encryption/pod/grouped_elgamal.rs +++ b/zk-sdk/src/encryption/pod/grouped_elgamal.rs @@ -10,7 +10,7 @@ use { }, errors::ElGamalError, }, - bytemuck::{Pod, Zeroable}, + bytemuck::Zeroable, std::fmt, }; @@ -61,7 +61,7 @@ const GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES: usize = PEDERSEN_COMMITMENT_LEN + DECRYPT_HANDLE_LEN + DECRYPT_HANDLE_LEN + DECRYPT_HANDLE_LEN; /// The `GroupedElGamalCiphertext` type with two decryption handles as a `Pod` -#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PodGroupedElGamalCiphertext2Handles( pub(crate) [u8; GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES], @@ -97,7 +97,7 @@ impl TryFrom for GroupedElGamalCiphertext<2 impl_extract!(TYPE = PodGroupedElGamalCiphertext2Handles); /// The `GroupedElGamalCiphertext` type with three decryption handles as a `Pod` 
-#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PodGroupedElGamalCiphertext3Handles( pub(crate) [u8; GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES], diff --git a/zk-sdk/src/encryption/pod/pedersen.rs b/zk-sdk/src/encryption/pod/pedersen.rs index 2d90b100cbbe96..faf39ca949bc09 100644 --- a/zk-sdk/src/encryption/pod/pedersen.rs +++ b/zk-sdk/src/encryption/pod/pedersen.rs @@ -2,7 +2,7 @@ use { crate::encryption::PEDERSEN_COMMITMENT_LEN, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, std::fmt, }; #[cfg(not(target_os = "solana"))] diff --git a/zk-sdk/src/pod.rs b/zk-sdk/src/pod.rs index 416df4c58be767..2240b5c1ebe375 100644 --- a/zk-sdk/src/pod.rs +++ b/zk-sdk/src/pod.rs @@ -1,4 +1,4 @@ -use bytemuck::{Pod, Zeroable}; +use bytemuck_derive::{Pod, Zeroable}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] #[repr(transparent)] diff --git a/zk-sdk/src/sigma_proofs/pod.rs b/zk-sdk/src/sigma_proofs/pod.rs index b0d4477e09ffa9..fb0bc3a96efba0 100644 --- a/zk-sdk/src/sigma_proofs/pod.rs +++ b/zk-sdk/src/sigma_proofs/pod.rs @@ -192,7 +192,7 @@ impl TryFrom for ZeroCiphertextProof { } /// The `PercentageWithCapProof` type as a `Pod`. -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] pub struct PodPercentageWithCapProof(pub(crate) [u8; PERCENTAGE_WITH_CAP_PROOF_LEN]); @@ -213,7 +213,7 @@ impl TryFrom for PercentageWithCapProof { } /// The `PubkeyValidityProof` type as a `Pod`. 
-#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] pub struct PodPubkeyValidityProof(pub(crate) [u8; PUBKEY_VALIDITY_PROOF_LEN]); diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_2.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_2.rs index 56bc42d8606f30..bd6e980962b1df 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_2.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_2.rs @@ -13,7 +13,7 @@ use { sigma_proofs::pod::PodBatchedGroupedCiphertext2HandlesValidityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_3.rs index a98d9c8f47b526..7423a5427733aa 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_grouped_ciphertext_validity/handles_3.rs @@ -13,7 +13,7 @@ use { sigma_proofs::pod::PodBatchedGroupedCiphertext3HandlesValidityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u128.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u128.rs index fbfab0d03052d3..df3eea3b53ea58 100644 --- 
a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u128.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u128.rs @@ -19,7 +19,7 @@ use { batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData, }, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u256.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u256.rs index a2f7426044ba4c..a51997484e64ba 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u256.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u256.rs @@ -19,7 +19,7 @@ use { batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData, }, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u64.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u64.rs index a701ae6a1370de..4043648f01ada3 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u64.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/batched_range_proof_u64.rs @@ -19,7 +19,7 @@ use { batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData, }, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/mod.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/mod.rs index 828fcc08218e1a..5d8c3c419db652 100644 --- 
a/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/mod.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/batched_range_proof/mod.rs @@ -20,17 +20,14 @@ pub mod batched_range_proof_u128; pub mod batched_range_proof_u256; pub mod batched_range_proof_u64; -use { - crate::encryption::pod::pedersen::PodPedersenCommitment, - bytemuck::{Pod, Zeroable}, -}; +use crate::encryption::pod::pedersen::PodPedersenCommitment; #[cfg(not(target_os = "solana"))] use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, zk_elgamal_proof_program::errors::{ProofGenerationError, ProofVerificationError}, }, - bytemuck::bytes_of, + bytemuck::{bytes_of, Zeroable}, curve25519_dalek::traits::IsIdentity, merlin::Transcript, std::convert::TryInto, @@ -48,7 +45,7 @@ const MAX_SINGLE_BIT_LENGTH: usize = 128; /// The context data needed to verify a range-proof for a Pedersen committed value. /// /// The context data is shared by all `VerifyBatchedRangeProof{N}` instructions. -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct BatchedRangeProofContext { pub commitments: [PodPedersenCommitment; MAX_COMMITMENTS], diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_ciphertext_equality.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_ciphertext_equality.rs index 5e2ba6cac9a7bc..21437bdba7b2b3 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_ciphertext_equality.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_ciphertext_equality.rs @@ -11,7 +11,7 @@ use { sigma_proofs::pod::PodCiphertextCiphertextEqualityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_commitment_equality.rs 
b/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_commitment_equality.rs index 86f4dbd2d4055a..bb093436b66f0a 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_commitment_equality.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/ciphertext_commitment_equality.rs @@ -14,7 +14,7 @@ use { sigma_proofs::pod::PodCiphertextCommitmentEqualityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_2.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_2.rs index 76083014c5ab2e..5c35b1b131729a 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_2.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_2.rs @@ -13,7 +13,7 @@ use { sigma_proofs::pod::PodGroupedCiphertext2HandlesValidityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_3.rs index 48420661b48d4a..5e8852f8584ca1 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/grouped_ciphertext_validity/handles_3.rs @@ -13,7 +13,7 @@ use { sigma_proofs::pod::PodGroupedCiphertext3HandlesValidityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] use { diff --git 
a/zk-sdk/src/zk_elgamal_proof_program/proof_data/percentage_with_cap.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/percentage_with_cap.rs index 8a6b18e68bdf9b..6154f1ae43b0c0 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/percentage_with_cap.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/percentage_with_cap.rs @@ -24,7 +24,7 @@ use { sigma_proofs::pod::PodPercentageWithCapProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::VerifyPercentageWithCap` diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/pod.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/pod.rs index 50e1a81a582705..2010212c3e35cd 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/pod.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/pod.rs @@ -1,6 +1,6 @@ use { crate::zk_elgamal_proof_program::proof_data::{errors::ProofDataError, ProofType}, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, num_traits::{FromPrimitive, ToPrimitive}, }; diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/pubkey_validity.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/pubkey_validity.rs index b769458e5fb8c0..8b8ceb9c45be87 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/pubkey_validity.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/pubkey_validity.rs @@ -22,7 +22,7 @@ use { sigma_proofs::pod::PodPubkeyValidityProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::VerifyPubkeyValidity` diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/zero_ciphertext.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/zero_ciphertext.rs index 8a376304e05494..a6c225074974a3 100644 --- 
a/zk-sdk/src/zk_elgamal_proof_program/proof_data/zero_ciphertext.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/zero_ciphertext.rs @@ -21,7 +21,7 @@ use { sigma_proofs::pod::PodZeroCiphertextProof, zk_elgamal_proof_program::proof_data::{ProofType, ZkProofData}, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::ZeroCiphertext` instruction. diff --git a/zk-sdk/src/zk_elgamal_proof_program/state.rs b/zk-sdk/src/zk_elgamal_proof_program/state.rs index 7cd87bbf5827da..1dd225af02f813 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/state.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/state.rs @@ -53,7 +53,7 @@ impl ProofContextState { /// The `ProofContextState` without the proof context itself. This struct exists to facilitate the /// decoding of generic-independent fields in `ProofContextState`. -#[derive(Clone, Copy, Debug, PartialEq, Pod, Zeroable)] +#[derive(Clone, Copy, Debug, PartialEq, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct ProofContextStateMeta { /// The proof context authority that can close the account diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index fb3dc25649b5a8..d466d2ba0af22d 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -11,9 +11,11 @@ edition = { workspace = true } [dependencies] base64 = { workspace = true } -bytemuck = { workspace = true, features = ["derive"] } +bytemuck = { workspace = true } +bytemuck_derive = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } +solana-curve25519 = { workspace = true } solana-program = { workspace = true } thiserror = { workspace = true } @@ -25,7 +27,6 @@ aes-gcm-siv = { workspace = true } bincode = { workspace = true } byteorder = { workspace = true } curve25519-dalek = { workspace = true, features = ["serde"] } -getrandom = { version = "0.1", features = ["dummy"] } itertools = { workspace = true } 
lazy_static = { workspace = true } merlin = { workspace = true } diff --git a/zk-token-sdk/src/curve25519/mod.rs b/zk-token-sdk/src/curve25519/mod.rs deleted file mode 100644 index 19c4aa1388aa9a..00000000000000 --- a/zk-token-sdk/src/curve25519/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Syscall operations for curve25519 -//! -//! This module lives inside the zk-token-sdk for now, but should move to a general location since -//! it is independent of zk-tokens. - -pub mod curve_syscall_traits; -pub mod edwards; -#[cfg(not(target_os = "solana"))] -pub mod errors; -pub mod ristretto; -pub mod scalar; diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index 7f0a48820a6f35..130aacef669545 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -221,7 +221,7 @@ impl ElGamalKeypair { &self.secret } - #[deprecated(note = "please use `into()` instead")] + #[deprecated(since = "2.0.0", note = "please use `into()` instead")] #[allow(deprecated)] pub fn to_bytes(&self) -> [u8; ELGAMAL_KEYPAIR_LEN] { let mut bytes = [0u8; ELGAMAL_KEYPAIR_LEN]; @@ -230,7 +230,7 @@ impl ElGamalKeypair { bytes } - #[deprecated(note = "please use `try_from()` instead")] + #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] #[allow(deprecated)] pub fn from_bytes(bytes: &[u8]) -> Option { if bytes.len() != ELGAMAL_KEYPAIR_LEN { @@ -367,12 +367,12 @@ impl ElGamalPubkey { &self.0 } - #[deprecated(note = "please use `into()` instead")] + #[deprecated(since = "2.0.0", note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_PUBKEY_LEN] { self.0.compress().to_bytes() } - #[deprecated(note = "please use `try_from()` instead")] + #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { if bytes.len() != ELGAMAL_PUBKEY_LEN { return None; @@ -544,12 +544,12 @@ impl ElGamalSecretKey { self.0.as_bytes() } - #[deprecated(note = 
"please use `into()` instead")] + #[deprecated(since = "2.0.0", note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_SECRET_KEY_LEN] { self.0.to_bytes() } - #[deprecated(note = "please use `try_from()` instead")] + #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { match bytes.try_into() { Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(ElGamalSecretKey), diff --git a/zk-token-sdk/src/errors.rs b/zk-token-sdk/src/errors.rs index 2d3adb74635574..e2c6c78721658a 100644 --- a/zk-token-sdk/src/errors.rs +++ b/zk-token-sdk/src/errors.rs @@ -6,18 +6,6 @@ use { thiserror::Error, }; -#[derive(Error, Clone, Debug, Eq, PartialEq)] -pub enum AuthenticatedEncryptionError { - #[error("key derivation method not supported")] - DerivationMethodNotSupported, - #[error("seed length too short for derivation")] - SeedLengthTooShort, - #[error("seed length too long for derivation")] - SeedLengthTooLong, - #[error("failed to deserialize")] - Deserialization, -} - #[derive(Error, Clone, Debug, Eq, PartialEq)] pub enum ElGamalError { #[error("key derivation method not supported")] @@ -36,6 +24,18 @@ pub enum ElGamalError { SecretKeyDeserialization, } +#[derive(Error, Clone, Debug, Eq, PartialEq)] +pub enum AuthenticatedEncryptionError { + #[error("key derivation method not supported")] + DerivationMethodNotSupported, + #[error("seed length too short for derivation")] + SeedLengthTooShort, + #[error("seed length too long for derivation")] + SeedLengthTooLong, + #[error("failed to deserialize")] + Deserialization, +} + #[cfg(not(target_os = "solana"))] #[derive(Error, Clone, Debug, Eq, PartialEq)] pub enum ProofGenerationError { diff --git a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_2.rs b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_2.rs index 0be760691f3ee6..6f489f47523728 100644 --- 
a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_2.rs +++ b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_2.rs @@ -30,7 +30,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_3.rs b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_3.rs index 8a5fc1fca82837..d41b307f188b10 100644 --- a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_3.rs +++ b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity/handles_3.rs @@ -29,7 +29,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs index a1193c04190629..818af7c1d9e9a1 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs @@ -15,7 +15,7 @@ use { instruction::{batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs index 39237a3b758470..d728923f07fc6b 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs @@ -15,7 +15,7 @@ use { 
instruction::{batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs index 94b76b5beff89d..53d790c2a30fa6 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs @@ -15,7 +15,7 @@ use { instruction::{batched_range_proof::BatchedRangeProofContext, ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/batched_range_proof/mod.rs b/zk-token-sdk/src/instruction/batched_range_proof/mod.rs index a002ca80ba642b..c94cbb328b2b33 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/mod.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/mod.rs @@ -20,17 +20,14 @@ pub mod batched_range_proof_u128; pub mod batched_range_proof_u256; pub mod batched_range_proof_u64; -use { - crate::zk_token_elgamal::pod, - bytemuck::{Pod, Zeroable}, -}; +use crate::zk_token_elgamal::pod; #[cfg(not(target_os = "solana"))] use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, errors::{ProofGenerationError, ProofVerificationError}, }, - bytemuck::bytes_of, + bytemuck::{bytes_of, Zeroable}, curve25519_dalek::traits::IsIdentity, merlin::Transcript, std::convert::TryInto, @@ -48,7 +45,7 @@ const MAX_SINGLE_BIT_LENGTH: usize = 128; /// The context data needed to verify a range-proof for a Pedersen committed value. /// /// The context data is shared by all `VerifyBatchedRangeProof{N}` instructions. 
-#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct BatchedRangeProofContext { pub commitments: [pod::PedersenCommitment; MAX_COMMITMENTS], diff --git a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs index a7a0bd5eaa8531..c858c6c015f8c1 100644 --- a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs @@ -27,7 +27,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs index 5cd2b3cbc5c670..fc862904a5ecae 100644 --- a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs @@ -24,7 +24,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the /// `ProofInstruction::VerifyCiphertextCommitmentEquality` instruction. diff --git a/zk-token-sdk/src/instruction/fee_sigma.rs b/zk-token-sdk/src/instruction/fee_sigma.rs index 500e21a505cf33..adddef5f64fc9d 100644 --- a/zk-token-sdk/src/instruction/fee_sigma.rs +++ b/zk-token-sdk/src/instruction/fee_sigma.rs @@ -24,7 +24,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::VerifyFeeSigma` instruction. 
diff --git a/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_2.rs b/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_2.rs index a99a733c748928..da6900043fb4d1 100644 --- a/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_2.rs +++ b/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_2.rs @@ -28,7 +28,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::VerifyGroupedCiphertextValidity` diff --git a/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_3.rs b/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_3.rs index 14e025fef3f754..0fb3385247dde1 100644 --- a/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_3.rs +++ b/zk-token-sdk/src/instruction/grouped_ciphertext_validity/handles_3.rs @@ -28,7 +28,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the diff --git a/zk-token-sdk/src/instruction/pubkey_validity.rs b/zk-token-sdk/src/instruction/pubkey_validity.rs index 3c264f0cdd31d3..6579611cba8b1e 100644 --- a/zk-token-sdk/src/instruction/pubkey_validity.rs +++ b/zk-token-sdk/src/instruction/pubkey_validity.rs @@ -21,7 +21,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::VerifyPubkeyValidity` diff --git a/zk-token-sdk/src/instruction/range_proof.rs b/zk-token-sdk/src/instruction/range_proof.rs index fd6652e766aff6..823e41da71cf63 100644 --- a/zk-token-sdk/src/instruction/range_proof.rs +++ b/zk-token-sdk/src/instruction/range_proof.rs @@ -20,7 +20,7 @@ use { instruction::{ProofType, ZkProofData}, 
zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The context data needed to verify a range-proof for a committed value in a Pedersen commitment. diff --git a/zk-token-sdk/src/instruction/transfer/with_fee.rs b/zk-token-sdk/src/instruction/transfer/with_fee.rs index e8afb7606d6ff7..9fb25579a128c5 100644 --- a/zk-token-sdk/src/instruction/transfer/with_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/with_fee.rs @@ -1,3 +1,7 @@ +use crate::{ + instruction::{ProofType, ZkProofData}, + zk_token_elgamal::pod, +}; #[cfg(not(target_os = "solana"))] use { crate::{ @@ -29,13 +33,6 @@ use { std::convert::TryInto, subtle::{ConditionallySelectable, ConstantTimeGreater}, }; -use { - crate::{ - instruction::{ProofType, ZkProofData}, - zk_token_elgamal::pod, - }, - bytemuck::{Pod, Zeroable}, -}; #[cfg(not(target_os = "solana"))] const MAX_FEE_BASIS_POINTS: u64 = 10_000; @@ -71,7 +68,7 @@ lazy_static::lazy_static! { /// /// It includes the cryptographic proof as well as the context data information needed to verify /// the proof. -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct TransferWithFeeData { /// The context data for the transfer with fee proof @@ -82,7 +79,7 @@ pub struct TransferWithFeeData { } /// The context data needed to verify a transfer-with-fee proof. 
-#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct TransferWithFeeProofContext { /// Group encryption of the low 16 bites of the transfer amount @@ -108,7 +105,7 @@ pub struct TransferWithFeeProofContext { } /// The ElGamal public keys needed for a transfer with fee -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct TransferWithFeePubkeys { pub source: pod::ElGamalPubkey, @@ -453,7 +450,7 @@ impl TransferWithFeeProofContext { } #[repr(C)] -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] pub struct TransferWithFeeProof { pub new_source_commitment: pod::PedersenCommitment, pub claimed_commitment: pod::PedersenCommitment, @@ -820,7 +817,7 @@ fn compute_delta_commitment( #[cfg(test)] mod test { - use super::*; + use {super::*, bytemuck::Zeroable}; #[test] fn test_fee_correctness() { diff --git a/zk-token-sdk/src/instruction/transfer/without_fee.rs b/zk-token-sdk/src/instruction/transfer/without_fee.rs index a2f257de65f054..27c8782fa22960 100644 --- a/zk-token-sdk/src/instruction/transfer/without_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/without_fee.rs @@ -29,7 +29,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] @@ -470,7 +470,7 @@ impl TransferProof { #[cfg(test)] mod test { - use {super::*, crate::encryption::elgamal::ElGamalKeypair}; + use {super::*, crate::encryption::elgamal::ElGamalKeypair, bytemuck::Zeroable}; #[test] fn test_transfer_correctness() { diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 530b6de0d75532..07cd8a9d6949a6 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -18,7 +18,7 @@ use 
{ instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; #[cfg(not(target_os = "solana"))] diff --git a/zk-token-sdk/src/instruction/zero_balance.rs b/zk-token-sdk/src/instruction/zero_balance.rs index 7d52b80063176e..7671fb21cc4569 100644 --- a/zk-token-sdk/src/instruction/zero_balance.rs +++ b/zk-token-sdk/src/instruction/zero_balance.rs @@ -20,7 +20,7 @@ use { instruction::{ProofType, ZkProofData}, zk_token_elgamal::pod, }, - bytemuck::{Pod, Zeroable}, + bytemuck_derive::{Pod, Zeroable}, }; /// The instruction data that is needed for the `ProofInstruction::ZeroBalance` instruction. diff --git a/zk-token-sdk/src/lib.rs b/zk-token-sdk/src/lib.rs index 2946e177358adc..83d8b188366e7d 100644 --- a/zk-token-sdk/src/lib.rs +++ b/zk-token-sdk/src/lib.rs @@ -17,6 +17,8 @@ // // `clippy::op_ref` is turned off to prevent clippy from warning that this is not idiomatic code. +pub use solana_curve25519 as curve25519; + #[cfg(not(target_os = "solana"))] #[macro_use] pub(crate) mod macros; @@ -27,7 +29,6 @@ mod sigma_proofs; #[cfg(not(target_os = "solana"))] mod transcript; -pub mod curve25519; pub mod errors; pub mod instruction; pub mod zk_token_elgamal; diff --git a/zk-token-sdk/src/zk_token_elgamal/convert.rs b/zk-token-sdk/src/zk_token_elgamal/convert.rs index a437c817b41e72..286383cc1cd98b 100644 --- a/zk-token-sdk/src/zk_token_elgamal/convert.rs +++ b/zk-token-sdk/src/zk_token_elgamal/convert.rs @@ -1,4 +1,4 @@ -use {super::pod, crate::curve25519::ristretto::PodRistrettoPoint}; +use {super::pod, solana_curve25519::ristretto::PodRistrettoPoint}; impl From<(pod::PedersenCommitment, pod::DecryptHandle)> for pod::ElGamalCiphertext { fn from((commitment, handle): (pod::PedersenCommitment, pod::DecryptHandle)) -> Self { @@ -47,26 +47,7 @@ impl From for pod::DecryptHandle { #[cfg(not(target_os = "solana"))] mod target_arch { - use { - super::pod, - crate::{curve25519::scalar::PodScalar, 
errors::ElGamalError}, - curve25519_dalek::{ristretto::CompressedRistretto, scalar::Scalar}, - std::convert::TryFrom, - }; - - impl From for PodScalar { - fn from(scalar: Scalar) -> Self { - Self(scalar.to_bytes()) - } - } - - impl TryFrom for Scalar { - type Error = ElGamalError; - - fn try_from(pod: PodScalar) -> Result { - Scalar::from_canonical_bytes(pod.0).ok_or(ElGamalError::CiphertextDeserialization) - } - } + use {super::pod, curve25519_dalek::ristretto::CompressedRistretto}; impl From for pod::CompressedRistretto { fn from(cr: CompressedRistretto) -> Self { diff --git a/zk-token-sdk/src/zk_token_elgamal/ops.rs b/zk-token-sdk/src/zk_token_elgamal/ops.rs index 38da19c1c2e7f1..d0cd41cc799e02 100644 --- a/zk-token-sdk/src/zk_token_elgamal/ops.rs +++ b/zk-token-sdk/src/zk_token_elgamal/ops.rs @@ -1,9 +1,9 @@ -use crate::{ - curve25519::{ +use { + crate::zk_token_elgamal::pod, + solana_curve25519::{ ristretto::{add_ristretto, multiply_ristretto, subtract_ristretto, PodRistrettoPoint}, scalar::PodScalar, }, - zk_token_elgamal::pod, }; const SHIFT_BITS: usize = 16; diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs b/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs index 3e1cdf1786a4ab..f46307d2367de5 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs @@ -3,8 +3,9 @@ #[cfg(not(target_os = "solana"))] use crate::{encryption::auth_encryption as decoded, errors::AuthenticatedEncryptionError}; use { - crate::zk_token_elgamal::pod::{impl_from_str, Pod, Zeroable}, + crate::zk_token_elgamal::pod::impl_from_str, base64::{prelude::BASE64_STANDARD, Engine}, + bytemuck::{Pod, Zeroable}, std::fmt, }; diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs index 64c3e794b4816b..2303daadfd1470 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs @@ 
-10,10 +10,11 @@ use { }; use { crate::{ - zk_token_elgamal::pod::{impl_from_str, pedersen::PEDERSEN_COMMITMENT_LEN, Pod, Zeroable}, + zk_token_elgamal::pod::{impl_from_str, pedersen::PEDERSEN_COMMITMENT_LEN}, RISTRETTO_POINT_LEN, }, base64::{prelude::BASE64_STANDARD, Engine}, + bytemuck::Zeroable, std::fmt, }; @@ -33,7 +34,7 @@ pub(crate) const ELGAMAL_CIPHERTEXT_LEN: usize = PEDERSEN_COMMITMENT_LEN + DECRY const ELGAMAL_CIPHERTEXT_MAX_BASE64_LEN: usize = 88; /// The `ElGamalCiphertext` type as a `Pod`. -#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct ElGamalCiphertext(pub [u8; ELGAMAL_CIPHERTEXT_LEN]); @@ -78,7 +79,7 @@ impl TryFrom for decoded::ElGamalCiphertext { } /// The `ElGamalPubkey` type as a `Pod`. -#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct ElGamalPubkey(pub [u8; ELGAMAL_PUBKEY_LEN]); @@ -117,7 +118,7 @@ impl TryFrom for decoded::ElGamalPubkey { } /// The `DecryptHandle` type as a `Pod`. 
-#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct DecryptHandle(pub [u8; DECRYPT_HANDLE_LEN]); diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs b/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs index c7e820fcd04508..7d5ae944ecf1a9 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs @@ -8,9 +8,9 @@ use { zk_token_elgamal::pod::{ elgamal::{ElGamalCiphertext, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN}, pedersen::{PedersenCommitment, PEDERSEN_COMMITMENT_LEN}, - Pod, Zeroable, }, }, + bytemuck::Zeroable, std::fmt, }; @@ -61,7 +61,7 @@ const GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES: usize = PEDERSEN_COMMITMENT_LEN + DECRYPT_HANDLE_LEN + DECRYPT_HANDLE_LEN + DECRYPT_HANDLE_LEN; /// The `GroupedElGamalCiphertext` type with two decryption handles as a `Pod` -#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct GroupedElGamalCiphertext2Handles(pub [u8; GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES]); @@ -95,7 +95,7 @@ impl TryFrom for GroupedElGamalCiphertext<2> { impl_extract!(TYPE = GroupedElGamalCiphertext2Handles); /// The `GroupedElGamalCiphertext` type with three decryption handles as a `Pod` -#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct GroupedElGamalCiphertext3Handles(pub [u8; GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES]); diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs b/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs index e29e3a500551ee..c87a0aec71dd87 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs +++ 
b/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs @@ -1,11 +1,10 @@ use crate::zk_token_elgamal::pod::{ - GroupedElGamalCiphertext2Handles, GroupedElGamalCiphertext3Handles, Pod, PodU16, PodU64, - Zeroable, + GroupedElGamalCiphertext2Handles, GroupedElGamalCiphertext3Handles, PodU16, PodU64, }; #[cfg(not(target_os = "solana"))] use crate::{errors::ElGamalError, instruction::transfer as decoded}; -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct TransferAmountCiphertext(pub GroupedElGamalCiphertext3Handles); @@ -25,7 +24,7 @@ impl TryFrom for decoded::TransferAmountCiphertext { } } -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct FeeEncryption(pub GroupedElGamalCiphertext2Handles); @@ -45,7 +44,7 @@ impl TryFrom for decoded::FeeEncryption { } } -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct FeeParameters { /// Fee rate expressed as basis points of the transfer amount, i.e. 
increments of 0.01% diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/mod.rs b/zk-token-sdk/src/zk_token_elgamal/pod/mod.rs index 56ea70a6589532..d060213a37d6a8 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/mod.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/mod.rs @@ -36,7 +36,9 @@ pub enum ParseError { Invalid, } -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] +#[derive( + Clone, Copy, Debug, Default, PartialEq, Eq, bytemuck_derive::Pod, bytemuck_derive::Zeroable, +)] #[repr(transparent)] pub struct PodU16([u8; 2]); impl From for PodU16 { @@ -50,7 +52,9 @@ impl From for u16 { } } -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] +#[derive( + Clone, Copy, Debug, Default, PartialEq, Eq, bytemuck_derive::Pod, bytemuck_derive::Zeroable, +)] #[repr(transparent)] pub struct PodU64([u8; 8]); impl From for PodU64 { @@ -64,7 +68,9 @@ impl From for u64 { } } -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Pod, Zeroable)] +#[derive( + Clone, Copy, Debug, Default, PartialEq, Eq, bytemuck_derive::Pod, bytemuck_derive::Zeroable, +)] #[repr(transparent)] pub struct PodProofType(u8); impl From for PodProofType { @@ -80,7 +86,7 @@ impl TryFrom for ProofType { } } -#[derive(Clone, Copy, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct CompressedRistretto(pub [u8; 32]); diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs b/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs index d27f307f43df2c..d9d1d551b22d6e 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs @@ -1,23 +1,17 @@ //! Plain Old Data type for the Pedersen commitment scheme. 
+use {crate::RISTRETTO_POINT_LEN, std::fmt}; #[cfg(not(target_os = "solana"))] use { crate::{encryption::pedersen as decoded, errors::ElGamalError}, curve25519_dalek::ristretto::CompressedRistretto, }; -use { - crate::{ - zk_token_elgamal::pod::{Pod, Zeroable}, - RISTRETTO_POINT_LEN, - }, - std::fmt, -}; /// Byte length of a Pedersen commitment pub(crate) const PEDERSEN_COMMITMENT_LEN: usize = RISTRETTO_POINT_LEN; /// The `PedersenCommitment` type as a `Pod`. -#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] +#[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] pub struct PedersenCommitment(pub [u8; PEDERSEN_COMMITMENT_LEN]); diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs b/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs index 4f134cb5eb7dd0..10746bef944c61 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs @@ -5,9 +5,9 @@ use crate::{ range_proof::{self as decoded, errors::RangeProofVerificationError}, UNIT_LEN, }; -use crate::{ - zk_token_elgamal::pod::{Pod, Zeroable}, - RISTRETTO_POINT_LEN, SCALAR_LEN, +use { + crate::{RISTRETTO_POINT_LEN, SCALAR_LEN}, + bytemuck::{Pod, Zeroable}, }; /// Byte length of a range proof excluding the inner-product proof component diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs b/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs index f0f43e662a2e51..dc160f47f720b3 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs @@ -12,7 +12,7 @@ use crate::sigma_proofs::{ pubkey_proof::PubkeyValidityProof as DecodedPubkeyValidityProof, zero_balance_proof::ZeroBalanceProof as DecodedZeroBalanceProof, }; -use crate::zk_token_elgamal::pod::{Pod, Zeroable}; +use bytemuck::{Pod, Zeroable}; /// Byte length of a ciphertext-commitment equality proof const 
CIPHERTEXT_COMMITMENT_EQUALITY_PROOF_LEN: usize = 192; @@ -217,7 +217,7 @@ impl TryFrom for DecodedZeroBalanceProof { } /// The `FeeSigmaProof` type as a `Pod`. -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] pub struct FeeSigmaProof(pub [u8; FEE_SIGMA_PROOF_LEN]); @@ -238,7 +238,7 @@ impl TryFrom for DecodedFeeSigmaProof { } /// The `PubkeyValidityProof` type as a `Pod`. -#[derive(Clone, Copy, Pod, Zeroable)] +#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] pub struct PubkeyValidityProof(pub [u8; PUBKEY_VALIDITY_PROOF_LEN]); diff --git a/zk-token-sdk/src/zk_token_proof_state.rs b/zk-token-sdk/src/zk_token_proof_state.rs index d95aa4f11ec1c3..6d9644394ce197 100644 --- a/zk-token-sdk/src/zk_token_proof_state.rs +++ b/zk-token-sdk/src/zk_token_proof_state.rs @@ -53,7 +53,7 @@ impl ProofContextState { /// The `ProofContextState` without the proof context itself. This struct exists to facilitate the /// decoding of generic-independent fields in `ProofContextState`. -#[derive(Clone, Copy, Debug, PartialEq, Pod, Zeroable)] +#[derive(Clone, Copy, Debug, PartialEq, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(C)] pub struct ProofContextStateMeta { /// The proof context authority that can close the account