diff --git a/.travis.yml b/.travis.yml index 04a5221c546ea0..e476388278dc9f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,6 +38,7 @@ jobs: - readlink -f . script: - source ci/env.sh + - rustup set profile default - ci/publish-tarball.sh deploy: - provider: s3 diff --git a/Cargo.lock b/Cargo.lock index ef5f624cbb4791..68912493dc6254 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,16 +50,22 @@ version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" dependencies = [ - "memchr 2.3.3", + "memchr 2.4.0", ] +[[package]] +name = "aliasable" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" + [[package]] name = "ansi_term" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -100,10 +106,11 @@ checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" [[package]] name = "assert_cmd" -version = "1.0.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc1679af9a1ab4bea16f228b05d18f8363f8327b1fa8db00d2760cfafc6b61e" +checksum = "54f002ce7d0c5e809ebb02be78fd503aeed4a511fd0fcaff6e6914cbdabbfa33" dependencies = [ + "bstr", "doc-comment", "predicates", "predicates-core", @@ -134,7 +141,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -145,7 +152,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -157,7 +164,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -183,7 +190,7 @@ dependencies = [ "instant", "pin-project 1.0.1", "rand 0.8.3", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] @@ -251,38 +258,47 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bincode" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "byteorder", "serde", ] [[package]] name = "bindgen" -version = "0.54.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" +checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" dependencies = [ "bitflags", "cexpr", - "cfg-if 0.1.10", "clang-sys", - "clap", - "env_logger 0.7.1", "lazy_static", "lazycell", - "log 0.4.11", "peeking_take_while", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "regex", "rustc-hash", "shlex", - "which", ] +[[package]] +name = "bit-set" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" 
+dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.2.1" @@ -354,9 +370,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "borsh" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a26c53ddf60281f18e7a29b20db7ba3db82a9d81b9650bfaa02d646f50d364" +checksum = "4fcabb02816fdadf90866dc9a7824491ccb63d69f55375a266dc03509ac68d36" dependencies = [ "borsh-derive", "hashbrown 0.9.1", @@ -364,9 +380,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b637a47728b78a78cd7f4b85bf06d71ef4221840e059a38f048be2422bf673b2" +checksum = "4bd16f0729b89f0a212b0e2e1d19cc6593df63f771161a11863967780e2d033d" dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", @@ -377,23 +393,23 @@ dependencies = [ [[package]] name = "borsh-derive-internal" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d813fa25eb0bed78c36492cff4415f38c760d6de833d255ba9095bd8ebb7d725" +checksum = "1e321a130a3ac4b88eb59a6d670bde11eec9721a397b77e0f2079060e2a1b785" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] [[package]] name = "borsh-schema-derive-internal" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf78ee4a98c8cb9eba1bac3d3e2a1ea3d7673c719ce691e67b5cbafc472d3b7" +checksum = "15151a485164b319cc7a5160fe4316dc469a27993f71b73d7617dc9032ff0fd7" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -405,12 +421,12 @@ checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" dependencies = [ "lazy_static", - "memchr 2.3.3", + "memchr 2.4.0", "regex-automata", "serde", ] @@ -569,27 +585,27 @@ dependencies = [ "num-traits", "serde", "time 0.1.43", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "chrono-humanize" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a4c32145b4db85fe1c4f2b125a4f9493769df424f5f84baf6b04ea8eaf33c9" +checksum = "2eddc119501d583fd930cb92144e605f44e0252c38dd89d9247fffa1993375cb" dependencies = [ "chrono", ] [[package]] name = "clang-sys" -version = "0.29.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" +checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" dependencies = [ "glob", "libc", - "libloading 0.5.2", + "libloading 0.7.0", ] [[package]] @@ -634,7 +650,7 @@ dependencies = [ "ascii", "byteorder", "either", - "memchr 2.3.3", + "memchr 2.4.0", "unreachable", ] @@ -651,16 +667,51 @@ dependencies = [ "terminal_size", "termios", "unicode-width", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] +[[package]] +name = 
"console" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "regex", + "terminal_size", + "unicode-width", + "winapi 0.3.9", +] + [[package]] name = "const_fn" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" +[[package]] +name = "const_format" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ea7d6aeb2ebd1ee24f7b7e1b23242ef5a56b3a693733b99bfbe5ef31d0306" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c36c619c422113552db4eb28cddba8faa757e33f758cc3415bd2885977b591" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.9", + "unicode-xid 0.2.0", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -745,9 +796,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" dependencies = [ "crossbeam-epoch 0.8.2", "crossbeam-utils 0.7.2", @@ -756,9 +807,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch 0.9.1", @@ -892,7 +943,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" dependencies = [ - "memchr 2.3.3", + "memchr 2.4.0", ] [[package]] @@ -902,7 +953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54dedab740bc412d514cfbc4a1d9d5d16fed02c4b14a7be129003c07fdc33b9b" dependencies = [ "nix 0.17.0", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -958,7 +1009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -969,7 +1020,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -979,16 +1030,16 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aa86af7b19b40ef9cbef761ed411a49f0afa06b7b6dcd3dfe2f96a3c546138" dependencies = [ - "console", + "console 0.11.3", "lazy_static", "tempfile", ] [[package]] -name = "difference" -version = "2.0.0" +name = "difflib" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" @@ -1035,7 +1086,7 @@ checksum = 
"99de365f605554ae33f115102a02057d4fc18b01f3284d6870be0938743cfe7d" dependencies = [ "libc", "redox_users", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1053,7 +1104,7 @@ dependencies = [ "dlopen_derive", "lazy_static", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1125,7 +1176,7 @@ checksum = "7260c7e6e656fc7702a1aa8d5b498a1a69aa84ac4ffcd5501b7d26939f368a93" dependencies = [ "enum-ordinalize", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -1166,7 +1217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e94aa31f7c0dc764f57896dc615ddd76fc13b0d5dca7eb6cc5e018a5a09ec06" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -1179,23 +1230,10 @@ dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log 0.4.11", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.8.3" @@ -1203,7 +1241,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" dependencies = [ "atty", - "humantime 2.0.1", + "humantime", "log 0.4.11", "regex", "termcolor", @@ -1226,7 +1264,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", "synstructure", ] @@ -1253,7 +1291,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0010f02effd88c702318c5dde0463206be67495d0b4d906ba7c0a8f166cc7f06" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1262,6 +1300,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" +[[package]] +name = "filedescriptor" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a71e83755e51aa52b9034f1986173783789e8e7d79c3c774adbbb63fb554f2cb" +dependencies = [ + "libc", + "thiserror", + "winapi 0.3.9", +] + [[package]] name = "filetime" version = "0.2.10" @@ -1271,9 +1320,15 @@ dependencies = [ "cfg-if 0.1.10", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + [[package]] name = "flate2" version = "1.0.14" @@ -1408,7 +1463,7 @@ checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -1440,7 +1495,7 @@ dependencies = [ "futures-macro", "futures-sink", "futures-task", - "memchr 2.3.3", + "memchr 2.4.0", "pin-project 1.0.1", "pin-utils", "proc-macro-hack", @@ -1450,11 +1505,11 @@ dependencies = [ [[package]] name = "gag" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc0b9f53275dc5fada808f1d2f82e3688a6c14d735633d1590b7be8eb2307b5" +checksum = 
"a713bee13966e9fbffdf7193af71d54a6b35a0bb34997cd6c9519ebeb5005972" dependencies = [ - "libc", + "filedescriptor", "tempfile", ] @@ -1485,7 +1540,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e692e296bfac1d2533ef168d0b60ff5897b8b70a4009276834014dd8924cc028" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1551,7 +1606,7 @@ dependencies = [ "simpl", "smpl_jwt", "time 0.2.25", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] @@ -1586,9 +1641,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.0" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ "bytes 1.0.1", "fnv", @@ -1598,10 +1653,9 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-util 0.6.3", "tracing", - "tracing-futures", ] [[package]] @@ -1621,20 +1675,26 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" dependencies = [ - "autocfg 1.0.0", + "ahash 0.4.6", ] [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ - "ahash 0.4.6", + "unicode-segmentation", ] [[package]] @@ -1711,13 +1771,13 @@ dependencies = [ [[package]] name = "hmac-drbg" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - "digest 0.8.1", - "generic-array 0.12.3", - "hmac 0.7.1", + "digest 0.9.0", + "generic-array 0.14.3", + "hmac 0.8.1", ] [[package]] @@ -1743,19 +1803,20 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ "bytes 1.0.1", "http", + "pin-project-lite 0.2.7", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" @@ -1764,13 +1825,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] -name = "humantime" -version = "1.3.0" +name = "httpdate" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" @@ -1811,10 +1869,10 @@ dependencies = [ "http", "http-body 0.3.1", "httparse", - "httpdate", + "httpdate 0.3.2", "itoa", "pin-project 1.0.1", - "socket2", + "socket2 0.3.17", "tokio 0.2.22", "tower-service", "tracing", @@ -1823,23 +1881,23 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.3" +version = "0.14.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee5fc98172988e4394a3094002a75125e8fb864a88318732e8b613ec5adbda3" +checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" dependencies = [ "bytes 1.0.1", "futures-channel", "futures-core", "futures-util", - "h2 0.3.0", + "h2 0.3.3", "http", - "http-body 0.4.0", + "http-body 0.4.2", "httparse", - "httpdate", + "httpdate 1.0.1", "itoa", - "pin-project-lite 0.2.4", - "socket2", - "tokio 1.1.1", + "pin-project-lite 0.2.7", + "socket2 0.4.1", + "tokio 1.8.1", "tower-service", "tracing", "want", @@ -1852,14 +1910,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ "futures-util", - "hyper 0.14.3", + "hyper 0.14.11", "log 0.4.11", "rustls", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-rustls", "webpki", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.11", + "pin-project-lite 0.2.7", + "tokio 1.8.1", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1867,9 +1937,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes 1.0.1", - "hyper 0.14.3", + "hyper 0.14.11", "native-tls", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-native-tls", ] @@ -1909,12 +1979,12 @@ checksum = "d480125acf340d6a6e59dab69ae19d6fca3a906e1eade277671272cc8f73794b" [[package]] name = "indexmap" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg 1.0.0", - "hashbrown 0.8.2", + "hashbrown 0.11.2", "rayon", ] @@ -1924,7 +1994,7 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" dependencies = [ - "console", + "console 0.14.1", "lazy_static", "number_prefix", "regex", @@ -1969,6 +2039,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "0.4.5" @@ -1995,9 +2074,9 @@ dependencies = [ [[package]] name = "jsonrpc-client-transports" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b6c6ad01c7354d60de493148c30ac8a82b759e22ae678c8705e9b8e0c566a4" +checksum = 
"a2f81014e2706fde057e9dcb1036cf6bbf9418d972c597be5c7158c984656722" dependencies = [ "derive_more", "futures 0.3.8", @@ -2015,11 +2094,13 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" +checksum = "d4467ab6dfa369b69e52bd0692e480c4d117410538526a57a304a0f2250fd95e" dependencies = [ "futures 0.3.8", + "futures-executor", + "futures-util", "log 0.4.11", "serde", "serde_derive", @@ -2028,9 +2109,9 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ac9d56dc729912796637c30f475bbf834594607b27740dfea6e5fa7ba40d1f1" +checksum = "5c366c092d6bccc6e7ab44dd635a0f22ab2f201215339915fb7ff9508404f431" dependencies = [ "futures 0.3.8", "jsonrpc-client-transports", @@ -2038,21 +2119,21 @@ dependencies = [ [[package]] name = "jsonrpc-derive" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b68ba7e76e5c7796cfa4d2a30e83986550c34404c6d40551c902ca6f7bd4a137" +checksum = "34f6326966ebac440db89eba788f5a0e5ac2614b4b4bfbdc049a971e71040f32" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] [[package]] name = "jsonrpc-http-server" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff2303c4f0562afcbd2dae75e3e21815095f8994749a80fbcd365877e44ed64" +checksum = "522a047cac0958097ee71d047dd71cb84979fd2fa21c7a68fbe12736bef870a2" dependencies = [ "futures 0.3.8", "hyper 0.13.10", @@ -2066,9 +2147,9 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" -version = "17.0.1" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c4cd89e5ea7e7f0884e828fc35bb83591a371b92439675eae28efa66c24a97" +checksum = "0b1d782052ef17051d12681bcc2fa2e9e1aabf3f634588125493d63ddcca6fe1" dependencies = [ "futures 0.3.8", "jsonrpc-core", @@ -2081,9 +2162,9 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c48dbebce7a9c88ab272a4db7d6478aa4c6d9596e6c086366e89efc4e9ed89e" +checksum = "14739e5523a40739882cc34a44ab2dd9356bce5ce102513f5984a9efbe342f3d" dependencies = [ "futures 0.3.8", "jsonrpc-core", @@ -2096,9 +2177,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4207cce738bf713a82525065b750a008f28351324f438f56b33d698ada95bb4" +checksum = "bce68fa279a2822b3619369cd024f8a4f8e5ce485468834f8679a3c7919aae2d" dependencies = [ "bytes 0.5.4", "futures 0.3.8", @@ -2113,9 +2194,9 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abe06e1385e4a912711703123ba44f735627d666f87e5fec764ad1338ec617dc" +checksum = "b1d267a8649ec37e4452dd7b3f48827c9cdf36de3a3539cf73242c222ba2eb50" dependencies = [ "futures 0.3.8", "jsonrpc-core", @@ -2165,34 +2246,34 @@ checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.98" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "libloading" -version = "0.5.2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" +checksum = "2cadb8e769f070c45df05c78c7520eb4cd17061d4ab262e43cfc68b4d00ac71c" dependencies = [ - "cc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "libloading" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cadb8e769f070c45df05c78c7520eb4cd17061d4ab262e43cfc68b4d00ac71c" +checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ - "winapi 0.3.8", + "cfg-if 1.0.0", + "winapi 0.3.9", ] [[package]] name = "librocksdb-sys" -version = "6.11.4" +version = "6.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" +checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9" dependencies = [ "bindgen", "cc", @@ -2202,20 +2283,52 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.3.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" dependencies = [ "arrayref", - "crunchy", - "digest 0.8.1", + "base64 0.12.3", + "digest 0.9.0", "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.2.2", + "serde", + "sha2 0.9.2", "typenum", ] +[[package]] +name = "libsecp256k1-core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee11012b293ea30093c129173cac4335513064094619f4639a25b310fd33c11" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle 2.2.2", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32239626ffbb6a095b83b37a02ceb3672b2443a87a000a884fc3c4d16925c9c0" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76acb433e21d10f5f9892b1962c2856c58c7f39a9e4bd68ac82b9436a0ffd5b9" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "linked-hash-map" version = "0.5.3" @@ -2290,9 +2403,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memmap2" @@ -2374,7 +2487,7 @@ dependencies = [ "log 0.4.11", "miow 0.3.6", "ntapi", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2398,7 +2511,7 @@ dependencies = [ "log 0.4.11", "mio 0.6.22", "miow 0.3.6", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2430,10 +2543,16 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - 
"socket2", - "winapi 0.3.8", + "socket2 0.3.17", + "winapi 0.3.9", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "native-tls" version = "0.2.7" @@ -2460,7 +2579,7 @@ checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2503,7 +2622,7 @@ version = "5.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" dependencies = [ - "memchr 2.3.3", + "memchr 2.4.0", "version_check 0.9.2", ] @@ -2513,7 +2632,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2534,7 +2653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f09b9841adb6b5e1f89ef7087ea636e0fd94b2851f887c1e3eb5d5f8228fab3" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -2585,7 +2704,7 @@ checksum = "1c0fd9eba1d5db0994a239e09c1be402d35622277e35468ba891aa5e3188ce7e" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -2657,23 +2776,25 @@ dependencies = [ [[package]] name = "ouroboros" -version = "0.5.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc04551635026d3ac7bc646698ea1836a85ed2a26b7094fe1d15d8b14854c4a2" +checksum = "f0e3827c8742f21283e9374adf7905984e7b85731ad94a203137b56955d818b3" dependencies = [ + "aliasable", "ouroboros_macro", "stable_deref_trait", ] [[package]] name = "ouroboros_macro" -version = "0.5.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec33dfceabec83cd0e95a5ce9d20e76ab3a5cbfef59659b8c927f69b93ed8ae" +checksum = "1ef06077e08eac7e2aeaacfbd113a25fcb1b9b903437bd89d2bd513da6e04112" dependencies = [ "Inflector", + "proc-macro-error", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -2690,7 +2811,7 @@ dependencies = [ "miow 0.3.6", "rand 0.7.3", "tokio 0.2.22", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2755,7 +2876,7 @@ dependencies = [ "redox_syscall", "rustc_version", "smallvec 0.6.14", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2769,7 +2890,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec 1.6.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2784,7 +2905,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec 1.6.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2842,6 +2963,16 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "petgraph" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +dependencies = [ + "fixedbitset", + "indexmap", +] + [[package]] name = "pickledb" version = "0.4.1" @@ -2880,7 +3011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -2891,7 +3022,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -2903,9 +3034,9 @@ checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" [[package]] name = "pin-project-lite" -version = "0.2.4" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -2933,11 +3064,12 @@ checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "predicates" -version = "1.0.4" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "347a1b6f0b21e636bc9872fb60b83b8e185f6f5516298b8238699f7f9a531030" +checksum = "bc3d91237f5de3bcd9d927e24d03b495adb6135097b001cea7403e2d573d00a9" dependencies = [ - "difference", + "difflib", + "itertools 0.10.1", "predicates-core", ] @@ -2972,6 +3104,30 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2 1.0.24", + "quote 1.0.9", + "syn 1.0.60", + "version_check 0.9.2", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.9", + "version_check 0.9.2", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -3002,34 +3158,72 @@ dependencies = [ "unicode-xid 0.2.0", ] +[[package]] +name = "proptest" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" +dependencies = [ + "bit-set", + "bitflags", + "byteorder", + "lazy_static", + "num-traits", + "quick-error 2.0.1", + "rand 0.8.3", + "rand_chacha 0.3.0", + "rand_xorshift 0.3.0", + "regex-syntax", + "rusty-fork", + "tempfile", +] + [[package]] name = "prost" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ "bytes 1.0.1", "prost-derive", ] +[[package]] +name = "prost-build" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" +dependencies = [ + "bytes 1.0.1", + "heck", + "itertools 0.10.1", + "log 0.4.11", + "multimap", + "petgraph", + "prost", + "prost-types", + "tempfile", + "which", +] + [[package]] name = "prost-derive" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.1", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] [[package]] name = "prost-types" -version = "0.7.0" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ "bytes 1.0.1", "prost", @@ -3050,6 +3244,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quote" version = "0.6.13" @@ -3061,9 +3261,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2 1.0.24", ] @@ -3078,7 +3278,7 @@ dependencies = [ "libc", "rand_core 0.3.1", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3096,8 +3296,8 @@ dependencies = [ "rand_jitter", "rand_os", "rand_pcg 0.1.2", - "rand_xorshift", - "winapi 0.3.8", + "rand_xorshift 0.1.1", + "winapi 0.3.9", ] [[package]] @@ -3233,7 +3433,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3247,7 +3447,7 @@ dependencies = [ "libc", "rand_core 0.4.2", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3278,6 +3478,15 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.2", +] + [[package]] name = "raptorq" version = "1.4.2" @@ -3294,7 +3503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ "autocfg 1.0.0", - "crossbeam-deque 0.8.0", + "crossbeam-deque 0.8.1", "either", "rayon-core", ] @@ -3306,7 +3515,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel 0.5.0", - "crossbeam-deque 0.8.0", + "crossbeam-deque 0.8.1", "crossbeam-utils 0.8.1", "lazy_static", "num_cpus", @@ -3356,7 +3565,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" dependencies = [ "aho-corasick", - "memchr 2.3.3", + "memchr 2.4.0", "regex-syntax", ] @@ -3381,7 +3590,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc5b3ce5d5ea144bb04ebd093a9e14e9765bcfec866aecda9b6dec43b3d1e24" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3396,8 +3605,8 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body 0.4.0", - "hyper 0.14.3", + "http-body 0.4.2", + "hyper 0.14.11", "hyper-rustls", "hyper-tls", "ipnet", @@ -3407,12 +3616,12 @@ dependencies = [ "mime 0.3.16", "native-tls", "percent-encoding 2.1.0", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.7", "rustls", "serde", "serde_json", "serde_urlencoded", - "tokio 1.1.1", + "tokio 
1.8.1", "tokio-native-tls", "tokio-rustls", "url 2.2.0", @@ -3441,14 +3650,14 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "rocksdb" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" dependencies = [ "libc", "librocksdb-sys", @@ -3461,7 +3670,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3516,6 +3725,18 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error 1.2.3", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.4" @@ -3544,7 +3765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3569,7 +3790,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -3642,18 +3863,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.122" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "974ef1bd2ad8a507599b336595454081ff68a9599b4890af7643c0c0ed73a62c" +checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf487fbf5c6239d7ea2ff8b10cb6b811cd4b5080d1c2aeed1dec18753c06e10" +checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" dependencies = [ "serde", ] @@ -3670,12 +3891,12 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.122" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dee1f300f838c8ac340ecb0112b3ac472464fa67e87292bdb3dfc9c49128e17" +checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -3732,7 +3953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d08338d8024b227c62bd68a12c7c9883f5c66780abaef15c550dc56f46ee6515" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -3875,12 +4096,22 @@ dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", +] + +[[package]] +name = "socket2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +dependencies = [ + "libc", + "winapi 0.3.9", ] [[package]] name = "solana-account-decoder" -version = 
"1.7.0" +version = "1.7.11" dependencies = [ "Inflector", "base64 0.12.3", @@ -3893,7 +4124,6 @@ dependencies = [ "serde_json", "solana-config-program", "solana-sdk", - "solana-stake-program", "solana-vote-program", "spl-token", "thiserror", @@ -3902,14 +4132,14 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "crossbeam-channel 0.4.4", "log 0.4.11", "rand 0.7.3", "rayon", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-runtime", "solana-sdk", @@ -3918,7 +4148,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "log 0.4.11", @@ -3931,11 +4161,12 @@ dependencies = [ "solana-faucet", "solana-gossip", "solana-local-cluster", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-net-utils", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-transaction-status", "solana-version", "spl-token", @@ -3943,7 +4174,7 @@ dependencies = [ [[package]] name = "solana-banking-bench" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "crossbeam-channel 0.4.4", @@ -3954,9 +4185,10 @@ dependencies = [ "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-perf", + "solana-poh", "solana-runtime", "solana-sdk", "solana-streamer", @@ -3965,7 +4197,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "borsh", @@ -3974,28 +4206,28 @@ dependencies = [ "mio 0.7.6", "solana-banks-interface", "solana-banks-server", - "solana-program 1.7.0", + "solana-program 1.7.11", "solana-runtime", "solana-sdk", "tarpc", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "1.7.0" +version = "1.7.11" dependencies = [ "mio 0.7.6", "serde", "solana-sdk", "tarpc", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] name = "solana-banks-server" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "futures 0.3.8", @@ -4006,17 +4238,17 @@ dependencies = [ "solana-runtime", "solana-sdk", "tarpc", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-serde", "tokio-stream", ] [[package]] name = "solana-bench-exchange" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", - "itertools", + "itertools 0.9.0", "log 0.4.11", "num-derive", "num-traits", @@ -4032,21 +4264,22 @@ dependencies = [ "solana-genesis", "solana-gossip", "solana-local-cluster", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-net-utils", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-version", ] [[package]] name = "solana-bench-streamer" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-net-utils", "solana-streamer", "solana-version", @@ -4054,7 +4287,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "clap", @@ -4070,21 +4303,23 @@ dependencies = [ "solana-genesis", "solana-gossip", "solana-local-cluster", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", "solana-net-utils", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-version", ] [[package]] name = "solana-bpf-loader-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "byteorder", + "libsecp256k1", 
"log 0.4.11", "num-derive", "num-traits", @@ -4099,25 +4334,9 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-budget-program" -version = "1.7.0" -dependencies = [ - "bincode", - "chrono", - "log 0.4.11", - "num-derive", - "num-traits", - "serde", - "serde_derive", - "solana-runtime", - "solana-sdk", - "thiserror", -] - [[package]] name = "solana-cargo-build-bpf" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bzip2", "cargo_metadata", @@ -4130,7 +4349,7 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "1.7.0" +version = "1.7.11" dependencies = [ "cargo_metadata", "clap", @@ -4138,7 +4357,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.7.0" +version = "1.7.11" dependencies = [ "chrono", "clap", @@ -4154,18 +4373,19 @@ dependencies = [ [[package]] name = "solana-cli" -version = "1.7.0" +version = "1.7.11" dependencies = [ "Inflector", "bincode", "bs58", "chrono", "clap", - "console", + "console 0.14.1", + "const_format", "criterion-stats", "ctrlc", "dirs-next", - "humantime 2.0.1", + "humantime", "indicatif", "log 0.4.11", "num-traits", @@ -4183,11 +4403,11 @@ dependencies = [ "solana-config-program", "solana-core", "solana-faucet", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-net-utils", "solana-remote-wallet", "solana-sdk", - "solana-stake-program", + "solana-streamer", "solana-transaction-status", "solana-version", "solana-vote-program", @@ -4201,7 +4421,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.7.0" +version = "1.7.11" dependencies = [ "dirs-next", "lazy_static", @@ -4213,13 +4433,14 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.7.0" +version = "1.7.11" dependencies = [ "Inflector", "base64 0.13.0", "chrono", - "console", - "humantime 2.0.1", + "clap", + "console 0.14.1", + "humantime", "indicatif", "serde", "serde_derive", @@ -4228,7 +4449,6 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-sdk", - "solana-stake-program", "solana-transaction-status", "solana-vote-program", "spl-memo", @@ -4236,7 +4456,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "base64 0.13.0", @@ -4257,21 +4477,21 @@ dependencies = [ "solana-account-decoder", "solana-clap-utils", "solana-faucet", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-net-utils", "solana-sdk", "solana-transaction-status", "solana-version", "solana-vote-program", "thiserror", - "tokio 1.1.1", + "tokio 1.8.1", "tungstenite", "url 2.2.0", ] [[package]] name = "solana-config-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "chrono", @@ -4279,13 +4499,13 @@ dependencies = [ "rand_core 0.6.2", "serde", "serde_derive", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-sdk", ] [[package]] name = "solana-core" -version = "1.7.0" +version = "1.7.11" dependencies = [ "ahash 0.6.1", "base64 0.12.3", @@ -4295,17 +4515,14 @@ dependencies = [ "bv", "byteorder", "chrono", - "core_affinity", "crossbeam-channel 0.4.4", "ed25519-dalek", "flate2", "fs_extra", "indexmap", - "itertools", + "itertools 0.9.0", "jsonrpc-core", "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-http-server", "libc", "log 0.4.11", "lru", @@ -4319,7 +4536,6 @@ dependencies = [ "rand_core 0.6.2", "raptorq", "rayon", - "regex", "reqwest", "retain_mut", "rustc_version", @@ -4332,26 +4548,24 @@ dependencies = [ "solana-banks-server", "solana-clap-utils", "solana-client", - "solana-faucet", - 
"solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", "solana-gossip", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-merkle-tree", "solana-metrics", "solana-net-utils", "solana-perf", + "solana-poh", "solana-program-test", "solana-rayon-threadlimit", "solana-rpc", "solana-runtime", "solana-sdk", "solana-stake-program", - "solana-storage-bigtable", "solana-streamer", - "solana-sys-tuner", "solana-transaction-status", "solana-version", "solana-vote-program", @@ -4361,14 +4575,12 @@ dependencies = [ "tempfile", "thiserror", "tokio 0.2.22", - "tokio 1.1.1", - "tokio-util 0.3.1", "trees", ] [[package]] name = "solana-crate-features" -version = "1.7.0" +version = "1.7.11" dependencies = [ "backtrace", "bytes 0.4.12", @@ -4386,12 +4598,12 @@ dependencies = [ "syn 0.15.44", "syn 1.0.60", "tokio 0.1.22", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "solana-dos" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "clap", @@ -4403,19 +4615,20 @@ dependencies = [ "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-net-utils", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-version", ] [[package]] name = "solana-download-utils" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bzip2", - "console", + "console 0.14.1", "indicatif", "log 0.4.11", "reqwest", @@ -4426,7 +4639,7 @@ dependencies = [ [[package]] name = "solana-exchange-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "log 0.4.11", @@ -4434,7 +4647,7 @@ dependencies = [ "num-traits", "serde", "serde_derive", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-runtime", "solana-sdk", @@ -4443,7 +4656,7 @@ dependencies = [ [[package]] name = "solana-failure-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "solana-runtime", "solana-sdk", @@ -4451,7 +4664,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "byteorder", @@ -4461,20 +4674,20 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-sdk", "solana-version", "spl-memo", "thiserror", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] name = "solana-frozen-abi" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ac3925c8995d935d3af27390cf7b748a81837a29feb75a00e115f40dae6dc" +checksum = "b0b98d31e0662fedf3a1ee30919c655713874d578e19e65affe46109b1b927f9" dependencies = [ "bs58", "bv", @@ -4485,14 +4698,14 @@ dependencies = [ "serde", "serde_derive", "sha2 0.9.2", - "solana-frozen-abi-macro 1.6.4", - "solana-logger 1.6.4", + "solana-frozen-abi-macro 1.7.6", + "solana-logger 1.7.6", "thiserror", ] [[package]] name = "solana-frozen-abi" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bs58", "bv", @@ -4503,37 +4716,36 @@ dependencies = [ "serde", "serde_derive", "sha2 0.9.2", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "thiserror", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae448159038e470f3b2dd1ab0d219246211f940a10bf1e656a02cb5c2d897437" +checksum = 
"ceac6e8ad1a784c92ff5f3d6ad68a8d664d389b08055b674c38b2b9abb69e6d4" dependencies = [ - "lazy_static", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "rustc_version", "syn 1.0.60", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.7.0" +version = "1.7.11" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "rustc_version", "syn 1.0.60", ] [[package]] name = "solana-genesis" -version = "1.7.0" +version = "1.7.11" dependencies = [ "base64 0.12.3", "chrono", @@ -4541,12 +4753,11 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "solana-budget-program", "solana-clap-utils", "solana-cli-config", "solana-exchange-program", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-runtime", "solana-sdk", "solana-stake-program", @@ -4557,7 +4768,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.7.0" +version = "1.7.11" dependencies = [ "solana-download-utils", "solana-runtime", @@ -4566,14 +4777,14 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "bv", "clap", "flate2", "indexmap", - "itertools", + "itertools 0.9.0", "log 0.4.11", "lru", "matches", @@ -4589,10 +4800,10 @@ dependencies = [ "serial_test", "solana-clap-utils", "solana-client", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", "solana-net-utils", @@ -4608,14 +4819,14 @@ dependencies = [ [[package]] name = "solana-install" -version = "1.7.0" +version = "1.7.11" dependencies = [ "atty", "bincode", "bzip2", "chrono", "clap", - "console", + "console 0.14.1", "ctrlc", "dirs-next", "indicatif", @@ -4629,19 +4840,19 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-config-program", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-sdk", "solana-version", "tar", "tempfile", "url 2.2.0", - "winapi 0.3.8", + "winapi 0.3.9", "winreg", ] [[package]] name = "solana-keygen" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bs58", "clap", @@ -4657,7 +4868,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "bincode", @@ -4671,7 +4882,7 @@ dependencies = [ "fs_extra", "futures 0.3.8", "futures-util", - "itertools", + "itertools 0.9.0", "lazy_static", "libc", "log 0.4.11", @@ -4689,10 +4900,9 @@ dependencies = [ "sha2 0.9.2", "solana-account-decoder", "solana-bpf-loader-program", - "solana-budget-program", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-measure", "solana-merkle-tree", "solana-metrics", @@ -4700,21 +4910,20 @@ dependencies = [ "solana-rayon-threadlimit", "solana-runtime", "solana-sdk", - "solana-stake-program", "solana-storage-bigtable", "solana-storage-proto", "solana-transaction-status", "solana-vote-program", "tempfile", "thiserror", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-stream", "trees", ] [[package]] name = "solana-ledger-tool" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_cmd", "bs58", @@ -4724,7 +4933,7 @@ dependencies = [ "futures 0.3.8", "futures-util", "histogram", - "itertools", + "itertools 0.9.0", "log 0.4.11", "regex", "serde", @@ -4734,7 +4943,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-output", 
"solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-runtime", "solana-sdk", @@ -4744,18 +4953,18 @@ dependencies = [ "solana-version", "solana-vote-program", "tempfile", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] name = "solana-local-cluster" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "crossbeam-channel 0.4.4", "fs_extra", "gag", - "itertools", + "itertools 0.9.0", "log 0.4.11", "rand 0.7.3", "rayon", @@ -4768,51 +4977,53 @@ dependencies = [ "solana-faucet", "solana-gossip", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-rayon-threadlimit", + "solana-rpc", "solana-runtime", "solana-sdk", "solana-stake-program", + "solana-streamer", "solana-vote-program", "tempfile", ] [[package]] name = "solana-log-analyzer" -version = "1.7.0" +version = "1.7.11" dependencies = [ "byte-unit", "clap", "serde", "serde_json", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-version", ] [[package]] name = "solana-logger" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fa7bddd7b89c26c6e3ef4af9b47d6bc8d60888559affb5160f5ade18c0cd058" +checksum = "ec7c514fe57f8c5042fa88c19f5711c67f264db723d9d79379fcb78dd1f09bbf" dependencies = [ - "env_logger 0.8.3", + "env_logger", "lazy_static", "log 0.4.11", ] [[package]] name = "solana-logger" -version = "1.7.0" +version = "1.7.11" dependencies = [ - "env_logger 0.8.3", + "env_logger", "lazy_static", "log 0.4.11", ] [[package]] name = "solana-measure" -version = "1.7.0" +version = "1.7.11" dependencies = [ "log 0.4.11", "solana-metrics", @@ -4821,11 +5032,11 @@ dependencies = [ [[package]] name = "solana-merkle-root-bench" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "log 0.4.11", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-runtime", "solana-sdk", @@ -4834,19 +5045,19 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.7.0" +version = "1.7.11" dependencies = [ "fast-math", "hex", "matches", - "solana-program 1.7.0", + "solana-program 1.7.11", ] [[package]] name = "solana-metrics" -version = "1.7.0" +version = "1.7.11" dependencies = [ - "env_logger 0.8.3", + "env_logger", "gethostname", "lazy_static", "log 0.4.11", @@ -4858,19 +5069,19 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "rand 0.7.3", "serde", "serde_json", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", ] [[package]] name = "solana-net-utils" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "clap", @@ -4879,26 +5090,27 @@ dependencies = [ "rand 0.7.3", "serde", "serde_derive", - "socket2", + "socket2 0.3.17", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", + "solana-sdk", "solana-version", - "tokio 1.1.1", + "tokio 1.8.1", "url 2.2.0", ] [[package]] name = "solana-noop-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "log 0.4.11", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-sdk", ] [[package]] name = "solana-notifier" -version = "1.7.0" +version = "1.7.11" dependencies = [ "log 0.4.11", "reqwest", @@ -4907,7 +5119,7 @@ dependencies = [ [[package]] name = "solana-ownable" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "num-derive", @@ -4919,7 +5131,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.7.0" +version = "1.7.11" 
dependencies = [ "bincode", "curve25519-dalek 2.1.0", @@ -4931,16 +5143,36 @@ dependencies = [ "rand 0.7.3", "rayon", "serde", - "solana-budget-program", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", ] +[[package]] +name = "solana-poh" +version = "1.7.11" +dependencies = [ + "bincode", + "core_affinity", + "crossbeam-channel 0.4.4", + "log 0.4.11", + "matches", + "rand 0.7.3", + "solana-ledger", + "solana-logger 1.7.11", + "solana-measure", + "solana-metrics", + "solana-perf", + "solana-runtime", + "solana-sdk", + "solana-sys-tuner", + "thiserror", +] + [[package]] name = "solana-poh-bench" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "log 0.4.11", @@ -4948,7 +5180,7 @@ dependencies = [ "rayon", "solana-clap-utils", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-perf", "solana-sdk", @@ -4957,19 +5189,21 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "231bf4cd42997b6f34e7c74a1449e8a1e8816fd83662fc3a5a0922da48fc667b" +checksum = "3bfe6a5dfc5372c0a946018ecdd8115e38af78cea8275bac48cf3d105c6b1fb3" dependencies = [ "bincode", + "blake3", "borsh", "borsh-derive", "bs58", "bv", "curve25519-dalek 2.1.0", "hex", - "itertools", + "itertools 0.9.0", "lazy_static", + "libsecp256k1", "log 0.4.11", "num-derive", "num-traits", @@ -4980,16 +5214,17 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.9.2", - "solana-frozen-abi 1.6.4", - "solana-frozen-abi-macro 1.6.4", - "solana-logger 1.6.4", - "solana-sdk-macro 1.6.4", + "sha3", + "solana-frozen-abi 1.7.6", + "solana-frozen-abi-macro 1.7.6", + "solana-logger 1.7.6", + "solana-sdk-macro 1.7.6", "thiserror", ] [[package]] name = "solana-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "bincode", @@ -5000,8 +5235,9 @@ dependencies = [ "bv", "curve25519-dalek 2.1.0", "hex", - "itertools", + "itertools 0.9.0", "lazy_static", + "libsecp256k1", "log 0.4.11", "num-derive", "num-traits", @@ -5014,17 +5250,17 @@ dependencies = [ "serde_json", "sha2 0.9.2", "sha3", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", - "solana-sdk-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", + "solana-sdk-macro 1.7.11", "static_assertions", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "async-trait", @@ -5039,18 +5275,17 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-runtime", "solana-sdk", - "solana-stake-program", "solana-vote-program", "thiserror", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] name = "solana-rayon-threadlimit" -version = "1.7.0" +version = "1.7.11" dependencies = [ "lazy_static", "num_cpus", @@ -5058,10 +5293,10 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.7.0" +version = "1.7.11" dependencies = [ "base32", - "console", + "console 0.14.1", "dialoguer", "hidapi", "log 0.4.11", @@ -5077,36 +5312,55 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.7.0" +version = "1.7.11" dependencies = [ + "base64 0.12.3", + "bincode", "bs58", "crossbeam-channel 0.4.4", + "itertools 0.9.0", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", + 
"jsonrpc-http-server", "jsonrpc-pubsub", "jsonrpc-ws-server", + "libc", "log 0.4.11", + "regex", "serde", "serde_derive", "serde_json", "serial_test", "solana-account-decoder", "solana-client", + "solana-faucet", + "solana-gossip", "solana-ledger", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-poh", "solana-runtime", "solana-sdk", "solana-stake-program", + "solana-storage-bigtable", + "solana-streamer", + "solana-transaction-status", + "solana-version", "solana-vote-program", "spl-token", - "tokio 1.1.1", + "symlink", + "tokio 0.2.22", + "tokio 1.8.1", + "tokio-util 0.3.1", ] [[package]] name = "solana-runtime" -version = "1.7.0" +version = "1.7.11" dependencies = [ "arrayref", "assert_matches", @@ -5120,7 +5374,7 @@ dependencies = [ "dir-diff", "flate2", "fnv", - "itertools", + "itertools 0.9.0", "lazy_static", "libc", "libloading 0.6.2", @@ -5137,9 +5391,9 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", "solana-noop-program", @@ -5157,7 +5411,7 @@ dependencies = [ [[package]] name = "solana-scripts" -version = "1.7.0" +version = "1.7.11" dependencies = [ "csv", "serde", @@ -5165,7 +5419,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "bincode", @@ -5181,7 +5435,7 @@ dependencies = [ "generic-array 0.14.3", "hex", "hmac 0.10.1", - "itertools", + "itertools 0.9.0", "lazy_static", "libsecp256k1", "log 0.4.11", @@ -5202,11 +5456,11 @@ dependencies = [ "sha2 0.9.2", "sha3", "solana-crate-features", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", - "solana-program 1.7.0", - "solana-sdk-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", + "solana-program 1.7.11", + "solana-sdk-macro 1.7.11", "thiserror", "tiny-bip39 0.7.3", "uriparse", @@ -5214,44 +5468,42 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c805f1e32677f8ec0cc0b2f470833a0d5ab0922f068e52be3a4a93b468c9c70" +checksum = "84710ce45a21cccd9f2b09d8e9aad529080bb2540f27b1253874b6e732b465b9" dependencies = [ "bs58", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "rustversion", "syn 1.0.60", ] [[package]] name = "solana-sdk-macro" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bs58", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "rustversion", "syn 1.0.60", ] [[package]] name = "solana-secp256k1-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", - "digest 0.9.0", "libsecp256k1", "rand 0.7.3", - "sha3", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-sdk", ] [[package]] name = "solana-stake-accounts" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "solana-clap-utils", @@ -5263,45 +5515,22 @@ dependencies = [ "solana-stake-program", ] -[[package]] -name = "solana-stake-monitor" -version = "1.7.0" -dependencies = [ - "clap", - "console", - "log 0.4.11", - "serde", - "serde_yaml", - "serial_test", - "solana-clap-utils", - "solana-cli-config", - "solana-client", - "solana-core", - "solana-local-cluster", - "solana-logger 1.7.0", - "solana-metrics", - "solana-sdk", - 
"solana-stake-program", - "solana-transaction-status", - "solana-version", - "tempfile", -] - [[package]] name = "solana-stake-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "log 0.4.11", "num-derive", "num-traits", + "proptest", "rustc_version", "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-metrics", "solana-sdk", "solana-vote-program", @@ -5310,7 +5539,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.7.0" +version = "1.7.11" dependencies = [ "arc-swap 0.4.8", "backoff", @@ -5337,7 +5566,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "bs58", @@ -5347,15 +5576,16 @@ dependencies = [ "solana-account-decoder", "solana-sdk", "solana-transaction-status", + "tonic-build", ] [[package]] name = "solana-store-tool" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "log 0.4.11", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-runtime", "solana-sdk", @@ -5364,12 +5594,12 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.7.0" +version = "1.7.11" dependencies = [ "libc", "log 0.4.11", "nix 0.19.0", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", "solana-perf", @@ -5379,14 +5609,14 @@ dependencies = [ [[package]] name = "solana-sys-tuner" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", "libc", "log 0.4.11", "nix 0.19.0", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-version", "sysctl", "unix_socket2", @@ -5395,12 +5625,12 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "chrono", "clap", - "console", + "console 0.14.1", "csv", "ctrlc", "dirs-next", @@ -5413,12 +5643,12 @@ dependencies = [ "solana-cli-config", "solana-client", "solana-core", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-program-test", "solana-remote-wallet", "solana-runtime", "solana-sdk", - "solana-stake-program", + "solana-streamer", "solana-transaction-status", "solana-version", "spl-associated-token-account", @@ -5429,7 +5659,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.7.0" +version = "1.7.11" dependencies = [ "Inflector", "base64 0.12.3", @@ -5442,7 +5672,6 @@ dependencies = [ "solana-account-decoder", "solana-runtime", "solana-sdk", - "solana-stake-program", "solana-vote-program", "spl-associated-token-account", "spl-memo", @@ -5452,7 +5681,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.7.0" +version = "1.7.11" dependencies = [ "serde_json", "solana-metrics", @@ -5460,13 +5689,13 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.7.0" +version = "1.7.11" dependencies = [ "base64 0.12.3", "bincode", "chrono", "clap", - "console", + "console 0.14.1", "core_affinity", "fd-lock", "indicatif", @@ -5490,13 +5719,15 @@ dependencies = [ "solana-genesis-utils", "solana-gossip", "solana-ledger", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-net-utils", "solana-perf", + "solana-poh", "solana-rpc", "solana-runtime", "solana-sdk", + "solana-streamer", "solana-version", "solana-vote-program", "symlink", @@ -5504,21 +5735,21 @@ dependencies = [ [[package]] name 
= "solana-version" -version = "1.7.0" +version = "1.7.11" dependencies = [ "log 0.4.11", "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-sdk", ] [[package]] name = "solana-vote-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "log 0.4.11", @@ -5527,9 +5758,9 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-metrics", "solana-sdk", "thiserror", @@ -5537,16 +5768,16 @@ dependencies = [ [[package]] name = "solana-watchtower" -version = "1.7.0" +version = "1.7.11" dependencies = [ "clap", - "humantime 2.0.1", + "humantime", "log 0.4.11", "solana-clap-utils", "solana-cli-config", "solana-cli-output", "solana-client", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-metrics", "solana-notifier", "solana-sdk", @@ -5580,11 +5811,11 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spl-associated-token-account" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4adc47eebe5d2b662cbaaba1843719c28a67e5ec5d0460bc3ca60900a51f74e2" +checksum = "393e2240d521c3dd770806bff25c2c00d761ac962be106e14e22dd912007f428" dependencies = [ - "solana-program 1.6.4", + "solana-program 1.7.6", "spl-token", ] @@ -5594,20 +5825,20 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.6.4", + "solana-program 1.7.6", ] [[package]] name = "spl-token" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b795e50d15dfd35aa5460b80a16414503a322be115a417a43db987c5824c6798" +checksum = "93bfdd5bd7c869cb565c7d7635c4fafe189b988a0bdef81063cd9585c6b8dc01" dependencies = [ "arrayref", "num-derive", "num-traits", "num_enum", - "solana-program 1.6.4", + "solana-program 1.7.6", "thiserror", ] @@ -5653,7 +5884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "serde", "serde_derive", "syn 1.0.60", @@ -5667,7 +5898,7 @@ checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "serde", "serde_derive", "serde_json", @@ -5723,7 +5954,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "unicode-xid 0.2.0", ] @@ -5734,7 +5965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", "unicode-xid 0.2.0", ] @@ -5764,18 +5995,17 @@ dependencies = [ "libc", "nom 3.2.1", "time 0.1.43", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "tar" -version = "0.4.28" +version = "0.4.37" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c058ad0bd6ccb84faa24cc44d4fc99bee8a5d7ba9ff33aa4d993122d1aeeac2" +checksum = "d6f5515d3add52e0bbdcad7b83c388bb36ba7b754dda3b5f5bc2d38640cdba5c" dependencies = [ "filetime", "libc", - "redox_syscall", "xattr", ] @@ -5788,14 +6018,14 @@ dependencies = [ "anyhow", "fnv", "futures 0.3.8", - "humantime 2.0.1", + "humantime", "log 0.4.11", "pin-project 1.0.1", "rand 0.7.3", "serde", "static_assertions", "tarpc-plugins", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-serde", "tokio-util 0.6.3", ] @@ -5807,7 +6037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3240378a22b1195734e085ba71d1d4188d50f034aea82635acc430b7005afb5" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -5822,7 +6052,7 @@ dependencies = [ "rand 0.7.3", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5836,12 +6066,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8038f95fc7a6f351163f4b964af631bd26c9e828f7db085f2a84aca56f70d13b" +checksum = "86ca8ced750734db02076f44132d802af0b33b09942331f4459dde8636fd2406" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5878,7 +6108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -5895,7 +6125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5910,7 +6140,7 @@ dependencies = [ "stdweb", "time-macros", "version_check 0.9.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5931,7 +6161,7 @@ checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "standback", "syn 1.0.60", ] @@ -6021,7 +6251,7 @@ dependencies = [ "iovec", "lazy_static", "libc", - "memchr 2.3.3", + "memchr 2.4.0", "mio 0.6.22", "mio-named-pipes", "mio-uds", @@ -6030,27 +6260,27 @@ dependencies = [ "signal-hook-registry", "slab", "tokio-macros 0.2.6", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "tokio" -version = "1.1.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6714d663090b6b0acb0fa85841c6d66233d150cdb2602c8f9b8abb03370beb3f" +checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985" dependencies = [ "autocfg 1.0.0", "bytes 1.0.1", "libc", - "memchr 2.3.3", + "memchr 2.4.0", "mio 0.7.6", "num_cpus", "once_cell", "parking_lot 0.11.0", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.7", "signal-hook-registry", - "tokio-macros 1.0.0", - "winapi 0.3.8", + "tokio-macros 1.3.0", + "winapi 0.3.9", ] [[package]] @@ -6106,6 +6336,16 @@ dependencies = [ "log 0.4.11", ] +[[package]] +name = "tokio-io-timeout" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +dependencies = [ + "pin-project-lite 0.2.7", + "tokio 1.8.1", +] + [[package]] name = "tokio-macros" version = "0.2.6" @@ -6113,18 +6353,18 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] [[package]] name = "tokio-macros" -version = "1.0.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] @@ -6135,7 +6375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", - "tokio 1.1.1", + "tokio 1.8.1", ] [[package]] @@ -6164,7 +6404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.1.1", + "tokio 1.8.1", "webpki", ] @@ -6191,8 +6431,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.1.1", + "pin-project-lite 0.2.7", + "tokio 1.8.1", ] [[package]] @@ -6225,7 +6465,7 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque 0.7.3", + "crossbeam-deque 0.7.4", "crossbeam-queue", "crossbeam-utils 0.7.2", "futures 0.1.29", @@ -6316,8 +6556,8 @@ dependencies = [ "futures-core", "futures-sink", "log 0.4.11", - "pin-project-lite 0.2.4", - "tokio 1.1.1", + "pin-project-lite 0.2.7", + "tokio 1.8.1", ] [[package]] @@ -6331,9 +6571,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ba8f479158947373b6df40cf48f4779bb25c99ca3c661bd95e0ab1963ad8b0e" +checksum = "b584f064fdfc50017ec39162d5aebce49912f1eb16fd128e04b7f4ce4907c7e5" dependencies = [ "async-stream", "async-trait", @@ -6341,29 +6581,43 @@ dependencies = [ "bytes 1.0.1", "futures-core", "futures-util", - "h2 0.3.0", + "h2 0.3.3", "http", - "http-body 0.4.0", - "hyper 0.14.3", + "http-body 0.4.2", + "hyper 0.14.11", + "hyper-timeout", "percent-encoding 2.1.0", "pin-project 1.0.1", "prost", "prost-derive", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-rustls", "tokio-stream", "tokio-util 0.6.3", "tower", + "tower-layer", "tower-service", "tracing", "tracing-futures", ] +[[package]] +name = "tonic-build" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25db9a497663a9a779693ef67b6e6aef8345b3d3ff8d50ef92eae6c88cb1e386" +dependencies = [ + "proc-macro2 1.0.24", + "prost-build", + "quote 1.0.9", + "syn 1.0.60", +] + [[package]] name = "tower" -version = "0.4.4" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd7b451959622e21de79261673d658a0944b835012c58c51878ea55957fb51a" +checksum = "f60422bc7fefa2f3ec70359b8ff1caff59d785877eb70595904605bcc412470f" dependencies = [ "futures-core", "futures-util", @@ -6371,8 +6625,9 @@ dependencies = [ "pin-project 1.0.1", "rand 0.8.3", "slab", - "tokio 1.1.1", + "tokio 1.8.1", "tokio-stream", + "tokio-util 0.6.3", "tower-layer", "tower-service", 
"tracing", @@ -6392,32 +6647,33 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.18" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" +checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log 0.4.11", + "pin-project-lite 0.2.7", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.9" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" +checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", ] [[package]] name = "tracing-core" -version = "0.1.13" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d593f98af59ebc017c0648f0117525db358745a8894a8d684e185ba3f45954f9" +checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" dependencies = [ "lazy_static", ] @@ -6533,6 +6789,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" + [[package]] name = "unicode-width" version = "0.1.7" @@ -6676,7 +6938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] @@ -6718,7 +6980,7 @@ dependencies = [ "lazy_static", "log 0.4.11", "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", "wasm-bindgen-shared", ] @@ -6741,7 +7003,7 @@ version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ - "quote 1.0.6", + "quote 1.0.9", "wasm-bindgen-macro-support", ] @@ -6752,7 +7014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -6836,11 +7098,12 @@ dependencies = [ [[package]] name = "which" -version = "3.1.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" dependencies = [ "libc", + "thiserror", ] [[package]] @@ -6851,9 +7114,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -6877,7 +7140,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6892,7 +7155,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6939,7 +7202,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.6", + "quote 1.0.9", "syn 1.0.60", "synstructure", ] @@ -6971,6 +7234,6 @@ checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" dependencies = [ "cc", "glob", - "itertools", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 28a13a393e835d..4d3e377cbbb9f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,19 +39,20 @@ members = [ "metrics", "net-shaper", "notifier", + "poh", "poh-bench", "program-test", - "programs/secp256k1", "programs/bpf_loader", - "programs/budget", "programs/config", "programs/exchange", "programs/failure", "programs/noop", "programs/ownable", + "programs/secp256k1", "programs/stake", "programs/vote", "remote-wallet", + "rpc", "runtime", "runtime/store-tool", "sdk", @@ -59,7 +60,6 @@ members = [ "sdk/cargo-test-bpf", "scripts", "stake-accounts", - "stake-monitor", "sys-tuner", "tokens", "transaction-status", @@ -76,5 +76,10 @@ exclude = [ "programs/bpf", ] +# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure +# dependency is supported on Apple M1. v2 of the feature resolver is needed to +# specify arch-specific features. +resolver = "2" + [profile.dev] split-debuginfo = "unpacked" diff --git a/README.md b/README.md index f6f12bd0ea7dcc..4d75a8573b20a3 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ $ source $HOME/.cargo/env $ rustup component add rustfmt ``` -Please sure you are always using the latest stable rust version by running: +Please make sure you are always using the latest stable rust version by running: ```bash $ rustup update diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index e847b72e1c219a..478f26fd86ec6f 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-account-decoder" -version = "1.7.0" +version = "1.7.11" description = "Solana account decoder" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,11 +19,10 @@ lazy_static = "1.4.0" serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-config-program = { path = "../programs/config", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +solana-config-program = { path = "../programs/config", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } +spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.5.1" diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs index 7f1e7c40c70c20..31cf56e374d834 100644 --- a/account-decoder/src/lib.rs +++ 
b/account-decoder/src/lib.rs @@ -28,6 +28,7 @@ use { pub type StringAmount = String; pub type StringDecimals = String; +pub const MAX_BASE58_BYTES: usize = 128; /// A duplicate representation of an Account for pretty JSON serialization #[derive(Serialize, Deserialize, Clone, Debug)] @@ -60,6 +61,17 @@ pub enum UiAccountEncoding { } impl UiAccount { + fn encode_bs58( + account: &T, + data_slice_config: Option, + ) -> String { + if account.data().len() <= MAX_BASE58_BYTES { + bs58::encode(slice_data(account.data(), data_slice_config)).into_string() + } else { + "error: data too large for bs58 encoding".to_string() + } + } + pub fn encode( pubkey: &Pubkey, account: &T, @@ -68,33 +80,34 @@ impl UiAccount { data_slice_config: Option, ) -> Self { let data = match encoding { - UiAccountEncoding::Binary => UiAccountData::LegacyBinary( - bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), - ), - UiAccountEncoding::Base58 => UiAccountData::Binary( - bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), - encoding, - ), + UiAccountEncoding::Binary => { + let data = Self::encode_bs58(account, data_slice_config); + UiAccountData::LegacyBinary(data) + } + UiAccountEncoding::Base58 => { + let data = Self::encode_bs58(account, data_slice_config); + UiAccountData::Binary(data, encoding) + } UiAccountEncoding::Base64 => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), encoding, ), UiAccountEncoding::Base64Zstd => { let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); match encoder - .write_all(slice_data(&account.data(), data_slice_config)) + .write_all(slice_data(account.data(), data_slice_config)) .and_then(|()| encoder.finish()) { Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding), Err(_) => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), UiAccountEncoding::Base64, ), } } UiAccountEncoding::JsonParsed => { if let Ok(parsed_data) = - parse_account_data(pubkey, &account.owner(), &account.data(), additional_data) + parse_account_data(pubkey, account.owner(), account.data(), additional_data) { UiAccountData::Json(parsed_data) } else { diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index 9b649dc8fec4ed..0c26b4003ea8f9 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -9,14 +9,14 @@ use crate::{ }; use inflector::Inflector; use serde_json::Value; -use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar}; +use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar}; use std::collections::HashMap; use thiserror::Error; lazy_static! 
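The `encode_bs58` helper added to `UiAccount::encode` above caps base58 responses at `MAX_BASE58_BYTES` (128 bytes): base58 has no streaming encoder and its cost grows roughly quadratically with input length, so encoding a multi-kilobyte account inline could stall an RPC worker. A minimal sketch of the same guard, assuming only the `bs58` crate and omitting the hunk's `slice_data` step:

```rust
// Size guard mirroring the hunk above; oversized payloads get the same
// fallback error string instead of an expensive base58 encode.
const MAX_BASE58_BYTES: usize = 128;

fn encode_bs58(data: &[u8]) -> String {
    if data.len() <= MAX_BASE58_BYTES {
        bs58::encode(data).into_string()
    } else {
        // Callers are expected to retry with base64 or base64+zstd encoding.
        "error: data too large for bs58 encoding".to_string()
    }
}

fn main() {
    assert_eq!(encode_bs58(&[0u8; 4]), "1111"); // each leading zero byte maps to '1'
    assert!(encode_bs58(&[1u8; 200]).starts_with("error:"));
}
```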
{ static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id(); static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id(); - static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id(); + static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id(); static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id(); static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id(); static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0(); diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs index 3a9d6151b5eac7..e4cdf2457ef0d4 100644 --- a/account-decoder/src/parse_config.rs +++ b/account-decoder/src/parse_config.rs @@ -6,10 +6,10 @@ use bincode::deserialize; use serde_json::Value; use solana_config_program::{get_config_data, ConfigKeys}; use solana_sdk::pubkey::Pubkey; -use solana_stake_program::config::Config as StakeConfig; +use solana_sdk::stake::config::{self as stake_config, Config as StakeConfig}; pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result { - let parsed_account = if pubkey == &solana_stake_program::config::id() { + let parsed_account = if pubkey == &stake_config::id() { get_config_data(data) .ok() .and_then(|data| deserialize::(data).ok()) @@ -37,7 +37,7 @@ fn parse_config_data(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option Result { let stake_state: StakeState = deserialize(data) diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml index a8cd49907130fa..14cf041a9fb2be 100644 --- a/accounts-bench/Cargo.toml +++ b/accounts-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-accounts-bench" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,11 +11,11 @@ publish = false [dependencies] log = "0.4.11" rayon = "1.5.0" -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } rand = "0.7.0" clap = "2.33.1" crossbeam-channel = "0.4" diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 3aebc466aaf2e4..e32c01f1d821a5 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -6,6 +6,7 @@ use rayon::prelude::*; use solana_measure::measure::Measure; use solana_runtime::{ accounts::{create_test_accounts, update_accounts_bench, Accounts}, + accounts_db::AccountShrinkThreshold, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, }; @@ -64,6 +65,7 @@ fn main() { &ClusterType::Testnet, AccountSecondaryIndexes::default(), false, + AccountShrinkThreshold::default(), ); println!("Creating {} accounts", num_accounts); let mut create_time = Measure::start("create accounts"); @@ -119,6 +121,7 @@ fn main() { solana_sdk::clock::Slot::default(), &ancestors, None, + false, ); time_store.stop(); if results != results_store { diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index 94d0dc062689ac..4a57ab7a4020b6 100644 --- 
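The `parse_account_data.rs` and `parse_config.rs` hunks above swap `solana_stake_program::id()` and `solana_stake_program::config` for `solana_sdk::stake::program::id()` and `solana_sdk::stake::config`, which is why the account-decoder Cargo.toml earlier drops its `solana-stake-program` dependency entirely. A small sketch of the migrated import style, assuming `solana-sdk` 1.7.11:

```rust
use solana_sdk::{
    pubkey::Pubkey,
    stake::{self, config as stake_config},
};

// Program-id checks now route through the sdk's stake module rather than
// pulling in the full solana-stake-program crate.
fn is_stake_account(owner: &Pubkey) -> bool {
    owner == &stake::program::id()
}

fn is_stake_config_account(pubkey: &Pubkey) -> bool {
    pubkey == &stake_config::id()
}
```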
a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-accounts-cluster-bench" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,23 +13,24 @@ clap = "2.33.1" log = "0.4.11" rand = "0.7.0" rayon = "1.4.1" -solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-core = { path = "../core", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-gossip = { path = "../gossip", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-core = { path = "../core", version = "=1.7.11" } +solana-faucet = { path = "../faucet", version = "=1.7.11" } +solana-gossip = { path = "../gossip", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] } [dev-dependencies] -solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index e8c8a8633c7851..58b5fa2a155938 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -20,6 +20,7 @@ use solana_sdk::{ timing::timestamp, transaction::Transaction, }; +use solana_streamer::socket::SocketAddrSpace; use solana_transaction_status::parse_token::spl_token_v2_0_instruction; use std::{ net::SocketAddr, @@ -55,7 +56,7 @@ pub fn airdrop_lamports( ); let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap(); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { Ok(transaction) => { let mut tries = 0; loop { @@ -363,7 +364,7 @@ fn run_accounts_bench( iterations: usize, maybe_space: Option<u64>, batch_size: usize, - close_nth: u64, + close_nth_batch: u64, maybe_lamports: Option<u64>,
num_instructions: usize, mint: Option<Pubkey>, @@ -431,7 +432,7 @@ fn run_accounts_bench( if !airdrop_lamports( &client, &faucet_addr, - &payer_keypairs[i], + payer_keypairs[i], lamports * 100_000, ) { warn!("failed airdrop, exiting"); @@ -441,6 +442,7 @@ } } + // Create accounts let sigs_len = executor.num_outstanding(); if sigs_len < batch_size { let num_to_create = batch_size - sigs_len; @@ -475,21 +477,25 @@ } } - if close_nth > 0 { - let expected_closed = total_accounts_created as u64 / close_nth; - if expected_closed > total_accounts_closed { - let txs: Vec<_> = (0..expected_closed - total_accounts_closed) + if close_nth_batch > 0 { + let num_batches_to_close = + total_accounts_created as u64 / (close_nth_batch * batch_size as u64); + let expected_closed = num_batches_to_close * batch_size as u64; + let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed); + // Close every account we've created with seed between max_closed_seed..expected_closed + if max_closed_seed < expected_closed { + let txs: Vec<_> = (0..expected_closed - max_closed_seed) .into_par_iter() .map(|_| { let message = make_close_message( - &payer_keypairs[0], + payer_keypairs[0], &base_keypair, seed_tracker.max_closed.clone(), 1, min_balance, mint.is_some(), ); - let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair]; + let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair]; Transaction::new(&signers, message, recent_blockhash.0) }) .collect(); @@ -572,14 +578,14 @@ fn main() { .arg( - Arg::with_name("close_nth") + Arg::with_name("close_nth_batch") .long("close-frequency") .takes_value(true) .value_name("BYTES") .help( - "Send close transactions after this many accounts created. \ - Note: a `close-frequency` value near or below `batch-size` \ - may result in transaction-simulation errors, as the close \ + "Every `n` batches, create a batch of close transactions for \ + the earliest remaining batch of accounts created. \
+ Note: Should be > 1 to avoid situations where the close \ transactions will be submitted before the corresponding \ create transactions have been confirmed", ), @@ -632,7 +638,7 @@ fn main() { let space = value_t!(matches, "space", u64).ok(); let lamports = value_t!(matches, "lamports", u64).ok(); let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4); - let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0); + let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0); let iterations = value_t!(matches, "iterations", usize).unwrap_or(10); let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1); if num_instructions == 0 || num_instructions > 500 { @@ -665,6 +671,7 @@ fn main() { Some(&entrypoint_addr), // find_node_by_gossip_addr None, // my_gossip_addr 0, // my_shred_version + SocketAddrSpace::Unspecified, ) .unwrap_or_else(|err| { eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err); @@ -685,7 +692,7 @@ fn main() { iterations, space, batch_size, - close_nth, + close_nth_batch, lamports, num_instructions, mint, @@ -716,11 +723,11 @@ pub mod test { }; let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900)); - let cluster = LocalCluster::new(&mut config); + let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let iterations = 10; let maybe_space = None; let batch_size = 100; - let close_nth = 100; + let close_nth_batch = 100; let maybe_lamports = None; let num_instructions = 2; let mut start = Measure::start("total accounts run"); @@ -731,7 +738,7 @@ pub mod test { iterations, maybe_space, batch_size, - close_nth, + close_nth_batch, maybe_lamports, num_instructions, None, diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index d3aff28a9c1972..efbb3187094f8e 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-banking-bench" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,17 +14,18 @@ crossbeam-channel = "0.4" log = "0.4.11" rand = "0.7.0" rayon = "1.5.0" -solana-core = { path = "../core", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-gossip = { path = "../gossip", version = "=1.7.0" } -solana-ledger = { path = "../ledger", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-perf = { path = "../perf", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-streamer = { path = "../streamer", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-core = { path = "../core", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-gossip = { path = "../gossip", version = "=1.7.11" } +solana-ledger = { path = "../ledger", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-perf = { path = "../perf", version = "=1.7.11" } +solana-poh = { path = "../poh", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } 
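The `close_nth` to `close_nth_batch` rename above also changes the close schedule: instead of one close per N accounts created, the bench now issues a whole batch of closes once every `close_nth_batch` create-batches have completed. A worked sketch of the hunk's arithmetic with hypothetical numbers (the free function is illustrative; in the source the computation is inline):

```rust
// Mirrors the inline computation in run_accounts_bench: integer division
// floors to whole groups of `close_nth_batch` batches, so closes are only
// issued once a full group has been created.
fn expected_closed(total_accounts_created: u64, close_nth_batch: u64, batch_size: u64) -> u64 {
    let num_batches_to_close = total_accounts_created / (close_nth_batch * batch_size);
    num_batches_to_close * batch_size
}

fn main() {
    // close_nth_batch = 4, batch_size = 100:
    assert_eq!(expected_closed(399, 4, 100), 0); // first group not yet complete
    assert_eq!(expected_closed(400, 4, 100), 100); // one batch becomes closable
    assert_eq!(expected_closed(1000, 4, 100), 200); // two full groups so far
}
```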
+solana-version = { path = "../version", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 3c5527312b5649..b2bd5c35fda441 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -4,11 +4,7 @@ use crossbeam_channel::unbounded; use log::*; use rand::{thread_rng, Rng}; use rayon::prelude::*; -use solana_core::{ - banking_stage::{create_test_recorder, BankingStage}, - poh_recorder::PohRecorder, - poh_recorder::WorkingBankEntry, -}; +use solana_core::banking_stage::BankingStage; use solana_gossip::{cluster_info::ClusterInfo, cluster_info::Node}; use solana_ledger::{ blockstore::Blockstore, @@ -17,6 +13,7 @@ use solana_ledger::{ }; use solana_measure::measure::Measure; use solana_perf::packet::to_packets_chunked; +use solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry}; use solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, }; @@ -28,6 +25,7 @@ use solana_sdk::{ timing::{duration_as_us, timestamp}, transaction::Transaction, }; +use solana_streamer::socket::SocketAddrSpace; use std::{ sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex}, thread::sleep, @@ -77,7 +75,7 @@ fn make_accounts_txs( .into_par_iter() .map(|_| { let mut new = dummy.clone(); - let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect(); + let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect(); if !same_payer { new.message.account_keys[0] = solana_sdk::pubkey::new_rand(); } @@ -188,7 +186,7 @@ fn main() { genesis_config.hash(), ); // Ignore any pesky duplicate signature errors in the case we are using single-payer - let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect(); + let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect(); fund.signatures = vec![Signature::new(&sig[0..64])]; let x = bank.process_transaction(&fund); x.unwrap(); @@ -198,7 +196,7 @@ fn main() { if !skip_sanity { //sanity check, make sure all the transactions can execute sequentially transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions error: {:?}", res); }); bank.clear_signatures(); @@ -218,7 +216,11 @@ fn main() { ); let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank, &blockstore, None); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = ClusterInfo::new( + Node::new_localhost().info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let cluster_info = Arc::new(cluster_info); let banking_stage = BankingStage::new( &cluster_info, @@ -354,7 +356,7 @@ fn main() { if bank.slot() > 0 && bank.slot() % 16 == 0 { for tx in transactions.iter_mut() { tx.message.recent_blockhash = bank.last_blockhash(); - let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect(); + let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect(); tx.signatures[0] = Signature::new(&sig[0..64]); } verified = to_packets_chunked(&transactions.clone(), packets_per_chunk); diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml index af80931fb97e4f..582c56a0f43910 100644 --- a/banks-client/Cargo.toml +++ b/banks-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-client" -version = "1.7.0" +version = "1.7.11" description = "Solana banks client" authors = ["Solana Maintainers <maintainers@solana.foundation>"] repository =
"https://github.com/solana-labs/solana" @@ -11,20 +11,20 @@ edition = "2018" [dependencies] bincode = "1.3.1" -borsh = "0.8.1" -borsh-derive = "0.8.1" +borsh = "0.9.0" +borsh-derive = "0.9.0" futures = "0.3" mio = "0.7.6" -solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" } -solana-program = { path = "../sdk/program", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } +solana-banks-interface = { path = "../banks-interface", version = "=1.7.11" } +solana-program = { path = "../sdk/program", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } tarpc = { version = "0.24.1", features = ["full"] } tokio = { version = "1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } [dev-dependencies] -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-banks-server = { path = "../banks-server", version = "=1.7.0" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-banks-server = { path = "../banks-server", version = "=1.7.11" } [lib] crate-type = ["lib"] diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 5a4d36580e77b4..2c954175bc628b 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -10,8 +10,14 @@ use futures::{future::join_all, Future, FutureExt}; pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus}; use solana_banks_interface::{BanksRequest, BanksResponse}; use solana_program::{ - clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey, - rent::Rent, sysvar, + clock::Clock, + clock::Slot, + fee_calculator::FeeCalculator, + hash::Hash, + program_pack::Pack, + pubkey::Pubkey, + rent::Rent, + sysvar::{self, Sysvar}, }; use solana_sdk::{ account::{from_account, Account}, @@ -63,7 +69,7 @@ impl BanksClient { &mut self, ctx: Context, commitment: CommitmentLevel, - ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ { + ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ { self.inner .get_fees_with_commitment_and_context(ctx, commitment) } @@ -85,6 +91,14 @@ impl BanksClient { self.inner.get_slot_with_context(ctx, commitment) } + pub fn get_block_height_with_context( + &mut self, + ctx: Context, + commitment: CommitmentLevel, + ) -> impl Future<Output = io::Result<u64>> + '_ { + self.inner.get_block_height_with_context(ctx, commitment) + } + pub fn process_transaction_with_commitment_and_context( &mut self, ctx: Context, @@ -115,24 +129,39 @@ impl BanksClient { self.send_transaction_with_context(context::current(), transaction) } + /// Return the cluster clock + pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ { + self.get_account(sysvar::clock::id()).map(|result| { + let clock_sysvar = result? + .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?; + from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| { + io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar") + }) + }) + } + /// Return the fee parameters associated with a recent, rooted blockhash. The cluster /// will use the transaction's blockhash to look up these same fee parameters and /// use them to calculate the transaction fee. pub fn get_fees( &mut self, - ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ { + ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ { self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default()) } + /// Return the cluster Sysvar + pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ { + self.get_account(T::id()).map(|result| { + let sysvar = result?
+ .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?; + from_account::<T, _>(&sysvar) + .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar")) + }) + } + /// Return the cluster rent pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ { - self.get_account(sysvar::rent::id()).map(|result| { - let rent_sysvar = result? - .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?; - from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| { - io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar") - }) - }) + self.get_sysvar::<Rent>() } /// Return a recent, rooted blockhash from the server. The cluster will only accept @@ -192,12 +221,18 @@ impl BanksClient { self.process_transactions_with_commitment(transactions, CommitmentLevel::default()) } - /// Return the most recent rooted slot height. All transactions at or below this height - /// are said to be finalized. The cluster will not fork to a higher slot height. + /// Return the most recent rooted slot. All transactions at or below this slot + /// are said to be finalized. The cluster will not fork to a higher slot. pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ { self.get_slot_with_context(context::current(), CommitmentLevel::default()) } + /// Return the most recent rooted block height. All transactions at or below this height + /// are said to be finalized. The cluster will not fork to a higher block height. + pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<u64>> + '_ { + self.get_block_height_with_context(context::current(), CommitmentLevel::default()) + } + /// Return the account at the given address at the slot corresponding to the given /// commitment level. If the account is not found, None is returned. pub fn get_account_with_commitment( @@ -377,13 +412,13 @@ mod tests { let mint_pubkey = &genesis.mint_keypair.pubkey(); let bob_pubkey = solana_sdk::pubkey::new_rand(); - let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); - let message = Message::new(&[instruction], Some(&mint_pubkey)); + let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1); + let message = Message::new(&[instruction], Some(mint_pubkey)); Runtime::new()?.block_on(async { let client_transport = start_local_server(bank_forks, block_commitment_cache).await; let mut banks_client = start_client(client_transport).await?; - let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?; + let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?; let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash); let signature = transaction.signatures[0]; banks_client.send_transaction(transaction).await?; @@ -391,8 +426,8 @@ mod tests { let mut status = banks_client.get_transaction_status(signature).await?; while status.is_none() { - let root_slot = banks_client.get_root_slot().await?; - if root_slot > last_valid_slot { + let root_block_height = banks_client.get_root_block_height().await?; + if root_block_height > last_valid_block_height { break; } sleep(Duration::from_millis(100)).await; diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 90aa3afea9a4f0..f50e993454558e 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-interface" -version = "1.7.0" +version = "1.7.11" description = "Solana banks RPC interface" authors = ["Solana Maintainers <maintainers@solana.foundation>"] repository = "https://github.com/solana-labs/solana" @@ -12,7
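The new `get_sysvar` above generalizes the old rent-only lookup: every type implementing the `Sysvar` trait knows its own account address via `T::id()`, so one generic helper serves `Rent`, `Clock`, and the rest, and `get_rent` collapses to `get_sysvar::<Rent>()`. A sketch of the decode half of that pattern, assuming solana-sdk 1.7's two-parameter `from_account<S: Sysvar, T: ReadableAccount>` helper (the RPC fetch itself is elided):

```rust
use solana_program::sysvar::Sysvar;
use solana_sdk::account::{from_account, Account};
use std::io;

// Deserialize a fetched sysvar account into its typed form, mapping the
// bincode failure to io::Error the way BanksClient::get_sysvar does.
fn decode_sysvar<T: Sysvar>(account: &Account) -> io::Result<T> {
    from_account::<T, _>(account)
        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
}

// Callers choose the sysvar at the call site, e.g.:
//   let rent: solana_program::rent::Rent = decode_sysvar(&rent_account)?;
//   let clock: solana_program::clock::Clock = decode_sysvar(&clock_account)?;
```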
+12,7 @@ edition = "2018" [dependencies] mio = "0.7.6" serde = { version = "1.0.122", features = ["derive"] } -solana-sdk = { path = "../sdk", version = "=1.7.0" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } tarpc = { version = "0.24.1", features = ["full"] } [dev-dependencies] diff --git a/banks-interface/src/lib.rs b/banks-interface/src/lib.rs index 5c03e97382a78e..0ee4fddabb13ee 100644 --- a/banks-interface/src/lib.rs +++ b/banks-interface/src/lib.rs @@ -34,6 +34,7 @@ pub trait Banks { async fn get_transaction_status_with_context(signature: Signature) -> Option; async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot; + async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64; async fn process_transaction_with_commitment_and_context( transaction: Transaction, commitment: CommitmentLevel, diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml index b7d84f2cd1613a..ed4af13c0a78f0 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-server" -version = "1.7.0" +version = "1.7.11" description = "Solana banks server" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,10 +14,10 @@ bincode = "1.3.1" futures = "0.3" log = "0.4.11" mio = "0.7.6" -solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } +solana-banks-interface = { path = "../banks-interface", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } tarpc = { version = "0.24.1", features = ["full"] } tokio = { version = "1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 1f1a303ac2da81..17d26746105b85 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -113,7 +113,7 @@ impl BanksServer { self, signature: &Signature, blockhash: &Hash, - last_valid_slot: Slot, + last_valid_block_height: u64, commitment: CommitmentLevel, ) -> Option> { let mut status = self @@ -122,7 +122,7 @@ impl BanksServer { while status.is_none() { sleep(Duration::from_millis(200)).await; let bank = self.bank(commitment); - if bank.slot() > last_valid_slot { + if bank.block_height() > last_valid_block_height { break; } status = bank.get_signature_status_with_blockhash(signature, blockhash); @@ -131,10 +131,13 @@ impl BanksServer { } } -fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> { +fn verify_transaction( + transaction: &Transaction, + libsecp256k1_0_5_upgrade_enabled: bool, +) -> transaction::Result<()> { if let Err(err) = transaction.verify() { Err(err) - } else if let Err(err) = transaction.verify_precompiles() { + } else if let Err(err) = transaction.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) { Err(err) } else { Ok(()) @@ -145,16 +148,19 @@ fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> { impl Banks for BanksServer { async fn send_transaction_with_context(self, _: Context, transaction: Transaction) { let blockhash = &transaction.message.recent_blockhash; - let last_valid_slot = self + let last_valid_block_height = self .bank_forks .read() .unwrap() 
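The polling changes above (in both the `BanksClient` test and `BanksServer::poll_signature_status`) switch transaction expiry from slots to block height. The distinction matters on a real cluster: slot numbers advance even when slots are skipped, while block height counts only blocks actually produced, and a blockhash is valid for a fixed number of blocks, so comparing against `bank.slot()` could declare a transaction expired too early. A minimal sketch of the new check, with a hypothetical stand-in for the bank:

```rust
// Hypothetical stand-in for the root-bank state poll_signature_status reads.
struct RootBank {
    block_height: u64,
}

// A transaction's blockhash stays valid until the chain's block height
// passes the last_valid_block_height captured when fees were fetched.
fn is_expired(root: &RootBank, last_valid_block_height: u64) -> bool {
    root.block_height > last_valid_block_height
}

fn main() {
    let root = RootBank { block_height: 150 };
    assert!(!is_expired(&root, 150)); // still inside the validity window
    assert!(is_expired(&root, 149)); // window passed: stop polling, report expiry
}
```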
.root_bank() - .get_blockhash_last_valid_slot(&blockhash) + .get_blockhash_last_valid_block_height(blockhash) .unwrap(); let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); - let info = - TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot); + let info = TransactionInfo::new( + signature, + serialize(&transaction).unwrap(), + last_valid_block_height, + ); self.transaction_sender.send(info).unwrap(); } @@ -162,11 +168,13 @@ impl Banks for BanksServer { self, _: Context, commitment: CommitmentLevel, - ) -> (FeeCalculator, Hash, Slot) { + ) -> (FeeCalculator, Hash, u64) { let bank = self.bank(commitment); let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator(); - let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap(); - (fee_calculator, blockhash, last_valid_slot) + let last_valid_block_height = bank + .get_blockhash_last_valid_block_height(&blockhash) + .unwrap(); + (fee_calculator, blockhash, last_valid_block_height) } async fn get_transaction_status_with_context( @@ -209,29 +217,39 @@ impl Banks for BanksServer { self.slot(commitment) } + async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 { + self.bank(commitment).block_height() + } + async fn process_transaction_with_commitment_and_context( self, _: Context, transaction: Transaction, commitment: CommitmentLevel, ) -> Option<transaction::Result<()>> { - if let Err(err) = verify_transaction(&transaction) { + if let Err(err) = verify_transaction( + &transaction, + self.bank(commitment).libsecp256k1_0_5_upgrade_enabled(), + ) { return Some(Err(err)); } let blockhash = &transaction.message.recent_blockhash; - let last_valid_slot = self + let last_valid_block_height = self .bank_forks .read() .unwrap() .root_bank() - .get_blockhash_last_valid_slot(blockhash) + .get_blockhash_last_valid_block_height(blockhash) .unwrap(); let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); - let info = - TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot); + let info = TransactionInfo::new( + signature, + serialize(&transaction).unwrap(), + last_valid_block_height, + ); self.transaction_sender.send(info).unwrap(); - self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment) + self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment) .await } diff --git a/banks-server/src/send_transaction_service.rs b/banks-server/src/send_transaction_service.rs index 54eb6b3f4d7e9a..0371474dbc5b29 100644 --- a/banks-server/src/send_transaction_service.rs +++ b/banks-server/src/send_transaction_service.rs @@ -2,7 +2,7 @@ use log::*; use solana_metrics::{datapoint_warn, inc_new_counter_info}; use solana_runtime::{bank::Bank, bank_forks::BankForks}; -use solana_sdk::{clock::Slot, signature::Signature}; +use solana_sdk::signature::Signature; use std::{ collections::HashMap, net::{SocketAddr, UdpSocket}, @@ -24,15 +24,19 @@ pub struct SendTransactionService { pub struct TransactionInfo { pub signature: Signature, pub wire_transaction: Vec<u8>, - pub last_valid_slot: Slot, + pub last_valid_block_height: u64, } impl TransactionInfo { - pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self { + pub fn new( + signature: Signature, + wire_transaction: Vec<u8>, + last_valid_block_height: u64, + ) -> Self { Self { signature, wire_transaction, - last_valid_slot, + last_valid_block_height, } } }
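On each pass, the retry loop in this service classifies every tracked transaction three ways: already rooted (stop tracking it), blockhash expired relative to the root bank's block height (drop it), or still in flight (resend it to the TPU). A self-contained restatement of that decision; the enum and function names here are illustrative, not part of the patch:

    // Illustrative restatement of the retry decision made in
    // SendTransactionService after this change.
    enum Disposition {
        Rooted,  // signature found in a rooted bank: stop retrying
        Expired, // blockhash can no longer be valid: drop the transaction
        Retry,   // still in flight: resend the wire transaction to the TPU
    }

    fn classify(
        rooted: bool,
        last_valid_block_height: u64,
        root_block_height: u64,
    ) -> Disposition {
        if rooted {
            Disposition::Rooted
        } else if last_valid_block_height < root_block_height {
            // Mirrors `transaction_info.last_valid_block_height < root_bank.block_height()`
            Disposition::Expired
        } else {
            Disposition::Retry
        }
    }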
@@ -124,7 +128,7 @@ impl SendTransactionService { result.rooted += 1; inc_new_counter_info!("send_transaction_service-rooted", 1); false - } else if transaction_info.last_valid_slot < root_bank.slot() { + } else if transaction_info.last_valid_block_height < root_bank.block_height() { info!("Dropping expired transaction: {}", signature); result.expired += 1; inc_new_counter_info!("send_transaction_service-expired", 1); @@ -138,8 +142,8 @@ impl SendTransactionService { result.retried += 1; inc_new_counter_info!("send_transaction_service-retry", 1); Self::send_transaction( - &send_socket, - &tpu_address, + send_socket, + tpu_address, &transaction_info.wire_transaction, ); true diff --git a/bench-exchange/Cargo.toml b/bench-exchange/Cargo.toml index 9c0d8a89d5316e..4cce4905aa28de 100644 --- a/bench-exchange/Cargo.toml +++ b/bench-exchange/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-bench-exchange" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -18,22 +18,23 @@ rand = "0.7.0" rayon = "1.5.0" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-core = { path = "../core", version = "=1.7.0" } -solana-genesis = { path = "../genesis", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-exchange-program = { path = "../programs/exchange", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-gossip = { path = "../gossip", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-core = { path = "../core", version = "=1.7.11" } +solana-genesis = { path = "../genesis", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-exchange-program = { path = "../programs/exchange", version = "=1.7.11" } +solana-faucet = { path = "../faucet", version = "=1.7.11" } +solana-gossip = { path = "../gossip", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } [dev-dependencies] -solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-exchange/src/bench.rs b/bench-exchange/src/bench.rs index f975d1602f1345..cffb7605be56ff 100644 --- a/bench-exchange/src/bench.rs +++ b/bench-exchange/src/bench.rs @@ -451,13 +451,13 @@ fn swapper<T: Client>( let to_swap_txs: Vec<_> = to_swap .par_iter() .map(|(signer, swap, profit)| { - let s: &Keypair = &signer; + let s: &Keypair = signer; let owner = &signer.pubkey(); let instruction = exchange_instruction::swap_request( owner, &swap.0.pubkey,
&swap.1.pubkey, - &profit, + profit, ); let message = Message::new(&[instruction], Some(&s.pubkey())); Transaction::new(&[s], message, blockhash) }) .collect(); @@ -600,7 +600,7 @@ fn trader<T: Client>( src, ), ]; - let message = Message::new(&instructions, Some(&owner_pubkey)); + let message = Message::new(&instructions, Some(owner_pubkey)); Transaction::new(&[owner.as_ref(), trade], message, blockhash) }) .collect(); @@ -739,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>] let mut to_fund_txs: Vec<_> = chunk .par_iter() .map(|(k, m)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &m); + let instructions = system_instruction::transfer_many(&k.pubkey(), m); let message = Message::new(&instructions, Some(&k.pubkey())); (k.clone(), Transaction::new_unsigned(message)) }) @@ -777,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>] let mut waits = 0; loop { sleep(Duration::from_millis(200)); - to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount)); + to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount)); if to_fund_txs.is_empty() { break; } @@ -836,7 +836,7 @@ pub fn create_token_accounts<T: Client>( ); let request_ix = exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey()); - let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey)); + let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey)); ( (from_keypair, new_keypair), Transaction::new_unsigned(message), @@ -872,7 +872,7 @@ pub fn create_token_accounts<T: Client>( let mut waits = 0; while !to_create_txs.is_empty() { sleep(Duration::from_millis(200)); - to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx)); + to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx)); if to_create_txs.is_empty() { break; } @@ -958,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot fn generate_keypairs(num: u64) -> Vec<Keypair> { let mut seed = [0_u8; 32]; - seed.copy_from_slice(&Keypair::new().pubkey().as_ref()); + seed.copy_from_slice(Keypair::new().pubkey().as_ref()); let mut rnd = GenKeys::new(seed); rnd.gen_n_keypairs(num) } @@ -989,7 +989,7 @@ pub fn airdrop_lamports<T: Client>( let (blockhash, _fee_calculator, _last_valid_slot) = client .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .expect("Failed to get blockhash"); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { Ok(transaction) => { let signature = client.async_send_transaction(transaction).unwrap(); diff --git a/bench-exchange/src/main.rs b/bench-exchange/src/main.rs index 8986a67e6895fb..4e52f4027a89c7 100644 --- a/bench-exchange/src/main.rs +++ b/bench-exchange/src/main.rs @@ -7,6 +7,7 @@ use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_excha use log::*; use solana_gossip::gossip_service::{discover_cluster, get_multi_client}; use solana_sdk::signature::Signer; +use solana_streamer::socket::SocketAddrSpace; fn main() { solana_logger::setup(); @@ -55,11 +56,12 @@ fn main() { ); } else { info!("Connecting to the cluster"); - let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| { - panic!("Failed to discover nodes"); - }); + let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified) + .unwrap_or_else(|_| { + panic!("Failed to discover nodes"); + }); - let (client, num_clients) = get_multi_client(&nodes); + let (client, num_clients) =
get_multi_client(&nodes, &SocketAddrSpace::Unspecified); info!("{} nodes found", num_clients); if num_clients < num_nodes { diff --git a/bench-exchange/tests/bench_exchange.rs b/bench-exchange/tests/bench_exchange.rs index 0cba65a7ff3671..081791e439c4ff 100644 --- a/bench-exchange/tests/bench_exchange.rs +++ b/bench-exchange/tests/bench_exchange.rs @@ -15,6 +15,7 @@ use solana_sdk::{ genesis_config::create_genesis_config, signature::{Keypair, Signer}, }; +use solana_streamer::socket::SocketAddrSpace; use std::{process::exit, sync::mpsc::channel, time::Duration}; #[test] @@ -43,13 +44,19 @@ fn test_exchange_local_cluster() { } = config; let accounts_in_groups = batch_size * account_groups; - let cluster = LocalCluster::new(&mut ClusterConfig { - node_stakes: vec![100_000; NUM_NODES], - cluster_lamports: 100_000_000_000_000, - validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES), - native_instruction_processors: [solana_exchange_program!()].to_vec(), - ..ClusterConfig::default() - }); + let cluster = LocalCluster::new( + &mut ClusterConfig { + node_stakes: vec![100_000; NUM_NODES], + cluster_lamports: 100_000_000_000_000, + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + NUM_NODES, + ), + native_instruction_processors: [solana_exchange_program!()].to_vec(), + ..ClusterConfig::default() + }, + SocketAddrSpace::Unspecified, + ); let faucet_keypair = Keypair::new(); cluster.transfer( @@ -66,13 +73,17 @@ fn test_exchange_local_cluster() { .expect("faucet_addr"); info!("Connecting to the cluster"); - let nodes = - discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| { - error!("Failed to discover {} nodes: {:?}", NUM_NODES, err); - exit(1); - }); + let nodes = discover_cluster( + &cluster.entry_point_info.gossip, + NUM_NODES, + SocketAddrSpace::Unspecified, + ) + .unwrap_or_else(|err| { + error!("Failed to discover {} nodes: {:?}", NUM_NODES, err); + exit(1); + }); - let (client, num_clients) = get_multi_client(&nodes); + let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified); info!("clients: {}", num_clients); assert!(num_clients >= NUM_NODES); diff --git a/bench-streamer/Cargo.toml b/bench-streamer/Cargo.toml index 430908d99a0040..4c7fe1b628c908 100644 --- a/bench-streamer/Cargo.toml +++ b/bench-streamer/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-bench-streamer" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-streamer = { path = "../streamer", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index 18fa4f0d0eae84..4e1f070eb0fa65 100644 ---
a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -18,7 +18,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> { msgs.packets.resize(10, Packet::default()); for w in msgs.packets.iter_mut() { w.meta.size = PACKET_DATA_SIZE; - w.meta.set_addr(&addr); + w.meta.set_addr(addr); } let msgs = Arc::new(msgs); spawn(move || loop { @@ -92,6 +92,7 @@ fn main() -> Result<()> { recycler.clone(), "bench-streamer-test", 1, + true, )); } diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 813ed90479ff2d..c7de9a0851c373 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-bench-tps" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,23 +15,24 @@ log = "0.4.11" rayon = "1.5.0" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-core = { path = "../core", version = "=1.7.0" } -solana-genesis = { path = "../genesis", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-gossip = { path = "../gossip", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-core = { path = "../core", version = "=1.7.11" } +solana-genesis = { path = "../genesis", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-faucet = { path = "../faucet", version = "=1.7.11" } +solana-gossip = { path = "../gossip", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } [dev-dependencies] serial_test = "0.4.0" -solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 084b81ddec2f00..a2c21ce7efead1 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -544,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { // re-sign retained to_fund_txes with updated blockhash self.sign(blockhash); - self.send(&client); + self.send(client); // Sleep a few slots to allow transactions to process sleep(Duration::from_secs(1)); - self.verify(&client, to_lamports); + self.verify(client, to_lamports); // retry anything that seems to have dropped through cracks // again since these txs are all or nothing, they're fine to @@ -564,7 +564,7 @@ impl<'a>
FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund .par_iter() .map(|(k, t)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &t); + let instructions = system_instruction::transfer_many(&k.pubkey(), t); let message = Message::new(&instructions, Some(&k.pubkey())); (*k, Transaction::new_unsigned(message)) }) @@ -617,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { return None; } - let verified = if verify_funding_transfer(&client, &tx, to_lamports) { + let verified = if verify_funding_transfer(&client, tx, to_lamports) { verified_txs.fetch_add(1, Ordering::Relaxed); Some(k.pubkey()) } else { @@ -733,7 +733,7 @@ pub fn airdrop_lamports( ); let (blockhash, _fee_calculator) = get_recent_blockhash(client); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { Ok(transaction) => { let mut tries = 0; loop { diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index abb9b3a7eb4491..03ae9f7463d976 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -7,6 +7,7 @@ use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_clie use solana_sdk::fee_calculator::FeeRateGovernor; use solana_sdk::signature::{Keypair, Signer}; use solana_sdk::system_program; +use solana_streamer::socket::SocketAddrSpace; use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc}; /// Number of signatures for all transactions in ~1 week at ~100K TPS @@ -39,7 +40,7 @@ fn main() { let keypair_count = *tx_count * keypair_multiplier; if *write_to_client_file { info!("Generating {} keypairs", keypair_count); - let (keypairs, _) = generate_keypairs(&id, keypair_count as u64); + let (keypairs, _) = generate_keypairs(id, keypair_count as u64); let num_accounts = keypairs.len() as u64; let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature; @@ -68,13 +69,14 @@ fn main() { } info!("Connecting to the cluster"); - let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { - eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); - exit(1); - }); + let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified) + .unwrap_or_else(|err| { + eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); + exit(1); + }); let client = if *multi_client { - let (client, num_clients) = get_multi_client(&nodes); + let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified); if nodes.len() < num_clients { eprintln!( "Error: Insufficient nodes discovered. 
Expecting {} or more", @@ -88,7 +90,7 @@ fn main() { let mut target_client = None; for node in nodes { if node.id == *target_node { - target_client = Some(Arc::new(get_client(&[node]))); + target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified))); break; } } @@ -97,7 +99,7 @@ fn main() { exit(1); }) } else { - Arc::new(get_client(&nodes)) + Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified)) }; let keypairs = if *read_from_client_file { @@ -135,7 +137,7 @@ fn main() { generate_and_fund_keypairs( client.clone(), Some(*faucet_addr), - &id, + id, keypair_count, *num_lamports_per_account, ) diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 2aab3983cc4d9e..26e6a1f895afe4 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -13,6 +13,7 @@ use solana_local_cluster::{ validator_configs::make_identical_validator_configs, }; use solana_sdk::signature::{Keypair, Signer}; +use solana_streamer::socket::SocketAddrSpace; use std::{ sync::{mpsc::channel, Arc}, time::Duration, @@ -23,13 +24,19 @@ fn test_bench_tps_local_cluster(config: Config) { solana_logger::setup(); const NUM_NODES: usize = 1; - let cluster = LocalCluster::new(&mut ClusterConfig { - node_stakes: vec![999_990; NUM_NODES], - cluster_lamports: 200_000_000, - validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES), - native_instruction_processors, - ..ClusterConfig::default() - }); + let cluster = LocalCluster::new( + &mut ClusterConfig { + node_stakes: vec![999_990; NUM_NODES], + cluster_lamports: 200_000_000, + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + NUM_NODES, + ), + native_instruction_processors, + ..ClusterConfig::default() + }, + SocketAddrSpace::Unspecified, + ); let faucet_keypair = Keypair::new(); cluster.transfer( diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index c92426f28af5ce..671d3f0c143f0f 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -148,6 +148,33 @@ all_test_steps() { command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60 wait_step + # BPF test suite + if affects \ + .rs$ \ + Cargo.lock$ \ + Cargo.toml$ \ + ^ci/rust-version.sh \ + ^ci/test-stable-bpf.sh \ + ^ci/test-stable.sh \ + ^ci/test-local-cluster.sh \ + ^core/build.rs \ + ^fetch-perf-libs.sh \ + ^programs/ \ + ^sdk/ \ + ; then + cat >> "$output_file" <<"EOF" + - command: "ci/test-stable-bpf.sh" + name: "stable-bpf" + timeout_in_minutes: 20 + artifact_paths: "bpf-dumps.tar.bz2" + agents: + - "queue=default" +EOF + else + annotate --style info \ + "Stable-BPF skipped as no relevant files were modified" + fi + # Perf test suite if affects \ .rs$ \ @@ -165,7 +192,7 @@ all_test_steps() { cat >> "$output_file" <<"EOF" - command: "ci/test-stable-perf.sh" name: "stable-perf" - timeout_in_minutes: 40 + timeout_in_minutes: 20 artifact_paths: "log-*.txt" agents: - "queue=cuda" diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml index 1be27d2fcb92b7..6b3c02846791b1 100644 --- a/ci/buildkite-secondary.yml +++ b/ci/buildkite-secondary.yml @@ -3,13 +3,19 @@ # Pull requests to not run these steps. 
steps: - command: "ci/publish-tarball.sh" + agents: + - "queue=release-build" timeout_in_minutes: 60 name: "publish tarball" - wait - command: "sdk/docker-solana/build.sh" + agents: + - "queue=release-build" timeout_in_minutes: 60 name: "publish docker" - command: "ci/publish-crate.sh" + agents: + - "queue=release-build" timeout_in_minutes: 240 name: "publish crate" branches: "!master" diff --git a/ci/do-audit.sh b/ci/do-audit.sh index b6c1b86be1a326..71a0465d6fbf69 100755 --- a/ci/do-audit.sh +++ b/ci/do-audit.sh @@ -28,16 +28,23 @@ cargo_audit_ignores=( # Blocked on multiple crates updating `time` to >= 0.2.23 --ignore RUSTSEC-2020-0071 - # difference is unmaintained - # - # Blocked on predicates v1.0.6 removing its dependency on `difference` - --ignore RUSTSEC-2020-0095 - # generic-array: arr! macro erases lifetimes # # Blocked on libsecp256k1 releasing with upgraded dependencies # https://github.com/paritytech/libsecp256k1/issues/66 --ignore RUSTSEC-2020-0146 + # hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling + # + # Blocked on jsonrpc removing dependency on unmaintained `websocket` + # https://github.com/paritytech/jsonrpc/issues/605 + --ignore RUSTSEC-2021-0078 + + # hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss + # + # Blocked on jsonrpc removing dependency on unmaintained `websocket` + # https://github.com/paritytech/jsonrpc/issues/605 + --ignore RUSTSEC-2021-0079 + ) scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}" diff --git a/ci/env.sh b/ci/env.sh index d075b259997020..973f4c85323bf1 100644 --- a/ci/env.sh +++ b/ci/env.sh @@ -74,10 +74,13 @@ else export CI_BUILD_ID= export CI_COMMIT= export CI_JOB_ID= - export CI_OS_NAME= export CI_PULL_REQUEST= export CI_REPO_SLUG= export CI_TAG= + # Don't override ci/run-local.sh + if [[ -z $CI_LOCAL_RUN ]]; then + export CI_OS_NAME= + fi fi cat <&2 + exit 1 + ;; +esac + +steps=() +steps+=(test-sanity) +steps+=(shellcheck) +steps+=(test-checks) +steps+=(test-coverage) +steps+=(test-stable) +steps+=(test-stable-bpf) +steps+=(test-stable-perf) +steps+=(test-downstream-builds) +steps+=(test-bench) +steps+=(test-local-cluster) + +step_index=0 +if [[ -n "$1" ]]; then + start_step="$1" + while [[ $step_index -lt ${#steps[@]} ]]; do + step="${steps[$step_index]}" + if [[ "$step" = "$start_step" ]]; then + break + fi + step_index=$((step_index + 1)) + done + if [[ $step_index -eq ${#steps[@]} ]]; then + echo "unexpected start step: \"$start_step\"" 1>&2 + exit 1 + else + echo "** starting at step: \"$start_step\" **" + echo + fi +fi + +while [[ $step_index -lt ${#steps[@]} ]]; do + step="${steps[$step_index]}" + cmd="ci/${step}.sh" + $cmd + step_index=$((step_index + 1)) +done diff --git a/ci/setup-new-buildkite-agent/setup-buildkite.sh b/ci/setup-new-buildkite-agent/setup-buildkite.sh index c3c6a33cbb62e9..b28110ccf68a9a 100755 --- a/ci/setup-new-buildkite-agent/setup-buildkite.sh +++ b/ci/setup-new-buildkite-agent/setup-buildkite.sh @@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE TimeoutStartSec=10 TimeoutStopSec=0 KillMode=process -LimitNOFILE=700000 +LimitNOFILE=1000000 [Install] WantedBy=multi-user.target diff --git a/ci/setup-new-buildkite-agent/setup-limits.sh b/ci/setup-new-buildkite-agent/setup-limits.sh index cf0c0be273137f..7fc4d5ad70be4e 100755 --- a/ci/setup-new-buildkite-agent/setup-limits.sh +++ b/ci/setup-new-buildkite-agent/setup-limits.sh @@ -8,5 +8,5 @@ source "$HERE"/utils.sh ensure_env || exit 1 # Allow more 
files to be opened by a user -echo "* - nofile 700000" > /etc/security/limits.d/90-solana-nofiles.conf +echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf diff --git a/ci/test-bench.sh b/ci/test-bench.sh index f2480f9882dd1c..ef56bcb1eff2e0 100755 --- a/ci/test-bench.sh +++ b/ci/test-bench.sh @@ -49,6 +49,10 @@ _ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \ _ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \ -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" +# Run poh benches +_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \ + -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" + # Run core benches _ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \ -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 03ddeea0c41f97..8cf93132f00dbf 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check # Disallow uncommitted Cargo.lock changes ( - _ scripts/cargo-for-all-lock-files.sh tree + _ scripts/cargo-for-all-lock-files.sh tree >/dev/null set +e if ! _ git diff --exit-code; then echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2 @@ -35,8 +35,10 @@ echo --- build environment "$cargo" stable clippy --version --verbose "$cargo" nightly clippy --version --verbose - # audit is done only with stable + # audit is done only with "$cargo stable" "$cargo" stable audit --version + + grcov --version ) export RUST_BACKTRACE=1 @@ -65,7 +67,8 @@ _ ci/order-crates-for-publishing.py # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there -_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic +_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \ + --deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor _ "$cargo" stable fmt --all -- --check diff --git a/ci/test-downstream-builds.sh b/ci/test-downstream-builds.sh new file mode 100755 index 00000000000000..0a45db80d2b0bc --- /dev/null +++ b/ci/test-downstream-builds.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +cd "$(dirname "$0")/.." 
+ +export CI_LOCAL_RUN=true + +set -ex + +scripts/build-downstream-projects.sh diff --git a/ci/test-stable-bpf.sh b/ci/test-stable-bpf.sh new file mode 120000 index 00000000000000..0c92a5c7bd6fd4 --- /dev/null +++ b/ci/test-stable-bpf.sh @@ -0,0 +1 @@ +test-stable.sh \ No newline at end of file diff --git a/ci/test-stable.sh b/ci/test-stable.sh index e3e6aaabad0ab8..025c1d9d82d2cc 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -21,10 +21,6 @@ export RUST_BACKTRACE=1 export RUSTFLAGS="-D warnings" source scripts/ulimit-n.sh -# Clear the C dependency files, if dependency moves these files are not regenerated -test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete -test -d target/release/bpf && find target/release/bpf -name '*.d' -delete - # Limit compiler jobs to reduce memory usage # on machines with 2gb/thread of memory NPROC=$(nproc) @@ -35,17 +31,25 @@ case $testName in test-stable) _ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture ;; -test-stable-perf) +test-stable-bpf) + # Clear the C dependency files, if dependency moves these files are not regenerated + test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete + test -d target/release/bpf && find target/release/bpf -name '*.d' -delete + + # rustfilt required for dumping BPF assembly listings + "$cargo" install rustfilt + # solana-keygen required when building C programs _ "$cargo" build --manifest-path=keygen/Cargo.toml export PATH="$PWD/target/debug":$PATH + cargo_build_bpf="$(realpath ./cargo-build-bpf)" # BPF solana-sdk legacy compile test - ./cargo-build-bpf --manifest-path sdk/Cargo.toml + "$cargo_build_bpf" --manifest-path sdk/Cargo.toml # BPF Program unit tests "$cargo" test --manifest-path programs/bpf/Cargo.toml - cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf + "$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf # BPF program system tests _ make -C programs/bpf/c tests @@ -53,11 +57,32 @@ test-stable-perf) --manifest-path programs/bpf/Cargo.toml \ --no-default-features --features=bpf_c,bpf_rust -- --nocapture + # Dump BPF program assembly listings + for bpf_test in programs/bpf/rust/*; do + if pushd "$bpf_test"; then + "$cargo_build_bpf" --dump + popd + fi + done + + # BPF program instruction count assertion + bpf_target_path=programs/bpf/target + _ "$cargo" stable test \ + --manifest-path programs/bpf/Cargo.toml \ + --no-default-features --features=bpf_c,bpf_rust assert_instruction_count \ + -- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt + + bpf_dump_archive="bpf-dumps.tar.bz2" + rm -f "$bpf_dump_archive" + tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so} + exit 0 + ;; +test-stable-perf) if [[ $(uname) = Linux ]]; then # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a # lengthy and unexpected delay the first time CUDA is involved when the driver # is not yet loaded. 
- sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh + sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true rm -rf target/perf-libs ./fetch-perf-libs.sh diff --git a/clap-utils/Cargo.toml b/clap-utils/Cargo.toml index 147281dd6dc1a7..b5ae62b06556b0 100644 --- a/clap-utils/Cargo.toml +++ b/clap-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-clap-utils" -version = "1.7.0" +version = "1.7.11" description = "Solana utilities for the clap" authors = ["Solana Maintainers <maintainers@solana.foundation>"] repository = "https://github.com/solana-labs/solana" @@ -12,8 +12,8 @@ edition = "2018" [dependencies] clap = "2.33.0" rpassword = "4.0" -solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } thiserror = "1.0.21" tiny-bip39 = "0.8.0" uriparse = "0.6.3" diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index d39bca21d4bc56..f63227043dce45 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -24,9 +24,11 @@ use { }, }, std::{ + cell::RefCell, convert::TryFrom, error, io::{stdin, stdout, Write}, + ops::Deref, process::exit, str::FromStr, sync::Arc, @@ -89,33 +91,49 @@ impl CliSignerInfo { .collect() } } -#[derive(Debug)] + +#[derive(Debug, Default)] pub struct DefaultSigner { pub arg_name: String, pub path: String, + is_path_checked: RefCell<bool>, } impl DefaultSigner { - pub fn new(path: String) -> Self { + pub fn new<AN: AsRef<str>, P: AsRef<str>>(arg_name: AN, path: P) -> Self { + let arg_name = arg_name.as_ref().to_string(); + let path = path.as_ref().to_string(); Self { - arg_name: "keypair".to_string(), + arg_name, path, + ..Self::default() } } - pub fn from_path(path: String) -> Result<Self, Box<dyn error::Error>> { - std::fs::metadata(&path) - .map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::Other, - format!( - "No default signer found, run \"solana-keygen new -o {}\" to create a new one", - path - ), - ) - .into() - }) - .map(|_| Self::new(path)) + + fn path(&self) -> Result<&str, Box<dyn error::Error>> { + if !self.is_path_checked.borrow().deref() { + parse_signer_source(&self.path) + .and_then(|s| { + if let SignerSourceKind::Filepath(path) = &s.kind { + std::fs::metadata(path).map(|_| ()).map_err(|e| e.into()) + } else { + Ok(()) + } + }) + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "No default signer found, run \"solana-keygen new -o {}\" to create a new one", + self.path + ), + ) + })?; + *self.is_path_checked.borrow_mut() = true; + } + Ok(&self.path) } + pub fn generate_unique_signers( &self, bulk_signers: Vec<Option<Box<dyn Signer>>>, @@ -145,7 +163,7 @@ impl DefaultSigner { matches: &ArgMatches, wallet_manager: &mut Option<Arc<RemoteWalletManager>>, ) -> Result<Box<dyn Signer>, Box<dyn error::Error>> { - signer_from_path(matches, &self.path, &self.arg_name, wallet_manager) + signer_from_path(matches, self.path()?, &self.arg_name, wallet_manager) } pub fn signer_from_path_with_config( @@ -154,7 +172,13 @@ wallet_manager: &mut Option<Arc<RemoteWalletManager>>, config: &SignerFromPathConfig, ) -> Result<Box<dyn Signer>, Box<dyn error::Error>> { - signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config) + signer_from_path_with_config( + matches, + self.path()?, + &self.arg_name, + wallet_manager, + config, + ) } }
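DefaultSigner now defers the "does the keypair file exist" check from construction time to first use, and the RefCell<bool> memoizes the outcome so the check runs at most once even though path() only takes &self. The same interior-mutability pattern in isolation (a sketch, not the actual clap-utils type):

    use std::cell::RefCell;

    // A value whose validation runs lazily, at most once, behind a shared
    // reference -- the pattern DefaultSigner::path() uses above.
    struct LazyChecked {
        value: String,
        checked: RefCell<bool>,
    }

    impl LazyChecked {
        fn get(&self) -> Result<&str, std::io::Error> {
            if !*self.checked.borrow() {
                // The fallible check, e.g. fs::metadata on a keypair path.
                std::fs::metadata(&self.value)?;
                // Record success so later calls skip the filesystem.
                *self.checked.borrow_mut() = true;
            }
            Ok(&self.value)
        }
    }

    fn main() {
        let path = LazyChecked { value: "/tmp".to_string(), checked: RefCell::new(false) };
        assert!(path.get().is_ok()); // hits the filesystem
        assert!(path.get().is_ok()); // memoized, returns immediately
    }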
@@ -277,7 +301,9 @@ pub(crate) fn parse_signer_source<S: AsRef<str>>( ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)), _ => match Pubkey::from_str(source.as_str()) { Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))), - Err(_) => Ok(SignerSource::new(SignerSourceKind::Filepath(source))), + Err(_) => std::fs::metadata(source.as_str()) + .map(|_| SignerSource::new(SignerSourceKind::Filepath(source))) + .map_err(|err| err.into()), }, } } @@ -480,7 +506,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> { - let passphrase = prompt_password_stderr(&prompt)?; + let passphrase = prompt_password_stderr(prompt)?; if !passphrase.is_empty() { let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?; if confirmed != passphrase { @@ -560,9 +586,9 @@ pub fn keypair_from_seed_phrase( let keypair = if skip_validation { let passphrase = prompt_passphrase(&passphrase_prompt)?; if legacy { - keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? + keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)? } else { - let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase); + let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase); keypair_from_seed_and_derivation_path(&seed, derivation_path)? } } else { @@ -590,7 +616,7 @@ pub fn keypair_from_seed_phrase( if legacy { keypair_from_seed(seed.as_bytes())? } else { - keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)? + keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)? } }; @@ -751,6 +777,10 @@ mod tests { // Catchall into SignerSource::Filepath fails let junk = "sometextthatisnotapubkeyorfile".to_string(); assert!(Pubkey::from_str(&junk).is_err()); + assert!(matches!( + parse_signer_source(&junk), + Err(SignerSourceError::IoError(_)) + )); let prompt = "prompt:".to_string(); assert!(matches!( diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index ca6afa37ca2faf..da9e2aa5cf9a1f 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-cli-config" description = "Blockchain, Rebuilt for Scale" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" diff --git a/cli-config/src/config.rs b/cli-config/src/config.rs index f98af53f4a6ce7..d9706ef9295655 100644 --- a/cli-config/src/config.rs +++ b/cli-config/src/config.rs @@ -107,24 +107,24 @@ mod test { #[test] fn compute_websocket_url() { assert_eq!( - Config::compute_websocket_url(&"http://api.devnet.solana.com"), + Config::compute_websocket_url("http://api.devnet.solana.com"), "ws://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://api.devnet.solana.com"), + Config::compute_websocket_url("https://api.devnet.solana.com"), "wss://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"http://example.com:8899"), + Config::compute_websocket_url("http://example.com:8899"), "ws://example.com:8900/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://example.com:1234"), + Config::compute_websocket_url("https://example.com:1234"), "wss://example.com:1235/".to_string() ); - assert_eq!(Config::compute_websocket_url(&"garbage"), String::new()); + assert_eq!(Config::compute_websocket_url("garbage"), String::new()); } } diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index
3f6cf479d4290f..5e5669740e7c32 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"] edition = "2018" name = "solana-cli-output" description = "Blockchain, Rebuilt for Scale" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -12,20 +12,20 @@ documentation = "https://docs.rs/solana-cli-output" [dependencies] base64 = "0.13.0" chrono = { version = "0.4.11", features = ["serde"] } -console = "0.11.3" +clap = "2.33.0" +console = "0.14.1" humantime = "2.0.1" Inflector = "0.11.4" indicatif = "0.15.0" serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.7.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } [package.metadata.docs.rs] diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 7532a41d934937..31d26c664a330c 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -8,6 +8,7 @@ use { QuietDisplay, VerboseDisplay, }, chrono::{Local, TimeZone}, + clap::ArgMatches, console::{style, Emoji}, inflector::cases::titlecase::to_title_case, serde::{Deserialize, Serialize}, @@ -25,10 +26,10 @@ use { native_token::lamports_to_sol, pubkey::Pubkey, signature::Signature, + stake::state::{Authorized, Lockup}, stake_history::StakeHistoryEntry, transaction::{Transaction, TransactionError}, }, - solana_stake_program::stake_state::{Authorized, Lockup}, solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus, UiTransactionStatusMeta, @@ -47,7 +48,7 @@ static WARNING: Emoji = Emoji("⚠️", "!"); -#[derive(PartialEq)] +#[derive(PartialEq, Debug)] pub enum OutputFormat { Display, Json, @@ -77,6 +78,21 @@ impl OutputFormat { OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(), } } + + pub fn from_matches(matches: &ArgMatches<'_>, output_name: &str, verbose: bool) -> Self { + matches + .value_of(output_name) + .map(|value| match value { + "json" => OutputFormat::Json, + "json-compact" => OutputFormat::JsonCompact, + _ => unreachable!(), + }) + .unwrap_or(if verbose { + OutputFormat::DisplayVerbose + } else { + OutputFormat::Display + }) + } }
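OutputFormat::from_matches centralizes clap boilerplate that every subcommand otherwise repeats: an explicit --output json/json-compact wins, and when the flag is absent, verbosity picks Display or DisplayVerbose. A usage sketch; the demo app and arg wiring mirror the unit test added later in this diff, and nothing here is new API beyond from_matches itself:

    use clap::{App, Arg};
    use solana_cli_output::OutputFormat;

    fn main() {
        let matches = App::new("demo")
            .arg(
                Arg::with_name("output_format")
                    .long("output")
                    .takes_value(true)
                    .possible_values(&["json", "json-compact"]),
            )
            .get_matches();
        // Explicit --output wins; otherwise verbosity selects the display variant.
        let format = OutputFormat::from_matches(&matches, "output_format", /*verbose=*/ false);
        println!("{:?}", format); // OutputFormat derives Debug after this change
    }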
#[derive(Serialize, Deserialize)] @@ -233,6 +249,10 @@ pub struct CliEpochInfo { pub epoch_info: EpochInfo, #[serde(skip)] pub average_slot_time_ms: u64, + #[serde(skip)] + pub start_block_time: Option<UnixTimestamp>, + #[serde(skip)] + pub current_block_time: Option<UnixTimestamp>, } impl QuietDisplay for CliEpochInfo {} @@ -277,21 +297,41 @@ impl fmt::Display for CliEpochInfo { remaining_slots_in_epoch ), )?; + let (time_elapsed, annotation) = if let (Some(start_block_time), Some(current_block_time)) = + (self.start_block_time, self.current_block_time) + { + ( + Duration::from_secs((current_block_time - start_block_time) as u64), + None, + ) + } else { + ( + slot_to_duration(self.epoch_info.slot_index, self.average_slot_time_ms), + Some("* estimated based on current slot durations"), + ) + }; + let time_remaining = slot_to_duration(remaining_slots_in_epoch, self.average_slot_time_ms); writeln_name_value( f, "Epoch Completed Time:", &format!( - "{}/{} ({} remaining)", - slot_to_human_time(self.epoch_info.slot_index, self.average_slot_time_ms), - slot_to_human_time(self.epoch_info.slots_in_epoch, self.average_slot_time_ms), - slot_to_human_time(remaining_slots_in_epoch, self.average_slot_time_ms) + "{}{}/{} ({} remaining)", + humantime::format_duration(time_elapsed).to_string(), + if annotation.is_some() { "*" } else { "" }, + humantime::format_duration(time_elapsed + time_remaining).to_string(), + humantime::format_duration(time_remaining).to_string(), ), - ) + )?; + if let Some(annotation) = annotation { + writeln!(f)?; + writeln!(f, "{}", annotation)?; + } + Ok(()) } } -fn slot_to_human_time(slot: Slot, slot_time_ms: u64) -> String { - humantime::format_duration(Duration::from_secs((slot * slot_time_ms) / 1000)).to_string() +fn slot_to_duration(slot: Slot, slot_time_ms: u64) -> Duration { + Duration::from_secs((slot * slot_time_ms) / 1000) } #[derive(Serialize, Deserialize, Default)] @@ -323,6 +363,8 @@ pub struct CliValidators { pub total_current_stake: u64, pub total_delinquent_stake: u64, pub validators: Vec<CliValidator>, + pub average_skip_rate: f64, + pub average_stake_weighted_skip_rate: f64, #[serde(skip_serializing)] pub validators_sort_order: CliValidatorsSortOrder, #[serde(skip_serializing)] @@ -486,6 +528,18 @@ impl fmt::Display for CliValidators { writeln!(f, "{}", header)?; } + writeln!(f)?; + writeln_name_value( + f, + "Average Stake-Weighted Skip Rate:", + &format!("{:.2}%", self.average_stake_weighted_skip_rate,), + )?; + writeln_name_value( + f, + "Average Unweighted Skip Rate: ", + &format!("{:.2}%", self.average_skip_rate), + )?; + writeln!(f)?; writeln_name_value( f, @@ -733,6 +787,7 @@ pub struct CliEpochReward { pub post_balance: u64, // lamports pub percent_change: f64, pub apr: Option<f64>, + pub commission: Option<u8>, } #[derive(Serialize, Deserialize)] @@ -777,23 +832,27 @@ impl fmt::Display for CliKeyedEpochRewards { writeln!(f, "Epoch Rewards:")?; writeln!( f, - " {:<44} {:<18} {:<18} {:>14} {:>14}", - "Address", "Amount", "New Balance", "Percent Change", "APR" + " {:<44} {:<18} {:<18} {:>14} {:>14} {:>10}", + "Address", "Amount", "New Balance", "Percent Change", "APR", "Commission" )?; for keyed_reward in &self.rewards { match &keyed_reward.reward { Some(reward) => { writeln!( f, - " {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}", + " {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}", keyed_reward.address, lamports_to_sol(reward.amount), lamports_to_sol(reward.post_balance), reward.percent_change, reward .apr - .map(|apr| format!("{:>13.2}%", apr)) + .map(|apr| format!("{:.2}%", apr)) .unwrap_or_default(), + reward + .commission + .map(|commission| format!("{}%", commission)) + .unwrap_or_else(|| "-".to_string()) )?; } None => { @@ -910,13 +969,13 @@ fn show_epoch_rewards( writeln!(f, "Epoch Rewards:")?; writeln!( f, - " {:<6} {:<11} {:<18} {:<18} {:>14} {:>14}", - "Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR" + " {:<6} {:<11} {:<18} {:<18} {:>14} {:>14} {:>10}",
+ "Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR", "Commission" )?; for reward in epoch_rewards { writeln!( f, - " {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}", + " {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}", reward.epoch, reward.effective_slot, lamports_to_sol(reward.amount), @@ -924,8 +983,12 @@ fn show_epoch_rewards( reward.percent_change, reward .apr - .map(|apr| format!("{:>13.2}%", apr)) + .map(|apr| format!("{:.2}%", apr)) .unwrap_or_default(), + reward + .commission + .map(|commission| format!("{}%", commission)) + .unwrap_or_else(|| "-".to_string()) )?; } } @@ -1287,7 +1350,7 @@ impl fmt::Display for CliValidatorInfo { writeln_name_value( f, &format!(" {}:", to_title_case(key)), - &value.as_str().unwrap_or("?"), + value.as_str().unwrap_or("?"), )?; } Ok(()) @@ -1325,8 +1388,8 @@ impl fmt::Display for CliVoteAccount { build_balance_message(self.account_balance, self.use_lamports_unit, true) )?; writeln!(f, "Validator Identity: {}", self.validator_identity)?; - writeln!(f, "Authorized Voters: {}", self.authorized_voters)?; - writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?; + writeln!(f, "Vote Authority: {}", self.authorized_voters)?; + writeln!(f, "Withdraw Authority: {}", self.authorized_withdrawer)?; writeln!(f, "Credits: {}", self.credits)?; writeln!(f, "Commission: {}%", self.commission)?; writeln!( @@ -1513,15 +1576,19 @@ impl fmt::Display for CliInflation { "Staking rate: {:>5.2}%", self.current_rate.validator * 100. )?; - writeln!( - f, - "Foundation rate: {:>5.2}%", - self.current_rate.foundation * 100. - ) + + if self.current_rate.foundation > 0. { + writeln!( + f, + "Foundation rate: {:>5.2}%", + self.current_rate.foundation * 100. + )?; + } + Ok(()) } } -#[derive(Serialize, Deserialize, Default)] +#[derive(Serialize, Deserialize, Default, Debug, PartialEq)] #[serde(rename_all = "camelCase")] pub struct CliSignOnlyData { pub blockhash: String, @@ -1669,6 +1736,7 @@ pub struct CliFeesInner { pub blockhash: String, pub lamports_per_signature: u64, pub last_valid_slot: Option, + pub last_valid_block_height: Option, } impl QuietDisplay for CliFeesInner {} @@ -1682,11 +1750,11 @@ impl fmt::Display for CliFeesInner { "Lamports per signature:", &self.lamports_per_signature.to_string(), )?; - let last_valid_slot = self - .last_valid_slot + let last_valid_block_height = self + .last_valid_block_height .map(|s| s.to_string()) .unwrap_or_default(); - writeln_name_value(f, "Last valid slot:", &last_valid_slot) + writeln_name_value(f, "Last valid block height:", &last_valid_block_height) } } @@ -1715,6 +1783,7 @@ impl CliFees { blockhash: Hash, lamports_per_signature: u64, last_valid_slot: Option, + last_valid_block_height: Option, ) -> Self { Self { inner: Some(CliFeesInner { @@ -1722,6 +1791,7 @@ impl CliFees { blockhash: blockhash.to_string(), lamports_per_signature, last_valid_slot, + last_valid_block_height, }), } } @@ -1768,7 +1838,7 @@ impl fmt::Display for CliTokenAccount { writeln_name_value( f, "Close authority:", - &account.close_authority.as_ref().unwrap_or(&String::new()), + account.close_authority.as_ref().unwrap_or(&String::new()), )?; Ok(()) } @@ -1970,6 +2040,11 @@ pub fn return_signers_with_config( output_format: &OutputFormat, config: &ReturnSignersConfig, ) -> Result> { + let cli_command = return_signers_data(tx, config); + Ok(output_format.formatted_string(&cli_command)) +} + +pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> CliSignOnlyData { let verify_results = 
+ + pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> CliSignOnlyData { let verify_results = tx.verify_with_results(); let mut signers = Vec::new(); let mut absent = Vec::new(); @@ -1994,19 +2069,17 @@ pub fn return_signers_with_config( None }; - let cli_command = CliSignOnlyData { + CliSignOnlyData { blockhash: tx.message.recent_blockhash.to_string(), message, signers, absent, bad_sig, - }; - - Ok(output_format.formatted_string(&cli_command)) + } } pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly { - let object: Value = serde_json::from_str(&reply).unwrap(); + let object: Value = serde_json::from_str(reply).unwrap(); let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap(); let blockhash = blockhash_str.parse::<Hash>().unwrap(); let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new(); @@ -2136,8 +2209,8 @@ impl fmt::Display for CliBlock { writeln!(f, "Rewards:")?; writeln!( f, - " {:<44} {:^15} {:<15} {:<20} {:>14}", - "Address", "Type", "Amount", "New Balance", "Percent Change" + " {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}", + "Address", "Type", "Amount", "New Balance", "Percent Change", "Commission" )?; for reward in rewards { let sign = if reward.lamports < 0 { "-" } else { "" }; @@ -2145,7 +2218,7 @@ total_rewards += reward.lamports; writeln!( f, - " {:<44} {:^15} {:>15} {}", + " {:<44} {:^15} {:>15} {} {}", reward.pubkey, if let Some(reward_type) = reward.reward_type { format!("{}", reward_type) @@ -2167,7 +2240,11 @@ / (reward.post_balance as f64 - reward.lamports as f64)) * 100.0 ) - } + }, + reward + .commission + .map(|commission| format!("{:>9}%", commission)) + .unwrap_or_else(|| " -".to_string()) )?; } @@ -2380,6 +2457,7 @@ impl VerboseDisplay for CliGossipNodes {} #[cfg(test)] mod tests { use super::*; + use clap::{App, Arg}; use solana_sdk::{ message::Message, pubkey::Pubkey, @@ -2408,6 +2486,10 @@ fn try_sign_message(&self, _message: &[u8]) -> Result<Signature, SignerError> { Ok(Signature::new(&[1u8; 64])) } + + fn is_interactive(&self) -> bool { + false + } } let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap()); @@ -2436,6 +2518,22 @@ assert_eq!(sign_only.absent_signers[0], absent.pubkey()); assert_eq!(sign_only.bad_signers[0], bad.pubkey()); + let res_data = return_signers_data(&tx, &ReturnSignersConfig::default()); + assert_eq!( + res_data, + CliSignOnlyData { + blockhash: blockhash.to_string(), + message: None, + signers: vec![format!( + "{}={}", + present.pubkey().to_string(), + tx.signatures[1] + )], + absent: vec![absent.pubkey().to_string()], + bad_sig: vec![bad.pubkey().to_string()], + } + ); + let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\ F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\ BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\ @@ -2449,10 +2547,26 @@ let res = return_signers_with_config(&tx, &OutputFormat::JsonCompact, &config).unwrap(); let sign_only = parse_sign_only_reply_string(&res); assert_eq!(sign_only.blockhash, blockhash); - assert_eq!(sign_only.message, Some(expected_msg)); + assert_eq!(sign_only.message, Some(expected_msg.clone())); assert_eq!(sign_only.present_signers[0].0, present.pubkey()); assert_eq!(sign_only.absent_signers[0], absent.pubkey()); assert_eq!(sign_only.bad_signers[0], bad.pubkey()); + + let res_data = return_signers_data(&tx, &config); + assert_eq!( + res_data, + CliSignOnlyData { + blockhash: blockhash.to_string(), + message: Some(expected_msg), + signers: vec![format!( + "{}={}", + present.pubkey().to_string(), + tx.signatures[1]
tx.signatures[1] + )], + absent: vec![absent.pubkey().to_string()], + bad_sig: vec![bad.pubkey().to_string()], + } + ); } #[test] @@ -2501,4 +2615,50 @@ mod tests { "verbose" ); } + + #[test] + fn test_output_format_from_matches() { + let app = App::new("test").arg( + Arg::with_name("output_format") + .long("output") + .value_name("FORMAT") + .global(true) + .takes_value(true) + .possible_values(&["json", "json-compact"]) + .help("Return information in specified output format"), + ); + let matches = app + .clone() + .get_matches_from(vec!["test", "--output", "json"]); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", false), + OutputFormat::Json + ); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", true), + OutputFormat::Json + ); + + let matches = app + .clone() + .get_matches_from(vec!["test", "--output", "json-compact"]); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", false), + OutputFormat::JsonCompact + ); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", true), + OutputFormat::JsonCompact + ); + + let matches = app.clone().get_matches_from(vec!["test"]); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", false), + OutputFormat::Display + ); + assert_eq!( + OutputFormat::from_matches(&matches, "output_format", true), + OutputFormat::DisplayVerbose + ); + } } diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index ec4f8b4a41030a..f8f7e8274a1694 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -5,7 +5,7 @@ use { indicatif::{ProgressBar, ProgressStyle}, solana_sdk::{ clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol, - program_utils::limited_deserialize, pubkey::Pubkey, transaction::Transaction, + program_utils::limited_deserialize, pubkey::Pubkey, stake, transaction::Transaction, }, solana_transaction_status::UiTransactionStatusMeta, spl_memo::id as spl_memo_id, @@ -140,7 +140,7 @@ fn format_account_mode(message: &Message, index: usize) -> String { } else { "-" }, - if message.is_writable(index, /*demote_sysvar_write_locks=*/ true) { + if message.is_writable(index) { "w" // comment for consistent rust fmt (no joking; lol) } else { "-" @@ -244,10 +244,9 @@ pub fn write_transaction( writeln!(w, "{} {:?}", prefix, vote_instruction)?; raw = false; } - } else if program_pubkey == solana_stake_program::id() { - if let Ok(stake_instruction) = limited_deserialize::< - solana_stake_program::stake_instruction::StakeInstruction, - >(&instruction.data) + } else if program_pubkey == stake::program::id() { + if let Ok(stake_instruction) = + limited_deserialize::(&instruction.data) { writeln!(w, "{} {:?}", prefix, stake_instruction)?; raw = false; diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c0847937208200..9b2a9cf8bcbda1 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-cli" description = "Blockchain, Rebuilt for Scale" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -16,7 +16,8 @@ chrono = { version = "0.4.11", features = ["serde"] } clap = "2.33.1" criterion-stats = "0.3.0" ctrlc = { version = "3.1.5", features = ["termination"] } -console = "0.11.3" +console = "0.14.1" +const_format = "0.2.14" dirs-next = "2.0.0" log = "0.4.11" Inflector = "0.11.4" @@ -28,30 +29,30 @@ reqwest = { version = "0.11.2", default-features = false, features = 
["blocking" serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-cli-config = { path = "../cli-config", version = "=1.7.0" } -solana-cli-output = { path = "../cli-output", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-config-program = { path = "../programs/config", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-cli-config = { path = "../cli-config", version = "=1.7.11" } +solana-cli-output = { path = "../cli-output", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-config-program = { path = "../programs/config", version = "=1.7.11" } +solana-faucet = { path = "../faucet", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } solana_rbpf = "=0.2.11" -solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.7.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.21" tiny-bip39 = "0.7.0" url = "2.1.1" [dev-dependencies] -solana-core = { path = "../core", version = "=1.7.0" } +solana-core = { path = "../core", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } tempfile = "3.1.0" [[bin]] diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs new file mode 100644 index 00000000000000..cf15285c3d9c9b --- /dev/null +++ b/cli/src/clap_app.rs @@ -0,0 +1,202 @@ +use crate::{ + cli::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, stake::*, + validator_info::*, vote::*, wallet::*, +}; +use clap::{App, AppSettings, Arg, ArgGroup, SubCommand}; +use solana_clap_utils::{self, input_validators::*, keypair::*}; +use solana_cli_config::CONFIG_FILE; + +pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> { + App::new(name) + .about(about) + .version(version) + .setting(AppSettings::SubcommandRequiredElseHelp) + .arg({ + let arg = Arg::with_name("config_file") + .short("C") + .long("config") + .value_name("FILEPATH") + .takes_value(true) + .global(true) + .help("Configuration file to use"); + if let Some(ref config_file) = *CONFIG_FILE { + arg.default_value(config_file) + 
} else { + arg + } + }) + .arg( + Arg::with_name("json_rpc_url") + .short("u") + .long("url") + .value_name("URL_OR_MONIKER") + .takes_value(true) + .global(true) + .validator(is_url_or_moniker) + .help( + "URL for Solana's JSON RPC or moniker (or their first letter): \ + [mainnet-beta, testnet, devnet, localhost]", + ), + ) + .arg( + Arg::with_name("websocket_url") + .long("ws") + .value_name("URL") + .takes_value(true) + .global(true) + .validator(is_url) + .help("WebSocket URL for the solana cluster"), + ) + .arg( + Arg::with_name("keypair") + .short("k") + .long("keypair") + .value_name("KEYPAIR") + .global(true) + .takes_value(true) + .help("Filepath or URL to a keypair"), + ) + .arg( + Arg::with_name("commitment") + .long("commitment") + .takes_value(true) + .possible_values(&[ + "processed", + "confirmed", + "finalized", + "recent", // Deprecated as of v1.5.5 + "single", // Deprecated as of v1.5.5 + "singleGossip", // Deprecated as of v1.5.5 + "root", // Deprecated as of v1.5.5 + "max", // Deprecated as of v1.5.5 + ]) + .value_name("COMMITMENT_LEVEL") + .hide_possible_values(true) + .global(true) + .help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"), + ) + .arg( + Arg::with_name("verbose") + .long("verbose") + .short("v") + .global(true) + .help("Show additional information"), + ) + .arg( + Arg::with_name("no_address_labels") + .long("no-address-labels") + .global(true) + .help("Do not use address labels in the output"), + ) + .arg( + Arg::with_name("output_format") + .long("output") + .value_name("FORMAT") + .global(true) + .takes_value(true) + .possible_values(&["json", "json-compact"]) + .help("Return information in specified output format"), + ) + .arg( + Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) + .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) + .global(true) + .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), + ) + .arg( + Arg::with_name("rpc_timeout") + .long("rpc-timeout") + .value_name("SECONDS") + .takes_value(true) + .default_value(DEFAULT_RPC_TIMEOUT_SECONDS) + .global(true) + .hidden(true) + .help("Timeout value for RPC requests"), + ) + .arg( + Arg::with_name("confirm_transaction_initial_timeout") + .long("confirm-timeout") + .value_name("SECONDS") + .takes_value(true) + .default_value(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS) + .global(true) + .hidden(true) + .help("Timeout value for initial transaction status"), + ) + .cluster_query_subcommands() + .feature_subcommands() + .inflation_subcommands() + .nonce_subcommands() + .program_subcommands() + .stake_subcommands() + .validator_info_subcommands() + .vote_subcommands() + .wallet_subcommands() + .subcommand( + SubCommand::with_name("config") + .about("Solana command-line tool configuration settings") + .aliases(&["get", "set"]) + .setting(AppSettings::SubcommandRequiredElseHelp) + .subcommand( + SubCommand::with_name("get") + .about("Get current config settings") + .arg( + Arg::with_name("specific_setting") + .index(1) + .value_name("CONFIG_FIELD") + .takes_value(true) + .possible_values(&[ + "json_rpc_url", + "websocket_url", + "keypair", + "commitment", + ]) + .help("Return a specific config setting"), + ), + ) + .subcommand( + SubCommand::with_name("set") + .about("Set a config setting") + .group( + ArgGroup::with_name("config_settings") + .args(&["json_rpc_url", "websocket_url", "keypair", "commitment"]) + .multiple(true) + .required(true), + ), + ) + .subcommand( + SubCommand::with_name("import-address-labels") + .about("Import a list of address labels") + 
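For context on the global `--output` flag defined just above: a minimal sketch (helper name hypothetical) of how these matches are typically resolved into an output format. `OutputFormat::from_matches` is the real function exercised by `test_output_format_from_matches` earlier in this diff; an explicit `json`/`json-compact` value wins, otherwise the verbose flag selects `Display` vs. `DisplayVerbose`.

```rust
use clap::ArgMatches;
use solana_cli_output::OutputFormat;

// Hypothetical helper mirroring how the CLI consumes the global
// `--output` flag defined above.
fn resolve_output_format(matches: &ArgMatches<'_>, verbose: bool) -> OutputFormat {
    OutputFormat::from_matches(matches, "output_format", verbose)
}
```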
.arg( + Arg::with_name("filename") + .index(1) + .value_name("FILENAME") + .takes_value(true) + .help("YAML file of address labels"), + ), + ) + .subcommand( + SubCommand::with_name("export-address-labels") + .about("Export the current address labels") + .arg( + Arg::with_name("filename") + .index(1) + .value_name("FILENAME") + .takes_value(true) + .help("YAML file to receive the current address labels"), + ), + ), + ) + .subcommand( + SubCommand::with_name("completion") + .about("Generate completion scripts for various shells") + .arg( + Arg::with_name("shell") + .long("shell") + .short("s") + .takes_value(true) + .possible_values(&["bash", "fish", "zsh", "powershell", "elvish"]) + .default_value("bash") + ) + ) +} diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e1092f7df52ea9..8a0b58928cc209 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,38 +1,21 @@ use crate::{ - cluster_query::*, feature::*, inflation::*, memo::*, nonce::*, program::*, spend_utils::*, - stake::*, validator_info::*, vote::*, + clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, spend_utils::*, + stake::*, validator_info::*, vote::*, wallet::*, }; -use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; +use clap::{crate_description, crate_name, value_t_or_exit, ArgMatches, Shell}; use log::*; use num_traits::FromPrimitive; use serde_json::{self, Value}; -use solana_account_decoder::{UiAccount, UiAccountEncoding}; -use solana_clap_utils::{ - self, - fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, - input_parsers::*, - input_validators::*, - keypair::*, - memo::{memo_arg, MEMO_ARG}, - nonce::*, - offline::*, -}; +use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}; use solana_cli_output::{ - display::{build_balance_message, println_name_value}, - return_signers_with_config, CliAccount, CliSignature, CliSignatureVerificationStatus, - CliTransaction, CliTransactionConfirmation, CliValidatorsSortOrder, OutputFormat, - ReturnSignersConfig, + display::println_name_value, CliSignature, CliValidatorsSortOrder, OutputFormat, }; use solana_client::{ blockhash_query::BlockhashQuery, - client_error::{ClientError, ClientErrorKind, Result as ClientResult}, + client_error::{ClientError, Result as ClientResult}, nonce_utils, rpc_client::RpcClient, - rpc_config::{ - RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionConfig, - RpcTransactionLogsFilter, - }, - rpc_response::RpcKeyedAccount, + rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionLogsFilter}, }; use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_sdk::{ @@ -41,26 +24,18 @@ use solana_sdk::{ decode_error::DecodeError, hash::Hash, instruction::InstructionError, - message::Message, pubkey::Pubkey, signature::{Signature, Signer, SignerError}, - system_instruction::{self, SystemError}, - system_program, + stake::{instruction::LockupArgs, state::Lockup}, transaction::{Transaction, TransactionError}, }; -use solana_stake_program::{ - stake_instruction::LockupArgs, - stake_state::{Lockup, StakeAuthorize}, -}; -use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding}; use solana_vote_program::vote_state::VoteAuthorize; -use std::{ - collections::HashMap, error, fmt::Write as FmtWrite, fs::File, io::Write, str::FromStr, - sync::Arc, time::Duration, -}; +use std::{collections::HashMap, error, io::stdout, str::FromStr, sync::Arc, time::Duration}; use thiserror::Error; pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30"; +pub 
const DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS: &str = "5"; +const CHECKED: bool = true; #[derive(Debug, PartialEq)] #[allow(clippy::large_enum_variant)] @@ -75,11 +50,6 @@ pub enum CliCommand { }, ClusterDate, ClusterVersion, - CreateAddressWithSeed { - from_pubkey: Option<Pubkey>, - seed: String, - program_id: Pubkey, - }, Feature(FeatureCliCommand), Inflation(InflationCliCommand), Fees { @@ -134,6 +104,8 @@ pub enum CliCommand { sort_order: CliValidatorsSortOrder, reverse_sort: bool, number_validators: bool, + keep_unstaked_delinquents: bool, + delinquent_slot_distance: Option<Slot>, }, Supply { print_accounts: bool, @@ -194,6 +166,7 @@ pub enum CliCommand { seed: Option<String>, staker: Option<Pubkey>, withdrawer: Option<Pubkey>, + withdrawer_signer: Option<SignerIndex>, lockup: Lockup, amount: SpendAmount, sign_only: bool, @@ -267,7 +240,7 @@ pub enum CliCommand { }, StakeAuthorize { stake_account_pubkey: Pubkey, - new_authorizations: Vec<(StakeAuthorize, Pubkey, SignerIndex)>, + new_authorizations: Vec<StakeAuthorizationIndexed>, sign_only: bool, dump_transaction_message: bool, blockhash_query: BlockhashQuery, @@ -282,6 +255,7 @@ pub enum CliCommand { stake_account_pubkey: Pubkey, lockup: LockupArgs, custodian: SignerIndex, + new_custodian_signer: Option<SignerIndex>, sign_only: bool, dump_transaction_message: bool, blockhash_query: BlockhashQuery, @@ -339,6 +313,8 @@ pub enum CliCommand { new_authorized_pubkey: Pubkey, vote_authorize: VoteAuthorize, memo: Option<String>, + authorized: SignerIndex, + new_authorized: Option<SignerIndex>, }, VoteUpdateValidator { vote_account_pubkey: Pubkey, @@ -363,6 +339,11 @@ pub enum CliCommand { use_lamports_unit: bool, }, Confirm(Signature), + CreateAddressWithSeed { + from_pubkey: Option<Pubkey>, + seed: String, + program_id: Pubkey, + }, DecodeTransaction(Transaction), ResolveSigner(Option<String>), ShowAccount { @@ -451,6 +432,7 @@ pub struct CliConfig<'a> { pub output_format: OutputFormat, pub commitment: CommitmentConfig, pub send_transaction_config: RpcSendTransactionConfig, + pub confirm_transaction_initial_timeout: Duration, pub address_labels: HashMap<String, String>, } @@ -595,6 +577,9 @@ impl Default for CliConfig<'_> { output_format: OutputFormat::Display, commitment: CommitmentConfig::confirmed(), send_transaction_config: RpcSendTransactionConfig::default(), + confirm_transaction_initial_timeout: Duration::from_secs( + u64::from_str(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS).unwrap(), + ), address_labels: HashMap::new(), } } @@ -606,7 +591,31 @@ pub fn parse_command( wallet_manager: &mut Option<Arc<RemoteWalletManager>>, ) -> Result<CliCommandInfo, Box<dyn error::Error>> { let response = match matches.subcommand() { + // Autocompletion Command + ("completion", Some(matches)) => { + let shell_choice = match matches.value_of("shell") { + Some("bash") => Shell::Bash, + Some("fish") => Shell::Fish, + Some("zsh") => Shell::Zsh, + Some("powershell") => Shell::PowerShell, + Some("elvish") => Shell::Elvish, + // This is safe, since we assign default_value and possible_values + // are restricted + _ => unreachable!(), + }; + get_clap_app( + crate_name!(), + crate_description!(), + solana_version::version!(), + ) + .gen_completions_to("solana", shell_choice, &mut stdout()); + std::process::exit(0); + } // Cluster Query Commands + ("block", Some(matches)) => parse_get_block(matches), + ("block-height", Some(matches)) => parse_get_block_height(matches), + ("block-production", Some(matches)) => parse_show_block_production(matches), + ("block-time", Some(matches)) => parse_get_block_time(matches), ("catchup", Some(matches)) => parse_catchup(matches, wallet_manager), ("cluster-date", Some(_matches)) => Ok(CliCommandInfo { command: CliCommand::ClusterDate, signers: vec![], }), @@
-616,9 +625,8 @@ pub fn parse_command( command: CliCommand::ClusterVersion, signers: vec![], }), - ("create-address-with-seed", Some(matches)) => { - parse_create_address_with_seed(matches, default_signer, wallet_manager) - } + ("epoch", Some(matches)) => parse_get_epoch(matches), + ("epoch-info", Some(matches)) => parse_get_epoch_info(matches), ("feature", Some(matches)) => { parse_feature_subcommand(matches, default_signer, wallet_manager) } @@ -633,40 +641,47 @@ pub fn parse_command( command: CliCommand::FirstAvailableBlock, signers: vec![], }), - ("block", Some(matches)) => parse_get_block(matches), - ("block-time", Some(matches)) => parse_get_block_time(matches), - ("epoch-info", Some(matches)) => parse_get_epoch_info(matches), ("genesis-hash", Some(_matches)) => Ok(CliCommandInfo { command: CliCommand::GetGenesisHash, signers: vec![], }), - ("epoch", Some(matches)) => parse_get_epoch(matches), - ("slot", Some(matches)) => parse_get_slot(matches), - ("block-height", Some(matches)) => parse_get_block_height(matches), + ("gossip", Some(_matches)) => Ok(CliCommandInfo { + command: CliCommand::ShowGossip, + signers: vec![], + }), ("inflation", Some(matches)) => { parse_inflation_subcommand(matches, default_signer, wallet_manager) } ("largest-accounts", Some(matches)) => parse_largest_accounts(matches), - ("supply", Some(matches)) => parse_supply(matches), - ("total-supply", Some(matches)) => parse_total_supply(matches), - ("transaction-count", Some(matches)) => parse_get_transaction_count(matches), ("leader-schedule", Some(matches)) => parse_leader_schedule(matches), - ("ping", Some(matches)) => parse_cluster_ping(matches, default_signer, wallet_manager), ("live-slots", Some(_matches)) => Ok(CliCommandInfo { command: CliCommand::LiveSlots, signers: vec![], }), ("logs", Some(matches)) => parse_logs(matches, wallet_manager), - ("block-production", Some(matches)) => parse_show_block_production(matches), - ("gossip", Some(_matches)) => Ok(CliCommandInfo { - command: CliCommand::ShowGossip, - signers: vec![], - }), + ("ping", Some(matches)) => parse_cluster_ping(matches, default_signer, wallet_manager), + ("rent", Some(matches)) => { + let data_length = value_of::(matches, "data_length") + .unwrap() + .length(); + let use_lamports_unit = matches.is_present("lamports"); + Ok(CliCommandInfo { + command: CliCommand::Rent { + data_length, + use_lamports_unit, + }, + signers: vec![], + }) + } + ("slot", Some(matches)) => parse_get_slot(matches), ("stakes", Some(matches)) => parse_show_stakes(matches, wallet_manager), - ("validators", Some(matches)) => parse_show_validators(matches), + ("supply", Some(matches)) => parse_supply(matches), + ("total-supply", Some(matches)) => parse_total_supply(matches), + ("transaction-count", Some(matches)) => parse_get_transaction_count(matches), ("transaction-history", Some(matches)) => { parse_transaction_history(matches, wallet_manager) } + ("validators", Some(matches)) => parse_show_validators(matches), // Nonce Commands ("authorize-nonce-account", Some(matches)) => { parse_authorize_nonce_account(matches, default_signer, wallet_manager) @@ -711,7 +726,10 @@ pub fn parse_command( } // Stake Commands ("create-stake-account", Some(matches)) => { - parse_create_stake_account(matches, default_signer, wallet_manager) + parse_create_stake_account(matches, default_signer, wallet_manager, !CHECKED) + } + ("create-stake-account-checked", Some(matches)) => { + parse_create_stake_account(matches, default_signer, wallet_manager, CHECKED) } ("delegate-stake", Some(matches)) 
=> { parse_stake_delegate_stake(matches, default_signer, wallet_manager) @@ -729,10 +747,16 @@ pub fn parse_command( parse_merge_stake(matches, default_signer, wallet_manager) } ("stake-authorize", Some(matches)) => { - parse_stake_authorize(matches, default_signer, wallet_manager) + parse_stake_authorize(matches, default_signer, wallet_manager, !CHECKED) + } + ("stake-authorize-checked", Some(matches)) => { + parse_stake_authorize(matches, default_signer, wallet_manager, CHECKED) } ("stake-set-lockup", Some(matches)) => { - parse_stake_set_lockup(matches, default_signer, wallet_manager) + parse_stake_set_lockup(matches, default_signer, wallet_manager, !CHECKED) + } + ("stake-set-lockup-checked", Some(matches)) => { + parse_stake_set_lockup(matches, default_signer, wallet_manager, CHECKED) } ("stake-account", Some(matches)) => parse_show_stake_account(matches, wallet_manager), ("stake-history", Some(matches)) => parse_show_stake_history(matches), @@ -759,50 +783,41 @@ pub fn parse_command( default_signer, wallet_manager, VoteAuthorize::Voter, + !CHECKED, ), ("vote-authorize-withdrawer", Some(matches)) => parse_vote_authorize( matches, default_signer, wallet_manager, VoteAuthorize::Withdrawer, + !CHECKED, + ), + ("vote-authorize-voter-checked", Some(matches)) => parse_vote_authorize( + matches, + default_signer, + wallet_manager, + VoteAuthorize::Voter, + CHECKED, + ), + ("vote-authorize-withdrawer-checked", Some(matches)) => parse_vote_authorize( + matches, + default_signer, + wallet_manager, + VoteAuthorize::Withdrawer, + CHECKED, ), ("vote-account", Some(matches)) => parse_vote_get_account_command(matches, wallet_manager), ("withdraw-from-vote-account", Some(matches)) => { parse_withdraw_from_vote_account(matches, default_signer, wallet_manager) } // Wallet Commands + ("account", Some(matches)) => parse_account(matches, wallet_manager), ("address", Some(matches)) => Ok(CliCommandInfo { command: CliCommand::Address, signers: vec![default_signer.signer_from_path(matches, wallet_manager)?], }), - ("airdrop", Some(matches)) => { - let pubkey = pubkey_of_signer(matches, "to", wallet_manager)?; - let signers = if pubkey.is_some() { - vec![] - } else { - vec![default_signer.signer_from_path(matches, wallet_manager)?] - }; - let lamports = lamports_of_sol(matches, "amount").unwrap(); - Ok(CliCommandInfo { - command: CliCommand::Airdrop { pubkey, lamports }, - signers, - }) - } - ("balance", Some(matches)) => { - let pubkey = pubkey_of_signer(matches, "pubkey", wallet_manager)?; - let signers = if pubkey.is_some() { - vec![] - } else { - vec![default_signer.signer_from_path(matches, wallet_manager)?] 
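A note on the `!CHECKED`/`CHECKED` arguments threaded through the parse arms above: `CHECKED` is the `const CHECKED: bool = true;` added near the top of cli.rs, so call sites read as unchecked/checked rather than bare `false`/`true`. The checked path backs the new `*-checked` subcommands, which require the new authority to co-sign. A self-contained sketch of the idiom (function name hypothetical):

```rust
const CHECKED: bool = true;

// Stand-in for parse_stake_authorize / parse_vote_authorize: `checked`
// selects the instruction variant that demands the new authority's signature.
fn subcommand_name(checked: bool) -> &'static str {
    if checked {
        "stake-authorize-checked"
    } else {
        "stake-authorize"
    }
}

fn main() {
    assert_eq!(subcommand_name(!CHECKED), "stake-authorize");
    assert_eq!(subcommand_name(CHECKED), "stake-authorize-checked");
}
```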
- }; - Ok(CliCommandInfo { - command: CliCommand::Balance { - pubkey, - use_lamports_unit: matches.is_present("lamports"), - }, - signers, - }) - } + ("airdrop", Some(matches)) => parse_airdrop(matches, default_signer, wallet_manager), + ("balance", Some(matches)) => parse_balance(matches, default_signer, wallet_manager), ("confirm", Some(matches)) => match matches.value_of("signature").unwrap().parse() { Ok(signature) => Ok(CliCommandInfo { command: CliCommand::Confirm(signature), @@ -810,40 +825,10 @@ pub fn parse_command( }), _ => Err(CliError::BadParameter("Invalid signature".to_string())), }, - ("decode-transaction", Some(matches)) => { - let blob = value_t_or_exit!(matches, "transaction", String); - let encoding = match matches.value_of("encoding").unwrap() { - "base58" => UiTransactionEncoding::Base58, - "base64" => UiTransactionEncoding::Base64, - _ => unreachable!(), - }; - - let encoded_transaction = EncodedTransaction::Binary(blob, encoding); - if let Some(transaction) = encoded_transaction.decode() { - Ok(CliCommandInfo { - command: CliCommand::DecodeTransaction(transaction), - signers: vec![], - }) - } else { - Err(CliError::BadParameter( - "Unable to decode transaction".to_string(), - )) - } - } - ("account", Some(matches)) => { - let account_pubkey = - pubkey_of_signer(matches, "account_pubkey", wallet_manager)?.unwrap(); - let output_file = matches.value_of("output_file"); - let use_lamports_unit = matches.is_present("lamports"); - Ok(CliCommandInfo { - command: CliCommand::ShowAccount { - pubkey: account_pubkey, - output_file: output_file.map(ToString::to_string), - use_lamports_unit, - }, - signers: vec![], - }) + ("create-address-with-seed", Some(matches)) => { + parse_create_address_with_seed(matches, default_signer, wallet_manager) } + ("decode-transaction", Some(matches)) => parse_decode_transaction(matches), ("resolve-signer", Some(matches)) => { let signer_path = resolve_signer(matches, "signer", wallet_manager)?; Ok(CliCommandInfo { @@ -851,69 +836,7 @@ pub fn parse_command( signers: vec![], }) } - ("transfer", Some(matches)) => { - let amount = SpendAmount::new_from_matches(matches, "amount"); - let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap(); - let sign_only = matches.is_present(SIGN_ONLY_ARG.name); - let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); - let no_wait = matches.is_present("no_wait"); - let blockhash_query = BlockhashQuery::new_from_matches(matches); - let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?; - let (nonce_authority, nonce_authority_pubkey) = - signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; - let memo = matches.value_of(MEMO_ARG.name).map(String::from); - let (fee_payer, fee_payer_pubkey) = - signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; - let (from, from_pubkey) = signer_of(matches, "from", wallet_manager)?; - let allow_unfunded_recipient = matches.is_present("allow_unfunded_recipient"); - - let mut bulk_signers = vec![fee_payer, from]; - if nonce_account.is_some() { - bulk_signers.push(nonce_authority); - } - - let signer_info = - default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; - - let derived_address_seed = matches - .value_of("derived_address_seed") - .map(|s| s.to_string()); - let derived_address_program_id = - resolve_derived_address_program_id(matches, "derived_address_program_id"); - - Ok(CliCommandInfo { - command: CliCommand::Transfer { - amount, - to, - sign_only, - dump_transaction_message, - 
allow_unfunded_recipient, - no_wait, - blockhash_query, - nonce_account, - nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), - memo, - fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), - from: signer_info.index_of(from_pubkey).unwrap(), - derived_address_seed, - derived_address_program_id, - }, - signers: signer_info.signers, - }) - } - ("rent", Some(matches)) => { - let data_length = value_of::(matches, "data_length") - .unwrap() - .length(); - let use_lamports_unit = matches.is_present("lamports"); - Ok(CliCommandInfo { - command: CliCommand::Rent { - data_length, - use_lamports_unit, - }, - signers: vec![], - }) - } + ("transfer", Some(matches)) => parse_transfer(matches, default_signer, wallet_manager), // ("", None) => { eprintln!("{}", matches.usage()); @@ -928,349 +851,6 @@ pub fn parse_command( pub type ProcessResult = Result>; -fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option { - matches.value_of(arg_name).and_then(|v| match v { - "NONCE" => Some(system_program::id()), - "STAKE" => Some(solana_stake_program::id()), - "VOTE" => Some(solana_vote_program::id()), - _ => pubkey_of(matches, arg_name), - }) -} - -pub fn parse_create_address_with_seed( - matches: &ArgMatches<'_>, - default_signer: &DefaultSigner, - wallet_manager: &mut Option>, -) -> Result { - let from_pubkey = pubkey_of_signer(matches, "from", wallet_manager)?; - let signers = if from_pubkey.is_some() { - vec![] - } else { - vec![default_signer.signer_from_path(matches, wallet_manager)?] - }; - - let program_id = resolve_derived_address_program_id(matches, "program_id").unwrap(); - - let seed = matches.value_of("seed").unwrap().to_string(); - - Ok(CliCommandInfo { - command: CliCommand::CreateAddressWithSeed { - from_pubkey, - seed, - program_id, - }, - signers, - }) -} - -fn process_create_address_with_seed( - config: &CliConfig, - from_pubkey: Option<&Pubkey>, - seed: &str, - program_id: &Pubkey, -) -> ProcessResult { - let from_pubkey = if let Some(pubkey) = from_pubkey { - *pubkey - } else { - config.pubkey()? - }; - let address = Pubkey::create_with_seed(&from_pubkey, seed, program_id)?; - Ok(address.to_string()) -} - -fn process_airdrop( - rpc_client: &RpcClient, - config: &CliConfig, - pubkey: &Option, - lamports: u64, -) -> ProcessResult { - let pubkey = if let Some(pubkey) = pubkey { - *pubkey - } else { - config.pubkey()? - }; - println!( - "Requesting airdrop of {}", - build_balance_message(lamports, false, true), - ); - - let pre_balance = rpc_client.get_balance(&pubkey)?; - - let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports); - if let Ok(signature) = result { - let signature_cli_message = log_instruction_custom_error::(result, &config)?; - println!("{}", signature_cli_message); - - let current_balance = rpc_client.get_balance(&pubkey)?; - - if current_balance < pre_balance.saturating_add(lamports) { - println!("Balance unchanged"); - println!("Run `solana confirm -v {:?}` for more info", signature); - Ok("".to_string()) - } else { - Ok(build_balance_message(current_balance, false, true)) - } - } else { - log_instruction_custom_error::(result, &config) - } -} - -fn process_balance( - rpc_client: &RpcClient, - config: &CliConfig, - pubkey: &Option, - use_lamports_unit: bool, -) -> ProcessResult { - let pubkey = if let Some(pubkey) = pubkey { - *pubkey - } else { - config.pubkey()? 
- }; - let balance = rpc_client.get_balance(&pubkey)?; - Ok(build_balance_message(balance, use_lamports_unit, true)) -} - -fn process_confirm( - rpc_client: &RpcClient, - config: &CliConfig, - signature: &Signature, -) -> ProcessResult { - match rpc_client.get_signature_statuses_with_history(&[*signature]) { - Ok(status) => { - let cli_transaction = if let Some(transaction_status) = &status.value[0] { - let mut transaction = None; - let mut get_transaction_error = None; - if config.verbose { - match rpc_client.get_transaction_with_config( - signature, - RpcTransactionConfig { - encoding: Some(UiTransactionEncoding::Base64), - commitment: Some(CommitmentConfig::confirmed()), - }, - ) { - Ok(confirmed_transaction) => { - let decoded_transaction = confirmed_transaction - .transaction - .transaction - .decode() - .expect("Successful decode"); - let json_transaction = EncodedTransaction::encode( - decoded_transaction.clone(), - UiTransactionEncoding::Json, - ); - - transaction = Some(CliTransaction { - transaction: json_transaction, - meta: confirmed_transaction.transaction.meta, - block_time: confirmed_transaction.block_time, - slot: Some(confirmed_transaction.slot), - decoded_transaction, - prefix: " ".to_string(), - sigverify_status: vec![], - }); - } - Err(err) => { - get_transaction_error = Some(format!("{:?}", err)); - } - } - } - CliTransactionConfirmation { - confirmation_status: Some(transaction_status.confirmation_status()), - transaction, - get_transaction_error, - err: transaction_status.err.clone(), - } - } else { - CliTransactionConfirmation { - confirmation_status: None, - transaction: None, - get_transaction_error: None, - err: None, - } - }; - Ok(config.output_format.formatted_string(&cli_transaction)) - } - Err(err) => Err(CliError::RpcRequestError(format!("Unable to confirm: {}", err)).into()), - } -} - -#[allow(clippy::unnecessary_wraps)] -fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult { - let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction); - let decode_transaction = CliTransaction { - decoded_transaction: transaction.clone(), - transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json), - meta: None, - block_time: None, - slot: None, - prefix: "".to_string(), - sigverify_status, - }; - Ok(config.output_format.formatted_string(&decode_transaction)) -} - -fn process_show_account( - rpc_client: &RpcClient, - config: &CliConfig, - account_pubkey: &Pubkey, - output_file: &Option, - use_lamports_unit: bool, -) -> ProcessResult { - let account = rpc_client.get_account(account_pubkey)?; - let data = account.data.clone(); - let cli_account = CliAccount { - keyed_account: RpcKeyedAccount { - pubkey: account_pubkey.to_string(), - account: UiAccount::encode( - account_pubkey, - &account, - UiAccountEncoding::Base64, - None, - None, - ), - }, - use_lamports_unit, - }; - - let mut account_string = config.output_format.formatted_string(&cli_account); - - if config.output_format == OutputFormat::Display - || config.output_format == OutputFormat::DisplayVerbose - { - if let Some(output_file) = output_file { - let mut f = File::create(output_file)?; - f.write_all(&data)?; - writeln!(&mut account_string)?; - writeln!(&mut account_string, "Wrote account data to {}", output_file)?; - } else if !data.is_empty() { - use pretty_hex::*; - writeln!(&mut account_string, "{:?}", data.hex_dump())?; - } - } - - Ok(account_string) -} - -#[allow(clippy::too_many_arguments)] -fn 
process_transfer( - rpc_client: &RpcClient, - config: &CliConfig, - amount: SpendAmount, - to: &Pubkey, - from: SignerIndex, - sign_only: bool, - dump_transaction_message: bool, - allow_unfunded_recipient: bool, - no_wait: bool, - blockhash_query: &BlockhashQuery, - nonce_account: Option<&Pubkey>, - nonce_authority: SignerIndex, - memo: Option<&String>, - fee_payer: SignerIndex, - derived_address_seed: Option, - derived_address_program_id: Option<&Pubkey>, -) -> ProcessResult { - let from = config.signers[from]; - let mut from_pubkey = from.pubkey(); - - let (recent_blockhash, fee_calculator) = - blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; - - if !sign_only && !allow_unfunded_recipient { - let recipient_balance = rpc_client - .get_balance_with_commitment(to, config.commitment)? - .value; - if recipient_balance == 0 { - return Err(format!( - "The recipient address ({}) is not funded. \ - Add `--allow-unfunded-recipient` to complete the transfer \ - ", - to - ) - .into()); - } - } - - let nonce_authority = config.signers[nonce_authority]; - let fee_payer = config.signers[fee_payer]; - - let derived_parts = derived_address_seed.zip(derived_address_program_id); - let with_seed = if let Some((seed, program_id)) = derived_parts { - let base_pubkey = from_pubkey; - from_pubkey = Pubkey::create_with_seed(&base_pubkey, &seed, program_id)?; - Some((base_pubkey, seed, program_id, from_pubkey)) - } else { - None - }; - - let build_message = |lamports| { - let ixs = if let Some((base_pubkey, seed, program_id, from_pubkey)) = with_seed.as_ref() { - vec![system_instruction::transfer_with_seed( - from_pubkey, - base_pubkey, - seed.clone(), - program_id, - to, - lamports, - )] - .with_memo(memo) - } else { - vec![system_instruction::transfer(&from_pubkey, to, lamports)].with_memo(memo) - }; - - if let Some(nonce_account) = &nonce_account { - Message::new_with_nonce( - ixs, - Some(&fee_payer.pubkey()), - nonce_account, - &nonce_authority.pubkey(), - ) - } else { - Message::new(&ixs, Some(&fee_payer.pubkey())) - } - }; - - let (message, _) = resolve_spend_tx_and_check_account_balances( - rpc_client, - sign_only, - amount, - &fee_calculator, - &from_pubkey, - &fee_payer.pubkey(), - build_message, - config.commitment, - )?; - let mut tx = Transaction::new_unsigned(message); - - if sign_only { - tx.try_partial_sign(&config.signers, recent_blockhash)?; - return_signers_with_config( - &tx, - &config.output_format, - &ReturnSignersConfig { - dump_transaction_message, - }, - ) - } else { - if let Some(nonce_account) = &nonce_account { - let nonce_account = nonce_utils::get_account_with_commitment( - rpc_client, - nonce_account, - config.commitment, - )?; - check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; - } - - tx.try_sign(&config.signers, recent_blockhash)?; - let result = if no_wait { - rpc_client.send_transaction(&tx) - } else { - rpc_client.send_and_confirm_transaction_with_spinner(&tx) - }; - log_instruction_custom_error::(result, &config) - } -} - pub fn process_command(config: &CliConfig) -> ProcessResult { if config.verbose && config.output_format == OutputFormat::DisplayVerbose { println_name_value("RPC URL:", &config.json_rpc_url); @@ -1286,10 +866,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } let rpc_client = if config.rpc_client.is_none() { - Arc::new(RpcClient::new_with_timeout_and_commitment( + Arc::new(RpcClient::new_with_timeouts_and_commitment( config.json_rpc_url.to_string(), config.rpc_timeout, 
config.commitment, + config.confirm_transaction_initial_timeout, )) } else { // Primarily for testing @@ -1322,7 +903,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { from_pubkey, seed, program_id, - } => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id), + } => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id), CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()), CliCommand::Feature(feature_subcommand) => { process_feature_subcommand(&rpc_client, config, feature_subcommand) @@ -1345,8 +926,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::LeaderSchedule { epoch } => { process_leader_schedule(&rpc_client, config, *epoch) } - CliCommand::LiveSlots => process_live_slots(&config), - CliCommand::Logs { filter } => process_logs(&config, filter), + CliCommand::LiveSlots => process_live_slots(config), + CliCommand::Logs { filter } => process_logs(config, filter), CliCommand::Ping { lamports, interval, @@ -1389,6 +970,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { sort_order, reverse_sort, number_validators, + keep_unstaked_delinquents, + delinquent_slot_distance, } => process_show_validators( &rpc_client, config, @@ -1396,6 +979,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *sort_order, *reverse_sort, *number_validators, + *keep_unstaked_delinquents, + *delinquent_slot_distance, ), CliCommand::Supply { print_accounts } => { process_supply(&rpc_client, config, *print_accounts) @@ -1451,7 +1036,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { ), // Get the current nonce CliCommand::GetNonce(nonce_account_pubkey) => { - process_get_nonce(&rpc_client, config, &nonce_account_pubkey) + process_get_nonce(&rpc_client, config, nonce_account_pubkey) } // Get a new nonce CliCommand::NewNonce { @@ -1472,7 +1057,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_nonce_account( &rpc_client, config, - &nonce_account_pubkey, + nonce_account_pubkey, *use_lamports_unit, ), // Withdraw lamports from a nonce account @@ -1485,10 +1070,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_from_nonce_account( &rpc_client, config, - &nonce_account, + nonce_account, *nonce_authority, memo.as_ref(), - &destination_account_pubkey, + destination_account_pubkey, *lamports, ), @@ -1520,6 +1105,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { seed, staker, withdrawer, + withdrawer_signer, lockup, amount, sign_only, @@ -1537,6 +1123,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { seed, staker, withdrawer, + *withdrawer_signer, lockup, *amount, *sign_only, @@ -1562,7 +1149,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_deactivate_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1588,8 +1175,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_delegate_stake( &rpc_client, config, - &stake_account_pubkey, - &vote_account_pubkey, + stake_account_pubkey, + vote_account_pubkey, *stake_authority, *force, *sign_only, @@ -1616,7 +1203,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_split_stake( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1643,8 +1230,8 @@ 
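The hunk above switches `RpcClient` construction to the four-argument `new_with_timeouts_and_commitment`, threading the new initial-confirmation timeout through alongside the existing per-request timeout. A minimal sketch using this diff's defaults (30s from `DEFAULT_RPC_TIMEOUT_SECONDS`, 5s from `DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS`):

```rust
use std::{sync::Arc, time::Duration};

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

// Sketch of the constructor call made in process_command above.
fn make_rpc_client(json_rpc_url: String) -> Arc<RpcClient> {
    Arc::new(RpcClient::new_with_timeouts_and_commitment(
        json_rpc_url,
        Duration::from_secs(30),       // rpc_timeout
        CommitmentConfig::confirmed(), // default commitment level
        Duration::from_secs(5),        // confirm_transaction_initial_timeout
    ))
}
```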
pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_merge_stake( &rpc_client, config, - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + source_stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1661,7 +1248,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1684,7 +1271,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_stake_authorize( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, new_authorizations, *custodian, *sign_only, @@ -1698,8 +1285,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { ), CliCommand::StakeSetLockup { stake_account_pubkey, - mut lockup, + lockup, custodian, + new_custodian_signer, sign_only, dump_transaction_message, blockhash_query, @@ -1710,8 +1298,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_stake_set_lockup( &rpc_client, config, - &stake_account_pubkey, - &mut lockup, + stake_account_pubkey, + lockup, + *new_custodian_signer, *custodian, *sign_only, *dump_transaction_message, @@ -1738,8 +1327,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_stake( &rpc_client, config, - &stake_account_pubkey, - &destination_account_pubkey, + stake_account_pubkey, + destination_account_pubkey, *amount, *withdraw_authority, *custodian, @@ -1767,7 +1356,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_set_validator_info( &rpc_client, config, - &validator_info, + validator_info, *force_keybase, *info_pubkey, ), @@ -1801,7 +1390,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_vote_account( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1825,12 +1414,16 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { new_authorized_pubkey, vote_authorize, memo, + authorized, + new_authorized, } => process_vote_authorize( &rpc_client, config, - &vote_account_pubkey, - &new_authorized_pubkey, + vote_account_pubkey, + new_authorized_pubkey, *vote_authorize, + *authorized, + *new_authorized, memo.as_ref(), ), CliCommand::VoteUpdateValidator { @@ -1841,7 +1434,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_validator( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *new_identity_account, *withdraw_authority, memo.as_ref(), @@ -1854,7 +1447,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_commission( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *commission, *withdraw_authority, memo.as_ref(), @@ -1870,7 +1463,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::Balance { pubkey, use_lamports_unit, - } => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit), + } => process_balance(&rpc_client, config, pubkey, *use_lamports_unit), // Confirm the last client transaction by signature CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature), CliCommand::DecodeTransaction(transaction) => { @@ -1887,13 +1480,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { pubkey, output_file, use_lamports_unit, - } => process_show_account( - &rpc_client, - config, - &pubkey, - &output_file, - 
*use_lamports_unit, - ), + } => process_show_account(&rpc_client, config, pubkey, output_file, *use_lamports_unit), CliCommand::Transfer { amount, to, @@ -1947,21 +1534,41 @@ pub fn request_and_confirm_airdrop( Ok(signature) } +fn common_error_adapter(ix_error: &InstructionError) -> Option +where + E: 'static + std::error::Error + DecodeError + FromPrimitive, +{ + if let InstructionError::Custom(code) = ix_error { + E::decode_custom_error_to_enum(*code) + } else { + None + } +} + pub fn log_instruction_custom_error( result: ClientResult, config: &CliConfig, ) -> ProcessResult where E: 'static + std::error::Error + DecodeError + FromPrimitive, +{ + log_instruction_custom_error_ex::(result, config, common_error_adapter) +} + +pub fn log_instruction_custom_error_ex( + result: ClientResult, + config: &CliConfig, + error_adapter: F, +) -> ProcessResult +where + E: 'static + std::error::Error + DecodeError + FromPrimitive, + F: Fn(&InstructionError) -> Option, { match result { Err(err) => { - if let ClientErrorKind::TransactionError(TransactionError::InstructionError( - _, - InstructionError::Custom(code), - )) = err.kind() - { - if let Some(specific_error) = E::decode_custom_error_to_enum(*code) { + let maybe_tx_err = err.get_transaction_error(); + if let Some(TransactionError::InstructionError(_, ix_error)) = maybe_tx_err { + if let Some(specific_error) = error_adapter(&ix_error) { return Err(specific_error.into()); } } @@ -1976,289 +1583,6 @@ where } } -pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> { - App::new(name) - .about(about) - .version(version) - .setting(AppSettings::SubcommandRequiredElseHelp) - .subcommand( - SubCommand::with_name("address") - .about("Get your public key") - .arg( - Arg::with_name("confirm_key") - .long("confirm-key") - .takes_value(false) - .help("Confirm key on device; only relevant if using remote wallet"), - ), - ) - .cluster_query_subcommands() - .feature_subcommands() - .inflation_subcommands() - .nonce_subcommands() - .program_subcommands() - .stake_subcommands() - .subcommand( - SubCommand::with_name("airdrop") - .about("Request lamports") - .arg( - Arg::with_name("faucet_host") - .long("faucet-host") - .value_name("URL") - .takes_value(true) - .help("Faucet host to use [default: the --url host]"), - ) - .arg( - Arg::with_name("faucet_port") - .long("faucet-port") - .value_name("PORT_NUMBER") - .takes_value(true) - .default_value(solana_faucet::faucet::FAUCET_PORT_STR) - .help("Faucet port to use"), - ) - .arg( - Arg::with_name("amount") - .index(1) - .value_name("AMOUNT") - .takes_value(true) - .validator(is_amount) - .required(true) - .help("The airdrop amount to request, in SOL"), - ) - .arg( - pubkey!(Arg::with_name("to") - .index(2) - .value_name("RECIPIENT_ADDRESS"), - "The account address of airdrop recipient. "), - ), - ) - .subcommand( - SubCommand::with_name("balance") - .about("Get your balance") - .arg( - pubkey!(Arg::with_name("pubkey") - .index(1) - .value_name("ACCOUNT_ADDRESS"), - "The account address of the balance to check. 
") - ) - .arg( - Arg::with_name("lamports") - .long("lamports") - .takes_value(false) - .help("Display balance in lamports instead of SOL"), - ), - ) - .subcommand( - SubCommand::with_name("confirm") - .about("Confirm transaction by signature") - .arg( - Arg::with_name("signature") - .index(1) - .value_name("TRANSACTION_SIGNATURE") - .takes_value(true) - .required(true) - .help("The transaction signature to confirm"), - ) - .after_help(// Formatted specifically for the manually-indented heredoc string - "Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\ - \n\ - \nAccount modes:\ - \n |srwx|\ - \n s: signed\ - \n r: readable (always true)\ - \n w: writable\ - \n x: program account (inner instructions excluded)\ - " - ), - ) - .subcommand( - SubCommand::with_name("decode-transaction") - .about("Decode a serialized transaction") - .arg( - Arg::with_name("transaction") - .index(1) - .value_name("TRANSACTION") - .takes_value(true) - .required(true) - .help("transaction to decode"), - ) - .arg( - Arg::with_name("encoding") - .index(2) - .value_name("ENCODING") - .possible_values(&["base58", "base64"]) // Subset of `UiTransactionEncoding` enum - .default_value("base58") - .takes_value(true) - .required(true) - .help("transaction encoding"), - ), - ) - .subcommand( - SubCommand::with_name("create-address-with-seed") - .about("Generate a derived account address with a seed") - .arg( - Arg::with_name("seed") - .index(1) - .value_name("SEED_STRING") - .takes_value(true) - .required(true) - .validator(is_derived_address_seed) - .help("The seed. Must not take more than 32 bytes to encode as utf-8"), - ) - .arg( - Arg::with_name("program_id") - .index(2) - .value_name("PROGRAM_ID") - .takes_value(true) - .required(true) - .help( - "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, and VOTE keywords", - ), - ) - .arg( - pubkey!(Arg::with_name("from") - .long("from") - .value_name("FROM_PUBKEY") - .required(false), - "From (base) key, [default: cli config keypair]. "), - ), - ) - .subcommand( - SubCommand::with_name("deploy") - .about("Deploy a program") - .arg( - Arg::with_name("program_location") - .index(1) - .value_name("PROGRAM_FILEPATH") - .takes_value(true) - .required(true) - .help("/path/to/program.o"), - ) - .arg( - Arg::with_name("address_signer") - .index(2) - .value_name("PROGRAM_ADDRESS_SIGNER") - .takes_value(true) - .validator(is_valid_signer) - .help("The signer for the desired address of the program [default: new random address]") - ) - .arg( - Arg::with_name("use_deprecated_loader") - .long("use-deprecated-loader") - .takes_value(false) - .hidden(true) // Don't document this argument to discourage its use - .help("Use the deprecated BPF loader") - ) - .arg( - Arg::with_name("allow_excessive_balance") - .long("allow-excessive-deploy-account-balance") - .takes_value(false) - .help("Use the designated program id, even if the account already holds a large balance of SOL") - ), - ) - .subcommand( - SubCommand::with_name("resolve-signer") - .about("Checks that a signer is valid, and returns its specific path; useful for signers that may be specified generally, eg. 
usb://ledger") - .arg( - Arg::with_name("signer") - .index(1) - .value_name("SIGNER_KEYPAIR") - .takes_value(true) - .required(true) - .validator(is_valid_signer) - .help("The signer path to resolve") - ) - ) - .subcommand( - SubCommand::with_name("transfer") - .about("Transfer funds between system accounts") - .alias("pay") - .arg( - pubkey!(Arg::with_name("to") - .index(1) - .value_name("RECIPIENT_ADDRESS") - .required(true), - "The account address of recipient. "), - ) - .arg( - Arg::with_name("amount") - .index(2) - .value_name("AMOUNT") - .takes_value(true) - .validator(is_amount_or_all) - .required(true) - .help("The amount to send, in SOL; accepts keyword ALL"), - ) - .arg( - pubkey!(Arg::with_name("from") - .long("from") - .value_name("FROM_ADDRESS"), - "Source account of funds (if different from client local account). "), - ) - .arg( - Arg::with_name("no_wait") - .long("no-wait") - .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), - ) - .arg( - Arg::with_name("derived_address_seed") - .long("derived-address-seed") - .takes_value(true) - .value_name("SEED_STRING") - .requires("derived_address_program_id") - .validator(is_derived_address_seed) - .hidden(true) - ) - .arg( - Arg::with_name("derived_address_program_id") - .long("derived-address-program-id") - .takes_value(true) - .value_name("PROGRAM_ID") - .requires("derived_address_seed") - .hidden(true) - ) - .arg( - Arg::with_name("allow_unfunded_recipient") - .long("allow-unfunded-recipient") - .takes_value(false) - .help("Complete the transfer even if the recipient address is not funded") - ) - .offline_args() - .nonce_args(false) - .arg(memo_arg()) - .arg(fee_payer_arg()), - ) - .subcommand( - SubCommand::with_name("account") - .about("Show the contents of an account") - .alias("account") - .arg( - pubkey!(Arg::with_name("account_pubkey") - .index(1) - .value_name("ACCOUNT_ADDRESS") - .required(true), - "Account key URI. 
") - ) - .arg( - Arg::with_name("output_file") - .long("output-file") - .short("o") - .value_name("FILEPATH") - .takes_value(true) - .help("Write the account data to this file"), - ) - .arg( - Arg::with_name("lamports") - .long("lamports") - .takes_value(false) - .help("Display balance in lamports instead of SOL"), - ), - ) - .validator_info_subcommands() - .vote_subcommands() -} - #[cfg(test)] mod tests { use super::*; @@ -2272,6 +1596,7 @@ mod tests { use solana_sdk::{ pubkey::Pubkey, signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Keypair, Presigner}, + stake, system_program, transaction::TransactionError, }; use solana_transaction_status::TransactionConfirmationStatus; @@ -2299,7 +1624,7 @@ mod tests { let default_keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &default_keypair_file).unwrap(); - let default_signer = DefaultSigner::new(default_keypair_file); + let default_signer = DefaultSigner::new("keypair", &default_keypair_file); let signer_info = default_signer .generate_unique_signers(vec![], &matches, &mut None) @@ -2368,7 +1693,7 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_command() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey_string = format!("{}", pubkey); @@ -2377,7 +1702,7 @@ mod tests { let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let keypair = read_keypair_file(&keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); // Test Airdrop Subcommand let test_airdrop = test_commands @@ -2464,7 +1789,7 @@ mod tests { let from_pubkey = Some(solana_sdk::pubkey::new_rand()); let from_str = from_pubkey.unwrap().to_string(); for (name, program_id) in &[ - ("STAKE", solana_stake_program::id()), + ("STAKE", stake::program::id()), ("VOTE", solana_vote_program::id()), ("NONCE", system_program::id()), ] { @@ -2500,7 +1825,7 @@ mod tests { command: CliCommand::CreateAddressWithSeed { from_pubkey: None, seed: "seed".to_string(), - program_id: solana_stake_program::id(), + program_id: stake::program::id(), }, signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } @@ -2633,6 +1958,8 @@ mod tests { new_authorized_pubkey, vote_authorize: VoteAuthorize::Voter, memo: None, + authorized: 0, + new_authorized: None, }; let result = process_command(&config); assert!(result.is_ok()); @@ -2656,6 +1983,7 @@ mod tests { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup { epoch: 0, unix_timestamp: 0, @@ -2763,11 +2091,11 @@ mod tests { config.command = CliCommand::CreateAddressWithSeed { from_pubkey: Some(from_pubkey), seed: "seed".to_string(), - program_id: solana_stake_program::id(), + program_id: stake::program::id(), }; let address = process_command(&config); let expected_address = - Pubkey::create_with_seed(&from_pubkey, "seed", &solana_stake_program::id()).unwrap(); + Pubkey::create_with_seed(&from_pubkey, "seed", &stake::program::id()).unwrap(); assert_eq!(address.unwrap(), expected_address.to_string()); // Need airdrop cases @@ -2828,6 +2156,8 @@ mod tests { new_authorized_pubkey: bob_pubkey, vote_authorize: VoteAuthorize::Voter, memo: None, + authorized: 0, + new_authorized: None, }; assert!(process_command(&config).is_err()); @@ -2900,12 +2230,12 @@ mod tests { #[test] fn 
test_parse_transfer_subcommand() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let default_keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &default_keypair_file).unwrap(); - let default_signer = DefaultSigner::new(default_keypair_file.clone()); + let default_signer = DefaultSigner::new("", &default_keypair_file); //Test Transfer Subcommand, SOL let from_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); @@ -3154,7 +2484,7 @@ mod tests { memo: None, fee_payer: 0, derived_address_seed: Some(derived_address_seed), - derived_address_program_id: Some(solana_stake_program::id()), + derived_address_program_id: Some(stake::program::id()), }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index b4874b1b21dc71..c2d5e9a221fa7b 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1,7 +1,6 @@ use crate::{ cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, - stake::is_stake_program_v2_enabled, }; use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; use console::{style, Emoji}; @@ -24,11 +23,12 @@ use solana_client::{ pubsub_client::PubsubClient, rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient}, rpc_config::{ - RpcAccountInfoConfig, RpcBlockConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter, - RpcProgramAccountsConfig, RpcTransactionConfig, RpcTransactionLogsConfig, - RpcTransactionLogsFilter, + RpcAccountInfoConfig, RpcBlockConfig, RpcGetVoteAccountsConfig, RpcLargestAccountsConfig, + RpcLargestAccountsFilter, RpcProgramAccountsConfig, RpcTransactionConfig, + RpcTransactionLogsConfig, RpcTransactionLogsFilter, }, rpc_filter, + rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, rpc_response::SlotInfo, }; use solana_remote_wallet::remote_wallet::RemoteWalletManager; @@ -46,7 +46,9 @@ use solana_sdk::{ rent::Rent, rpc_port::DEFAULT_RPC_PORT_STR, signature::Signature, - slot_history, system_instruction, system_program, + slot_history, + stake::{self, state::StakeState}, + system_instruction, system_program, sysvar::{ self, slot_history::SlotHistory, @@ -55,7 +57,6 @@ use solana_sdk::{ timing, transaction::Transaction, }; -use solana_stake_program::stake_state::StakeState; use solana_transaction_status::UiTransactionEncoding; use solana_vote_program::vote_state::VoteState; use std::{ @@ -121,7 +122,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("our-localhost") .takes_value(false) .value_name("PORT") - .default_value(&DEFAULT_RPC_PORT_STR) + .default_value(DEFAULT_RPC_PORT_STR) .validator(is_port) .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"), ) @@ -175,7 +176,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(true) .value_name("EPOCH") .validator(is_epoch) - .help("Epoch to show leader schedule for. (default: current)") + .help("Epoch to show leader schedule for. 
[default: current]") ) ) .subcommand( @@ -381,6 +382,25 @@ impl ClusterQuerySubCommands for App<'_, '_> { ]) .default_value("stake") .help("Sort order (does not affect JSON output)"), + ) + .arg( + Arg::with_name("keep_unstaked_delinquents") + .long("keep-unstaked-delinquents") + .takes_value(false) + .help("Don't discard unstaked, delinquent validators") + ) + .arg( + Arg::with_name("delinquent_slot_distance") + .long("delinquent-slot-distance") + .takes_value(true) + .value_name("SLOT_DISTANCE") + .validator(is_slot) + .help( + concatcp!( + "Minimum slot distance from the tip to consider a validator delinquent. [default: ", + DELINQUENT_VALIDATOR_SLOT_DISTANCE, + "]", + )) ), ) .subcommand( @@ -616,6 +636,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result CliValidatorsSortOrder::Delinquent, @@ -636,6 +658,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result Pro (secs as u64).saturating_mul(1000).checked_div(slots) }) .unwrap_or(clock::DEFAULT_MS_PER_SLOT); + let start_block_time = rpc_client + .get_block_time(epoch_info.absolute_slot - epoch_info.slot_index) + .ok(); + let current_block_time = rpc_client.get_block_time(epoch_info.absolute_slot).ok(); let epoch_info = CliEpochInfo { epoch_info, average_slot_time_ms, + start_block_time, + current_block_time, }; Ok(config.output_format.formatted_string(&epoch_info)) } @@ -1707,7 +1738,7 @@ pub fn process_show_stakes( } } let all_stake_accounts = rpc_client - .get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?; + .get_program_accounts_with_config(&stake::program::id(), program_accounts_config)?; let stake_history_account = rpc_client.get_account(&stake_history::id())?; let clock_account = rpc_client.get_account(&sysvar::clock::id())?; let clock: Clock = from_account(&clock_account).ok_or_else(|| { @@ -1718,8 +1749,6 @@ pub fn process_show_stakes( let stake_history = from_account(&stake_history_account).ok_or_else(|| { CliError::RpcRequestError("Failed to deserialize stake history".to_string()) })?; - // At v1.6, this check can be removed and simply passed as `true` - let stake_program_v2_enabled = is_stake_program_v2_enabled(rpc_client)?; let mut stake_accounts: Vec = vec![]; for (stake_pubkey, stake_account) in all_stake_accounts { @@ -1735,7 +1764,6 @@ pub fn process_show_stakes( use_lamports_unit, &stake_history, &clock, - stake_program_v2_enabled, ), }); } @@ -1754,7 +1782,6 @@ pub fn process_show_stakes( use_lamports_unit, &stake_history, &clock, - stake_program_v2_enabled, ), }); } @@ -1785,11 +1812,17 @@ pub fn process_show_validators( validators_sort_order: CliValidatorsSortOrder, validators_reverse_sort: bool, number_validators: bool, + keep_unstaked_delinquents: bool, + delinquent_slot_distance: Option, ) -> ProcessResult { let progress_bar = new_spinner_progress_bar(); progress_bar.set_message("Fetching vote accounts..."); let epoch_info = rpc_client.get_epoch_info()?; - let vote_accounts = rpc_client.get_vote_accounts()?; + let vote_accounts = rpc_client.get_vote_accounts_with_config(RpcGetVoteAccountsConfig { + keep_unstaked_delinquents: Some(keep_unstaked_delinquents), + delinquent_slot_distance, + ..RpcGetVoteAccountsConfig::default() + })?; progress_bar.set_message("Fetching block production..."); let skip_rate: HashMap<_, _> = rpc_client @@ -1888,14 +1921,40 @@ pub fn process_show_validators( entry.delinquent_active_stake += validator.activated_stake; } + let validators: Vec<_> = current_validators + .into_iter() + 
.chain(delinquent_validators.into_iter()) + .collect(); + + let (average_skip_rate, average_stake_weighted_skip_rate) = { + let mut skip_rate_len = 0; + let mut skip_rate_sum = 0.; + let mut skip_rate_weighted_sum = 0.; + for validator in validators.iter() { + if let Some(skip_rate) = validator.skip_rate { + skip_rate_sum += skip_rate; + skip_rate_len += 1; + skip_rate_weighted_sum += skip_rate * validator.activated_stake as f64; + } + } + + if skip_rate_len > 0 && total_active_stake > 0 { + ( + skip_rate_sum / skip_rate_len as f64, + skip_rate_weighted_sum / total_active_stake as f64, + ) + } else { + (100., 100.) // Impossible? + } + }; + let cli_validators = CliValidators { total_active_stake, total_current_stake, total_delinquent_stake, - validators: current_validators - .into_iter() - .chain(delinquent_validators.into_iter()) - .collect(), + validators, + average_skip_rate, + average_stake_weighted_skip_rate, validators_sort_order, validators_reverse_sort, number_validators, @@ -2082,7 +2141,7 @@ pub fn process_calculate_rent( #[cfg(test)] mod tests { use super::*; - use crate::cli::{app, parse_command}; + use crate::{clap_app::get_clap_app, cli::parse_command}; use solana_sdk::signature::{write_keypair, Keypair}; use std::str::FromStr; use tempfile::NamedTempFile; @@ -2094,11 +2153,11 @@ mod tests { #[test] fn test_parse_command() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner::new(default_keypair_file); + let default_signer = DefaultSigner::new("", &default_keypair_file); let test_cluster_version = test_commands .clone() diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 86d6a24dc8ab58..5e64b338dc4ebb 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -10,6 +10,7 @@ use solana_cli_output::{QuietDisplay, VerboseDisplay}; use solana_client::{client_error::ClientError, rpc_client::RpcClient}; use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_sdk::{ + account::Account, clock::Slot, feature::{self, Feature}, feature_set::FEATURE_NAMES, @@ -312,6 +313,31 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> { +fn status_from_account(account: Account) -> Option<CliFeatureStatus> { + feature::from_account(&account).map(|feature| match feature.activated_at { + None => CliFeatureStatus::Pending, + Some(activation_slot) => CliFeatureStatus::Active(activation_slot), + }) +} + +fn get_feature_status( + rpc_client: &RpcClient, + feature_id: &Pubkey, +) -> Result<Option<CliFeatureStatus>, Box<dyn std::error::Error>> { + rpc_client + .get_account(feature_id) + .map(status_from_account) + .map_err(|e| e.into()) +} + +pub fn get_feature_is_active( + rpc_client: &RpcClient, + feature_id: &Pubkey, +) -> Result<bool, Box<dyn std::error::Error>> { + get_feature_status(rpc_client, feature_id) + .map(|status| matches!(status, Some(CliFeatureStatus::Active(_)))) +} + fn process_status( rpc_client: &RpcClient, config: &CliConfig, @@ -327,11 +353,7 @@ fn process_status( let feature_id = &feature_ids[i]; let feature_name = FEATURE_NAMES.get(feature_id).unwrap(); if let Some(account) = account { - if let Some(feature) = feature::from_account(&account) { - let feature_status = match feature.activated_at { - None => CliFeatureStatus::Pending, - Some(activation_slot) => CliFeatureStatus::Active(activation_slot), - }; + if let Some(feature_status) = status_from_account(account) { features.push(CliFeature { id: 
feature_id.to_string(), description: feature_name.to_string(), diff --git a/cli/src/inflation.rs b/cli/src/inflation.rs index 11d3fbfb5248ee..8ec8233db04291 100644 --- a/cli/src/inflation.rs +++ b/cli/src/inflation.rs @@ -102,7 +102,7 @@ fn process_rewards( rewards_epoch: Option, ) -> ProcessResult { let rewards = rpc_client - .get_inflation_reward(&addresses, rewards_epoch) + .get_inflation_reward(addresses, rewards_epoch) .map_err(|err| { if let Some(epoch) = rewards_epoch { format!("Rewards not available for epoch {}", epoch) diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 21596aa94cd2bd..9fd8c7b98e2451 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -18,9 +18,13 @@ macro_rules! pubkey { }; } +#[macro_use] +extern crate const_format; + extern crate serde_derive; pub mod checks; +pub mod clap_app; pub mod cli; pub mod cluster_query; pub mod feature; @@ -33,3 +37,4 @@ pub mod stake; pub mod test_utils; pub mod validator_info; pub mod vote; +pub mod wallet; diff --git a/cli/src/main.rs b/cli/src/main.rs index d4b4056e2102c5..4b1d03ce7a6bc1 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,18 +1,15 @@ -use clap::{ - crate_description, crate_name, value_t_or_exit, AppSettings, Arg, ArgGroup, ArgMatches, - SubCommand, -}; +use clap::{crate_description, crate_name, value_t_or_exit, ArgMatches}; use console::style; use solana_clap_utils::{ - input_validators::{is_url, is_url_or_moniker, normalize_to_url_if_moniker}, - keypair::{CliSigners, DefaultSigner, SKIP_SEED_PHRASE_VALIDATION_ARG}, + input_validators::normalize_to_url_if_moniker, + keypair::{CliSigners, DefaultSigner}, DisplayError, }; -use solana_cli::cli::{ - app, parse_command, process_command, CliCommandInfo, CliConfig, SettingType, - DEFAULT_RPC_TIMEOUT_SECONDS, +use solana_cli::{ + clap_app::get_clap_app, + cli::{parse_command, process_command, CliCommandInfo, CliConfig, SettingType}, }; -use solana_cli_config::{Config, CONFIG_FILE}; +use solana_cli_config::Config; use solana_cli_output::{display::println_name_value, OutputFormat}; use solana_client::rpc_config::RpcSendTransactionConfig; use solana_remote_wallet::remote_wallet::RemoteWalletManager; @@ -167,23 +164,29 @@ pub fn parse_args<'a>( let rpc_timeout = value_t_or_exit!(matches, "rpc_timeout", u64); let rpc_timeout = Duration::from_secs(rpc_timeout); + let confirm_transaction_initial_timeout = + value_t_or_exit!(matches, "confirm_transaction_initial_timeout", u64); + let confirm_transaction_initial_timeout = + Duration::from_secs(confirm_transaction_initial_timeout); + let (_, websocket_url) = CliConfig::compute_websocket_url_setting( matches.value_of("websocket_url").unwrap_or(""), &config.websocket_url, matches.value_of("json_rpc_url").unwrap_or(""), &config.json_rpc_url, ); + let default_signer_arg_name = "keypair".to_string(); let (_, default_signer_path) = CliConfig::compute_keypair_path_setting( - matches.value_of("keypair").unwrap_or(""), + matches.value_of(&default_signer_arg_name).unwrap_or(""), &config.keypair_path, ); - let default_signer = DefaultSigner::from_path(default_signer_path.clone())?; + let default_signer = DefaultSigner::new(default_signer_arg_name, &default_signer_path); let CliCommandInfo { command, mut signers, - } = parse_command(&matches, &default_signer, &mut wallet_manager)?; + } = parse_command(matches, &default_signer, &mut wallet_manager)?; if signers.is_empty() { if let Ok(signer_info) = @@ -194,18 +197,7 @@ pub fn parse_args<'a>( } let verbose = matches.is_present("verbose"); - let output_format = matches - 
.value_of("output_format") - .map(|value| match value { - "json" => OutputFormat::Json, - "json-compact" => OutputFormat::JsonCompact, - _ => unreachable!(), - }) - .unwrap_or(if verbose { - OutputFormat::DisplayVerbose - } else { - OutputFormat::Display - }); + let output_format = OutputFormat::from_matches(matches, "output_format", verbose); let (_, commitment) = CliConfig::compute_commitment_config( matches.value_of("commitment").unwrap_or(""), @@ -234,6 +226,7 @@ pub fn parse_args<'a>( preflight_commitment: Some(commitment.commitment), ..RpcSendTransactionConfig::default() }, + confirm_transaction_initial_timeout, address_labels, }, signers, @@ -242,178 +235,21 @@ pub fn parse_args<'a>( fn main() -> Result<(), Box> { solana_logger::setup_with_default("off"); - let matches = app( + let matches = get_clap_app( crate_name!(), crate_description!(), solana_version::version!(), ) - .arg({ - let arg = Arg::with_name("config_file") - .short("C") - .long("config") - .value_name("FILEPATH") - .takes_value(true) - .global(true) - .help("Configuration file to use"); - if let Some(ref config_file) = *CONFIG_FILE { - arg.default_value(&config_file) - } else { - arg - } - }) - .arg( - Arg::with_name("json_rpc_url") - .short("u") - .long("url") - .value_name("URL_OR_MONIKER") - .takes_value(true) - .global(true) - .validator(is_url_or_moniker) - .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", - ), - ) - .arg( - Arg::with_name("websocket_url") - .long("ws") - .value_name("URL") - .takes_value(true) - .global(true) - .validator(is_url) - .help("WebSocket URL for the solana cluster"), - ) - .arg( - Arg::with_name("keypair") - .short("k") - .long("keypair") - .value_name("KEYPAIR") - .global(true) - .takes_value(true) - .help("Filepath or URL to a keypair"), - ) - .arg( - Arg::with_name("commitment") - .long("commitment") - .takes_value(true) - .possible_values(&[ - "processed", - "confirmed", - "finalized", - "recent", // Deprecated as of v1.5.5 - "single", // Deprecated as of v1.5.5 - "singleGossip", // Deprecated as of v1.5.5 - "root", // Deprecated as of v1.5.5 - "max", // Deprecated as of v1.5.5 - ]) - .value_name("COMMITMENT_LEVEL") - .hide_possible_values(true) - .global(true) - .help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"), - ) - .arg( - Arg::with_name("verbose") - .long("verbose") - .short("v") - .global(true) - .help("Show additional information"), - ) - .arg( - Arg::with_name("no_address_labels") - .long("no-address-labels") - .global(true) - .help("Do not use address labels in the output"), - ) - .arg( - Arg::with_name("output_format") - .long("output") - .value_name("FORMAT") - .global(true) - .takes_value(true) - .possible_values(&["json", "json-compact"]) - .help("Return information in specified output format"), - ) - .arg( - Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) - .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) - .global(true) - .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), - ) - .arg( - Arg::with_name("rpc_timeout") - .long("rpc-timeout") - .value_name("SECONDS") - .takes_value(true) - .default_value(DEFAULT_RPC_TIMEOUT_SECONDS) - .global(true) - .hidden(true) - .help("Timeout value for RPC requests"), - ) - .subcommand( - SubCommand::with_name("config") - .about("Solana command-line tool configuration settings") - .aliases(&["get", "set"]) - .setting(AppSettings::SubcommandRequiredElseHelp) - .subcommand( - SubCommand::with_name("get") - 
.about("Get current config settings") - .arg( - Arg::with_name("specific_setting") - .index(1) - .value_name("CONFIG_FIELD") - .takes_value(true) - .possible_values(&[ - "json_rpc_url", - "websocket_url", - "keypair", - "commitment", - ]) - .help("Return a specific config setting"), - ), - ) - .subcommand( - SubCommand::with_name("set") - .about("Set a config setting") - .group( - ArgGroup::with_name("config_settings") - .args(&["json_rpc_url", "websocket_url", "keypair", "commitment"]) - .multiple(true) - .required(true), - ), - ) - .subcommand( - SubCommand::with_name("import-address-labels") - .about("Import a list of address labels") - .arg( - Arg::with_name("filename") - .index(1) - .value_name("FILENAME") - .takes_value(true) - .help("YAML file of address labels"), - ), - ) - .subcommand( - SubCommand::with_name("export-address-labels") - .about("Export the current address labels") - .arg( - Arg::with_name("filename") - .index(1) - .value_name("FILENAME") - .takes_value(true) - .help("YAML file to receive the current address labels"), - ), - ), - ) .get_matches(); do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into()) } fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { - if parse_settings(&matches)? { + if parse_settings(matches)? { let mut wallet_manager = None; - let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?; + let (mut config, signers) = parse_args(matches, &mut wallet_manager)?; config.signers = signers.iter().map(|s| s.as_ref()).collect(); let result = process_command(&config)?; println!("{}", result); diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index c978dae6e0405b..90e53be7938b01 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -1,9 +1,10 @@ use crate::{ checks::{check_account_for_fee_with_commitment, check_unique_pubkeys}, cli::{ - log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, - ProcessResult, + log_instruction_custom_error, log_instruction_custom_error_ex, CliCommand, CliCommandInfo, + CliConfig, CliError, ProcessResult, }, + feature::get_feature_is_active, memo::WithMemo, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }; @@ -12,7 +13,7 @@ use solana_clap_utils::{ input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, - memo::MEMO_ARG, + memo::{memo_arg, MEMO_ARG}, nonce::*, }; use solana_cli_output::CliNonceAccount; @@ -20,16 +21,19 @@ use solana_client::{nonce_utils::*, rpc_client::RpcClient}; use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_sdk::{ account::Account, + feature_set::merge_nonce_error_into_system_error, hash::Hash, + instruction::InstructionError, message::Message, nonce::{self, State}, pubkey::Pubkey, system_instruction::{ advance_nonce_account, authorize_nonce_account, create_nonce_account, - create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError, + create_nonce_account_with_seed, instruction_to_nonce_error, withdraw_nonce_account, + NonceError, SystemError, }, system_program, - transaction::Transaction, + transaction::{Transaction, TransactionError}, }; use std::sync::Arc; @@ -56,7 +60,8 @@ impl NonceSubCommands for App<'_, '_> { .required(true), "Account to be granted authority of the nonce account. 
"), ) - .arg(nonce_authority_arg()), + .arg(nonce_authority_arg()) + .arg(memo_arg()), ) .subcommand( SubCommand::with_name("create-nonce-account") @@ -91,7 +96,8 @@ impl NonceSubCommands for App<'_, '_> { .value_name("STRING") .takes_value(true) .help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey") - ), + ) + .arg(memo_arg()), ) .subcommand( SubCommand::with_name("nonce") @@ -115,7 +121,8 @@ impl NonceSubCommands for App<'_, '_> { .required(true), "Address of the nonce account. "), ) - .arg(nonce_authority_arg()), + .arg(nonce_authority_arg()) + .arg(memo_arg()), ) .subcommand( SubCommand::with_name("nonce-account") @@ -161,7 +168,8 @@ impl NonceSubCommands for App<'_, '_> { .validator(is_amount) .help("The amount to withdraw from the nonce account, in SOL"), ) - .arg(nonce_authority_arg()), + .arg(nonce_authority_arg()) + .arg(memo_arg()), ) } } @@ -363,8 +371,21 @@ pub fn process_authorize_nonce_account( &tx.message, config.commitment, )?; + let merge_errors = + get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + + if merge_errors { + log_instruction_custom_error::(result, config) + } else { + log_instruction_custom_error_ex::(result, config, |ix_error| { + if let InstructionError::Custom(_) = ix_error { + instruction_to_nonce_error(ix_error, merge_errors) + } else { + None + } + }) + } } pub fn process_create_nonce_account( @@ -448,8 +469,40 @@ pub fn process_create_nonce_account( let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; + let merge_errors = + get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + + let err_ix_index = if let Err(err) = &result { + err.get_transaction_error().and_then(|tx_err| { + if let TransactionError::InstructionError(ix_index, _) = tx_err { + Some(ix_index) + } else { + None + } + }) + } else { + None + }; + + match err_ix_index { + // SystemInstruction::InitializeNonceAccount failed + Some(1) => { + if merge_errors { + log_instruction_custom_error::(result, config) + } else { + log_instruction_custom_error_ex::(result, config, |ix_error| { + if let InstructionError::Custom(_) = ix_error { + instruction_to_nonce_error(ix_error, merge_errors) + } else { + None + } + }) + } + } + // SystemInstruction::CreateAccount{,WithSeed} failed + _ => log_instruction_custom_error::(result, config), + } } pub fn process_get_nonce( @@ -474,10 +527,10 @@ pub fn process_new_nonce( ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), - (&nonce_account, "nonce_account_pubkey".to_string()), + (nonce_account, "nonce_account_pubkey".to_string()), )?; - if let Err(err) = rpc_client.get_account(&nonce_account) { + if let Err(err) = rpc_client.get_account(nonce_account) { return Err(CliError::BadParameter(format!( "Unable to advance nonce account {}. 
error: {}", nonce_account, err @@ -487,7 +540,7 @@ pub fn process_new_nonce( let nonce_authority = config.signers[nonce_authority]; let ixs = vec![advance_nonce_account( - &nonce_account, + nonce_account, &nonce_authority.pubkey(), )] .with_memo(memo); @@ -502,8 +555,21 @@ pub fn process_new_nonce( &tx.message, config.commitment, )?; + let merge_errors = + get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + + if merge_errors { + log_instruction_custom_error::(result, config) + } else { + log_instruction_custom_error_ex::(result, config, |ix_error| { + if let InstructionError::Custom(_) = ix_error { + instruction_to_nonce_error(ix_error, merge_errors) + } else { + None + } + }) + } } pub fn process_show_nonce_account( @@ -522,7 +588,7 @@ pub fn process_show_nonce_account( use_lamports_unit, ..CliNonceAccount::default() }; - if let Some(ref data) = data { + if let Some(data) = data { nonce_account.nonce = Some(data.blockhash.to_string()); nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature); nonce_account.authority = Some(data.authority.to_string()); @@ -565,14 +631,27 @@ pub fn process_withdraw_from_nonce_account( &tx.message, config.commitment, )?; + let merge_errors = + get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + + if merge_errors { + log_instruction_custom_error::(result, config) + } else { + log_instruction_custom_error_ex::(result, config, |ix_error| { + if let InstructionError::Custom(_) = ix_error { + instruction_to_nonce_error(ix_error, merge_errors) + } else { + None + } + }) + } } #[cfg(test)] mod tests { use super::*; - use crate::cli::{app, parse_command}; + use crate::{clap_app::get_clap_app, cli::parse_command}; use solana_sdk::{ account::Account, account_utils::StateMut, @@ -592,11 +671,11 @@ mod tests { #[test] fn test_parse_command() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner::new(default_keypair_file.clone()); + let default_signer = DefaultSigner::new("", &default_keypair_file); let (keypair_file, mut tmp_file) = make_tmp_file(); let nonce_account_keypair = Keypair::new(); write_keypair(&nonce_account_keypair, tmp_file.as_file_mut()).unwrap(); diff --git a/cli/src/program.rs b/cli/src/program.rs index 2c4382a48ce061..1726303d7ea428 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -23,6 +23,7 @@ use solana_client::{ rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, + rpc_response::Fees, tpu_client::{TpuClient, TpuClientConfig}, }; use solana_rbpf::vm::{Config, Executable}; @@ -32,7 +33,6 @@ use solana_sdk::{ account_utils::StateMut, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - clock::Slot, commitment_config::CommitmentConfig, instruction::Instruction, instruction::InstructionError, @@ -150,7 +150,7 @@ impl ProgramSubCommands for App<'_, '_> { pubkey!(Arg::with_name("program_id") 
.long("program-id") .value_name("PROGRAM_ID"), - "Executable program's address, must be a signer for initial deploys, can be a pubkey for upgrades \ + "Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \ [default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"), ) .arg( @@ -330,7 +330,7 @@ impl ProgramSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("close") - .about("Close an acount and withdraw all lamports") + .about("Close an account and withdraw all lamports") .arg( Arg::with_name("account") .index(1) @@ -367,6 +367,39 @@ impl ProgramSubCommands for App<'_, '_> { ), ) ) + .subcommand( + SubCommand::with_name("deploy") + .about("Deploy a program") + .arg( + Arg::with_name("program_location") + .index(1) + .value_name("PROGRAM_FILEPATH") + .takes_value(true) + .required(true) + .help("/path/to/program.o"), + ) + .arg( + Arg::with_name("address_signer") + .index(2) + .value_name("PROGRAM_ADDRESS_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("The signer for the desired address of the program [default: new random address]") + ) + .arg( + Arg::with_name("use_deprecated_loader") + .long("use-deprecated-loader") + .takes_value(false) + .hidden(true) // Don't document this argument to discourage its use + .help("Use the deprecated BPF loader") + ) + .arg( + Arg::with_name("allow_excessive_balance") + .long("allow-excessive-deploy-account-balance") + .takes_value(false) + .help("Use the designated program id, even if the account already holds a large balance of SOL") + ), + ) } } @@ -767,7 +800,7 @@ fn process_program_deploy( }; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; - let default_program_keypair = get_default_program_keypair(&program_location); + let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { (Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(program_pubkey) = program_pubkey { @@ -843,7 +876,7 @@ fn process_program_deploy( }; let (program_data, program_len) = if let Some(program_location) = program_location { - let program_data = read_and_verify_elf(&program_location)?; + let program_data = read_and_verify_elf(program_location)?; let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { @@ -886,6 +919,11 @@ fn process_program_deploy( )?; let result = if do_deploy { + if program_signer.is_none() { + return Err( + "Initial deployments require a keypair be provided for the program id".into(), + ); + } do_process_program_write_and_deploy( rpc_client.clone(), config, @@ -1254,7 +1292,7 @@ fn process_dump( UpgradeableLoaderState::programdata_data_offset().unwrap_or(0); let program_data = &programdata_account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err( @@ -1274,7 +1312,7 @@ fn process_dump( let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0); let program_data = &account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err(format!( @@ -1305,8 +1343,8 @@ fn close( let mut tx = Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::close( - &account_pubkey, - 
&recipient_pubkey, + account_pubkey, + recipient_pubkey, &authority_signer.pubkey(), )], Some(&config.signers[0].pubkey()), @@ -1415,7 +1453,7 @@ fn process_close( if close( rpc_client, config, - &address, + address, &recipient_pubkey, authority_signer, ) @@ -1516,7 +1554,7 @@ fn do_process_program_write_and_deploy( .value { complete_partial_program_init( - &loader_id, + loader_id, &config.signers[0].pubkey(), buffer_pubkey, &account, @@ -1546,7 +1584,7 @@ fn do_process_program_write_and_deploy( buffer_pubkey, minimum_balance, buffer_data_len as u64, - &loader_id, + loader_id, )], minimum_balance, ) @@ -1574,7 +1612,7 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write( buffer_pubkey, - &loader_id, + loader_id, (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ) @@ -1618,7 +1656,7 @@ fn do_process_program_write_and_deploy( ) } else { Message::new( - &[loader_instruction::finalize(buffer_pubkey, &loader_id)], + &[loader_instruction::finalize(buffer_pubkey, loader_id)], Some(&config.signers[0].pubkey()), ) }; @@ -1744,8 +1782,8 @@ fn do_process_program_upgrade( // Create and add final message let final_message = Message::new( &[bpf_loader_upgradeable::upgrade( - &program_id, - &buffer_pubkey, + program_id, + buffer_pubkey, &upgrade_authority.pubkey(), &config.signers[0].pubkey(), )], @@ -1813,7 +1851,7 @@ fn complete_partial_program_init( account_data_len as u64, )); if account.owner != *loader_id { - instructions.push(system_instruction::assign(elf_pubkey, &loader_id)); + instructions.push(system_instruction::assign(elf_pubkey, loader_id)); } } if account.lamports < minimum_balance { @@ -1885,7 +1923,7 @@ fn send_deploy_messages( initial_transaction.try_sign(&[payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) .map_err(|err| format!("Account allocation failed: {}", err))?; } else { return Err("Buffer account not created yet, must provide a key pair".into()); @@ -1895,8 +1933,12 @@ fn send_deploy_messages( if let Some(write_messages) = write_messages { if let Some(write_signer) = write_signer { trace!("Writing program data"); - let (blockhash, _, last_valid_slot) = rpc_client - .get_recent_blockhash_with_commitment(config.commitment)? + let Fees { + blockhash, + last_valid_block_height, + .. + } = rpc_client + .get_fees_with_commitment(config.commitment)? 
.value; let mut write_transactions = vec![]; for message in write_messages.iter() { @@ -1911,7 +1953,7 @@ fn send_deploy_messages( write_transactions, &[payer_signer, write_signer], config.commitment, - last_valid_slot, + last_valid_block_height, ) .map_err(|err| format!("Data writes to account failed: {}", err))?; } @@ -1981,7 +2023,7 @@ fn send_and_confirm_transactions_with_spinner( mut transactions: Vec<Transaction>, signer_keys: &T, commitment: CommitmentConfig, - mut last_valid_slot: Slot, + mut last_valid_block_height: u64, ) -> Result<(), Box<dyn std::error::Error>> { let progress_bar = new_spinner_progress_bar(); let mut send_retries = 5; @@ -2021,7 +2063,7 @@ fn send_and_confirm_transactions_with_spinner( // Collect statuses for all the transactions, drop those that are confirmed loop { - let mut slot = 0; + let mut block_height = 0; let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>(); for pending_signatures_chunk in pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS) { @@ -2046,12 +2088,12 @@ } } - slot = rpc_client.get_slot()?; + block_height = rpc_client.get_block_height()?; progress_bar.set_message(&format!( - "[{}/{}] Transactions confirmed. Retrying in {} slots", + "[{}/{}] Transactions confirmed. Retrying in {} blocks", num_transactions - pending_transactions.len(), num_transactions, - last_valid_slot.saturating_sub(slot) + last_valid_block_height.saturating_sub(block_height) )); } @@ -2059,7 +2101,7 @@ return Ok(()); } - if slot > last_valid_slot { + if block_height > last_valid_block_height { break; } @@ -2089,10 +2131,12 @@ send_retries -= 1; // Re-sign any failed transactions with a new blockhash and retry - let (blockhash, _fee_calculator, new_last_valid_slot) = rpc_client - .get_recent_blockhash_with_commitment(commitment)? - .value; - last_valid_slot = new_last_valid_slot; + let Fees { + blockhash, + last_valid_block_height: new_last_valid_block_height, + .. 
+ } = rpc_client.get_fees_with_commitment(commitment)?.value; + last_valid_block_height = new_last_valid_block_height; transactions = vec![]; for (_, mut transaction) in pending_transactions.into_iter() { transaction.try_sign(signer_keys, blockhash)?; @@ -2104,7 +2148,10 @@ fn send_and_confirm_transactions_with_spinner( #[cfg(test)] mod tests { use super::*; - use crate::cli::{app, parse_command, process_command}; + use crate::{ + clap_app::get_clap_app, + cli::{parse_command, process_command}, + }; use serde_json::Value; use solana_cli_output::OutputFormat; use solana_sdk::signature::write_keypair_file; @@ -2126,12 +2173,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_deploy() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); let test_command = test_commands.clone().get_matches_from(vec![ "test", @@ -2334,12 +2381,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_write_buffer() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); // defaults let test_command = test_commands.clone().get_matches_from(vec![ @@ -2482,12 +2529,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_set_upgrade_authority() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); @@ -2590,12 +2637,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_set_buffer_authority() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); let buffer_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); @@ -2647,12 +2694,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_show() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file); + let default_signer = DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); @@ 
-2746,12 +2793,12 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_close() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner::new(keypair_file.clone()); + let default_signer = DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index 95431ccad26c85..df785e457b1845 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -92,7 +92,7 @@ where Ok((message, spend)) } else { let from_balance = rpc_client - .get_balance_with_commitment(&from_pubkey, commitment)? + .get_balance_with_commitment(from_pubkey, commitment)? .value; let (message, SpendAndFee { spend, fee }) = resolve_spend_message( amount, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index f8d8c801dcd968..4125b296dab86c 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -14,7 +14,7 @@ use solana_clap_utils::{ input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, - memo::MEMO_ARG, + memo::{memo_arg, MEMO_ARG}, nonce::*, offline::*, ArgConstant, @@ -33,9 +33,13 @@ use solana_sdk::{ account_utils::StateMut, clock::{Clock, UnixTimestamp, SECONDS_PER_DAY}, epoch_schedule::EpochSchedule, - feature, feature_set, message::Message, pubkey::Pubkey, + stake::{ + self, + instruction::{self as stake_instruction, LockupArgs, StakeError}, + state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState}, + }, system_instruction::SystemError, sysvar::{ clock, @@ -43,10 +47,6 @@ use solana_sdk::{ }, transaction::Transaction, }; -use solana_stake_program::{ - stake_instruction::{self, LockupArgs, StakeError}, - stake_state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState}, -}; use solana_vote_program::vote_state::VoteState; use std::{ops::Deref, sync::Arc}; @@ -95,6 +95,20 @@ fn custodian_arg<'a, 'b>() -> Arg<'a, 'b> { .help(CUSTODIAN_ARG.help) } +pub(crate) struct StakeAuthorization { + authorization_type: StakeAuthorize, + new_authority_pubkey: Pubkey, + authority_pubkey: Option<Pubkey>, +} + +#[derive(Debug, PartialEq)] +pub struct StakeAuthorizationIndexed { + pub authorization_type: StakeAuthorize, + pub new_authority_pubkey: Pubkey, + pub authority: SignerIndex, + pub new_authority_signer: Option<SignerIndex>, +} + pub trait StakeSubCommands { fn stake_subcommands(self) -> Self; } @@ -178,6 +192,65 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("create-stake-account-checked") .about("Create a stake account, checking the withdraw authority as a signer") .arg( Arg::with_name("stake_account") .index(1) .value_name("STAKE_ACCOUNT_KEYPAIR") .takes_value(true) .required(true) .validator(is_valid_signer) .help("Stake account to create (or base of derived address if --seed is used)") ) .arg( Arg::with_name("amount") .index(2) .value_name("AMOUNT") .takes_value(true) .validator(is_amount_or_all) .required(true) .help("The amount to send to the stake account, in SOL; accepts keyword ALL") ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) .help("Seed for address generation; if specified, the resulting account \ will be at a derived address of the 
STAKE_ACCOUNT_KEYPAIR pubkey") + ) + .arg( + Arg::with_name(STAKE_AUTHORITY_ARG.name) + .long(STAKE_AUTHORITY_ARG.long) + .value_name("PUBKEY") + .takes_value(true) + .validator(is_valid_pubkey) + .help(STAKE_AUTHORITY_ARG.help) + ) + .arg( + Arg::with_name(WITHDRAW_AUTHORITY_ARG.name) + .long(WITHDRAW_AUTHORITY_ARG.long) + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_valid_signer) + .help(WITHDRAW_AUTHORITY_ARG.help) + ) + .arg( + Arg::with_name("from") + .long("from") + .takes_value(true) + .value_name("KEYPAIR") + .validator(is_valid_signer) + .help("Source account of funds [default: cli config keypair]"), + ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("delegate-stake") @@ -207,6 +280,7 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("stake-authorize") @@ -244,6 +318,47 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(false) .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), ) + .arg(memo_arg()) + ) + .subcommand( + SubCommand::with_name("stake-authorize-checked") + .about("Authorize a new signing keypair for the given stake account, checking the authority as a signer") + .arg( + pubkey!(Arg::with_name("stake_account_pubkey") + .required(true) + .index(1) + .value_name("STAKE_ACCOUNT_ADDRESS"), + "Stake account in which to set a new authority. ") + ) + .arg( + Arg::with_name("new_stake_authority") + .long("new-stake-authority") + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_valid_signer) + .help("New authorized staker") + ) + .arg( + Arg::with_name("new_withdraw_authority") + .long("new-withdraw-authority") + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_valid_signer) + .help("New authorized withdrawer") + ) + .arg(stake_authority_arg()) + .arg(withdraw_authority_arg()) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) + .arg(custodian_arg()) + .arg( + Arg::with_name("no_wait") + .long("no-wait") + .takes_value(false) + .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("deactivate-stake") @@ -267,6 +382,7 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("split-stake") @@ -308,6 +424,7 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("merge-stake") @@ -331,6 +448,7 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("withdraw-stake") @@ -371,6 +489,7 @@ impl StakeSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(custodian_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("stake-set-lockup") @@ -418,6 +537,57 @@ impl StakeSubCommands for App<'_, '_> { .offline_args() .nonce_args(false) .arg(fee_payer_arg()) + .arg(memo_arg()) + ) + .subcommand( + SubCommand::with_name("stake-set-lockup-checked") + .about("Set Lockup for the stake account, checking the new authority as a signer") + .arg( + pubkey!(Arg::with_name("stake_account_pubkey") + .index(1) + .value_name("STAKE_ACCOUNT_ADDRESS") + .required(true), + "Stake account 
for which to set lockup parameters. ") + ) + .arg( + Arg::with_name("lockup_epoch") + .long("lockup-epoch") + .value_name("NUMBER") + .takes_value(true) + .help("The epoch height at which this account will be available for withdrawal") + ) + .arg( + Arg::with_name("lockup_date") + .long("lockup-date") + .value_name("RFC3339 DATETIME") + .validator(is_rfc3339_datetime) + .takes_value(true) + .help("The date and time at which this account will be available for withdrawal") + ) + .arg( + Arg::with_name("new_custodian") + .long("new-custodian") + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_valid_signer) + .help("Keypair of a new lockup custodian") + ) + .group(ArgGroup::with_name("lockup_details") + .args(&["lockup_epoch", "lockup_date", "new_custodian"]) + .multiple(true) + .required(true)) + .arg( + Arg::with_name("custodian") + .long("custodian") + .takes_value(true) + .value_name("KEYPAIR") + .validator(is_valid_signer) + .help("Keypair of the existing custodian [default: cli config pubkey]") + ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("stake-account") @@ -484,13 +654,23 @@ pub fn parse_create_stake_account( matches: &ArgMatches<'_>, default_signer: &DefaultSigner, wallet_manager: &mut Option>, + checked: bool, ) -> Result { let seed = matches.value_of("seed").map(|s| s.to_string()); let epoch = value_of(matches, "lockup_epoch").unwrap_or(0); let unix_timestamp = unix_timestamp_from_rfc3339_datetime(matches, "lockup_date").unwrap_or(0); let custodian = pubkey_of_signer(matches, "custodian", wallet_manager)?.unwrap_or_default(); let staker = pubkey_of_signer(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; - let withdrawer = pubkey_of_signer(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)?; + + let (withdrawer_signer, withdrawer) = if checked { + signer_of(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)? + } else { + ( + None, + pubkey_of_signer(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)?, + ) + }; + let amount = SpendAmount::new_from_matches(matches, "amount"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); @@ -508,6 +688,9 @@ pub fn parse_create_stake_account( if nonce_account.is_some() { bulk_signers.push(nonce_authority); } + if withdrawer_signer.is_some() { + bulk_signers.push(withdrawer_signer); + } let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; @@ -517,6 +700,11 @@ pub fn parse_create_stake_account( seed, staker, withdrawer, + withdrawer_signer: if checked { + signer_info.index_of(withdrawer) + } else { + None + }, lockup: Lockup { unix_timestamp, epoch, @@ -586,15 +774,24 @@ pub fn parse_stake_authorize( matches: &ArgMatches<'_>, default_signer: &DefaultSigner, wallet_manager: &mut Option>, + checked: bool, ) -> Result { let stake_account_pubkey = pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let mut new_authorizations = Vec::new(); let mut bulk_signers = Vec::new(); - if let Some(new_authority_pubkey) = - pubkey_of_signer(matches, "new_stake_authority", wallet_manager)? - { + + let (new_staker_signer, new_staker) = if checked { + signer_of(matches, "new_stake_authority", wallet_manager)? 
+ } else { + ( + None, + pubkey_of_signer(matches, "new_stake_authority", wallet_manager)?, + ) + }; + + if let Some(new_authority_pubkey) = new_staker { let (authority, authority_pubkey) = { let (authority, authority_pubkey) = signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; @@ -605,24 +802,38 @@ pub fn parse_stake_authorize( (authority, authority_pubkey) } }; - new_authorizations.push(( - StakeAuthorize::Staker, + new_authorizations.push(StakeAuthorization { + authorization_type: StakeAuthorize::Staker, new_authority_pubkey, authority_pubkey, - )); + }); bulk_signers.push(authority); + if new_staker.is_some() { + bulk_signers.push(new_staker_signer); + } }; - if let Some(new_authority_pubkey) = - pubkey_of_signer(matches, "new_withdraw_authority", wallet_manager)? - { + + let (new_withdrawer_signer, new_withdrawer) = if checked { + signer_of(matches, "new_withdraw_authority", wallet_manager)? + } else { + ( + None, + pubkey_of_signer(matches, "new_withdraw_authority", wallet_manager)?, + ) + }; + + if let Some(new_authority_pubkey) = new_withdrawer { let (authority, authority_pubkey) = signer_of(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)?; - new_authorizations.push(( - StakeAuthorize::Withdrawer, + new_authorizations.push(StakeAuthorization { + authorization_type: StakeAuthorize::Withdrawer, new_authority_pubkey, authority_pubkey, - )); + }); bulk_signers.push(authority); + if new_withdrawer_signer.is_some() { + bulk_signers.push(new_withdrawer_signer); + } }; let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); @@ -648,12 +859,17 @@ pub fn parse_stake_authorize( let new_authorizations = new_authorizations .into_iter() .map( - |(stake_authorize, new_authority_pubkey, authority_pubkey)| { - ( - stake_authorize, + |StakeAuthorization { + authorization_type, + new_authority_pubkey, + authority_pubkey, + }| { + StakeAuthorizationIndexed { + authorization_type, new_authority_pubkey, - signer_info.index_of(authority_pubkey).unwrap(), - ) + authority: signer_info.index_of(authority_pubkey).unwrap(), + new_authority_signer: signer_info.index_of(Some(new_authority_pubkey)), + } }, ) .collect(); @@ -870,12 +1086,21 @@ pub fn parse_stake_set_lockup( matches: &ArgMatches<'_>, default_signer: &DefaultSigner, wallet_manager: &mut Option>, + checked: bool, ) -> Result { let stake_account_pubkey = pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let epoch = value_of(matches, "lockup_epoch"); let unix_timestamp = unix_timestamp_from_rfc3339_datetime(matches, "lockup_date"); - let new_custodian = pubkey_of_signer(matches, "new_custodian", wallet_manager)?; + + let (new_custodian_signer, new_custodian) = if checked { + signer_of(matches, "new_custodian", wallet_manager)? 
+ } else { + ( + None, + pubkey_of_signer(matches, "new_custodian", wallet_manager)?, + ) + }; let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); @@ -892,6 +1117,9 @@ pub fn parse_stake_set_lockup( if nonce_account.is_some() { bulk_signers.push(nonce_authority); } + if new_custodian_signer.is_some() { + bulk_signers.push(new_custodian_signer); + } let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; @@ -903,6 +1131,11 @@ pub fn parse_stake_set_lockup( epoch, unix_timestamp, }, + new_custodian_signer: if checked { + signer_info.index_of(new_custodian) + } else { + None + }, custodian: signer_info.index_of(custodian_pubkey).unwrap(), sign_only, dump_transaction_message, @@ -958,6 +1191,7 @@ pub fn process_create_stake_account( seed: &Option<String>, staker: &Option<Pubkey>, withdrawer: &Option<Pubkey>, + withdrawer_signer: Option<SignerIndex>, lockup: &Lockup, amount: SpendAmount, sign_only: bool, @@ -971,7 +1205,7 @@ pub fn process_create_stake_account( ) -> ProcessResult { let stake_account = config.signers[stake_account]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &solana_stake_program::id())? + Pubkey::create_with_seed(&stake_account.pubkey(), seed, &stake::program::id())? } else { stake_account.pubkey() }; @@ -990,8 +1224,18 @@ pub fn process_create_stake_account( withdrawer: withdrawer.unwrap_or(from.pubkey()), }; - let ixs = if let Some(seed) = seed { - stake_instruction::create_account_with_seed( + let ixs = match (seed, withdrawer_signer) { + (Some(seed), Some(_withdrawer_signer)) => { + stake_instruction::create_account_with_seed_checked( + &from.pubkey(), // from + &stake_account_address, // to + &stake_account.pubkey(), // base + seed, // seed + &authorized, + lamports, + ) + } + (Some(seed), None) => stake_instruction::create_account_with_seed( + &from.pubkey(), // from + &stake_account_address, // to + &stake_account.pubkey(), // base + seed, // seed + &authorized, + lockup, + lamports, - ) - .with_memo(memo) - } else { - stake_instruction::create_account( + ), + (None, Some(_withdrawer_signer)) => stake_instruction::create_account_checked( + &from.pubkey(), + &stake_account.pubkey(), + &authorized, + lamports, + ), + (None, None) => stake_instruction::create_account( + &from.pubkey(), + &stake_account.pubkey(), + &authorized, + lockup, + lamports, - ) - .with_memo(memo) - }; + ), + } + .with_memo(memo); if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, @@ -1039,7 +1287,7 @@ if !sign_only { if let Ok(stake_account) = rpc_client.get_account(&stake_account_address) { - let err_msg = if stake_account.owner == solana_stake_program::id() { + let err_msg = if stake_account.owner == stake::program::id() { format!("Stake account {} already exists", stake_account_address) } else { format!( @@ -1084,7 +1332,7 @@ } else { tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::<SystemError>(result, &config) + log_instruction_custom_error::<SystemError>(result, config) } } @@ -1093,7 +1341,7 @@ pub fn process_stake_authorize( rpc_client: &RpcClient, config: &CliConfig, stake_account_pubkey: &Pubkey, - new_authorizations: &[(StakeAuthorize, Pubkey, SignerIndex)], + new_authorizations: &[StakeAuthorizationIndexed], custodian: Option<SignerIndex>, 
sign_only: bool, dump_transaction_message: bool, @@ -1106,19 +1354,35 @@ pub fn process_stake_authorize( ) -> ProcessResult { let mut ixs = Vec::new(); let custodian = custodian.map(|index| config.signers[index]); - for (stake_authorize, authorized_pubkey, authority) in new_authorizations.iter() { + for StakeAuthorizationIndexed { + authorization_type, + new_authority_pubkey, + authority, + new_authority_signer, + } in new_authorizations.iter() + { check_unique_pubkeys( (stake_account_pubkey, "stake_account_pubkey".to_string()), - (authorized_pubkey, "new_authorized_pubkey".to_string()), + (new_authority_pubkey, "new_authorized_pubkey".to_string()), )?; let authority = config.signers[*authority]; - ixs.push(stake_instruction::authorize( - stake_account_pubkey, // stake account to update - &authority.pubkey(), // currently authorized - authorized_pubkey, // new stake signer - *stake_authorize, // stake or withdraw - custodian.map(|signer| signer.pubkey()).as_ref(), - )); + if new_authority_signer.is_some() { + ixs.push(stake_instruction::authorize_checked( + stake_account_pubkey, // stake account to update + &authority.pubkey(), // currently authorized + new_authority_pubkey, // new stake signer + *authorization_type, // stake or withdraw + custodian.map(|signer| signer.pubkey()).as_ref(), + )); + } else { + ixs.push(stake_instruction::authorize( + stake_account_pubkey, // stake account to update + &authority.pubkey(), // currently authorized + new_authority_pubkey, // new stake signer + *authorization_type, // stake or withdraw + custodian.map(|signer| signer.pubkey()).as_ref(), + )); + } } ixs = ixs.with_memo(memo); @@ -1171,7 +1435,7 @@ pub fn process_stake_authorize( } else { rpc_client.send_and_confirm_transaction_with_spinner(&tx) }; - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1195,7 +1459,7 @@ pub fn process_deactivate_stake_account( let stake_authority = config.signers[stake_authority]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? } else { *stake_account_pubkey }; @@ -1247,7 +1511,7 @@ pub fn process_deactivate_stake_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1273,7 +1537,7 @@ pub fn process_withdraw_stake( let custodian = custodian.map(|index| config.signers[index]); let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? 
} else { *stake_account_pubkey }; @@ -1346,7 +1610,7 @@ pub fn process_withdraw_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1381,10 +1645,10 @@ pub fn process_split_stake( } check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( &split_stake_account.pubkey(), "split_stake_account".to_string(), @@ -1394,18 +1658,14 @@ pub fn process_split_stake( let stake_authority = config.signers[stake_authority]; let split_stake_account_address = if let Some(seed) = split_stake_account_seed { - Pubkey::create_with_seed( - &split_stake_account.pubkey(), - &seed, - &solana_stake_program::id(), - )? + Pubkey::create_with_seed(&split_stake_account.pubkey(), seed, &stake::program::id())? } else { split_stake_account.pubkey() }; if !sign_only { if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) { - let err_msg = if stake_account.owner == solana_stake_program::id() { + let err_msg = if stake_account.owner == stake::program::id() { format!( "Stake account {} already exists", split_stake_account_address @@ -1436,7 +1696,7 @@ pub fn process_split_stake( let ixs = if let Some(seed) = split_stake_account_seed { stake_instruction::split_with_seed( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1446,7 +1706,7 @@ pub fn process_split_stake( .with_memo(memo) } else { stake_instruction::split( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1495,7 +1755,7 @@ pub fn process_split_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1518,19 +1778,19 @@ pub fn process_merge_stake( check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; @@ -1540,7 +1800,7 @@ pub fn process_merge_stake( if !sign_only { for stake_account_address in &[stake_account_pubkey, source_stake_account_pubkey] { if let Ok(stake_account) = rpc_client.get_account(stake_account_address) { - if stake_account.owner != solana_stake_program::id() { + if stake_account.owner != stake::program::id() { return Err(CliError::BadParameter(format!( "Account {} is not a stake account", stake_account_address @@ -1555,8 +1815,8 @@ pub fn process_merge_stake( blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let ixs = stake_instruction::merge( - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + 
source_stake_account_pubkey, &stake_authority.pubkey(), ) .with_memo(memo); @@ -1606,7 +1866,7 @@ pub fn process_merge_stake( config.commitment, config.send_transaction_config, ); - log_instruction_custom_error::<StakeError>(result, &config) + log_instruction_custom_error::<StakeError>(result, config) } } @@ -1615,7 +1875,8 @@ pub fn process_stake_set_lockup( rpc_client: &RpcClient, config: &CliConfig, stake_account_pubkey: &Pubkey, - lockup: &mut LockupArgs, + lockup: &LockupArgs, + new_custodian_signer: Option<SignerIndex>, custodian: SignerIndex, sign_only: bool, dump_transaction_message: bool, @@ -1629,11 +1890,11 @@ pub fn process_stake_set_lockup( blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let custodian = config.signers[custodian]; - let ixs = vec![stake_instruction::set_lockup( - stake_account_pubkey, - lockup, - &custodian.pubkey(), - )] + let ixs = vec![if new_custodian_signer.is_some() { + stake_instruction::set_lockup_checked(stake_account_pubkey, lockup, &custodian.pubkey()) + } else { + stake_instruction::set_lockup(stake_account_pubkey, lockup, &custodian.pubkey()) + }] .with_memo(memo); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -1677,7 +1938,7 @@ pub fn process_stake_set_lockup( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::<StakeError>(result, &config) + log_instruction_custom_error::<StakeError>(result, config) } } @@ -1695,7 +1956,6 @@ pub fn build_stake_state( use_lamports_unit: bool, stake_history: &StakeHistory, clock: &Clock, - stake_program_v2_enabled: bool, ) -> CliStakeState { match stake_state { StakeState::Stake( @@ -1707,12 +1967,9 @@ pub fn build_stake_state( stake, ) => { let current_epoch = clock.epoch; - let (active_stake, activating_stake, deactivating_stake) = - stake.delegation.stake_activating_and_deactivating( - current_epoch, - Some(stake_history), - stake_program_v2_enabled, - ); + let (active_stake, activating_stake, deactivating_stake) = stake + .delegation + .stake_activating_and_deactivating(current_epoch, Some(stake_history)); let lockup = if lockup.is_in_force(clock, None) { Some(lockup.into()) } else { @@ -1827,6 +2084,7 @@ pub fn make_cli_reward( post_balance: reward.post_balance, percent_change: rate_change * 100.0, apr: Some(apr * 100.0), + commission: reward.commission, }) } else { None @@ -1876,7 +2134,7 @@ pub fn process_show_stake_account( with_rewards: Option<usize>, ) -> ProcessResult { let stake_account = rpc_client.get_account(stake_account_address)?; - if stake_account.owner != solana_stake_program::id() { + if stake_account.owner != stake::program::id() { return Err(CliError::RpcRequestError(format!( "{:?} is not a stake account", stake_account_address, @@ -1900,7 +2158,6 @@ pub fn process_show_stake_account( use_lamports_unit, &stake_history, &clock, - is_stake_program_v2_enabled(rpc_client)?, // At v1.6, this check can be removed and simply passed as `true` ); if state.stake_type == CliStakeType::Stake && state.activation_epoch.is_some() { @@ -2079,23 +2336,14 @@ pub fn process_delegate_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::<StakeError>(result, &config) + log_instruction_custom_error::<StakeError>(result, config) } } -pub fn is_stake_program_v2_enabled( - rpc_client: &RpcClient, -) -> Result<bool, Box<dyn std::error::Error>> { - let feature_account = rpc_client.get_account(&feature_set::stake_program_v2::id())?; - Ok(feature::from_account(&feature_account) - .and_then(|feature| 
feature.activated_at) - .is_some()) -} - #[cfg(test)] mod tests { use super::*; - use crate::cli::{app, parse_command}; + use crate::{clap_app::get_clap_app, cli::parse_command}; use solana_client::blockhash_query; use solana_sdk::{ hash::Hash, @@ -2113,11 +2361,11 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_parse_command() { - let test_commands = app("test", "desc", "version"); + let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner::new(default_keypair_file.clone()); + let default_signer = DefaultSigner::new("", &default_keypair_file); let (keypair_file, mut tmp_file) = make_tmp_file(); let stake_account_keypair = Keypair::new(); write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap(); @@ -2150,8 +2398,18 @@ mod tests { command: CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![ - (StakeAuthorize::Staker, new_stake_authority, 0,), - (StakeAuthorize::Withdrawer, new_withdraw_authority, 0,), + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 0, + new_authority_signer: None, + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdraw_authority, + authority: 0, + new_authority_signer: None, + }, ], sign_only: false, dump_transaction_message: false, @@ -2188,8 +2446,18 @@ mod tests { command: CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![ - (StakeAuthorize::Staker, new_stake_authority, 1,), - (StakeAuthorize::Withdrawer, new_withdraw_authority, 2,), + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 1, + new_authority_signer: None, + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdraw_authority, + authority: 2, + new_authority_signer: None, + }, ], sign_only: false, dump_transaction_message: false, @@ -2230,8 +2498,18 @@ mod tests { command: CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![ - (StakeAuthorize::Staker, new_stake_authority, 1,), - (StakeAuthorize::Withdrawer, new_withdraw_authority, 1,), + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 1, + new_authority_signer: None, + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdraw_authority, + authority: 1, + new_authority_signer: None, + }, ], sign_only: false, dump_transaction_message: false, @@ -2263,7 +2541,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, new_stake_authority, 0,),], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2291,7 +2574,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, 
new_stake_authority, 1,),], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 1, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2325,7 +2613,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, new_stake_authority, 1,),], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: new_stake_authority, + authority: 1, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2356,11 +2649,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![( - StakeAuthorize::Withdrawer, - new_withdraw_authority, - 0, - ),], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdraw_authority, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2388,11 +2682,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![( - StakeAuthorize::Withdrawer, - new_withdraw_authority, - 1, - ),], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdraw_authority, + authority: 1, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2426,7 +2721,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2441,28 +2741,41 @@ mod tests { } ); - // Test Authorize Subcommand w/ sign-only - let blockhash = Hash::default(); - let blockhash_string = format!("{}", blockhash); - let test_authorize = test_commands.clone().get_matches_from(vec![ + // stake-authorize-checked subcommand + let (authority_keypair_file, mut tmp_file) = make_tmp_file(); + let authority_keypair = Keypair::new(); + write_keypair(&authority_keypair, tmp_file.as_file_mut()).unwrap(); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ "test", - "stake-authorize", + "stake-authorize-checked", &stake_account_string, "--new-stake-authority", - &stake_account_string, - "--blockhash", - &blockhash_string, - "--sign-only", + &authority_keypair_file, + "--new-withdraw-authority", + &authority_keypair_file, ]); assert_eq!( - parse_command(&test_authorize, &default_signer, &mut None).unwrap(), + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, 
stake_account_pubkey, 0)], - sign_only: true, + new_authorizations: vec![ + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 0, + new_authority_signer: Some(1), + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 0, + new_authority_signer: Some(1), + }, + ], + sign_only: false, dump_transaction_message: false, - blockhash_query: BlockhashQuery::None(blockhash), + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, @@ -2470,33 +2783,409 @@ mod tests { custodian: None, no_wait: false, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], - } + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, ); - // Test Authorize Subcommand w/ offline feepayer - let keypair = Keypair::new(); - let pubkey = keypair.pubkey(); - let sig = keypair.sign_message(&[0u8]); - let signer = format!("{}={}", keypair.pubkey(), sig); - let test_authorize = test_commands.clone().get_matches_from(vec![ + let (withdraw_authority_keypair_file, mut tmp_file) = make_tmp_file(); + let withdraw_authority_keypair = Keypair::new(); + write_keypair(&withdraw_authority_keypair, tmp_file.as_file_mut()).unwrap(); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ "test", - "stake-authorize", + "stake-authorize-checked", &stake_account_string, "--new-stake-authority", - &stake_account_string, - "--blockhash", - &blockhash_string, - "--signer", - &signer, - "--fee-payer", - &pubkey.to_string(), + &authority_keypair_file, + "--new-withdraw-authority", + &authority_keypair_file, + "--stake-authority", + &stake_authority_keypair_file, + "--withdraw-authority", + &withdraw_authority_keypair_file, ]); assert_eq!( - parse_command(&test_authorize, &default_signer, &mut None).unwrap(), + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![ + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 3, + new_authority_signer: Some(2), + }, + ], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&stake_authority_keypair_file) + .unwrap() + .into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + read_keypair_file(&withdraw_authority_keypair_file) + .unwrap() + .into(), + ], + }, + ); + // Withdraw authority may set both new authorities + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-stake-authority", + &authority_keypair_file, + "--new-withdraw-authority", + &authority_keypair_file, + "--withdraw-authority", + &withdraw_authority_keypair_file, + ]); + 
assert_eq!( + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![ + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }, + ], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&withdraw_authority_keypair_file) + .unwrap() + .into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-stake-authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 0, + new_authority_signer: Some(1), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-stake-authority", + &authority_keypair_file, + "--stake-authority", + &stake_authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&stake_authority_keypair_file) + .unwrap() + .into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + // Withdraw authority may set new stake authority + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-stake-authority", + &authority_keypair_file, + "--withdraw-authority", + &withdraw_authority_keypair_file, + ]); + assert_eq!( + 
parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&withdraw_authority_keypair_file) + .unwrap() + .into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-withdraw-authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 0, + new_authority_signer: Some(1), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + let test_stake_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-withdraw-authority", + &authority_keypair_file, + "--withdraw-authority", + &withdraw_authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_stake_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 1, + new_authority_signer: Some(2), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&withdraw_authority_keypair_file) + .unwrap() + .into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + }, + ); + + // Test Authorize Subcommand w/ no-wait + let test_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize-checked", + &stake_account_string, + "--new-stake-authority", + &authority_keypair_file, + "--no-wait", + ]); + assert_eq!( + parse_command(&test_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: authority_keypair.pubkey(), + authority: 0, + 
new_authority_signer: Some(1), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: true, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into(), + ], + } + ); + + // Test Authorize Subcommand w/ sign-only + let blockhash = Hash::default(); + let blockhash_string = format!("{}", blockhash); + let test_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize", + &stake_account_string, + "--new-stake-authority", + &stake_account_string, + "--blockhash", + &blockhash_string, + "--sign-only", + ]); + assert_eq!( + parse_command(&test_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }, + signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + } + ); + // Test Authorize Subcommand w/ offline feepayer + let keypair = Keypair::new(); + let pubkey = keypair.pubkey(); + let sig = keypair.sign_message(&[0u8]); + let signer = format!("{}={}", keypair.pubkey(), sig); + let test_authorize = test_commands.clone().get_matches_from(vec![ + "test", + "stake-authorize", + &stake_account_string, + "--new-stake-authority", + &stake_account_string, + "--blockhash", + &blockhash_string, + "--signer", + &signer, + "--fee-payer", + &pubkey.to_string(), + ]); + assert_eq!( + parse_command(&test_authorize, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -2546,7 +3235,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -2582,7 +3276,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -2623,7 +3322,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: 
vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -2663,7 +3367,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -2701,7 +3410,12 @@ mod tests { CliCommandInfo { command: CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: stake_account_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -2749,6 +3463,7 @@ mod tests { seed: None, staker: Some(authorized), withdrawer: Some(authorized), + withdrawer_signer: None, lockup: Lockup { epoch: 43, unix_timestamp: 0, @@ -2792,6 +3507,7 @@ mod tests { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000_000_000), sign_only: false, @@ -2809,6 +3525,58 @@ mod tests { ], } ); + let (withdrawer_keypair_file, mut tmp_file) = make_tmp_file(); + let withdrawer_keypair = Keypair::new(); + write_keypair(&withdrawer_keypair, tmp_file.as_file_mut()).unwrap(); + let test_create_stake_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-stake-account-checked", + &keypair_file, + "50", + "--stake-authority", + &authorized_string, + "--withdraw-authority", + &withdrawer_keypair_file, + ]); + assert_eq!( + parse_command(&test_create_stake_account, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::CreateStakeAccount { + stake_account: 1, + seed: None, + staker: Some(authorized), + withdrawer: Some(withdrawer_keypair.pubkey()), + withdrawer_signer: Some(2), + lockup: Lockup::default(), + amount: SpendAmount::Some(50_000_000_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + from: 0, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + stake_account_keypair.into(), + withdrawer_keypair.into(), + ], + } + ); + + let test_create_stake_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-stake-account-checked", + &keypair_file, + "50", + "--stake-authority", + &authorized_string, + "--withdraw-authority", + &authorized_string, + ]); + assert!(parse_command(&test_create_stake_account, &default_signer, &mut None).is_err()); // CreateStakeAccount offline and nonce let nonce_account = Pubkey::new(&[1u8; 32]); @@ -2847,6 +3615,7 @@ mod tests { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000_000_000), 
sign_only: false, diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index 5479c0cb87300b..61054a3e98f6cc 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -119,7 +119,7 @@ fn parse_validator_info( let key_list: ConfigKeys = deserialize(&account.data)?; if !key_list.keys.is_empty() { let (validator_pubkey, _) = key_list.keys[1]; - let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?; + let validator_info_string: String = deserialize(get_config_data(&account.data)?)?; let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?; Ok((validator_pubkey, validator_info)) } else { @@ -246,7 +246,7 @@ pub fn process_set_validator_info( ) -> ProcessResult { // Validate keybase username if let Some(string) = validator_info.get("keybaseUsername") { - let result = verify_keybase(&config.signers[0].pubkey(), &string); + let result = verify_keybase(&config.signers[0].pubkey(), string); if result.is_err() { if force_keybase { println!("--force supplied, ignoring: {:?}", result); @@ -272,7 +272,7 @@ pub fn process_set_validator_info( }, ) .find(|(pubkey, account)| { - let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap(); + let (validator_pubkey, _) = parse_validator_info(pubkey, account).unwrap(); validator_pubkey == config.signers[0].pubkey() }); @@ -393,7 +393,7 @@ pub fn process_get_validator_info( } for (validator_info_pubkey, validator_info_account) in validator_info.iter() { let (validator_pubkey, validator_info) = - parse_validator_info(&validator_info_pubkey, &validator_info_account)?; + parse_validator_info(validator_info_pubkey, validator_info_account)?; validator_info_list.push(CliValidatorInfo { identity_pubkey: validator_pubkey.to_string(), info_pubkey: validator_info_pubkey.to_string(), @@ -408,7 +408,7 @@ pub fn process_get_validator_info( #[cfg(test)] mod tests { use super::*; - use crate::cli::app; + use crate::clap_app::get_clap_app; use bincode::{serialize, serialized_size}; use serde_json::json; @@ -432,7 +432,7 @@ mod tests { #[test] fn test_parse_args() { - let matches = app("test", "desc", "version").get_matches_from(vec![ + let matches = get_clap_app("test", "desc", "version").get_matches_from(vec![ "test", "validator-info", "publish", @@ -451,7 +451,7 @@ mod tests { "name": "Alice", "keybaseUsername": "alice_keybase", }); - assert_eq!(parse_args(&matches), expected); + assert_eq!(parse_args(matches), expected); } #[test] diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 96e8e046e25b9b..ee81929d077381 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -82,7 +82,7 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey") ) - .arg(memo_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-authorize-voter") @@ -109,7 +109,7 @@ impl VoteSubCommands for App<'_, '_> { .required(true), "New authorized vote signer. "), ) - .arg(memo_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer") @@ -136,7 +136,65 @@ impl VoteSubCommands for App<'_, '_> { .required(true), "New authorized withdrawer. 
"), ) - .arg(memo_arg()) + .arg(memo_arg()) + ) + .subcommand( + SubCommand::with_name("vote-authorize-voter-checked") + .about("Authorize a new vote signing keypair for the given vote account, \ + checking the new authority as a signer") + .arg( + pubkey!(Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE_ACCOUNT_ADDRESS") + .required(true), + "Vote account in which to set the authorized voter. "), + ) + .arg( + Arg::with_name("authorized") + .index(2) + .value_name("AUTHORIZED_KEYPAIR") + .required(true) + .validator(is_valid_signer) + .help("Current authorized vote signer."), + ) + .arg( + Arg::with_name("new_authorized") + .index(3) + .value_name("NEW_AUTHORIZED_KEYPAIR") + .required(true) + .validator(is_valid_signer) + .help("New authorized vote signer."), + ) + .arg(memo_arg()) + ) + .subcommand( + SubCommand::with_name("vote-authorize-withdrawer-checked") + .about("Authorize a new withdraw signing keypair for the given vote account, \ + checking the new authority as a signer") + .arg( + pubkey!(Arg::with_name("vote_account_pubkey") + .index(1) + .value_name("VOTE_ACCOUNT_ADDRESS") + .required(true), + "Vote account in which to set the authorized withdrawer. "), + ) + .arg( + Arg::with_name("authorized") + .index(2) + .value_name("AUTHORIZED_KEYPAIR") + .required(true) + .validator(is_valid_signer) + .help("Current authorized withdrawer."), + ) + .arg( + Arg::with_name("new_authorized") + .index(3) + .value_name("NEW_AUTHORIZED_KEYPAIR") + .required(true) + .validator(is_valid_signer) + .help("New authorized withdrawer."), + ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-update-validator") @@ -166,7 +224,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) - .arg(memo_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-update-commission") @@ -196,7 +254,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) - .arg(memo_arg()) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-account") @@ -266,7 +324,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer [default: cli config keypair]"), ) - .arg(memo_arg()) + .arg(memo_arg()) ) } } @@ -311,19 +369,25 @@ pub fn parse_vote_authorize( default_signer: &DefaultSigner, wallet_manager: &mut Option>, vote_authorize: VoteAuthorize, + checked: bool, ) -> Result { let vote_account_pubkey = pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); - let new_authorized_pubkey = - pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap(); - let (authorized, _) = signer_of(matches, "authorized", wallet_manager)?; + let (authorized, authorized_pubkey) = signer_of(matches, "authorized", wallet_manager)?; let payer_provided = None; - let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, authorized], - matches, - wallet_manager, - )?; + let mut signers = vec![payer_provided, authorized]; + + let new_authorized_pubkey = if checked { + let (new_authorized_signer, new_authorized_pubkey) = + signer_of(matches, "new_authorized", wallet_manager)?; + signers.push(new_authorized_signer); + new_authorized_pubkey.unwrap() + } else { + pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap() + }; + + let signer_info = default_signer.generate_unique_signers(signers, matches, wallet_manager)?; let memo = matches.value_of(MEMO_ARG.name).map(String::from); 
     Ok(CliCommandInfo {
@@ -332,6 +396,12 @@ pub fn parse_vote_authorize(
             new_authorized_pubkey,
             vote_authorize,
             memo,
+            authorized: signer_info.index_of(authorized_pubkey).unwrap(),
+            new_authorized: if checked {
+                signer_info.index_of(Some(new_authorized_pubkey))
+            } else {
+                None
+            },
         },
         signers: signer_info.signers,
     })
@@ -468,7 +538,7 @@ pub fn process_create_vote_account(
     let vote_account = config.signers[vote_account];
     let vote_account_pubkey = vote_account.pubkey();
     let vote_account_address = if let Some(seed) = seed {
-        Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
+        Pubkey::create_with_seed(&vote_account_pubkey, seed, &solana_vote_program::id())?
     } else {
         vote_account_pubkey
     };
@@ -549,7 +619,7 @@ pub fn process_create_vote_account(
     let mut tx = Transaction::new_unsigned(message);
     tx.try_sign(&config.signers, recent_blockhash)?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }
 
 pub fn process_vote_authorize(
@@ -558,28 +628,34 @@
     vote_account_pubkey: &Pubkey,
     new_authorized_pubkey: &Pubkey,
     vote_authorize: VoteAuthorize,
+    authorized: SignerIndex,
+    new_authorized: Option<SignerIndex>,
     memo: Option<&String>,
 ) -> ProcessResult {
-    // If the `authorized_account` is also the fee payer, `config.signers` will only have one
-    // keypair in it
-    let authorized = if config.signers.len() == 2 {
-        config.signers[1]
-    } else {
-        config.signers[0]
-    };
+    let authorized = config.signers[authorized];
+    let new_authorized_signer = new_authorized.map(|index| config.signers[index]);
 
     check_unique_pubkeys(
         (&authorized.pubkey(), "authorized_account".to_string()),
         (new_authorized_pubkey, "new_authorized_pubkey".to_string()),
     )?;
 
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
-    let ixs = vec![vote_instruction::authorize(
-        vote_account_pubkey,   // vote account to update
-        &authorized.pubkey(),  // current authorized
-        new_authorized_pubkey, // new vote signer/withdrawer
-        vote_authorize,        // vote or withdraw
-    )]
-    .with_memo(memo);
+    let vote_ix = if new_authorized_signer.is_some() {
+        vote_instruction::authorize_checked(
+            vote_account_pubkey,   // vote account to update
+            &authorized.pubkey(),  // current authorized
+            new_authorized_pubkey, // new vote signer/withdrawer
+            vote_authorize,        // vote or withdraw
+        )
+    } else {
+        vote_instruction::authorize(
+            vote_account_pubkey,   // vote account to update
+            &authorized.pubkey(),  // current authorized
+            new_authorized_pubkey, // new vote signer/withdrawer
+            vote_authorize,        // vote or withdraw
+        )
+    };
+    let ixs = vec![vote_ix].with_memo(memo);
 
     let message = Message::new(&ixs, Some(&config.signers[0].pubkey()));
     let mut tx = Transaction::new_unsigned(message);
@@ -592,7 +668,7 @@
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }
 
 pub fn process_vote_update_validator(
@@ -629,7 +705,7 @@
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }
 
 pub fn process_vote_update_commission(
@@ -660,7 +736,7 @@
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }
 
 fn get_vote_account(
@@ -763,7 +839,7 @@ pub fn process_withdraw_from_vote_account(
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
     let withdraw_authority = config.signers[withdraw_authority];
 
-    let current_balance = rpc_client.get_balance(&vote_account_pubkey)?;
+    let current_balance = rpc_client.get_balance(vote_account_pubkey)?;
     let minimum_balance =
         rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?;
     let lamports = match withdraw_amount {
@@ -798,13 +874,13 @@ pub fn process_withdraw_from_vote_account(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::cli::{app, parse_command};
+    use crate::{clap_app::get_clap_app, cli::parse_command};
     use solana_sdk::signature::{read_keypair_file, write_keypair, Keypair, Signer};
     use tempfile::NamedTempFile;
 
@@ -815,7 +891,7 @@ mod tests {
 
     #[test]
     fn test_parse_command() {
-        let test_commands = app("test", "desc", "version");
+        let test_commands = get_clap_app("test", "desc", "version");
         let keypair = Keypair::new();
         let pubkey = keypair.pubkey();
         let pubkey_string = pubkey.to_string();
@@ -826,7 +902,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let (default_keypair_file, mut tmp_file) = make_tmp_file();
         write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &default_keypair_file);
 
         let test_authorize_voter = test_commands.clone().get_matches_from(vec![
             "test",
@@ -843,6 +919,8 @@ mod tests {
                     new_authorized_pubkey: pubkey2,
                     vote_authorize: VoteAuthorize::Voter,
                     memo: None,
+                    authorized: 0,
+                    new_authorized: None,
                 },
                 signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
             }
@@ -867,14 +945,80 @@ mod tests {
                     new_authorized_pubkey: pubkey2,
                     vote_authorize: VoteAuthorize::Voter,
                     memo: None,
+                    authorized: 1,
+                    new_authorized: None,
+                },
+                signers: vec![
+                    read_keypair_file(&default_keypair_file).unwrap().into(),
+                    read_keypair_file(&authorized_keypair_file).unwrap().into(),
+                ],
+            }
+        );
+
+        let (voter_keypair_file, mut tmp_file) = make_tmp_file();
+        let voter_keypair = Keypair::new();
+        write_keypair(&voter_keypair, tmp_file.as_file_mut()).unwrap();
+
+        let test_authorize_voter = test_commands.clone().get_matches_from(vec![
+            "test",
+            "vote-authorize-voter-checked",
+            &pubkey_string,
+            &default_keypair_file,
+            &voter_keypair_file,
+        ]);
+        assert_eq!(
+            parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::VoteAuthorize {
+                    vote_account_pubkey: pubkey,
+                    new_authorized_pubkey: voter_keypair.pubkey(),
+                    vote_authorize: VoteAuthorize::Voter,
+                    memo: None,
+                    authorized: 0,
+                    new_authorized: Some(1),
+                },
+                signers: vec![
+                    read_keypair_file(&default_keypair_file).unwrap().into(),
+                    read_keypair_file(&voter_keypair_file).unwrap().into()
+                ],
+            }
+        );
+
+        let test_authorize_voter = test_commands.clone().get_matches_from(vec![
+            "test",
+            "vote-authorize-voter-checked",
+            &pubkey_string,
+            &authorized_keypair_file,
+            &voter_keypair_file,
+        ]);
+        assert_eq!(
+            parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(),
CliCommandInfo { + command: CliCommand::VoteAuthorize { + vote_account_pubkey: pubkey, + new_authorized_pubkey: voter_keypair.pubkey(), + vote_authorize: VoteAuthorize::Voter, + memo: None, + authorized: 1, + new_authorized: Some(2), }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), read_keypair_file(&authorized_keypair_file).unwrap().into(), + read_keypair_file(&voter_keypair_file).unwrap().into(), ], } ); + let test_authorize_voter = test_commands.clone().get_matches_from(vec![ + "test", + "vote-authorize-voter-checked", + &pubkey_string, + &authorized_keypair_file, + &pubkey2_string, + ]); + assert!(parse_command(&test_authorize_voter, &default_signer, &mut None).is_err()); + let (keypair_file, mut tmp_file) = make_tmp_file(); let keypair = Keypair::new(); write_keypair(&keypair, tmp_file.as_file_mut()).unwrap(); diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs new file mode 100644 index 00000000000000..f375562588d2c1 --- /dev/null +++ b/cli/src/wallet.rs @@ -0,0 +1,744 @@ +use crate::{ + cli::{ + log_instruction_custom_error, request_and_confirm_airdrop, CliCommand, CliCommandInfo, + CliConfig, CliError, ProcessResult, + }, + memo::WithMemo, + nonce::check_nonce_account, + spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount}, +}; +use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}; +use solana_account_decoder::{UiAccount, UiAccountEncoding}; +use solana_clap_utils::{ + fee_payer::*, + input_parsers::*, + input_validators::*, + keypair::{DefaultSigner, SignerIndex}, + memo::*, + nonce::*, + offline::*, +}; +use solana_cli_output::{ + display::build_balance_message, return_signers_with_config, CliAccount, + CliSignatureVerificationStatus, CliTransaction, CliTransactionConfirmation, OutputFormat, + ReturnSignersConfig, +}; +use solana_client::{ + blockhash_query::BlockhashQuery, nonce_utils, rpc_client::RpcClient, + rpc_config::RpcTransactionConfig, rpc_response::RpcKeyedAccount, +}; +use solana_remote_wallet::remote_wallet::RemoteWalletManager; +use solana_sdk::{ + commitment_config::CommitmentConfig, + message::Message, + pubkey::Pubkey, + signature::Signature, + stake, + system_instruction::{self, SystemError}, + system_program, + transaction::Transaction, +}; +use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding}; +use std::{fmt::Write as FmtWrite, fs::File, io::Write, sync::Arc}; + +pub trait WalletSubCommands { + fn wallet_subcommands(self) -> Self; +} + +impl WalletSubCommands for App<'_, '_> { + fn wallet_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("account") + .about("Show the contents of an account") + .alias("account") + .arg( + pubkey!(Arg::with_name("account_pubkey") + .index(1) + .value_name("ACCOUNT_ADDRESS") + .required(true), + "Account key URI. 
") + ) + .arg( + Arg::with_name("output_file") + .long("output-file") + .short("o") + .value_name("FILEPATH") + .takes_value(true) + .help("Write the account data to this file"), + ) + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(false) + .help("Display balance in lamports instead of SOL"), + ), + ) + .subcommand( + SubCommand::with_name("address") + .about("Get your public key") + .arg( + Arg::with_name("confirm_key") + .long("confirm-key") + .takes_value(false) + .help("Confirm key on device; only relevant if using remote wallet"), + ), + ) + .subcommand( + SubCommand::with_name("airdrop") + .about("Request SOL from a faucet") + .arg( + Arg::with_name("amount") + .index(1) + .value_name("AMOUNT") + .takes_value(true) + .validator(is_amount) + .required(true) + .help("The airdrop amount to request, in SOL"), + ) + .arg( + pubkey!(Arg::with_name("to") + .index(2) + .value_name("RECIPIENT_ADDRESS"), + "The account address of airdrop recipient. "), + ), + ) + .subcommand( + SubCommand::with_name("balance") + .about("Get your balance") + .arg( + pubkey!(Arg::with_name("pubkey") + .index(1) + .value_name("ACCOUNT_ADDRESS"), + "The account address of the balance to check. ") + ) + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(false) + .help("Display balance in lamports instead of SOL"), + ), + ) + .subcommand( + SubCommand::with_name("confirm") + .about("Confirm transaction by signature") + .arg( + Arg::with_name("signature") + .index(1) + .value_name("TRANSACTION_SIGNATURE") + .takes_value(true) + .required(true) + .help("The transaction signature to confirm"), + ) + .after_help(// Formatted specifically for the manually-indented heredoc string + "Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\ + \n\ + \nAccount modes:\ + \n |srwx|\ + \n s: signed\ + \n r: readable (always true)\ + \n w: writable\ + \n x: program account (inner instructions excluded)\ + " + ), + ) + .subcommand( + SubCommand::with_name("create-address-with-seed") + .about("Generate a derived account address with a seed") + .arg( + Arg::with_name("seed") + .index(1) + .value_name("SEED_STRING") + .takes_value(true) + .required(true) + .validator(is_derived_address_seed) + .help("The seed. Must not take more than 32 bytes to encode as utf-8"), + ) + .arg( + Arg::with_name("program_id") + .index(2) + .value_name("PROGRAM_ID") + .takes_value(true) + .required(true) + .help( + "The program_id that the address will ultimately be used for, \n\ + or one of NONCE, STAKE, and VOTE keywords", + ), + ) + .arg( + pubkey!(Arg::with_name("from") + .long("from") + .value_name("FROM_PUBKEY") + .required(false), + "From (base) key, [default: cli config keypair]. "), + ), + ) + .subcommand( + SubCommand::with_name("decode-transaction") + .about("Decode a serialized transaction") + .arg( + Arg::with_name("transaction") + .index(1) + .value_name("TRANSACTION") + .takes_value(true) + .required(true) + .help("transaction to decode"), + ) + .arg( + Arg::with_name("encoding") + .index(2) + .value_name("ENCODING") + .possible_values(&["base58", "base64"]) // Subset of `UiTransactionEncoding` enum + .default_value("base58") + .takes_value(true) + .required(true) + .help("transaction encoding"), + ), + ) + .subcommand( + SubCommand::with_name("resolve-signer") + .about("Checks that a signer is valid, and returns its specific path; useful for signers that may be specified generally, eg. 
usb://ledger")
+            .arg(
+                Arg::with_name("signer")
+                    .index(1)
+                    .value_name("SIGNER_KEYPAIR")
+                    .takes_value(true)
+                    .required(true)
+                    .validator(is_valid_signer)
+                    .help("The signer path to resolve")
+            )
+        )
+        .subcommand(
+            SubCommand::with_name("transfer")
+                .about("Transfer funds between system accounts")
+                .alias("pay")
+                .arg(
+                    pubkey!(Arg::with_name("to")
+                        .index(1)
+                        .value_name("RECIPIENT_ADDRESS")
+                        .required(true),
+                        "The account address of recipient. "),
+                )
+                .arg(
+                    Arg::with_name("amount")
+                        .index(2)
+                        .value_name("AMOUNT")
+                        .takes_value(true)
+                        .validator(is_amount_or_all)
+                        .required(true)
+                        .help("The amount to send, in SOL; accepts keyword ALL"),
+                )
+                .arg(
+                    pubkey!(Arg::with_name("from")
+                        .long("from")
+                        .value_name("FROM_ADDRESS"),
+                        "Source account of funds (if different from client local account). "),
+                )
+                .arg(
+                    Arg::with_name("no_wait")
+                        .long("no-wait")
+                        .takes_value(false)
+                        .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
+                )
+                .arg(
+                    Arg::with_name("derived_address_seed")
+                        .long("derived-address-seed")
+                        .takes_value(true)
+                        .value_name("SEED_STRING")
+                        .requires("derived_address_program_id")
+                        .validator(is_derived_address_seed)
+                        .hidden(true)
+                )
+                .arg(
+                    Arg::with_name("derived_address_program_id")
+                        .long("derived-address-program-id")
+                        .takes_value(true)
+                        .value_name("PROGRAM_ID")
+                        .requires("derived_address_seed")
+                        .hidden(true)
+                )
+                .arg(
+                    Arg::with_name("allow_unfunded_recipient")
+                        .long("allow-unfunded-recipient")
+                        .takes_value(false)
+                        .help("Complete the transfer even if the recipient address is not funded")
+                )
+                .offline_args()
+                .nonce_args(false)
+                .arg(memo_arg())
+                .arg(fee_payer_arg()),
+        )
+    }
+}
+
+fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
+    matches.value_of(arg_name).and_then(|v| match v {
+        "NONCE" => Some(system_program::id()),
+        "STAKE" => Some(stake::program::id()),
+        "VOTE" => Some(solana_vote_program::id()),
+        _ => pubkey_of(matches, arg_name),
+    })
+}
+
+pub fn parse_account(
+    matches: &ArgMatches<'_>,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let account_pubkey = pubkey_of_signer(matches, "account_pubkey", wallet_manager)?.unwrap();
+    let output_file = matches.value_of("output_file");
+    let use_lamports_unit = matches.is_present("lamports");
+    Ok(CliCommandInfo {
+        command: CliCommand::ShowAccount {
+            pubkey: account_pubkey,
+            output_file: output_file.map(ToString::to_string),
+            use_lamports_unit,
+        },
+        signers: vec![],
+    })
+}
+
+pub fn parse_airdrop(
+    matches: &ArgMatches<'_>,
+    default_signer: &DefaultSigner,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let pubkey = pubkey_of_signer(matches, "to", wallet_manager)?;
+    let signers = if pubkey.is_some() {
+        vec![]
+    } else {
+        vec![default_signer.signer_from_path(matches, wallet_manager)?]
+    };
+    let lamports = lamports_of_sol(matches, "amount").unwrap();
+    Ok(CliCommandInfo {
+        command: CliCommand::Airdrop { pubkey, lamports },
+        signers,
+    })
+}
+
+pub fn parse_balance(
+    matches: &ArgMatches<'_>,
+    default_signer: &DefaultSigner,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let pubkey = pubkey_of_signer(matches, "pubkey", wallet_manager)?;
+    let signers = if pubkey.is_some() {
+        vec![]
+    } else {
+        vec![default_signer.signer_from_path(matches, wallet_manager)?]
+    };
+    Ok(CliCommandInfo {
+        command: CliCommand::Balance {
+            pubkey,
+            use_lamports_unit: matches.is_present("lamports"),
+        },
+        signers,
+    })
+}
+
+pub fn parse_decode_transaction(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
+    let blob = value_t_or_exit!(matches, "transaction", String);
+    let encoding = match matches.value_of("encoding").unwrap() {
+        "base58" => UiTransactionEncoding::Base58,
+        "base64" => UiTransactionEncoding::Base64,
+        _ => unreachable!(),
+    };
+
+    let encoded_transaction = EncodedTransaction::Binary(blob, encoding);
+    if let Some(transaction) = encoded_transaction.decode() {
+        Ok(CliCommandInfo {
+            command: CliCommand::DecodeTransaction(transaction),
+            signers: vec![],
+        })
+    } else {
+        Err(CliError::BadParameter(
+            "Unable to decode transaction".to_string(),
+        ))
+    }
+}
+
+pub fn parse_create_address_with_seed(
+    matches: &ArgMatches<'_>,
+    default_signer: &DefaultSigner,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let from_pubkey = pubkey_of_signer(matches, "from", wallet_manager)?;
+    let signers = if from_pubkey.is_some() {
+        vec![]
+    } else {
+        vec![default_signer.signer_from_path(matches, wallet_manager)?]
+    };
+
+    let program_id = resolve_derived_address_program_id(matches, "program_id").unwrap();
+
+    let seed = matches.value_of("seed").unwrap().to_string();
+
+    Ok(CliCommandInfo {
+        command: CliCommand::CreateAddressWithSeed {
+            from_pubkey,
+            seed,
+            program_id,
+        },
+        signers,
+    })
+}
+
+pub fn parse_transfer(
+    matches: &ArgMatches<'_>,
+    default_signer: &DefaultSigner,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let amount = SpendAmount::new_from_matches(matches, "amount");
+    let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
+    let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
+    let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
+    let no_wait = matches.is_present("no_wait");
+    let blockhash_query = BlockhashQuery::new_from_matches(matches);
+    let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
+    let (nonce_authority, nonce_authority_pubkey) =
+        signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
+    let memo = matches.value_of(MEMO_ARG.name).map(String::from);
+    let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
+    let (from, from_pubkey) = signer_of(matches, "from", wallet_manager)?;
+    let allow_unfunded_recipient = matches.is_present("allow_unfunded_recipient");
+
+    let mut bulk_signers = vec![fee_payer, from];
+    if nonce_account.is_some() {
+        bulk_signers.push(nonce_authority);
+    }
+
+    let signer_info =
+        default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?;
+
+    let derived_address_seed = matches
+        .value_of("derived_address_seed")
+        .map(|s| s.to_string());
+    let derived_address_program_id =
+        resolve_derived_address_program_id(matches, "derived_address_program_id");
+
+    Ok(CliCommandInfo {
+        command: CliCommand::Transfer {
+            amount,
+            to,
+            sign_only,
+            dump_transaction_message,
+            allow_unfunded_recipient,
+            no_wait,
+            blockhash_query,
+            nonce_account,
+            nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
+            memo,
+            fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
+            from: signer_info.index_of(from_pubkey).unwrap(),
+            derived_address_seed,
+            derived_address_program_id,
+        },
+        signers: signer_info.signers,
+    })
+}
+
+pub fn process_show_account(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    account_pubkey: &Pubkey,
+    output_file: &Option<String>,
+    use_lamports_unit: bool,
+) -> ProcessResult {
+    let account = rpc_client.get_account(account_pubkey)?;
+    let data = account.data.clone();
+    let cli_account = CliAccount {
+        keyed_account: RpcKeyedAccount {
+            pubkey: account_pubkey.to_string(),
+            account: UiAccount::encode(
+                account_pubkey,
+                &account,
+                UiAccountEncoding::Base64,
+                None,
+                None,
+            ),
+        },
+        use_lamports_unit,
+    };
+
+    let mut account_string = config.output_format.formatted_string(&cli_account);
+
+    if config.output_format == OutputFormat::Display
+        || config.output_format == OutputFormat::DisplayVerbose
+    {
+        if let Some(output_file) = output_file {
+            let mut f = File::create(output_file)?;
+            f.write_all(&data)?;
+            writeln!(&mut account_string)?;
+            writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
+        } else if !data.is_empty() {
+            use pretty_hex::*;
+            writeln!(&mut account_string, "{:?}", data.hex_dump())?;
+        }
+    }
+
+    Ok(account_string)
+}
+
+pub fn process_airdrop(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    pubkey: &Option<Pubkey>,
+    lamports: u64,
+) -> ProcessResult {
+    let pubkey = if let Some(pubkey) = pubkey {
+        *pubkey
+    } else {
+        config.pubkey()?
+    };
+    println!(
+        "Requesting airdrop of {}",
+        build_balance_message(lamports, false, true),
+    );
+
+    let pre_balance = rpc_client.get_balance(&pubkey)?;
+
+    let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports);
+    if let Ok(signature) = result {
+        let signature_cli_message = log_instruction_custom_error::<SystemError>(result, config)?;
+        println!("{}", signature_cli_message);
+
+        let current_balance = rpc_client.get_balance(&pubkey)?;
+
+        if current_balance < pre_balance.saturating_add(lamports) {
+            println!("Balance unchanged");
+            println!("Run `solana confirm -v {:?}` for more info", signature);
+            Ok("".to_string())
+        } else {
+            Ok(build_balance_message(current_balance, false, true))
+        }
+    } else {
+        log_instruction_custom_error::<SystemError>(result, config)
+    }
+}
+
+pub fn process_balance(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    pubkey: &Option<Pubkey>,
+    use_lamports_unit: bool,
+) -> ProcessResult {
+    let pubkey = if let Some(pubkey) = pubkey {
+        *pubkey
+    } else {
+        config.pubkey()?
+ }; + let balance = rpc_client.get_balance(&pubkey)?; + Ok(build_balance_message(balance, use_lamports_unit, true)) +} + +pub fn process_confirm( + rpc_client: &RpcClient, + config: &CliConfig, + signature: &Signature, +) -> ProcessResult { + match rpc_client.get_signature_statuses_with_history(&[*signature]) { + Ok(status) => { + let cli_transaction = if let Some(transaction_status) = &status.value[0] { + let mut transaction = None; + let mut get_transaction_error = None; + if config.verbose { + match rpc_client.get_transaction_with_config( + signature, + RpcTransactionConfig { + encoding: Some(UiTransactionEncoding::Base64), + commitment: Some(CommitmentConfig::confirmed()), + }, + ) { + Ok(confirmed_transaction) => { + let decoded_transaction = confirmed_transaction + .transaction + .transaction + .decode() + .expect("Successful decode"); + let json_transaction = EncodedTransaction::encode( + decoded_transaction.clone(), + UiTransactionEncoding::Json, + ); + + transaction = Some(CliTransaction { + transaction: json_transaction, + meta: confirmed_transaction.transaction.meta, + block_time: confirmed_transaction.block_time, + slot: Some(confirmed_transaction.slot), + decoded_transaction, + prefix: " ".to_string(), + sigverify_status: vec![], + }); + } + Err(err) => { + get_transaction_error = Some(format!("{:?}", err)); + } + } + } + CliTransactionConfirmation { + confirmation_status: Some(transaction_status.confirmation_status()), + transaction, + get_transaction_error, + err: transaction_status.err.clone(), + } + } else { + CliTransactionConfirmation { + confirmation_status: None, + transaction: None, + get_transaction_error: None, + err: None, + } + }; + Ok(config.output_format.formatted_string(&cli_transaction)) + } + Err(err) => Err(CliError::RpcRequestError(format!("Unable to confirm: {}", err)).into()), + } +} + +#[allow(clippy::unnecessary_wraps)] +pub fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult { + let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction); + let decode_transaction = CliTransaction { + decoded_transaction: transaction.clone(), + transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json), + meta: None, + block_time: None, + slot: None, + prefix: "".to_string(), + sigverify_status, + }; + Ok(config.output_format.formatted_string(&decode_transaction)) +} + +pub fn process_create_address_with_seed( + config: &CliConfig, + from_pubkey: Option<&Pubkey>, + seed: &str, + program_id: &Pubkey, +) -> ProcessResult { + let from_pubkey = if let Some(pubkey) = from_pubkey { + *pubkey + } else { + config.pubkey()? 
+    };
+    let address = Pubkey::create_with_seed(&from_pubkey, seed, program_id)?;
+    Ok(address.to_string())
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn process_transfer(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    amount: SpendAmount,
+    to: &Pubkey,
+    from: SignerIndex,
+    sign_only: bool,
+    dump_transaction_message: bool,
+    allow_unfunded_recipient: bool,
+    no_wait: bool,
+    blockhash_query: &BlockhashQuery,
+    nonce_account: Option<&Pubkey>,
+    nonce_authority: SignerIndex,
+    memo: Option<&String>,
+    fee_payer: SignerIndex,
+    derived_address_seed: Option<String>,
+    derived_address_program_id: Option<&Pubkey>,
+) -> ProcessResult {
+    let from = config.signers[from];
+    let mut from_pubkey = from.pubkey();
+
+    let (recent_blockhash, fee_calculator) =
+        blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?;
+
+    if !sign_only && !allow_unfunded_recipient {
+        let recipient_balance = rpc_client
+            .get_balance_with_commitment(to, config.commitment)?
+            .value;
+        if recipient_balance == 0 {
+            return Err(format!(
+                "The recipient address ({}) is not funded. \
+                 Add `--allow-unfunded-recipient` to complete the transfer \
+                ",
+                to
+            )
+            .into());
+        }
+    }
+
+    let nonce_authority = config.signers[nonce_authority];
+    let fee_payer = config.signers[fee_payer];
+
+    let derived_parts = derived_address_seed.zip(derived_address_program_id);
+    let with_seed = if let Some((seed, program_id)) = derived_parts {
+        let base_pubkey = from_pubkey;
+        from_pubkey = Pubkey::create_with_seed(&base_pubkey, &seed, program_id)?;
+        Some((base_pubkey, seed, program_id, from_pubkey))
+    } else {
+        None
+    };
+
+    let build_message = |lamports| {
+        let ixs = if let Some((base_pubkey, seed, program_id, from_pubkey)) = with_seed.as_ref() {
+            vec![system_instruction::transfer_with_seed(
+                from_pubkey,
+                base_pubkey,
+                seed.clone(),
+                program_id,
+                to,
+                lamports,
+            )]
+            .with_memo(memo)
+        } else {
+            vec![system_instruction::transfer(&from_pubkey, to, lamports)].with_memo(memo)
+        };
+
+        if let Some(nonce_account) = &nonce_account {
+            Message::new_with_nonce(
+                ixs,
+                Some(&fee_payer.pubkey()),
+                nonce_account,
+                &nonce_authority.pubkey(),
+            )
+        } else {
+            Message::new(&ixs, Some(&fee_payer.pubkey()))
+        }
+    };
+
+    let (message, _) = resolve_spend_tx_and_check_account_balances(
+        rpc_client,
+        sign_only,
+        amount,
+        &fee_calculator,
+        &from_pubkey,
+        &fee_payer.pubkey(),
+        build_message,
+        config.commitment,
+    )?;
+    let mut tx = Transaction::new_unsigned(message);
+
+    if sign_only {
+        tx.try_partial_sign(&config.signers, recent_blockhash)?;
+        return_signers_with_config(
+            &tx,
+            &config.output_format,
+            &ReturnSignersConfig {
+                dump_transaction_message,
+            },
+        )
+    } else {
+        if let Some(nonce_account) = &nonce_account {
+            let nonce_account = nonce_utils::get_account_with_commitment(
+                rpc_client,
+                nonce_account,
+                config.commitment,
+            )?;
+            check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
+        }
+
+        tx.try_sign(&config.signers, recent_blockhash)?;
+        let result = if no_wait {
+            rpc_client.send_transaction(&tx)
+        } else {
+            rpc_client.send_and_confirm_transaction_with_spinner(&tx)
+        };
+        log_instruction_custom_error::<SystemError>(result, config)
+    }
+}
diff --git a/cli/tests/nonce.rs b/cli/tests/nonce.rs
index 2e539934213c42..fe17bb811e2738 100644
--- a/cli/tests/nonce.rs
+++ b/cli/tests/nonce.rs
@@ -18,13 +18,15 @@ use solana_sdk::{
     signature::{keypair_from_seed, Keypair, Signer},
     system_program,
 };
+use solana_streamer::socket::SocketAddrSpace;
 
 #[test]
 fn test_nonce() {
     let mint_keypair =
Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); full_battery_tests(test_validator, None, false); } @@ -34,7 +36,8 @@ fn test_nonce_with_seed() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); full_battery_tests(test_validator, Some(String::from("seed")), false); } @@ -44,7 +47,8 @@ fn test_nonce_with_authority() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); full_battery_tests(test_validator, None, true); } @@ -216,7 +220,12 @@ fn test_create_account_with_seed() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap(); let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap(); diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 5a83d0ea2409bc..1772bec05479f2 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -15,6 +15,7 @@ use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; +use solana_streamer::socket::SocketAddrSpace; use std::{env, fs::File, io::Read, path::PathBuf, str::FromStr}; #[test] @@ -30,7 +31,8 @@ fn test_cli_program_deploy_non_upgradeable() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -68,7 +70,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_rent_exemption); assert_eq!(account0.owner, bpf_loader::id()); @@ -146,7 +148,8 @@ fn test_cli_program_deploy_no_authority() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), 
CommitmentConfig::processed()); @@ -198,7 +201,7 @@ fn test_cli_program_deploy_no_authority() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); // Attempt to upgrade the program config.signers = vec![&keypair, &upgrade_authority]; @@ -229,7 +232,8 @@ fn test_cli_program_deploy_with_authority() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -284,7 +288,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&program_pubkey_str).unwrap() + Pubkey::from_str(program_pubkey_str).unwrap() ); let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); @@ -328,7 +332,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); @@ -397,7 +401,7 @@ fn test_cli_program_deploy_with_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&new_upgrade_authority_str).unwrap(), + Pubkey::from_str(new_upgrade_authority_str).unwrap(), new_upgrade_authority.pubkey() ); @@ -452,7 +456,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( new_upgrade_authority.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Set no authority @@ -510,7 +514,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -557,7 +561,8 @@ fn test_cli_program_write_buffer() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -606,7 +611,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&new_buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -641,7 +646,7 @@ fn 
test_cli_program_write_buffer() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer); @@ -675,7 +680,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Specify buffer authority @@ -700,7 +705,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); @@ -735,7 +740,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -768,7 +773,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Close buffer @@ -806,7 +811,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); // Close buffers and deposit default keypair let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports; @@ -839,7 +844,8 @@ fn test_cli_program_set_buffer_authority() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -901,7 +907,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&new_buffer_authority_str).unwrap(), + Pubkey::from_str(new_buffer_authority_str).unwrap(), new_buffer_authority.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -928,7 +934,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&buffer_authority_str).unwrap(), + Pubkey::from_str(buffer_authority_str).unwrap(), buffer_keypair.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -952,7 +958,8 @@ fn test_cli_program_mismatch_buffer_authority() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), 
CommitmentConfig::processed()); @@ -1041,7 +1048,8 @@ fn test_cli_program_show() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -1101,7 +1109,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let authority_str = json .as_object() @@ -1112,7 +1120,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let data_len = json .as_object() @@ -1161,7 +1169,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let programdata_address_str = json .as_object() @@ -1176,7 +1184,7 @@ fn test_cli_program_show() { ); assert_eq!( programdata_pubkey, - Pubkey::from_str(&programdata_address_str).unwrap() + Pubkey::from_str(programdata_address_str).unwrap() ); let authority_str = json .as_object() @@ -1187,7 +1195,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let deployed_slot = json .as_object() @@ -1221,7 +1229,8 @@ fn test_cli_program_dump() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); diff --git a/cli/tests/request_airdrop.rs b/cli/tests/request_airdrop.rs index 0787d7725d9e16..56444472bf27f4 100644 --- a/cli/tests/request_airdrop.rs +++ b/cli/tests/request_airdrop.rs @@ -6,13 +6,15 @@ use solana_sdk::{ commitment_config::CommitmentConfig, signature::{Keypair, Signer}, }; +use solana_streamer::socket::SocketAddrSpace; #[test] fn test_cli_request_airdrop() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let mut bob_config = CliConfig::recent_for_tests(); bob_config.json_rpc_url = test_validator.rpc_url(); diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 51c3bd97705ff9..5e41030a53de4b 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -1,6 +1,7 @@ use solana_cli::{ cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, spend_utils::SpendAmount, + stake::StakeAuthorizationIndexed, test_utils::{check_ready, check_recent_balance}, }; use solana_cli_output::{parse_sign_only_reply_string, OutputFormat}; @@ -17,18 +18,21 @@ use solana_sdk::{ nonce::State as NonceState, pubkey::Pubkey, signature::{keypair_from_seed, Keypair, Signer}, + stake::{ 
+ self, + instruction::LockupArgs, + state::{Lockup, StakeAuthorize, StakeState}, + }, }; -use solana_stake_program::{ - stake_instruction::LockupArgs, - stake_state::{Lockup, StakeAuthorize, StakeState}, -}; +use solana_streamer::socket::SocketAddrSpace; #[test] fn test_stake_delegation_force() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -63,6 +67,7 @@ fn test_stake_delegation_force() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -117,7 +122,8 @@ fn test_seed_stake_delegation_and_deactivation() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -139,7 +145,7 @@ fn test_seed_stake_delegation_and_deactivation() { let stake_address = Pubkey::create_with_seed( &config_validator.signers[0].pubkey(), "hi there", - &solana_stake_program::id(), + &stake::program::id(), ) .expect("bad seed"); @@ -150,6 +156,7 @@ fn test_seed_stake_delegation_and_deactivation() { seed: Some("hi there".to_string()), staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -202,7 +209,8 @@ fn test_stake_delegation_and_deactivation() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -230,6 +238,7 @@ fn test_stake_delegation_and_deactivation() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -283,7 +292,8 @@ fn test_offline_stake_delegation_and_deactivation() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -331,6 +341,7 @@ fn test_offline_stake_delegation_and_deactivation() { seed: None, staker: Some(config_offline.signers[0].pubkey()), withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -425,7 +436,8 @@ fn test_nonced_stake_delegation_and_deactivation() { let mint_keypair = Keypair::new(); let 
mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -450,6 +462,7 @@ fn test_nonced_stake_delegation_and_deactivation() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -541,7 +554,8 @@ fn test_stake_authorize() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -580,6 +594,7 @@ fn test_stake_authorize() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -599,7 +614,12 @@ fn test_stake_authorize() { config.signers.pop(); config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: online_authority_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), @@ -628,8 +648,18 @@ fn test_stake_authorize() { config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![ - (StakeAuthorize::Staker, online_authority2_pubkey, 1), - (StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0), + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: online_authority2_pubkey, + authority: 1, + new_authority_signer: None, + }, + StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: withdraw_authority_pubkey, + authority: 0, + new_authority_signer: None, + }, ], sign_only: false, dump_transaction_message: false, @@ -656,7 +686,12 @@ fn test_stake_authorize() { config.signers.push(&online_authority2); config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: offline_authority_pubkey, + authority: 1, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), @@ -682,7 +717,12 @@ fn test_stake_authorize() { let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: nonced_authority_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: true, 
dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), @@ -701,7 +741,12 @@ fn test_stake_authorize() { config.signers = vec![&offline_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: nonced_authority_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), @@ -752,7 +797,12 @@ fn test_stake_authorize() { config_offline.signers.push(&nonced_authority); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: online_authority_pubkey, + authority: 1, + new_authority_signer: None, + }], sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), @@ -772,7 +822,12 @@ fn test_stake_authorize() { config.signers = vec![&offline_presigner, &nonced_authority_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: online_authority_pubkey, + authority: 1, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -814,7 +869,12 @@ fn test_stake_authorize_with_fee_payer() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, SIG_FEE, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + SIG_FEE, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -860,6 +920,7 @@ fn test_stake_authorize_with_fee_payer() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -879,7 +940,12 @@ fn test_stake_authorize_with_fee_payer() { config.signers = vec![&default_signer, &payer_keypair]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: offline_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -901,7 +967,12 @@ fn test_stake_authorize_with_fee_payer() { let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: payer_pubkey, + 
authority: 0, + new_authority_signer: None, + }], sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), @@ -920,7 +991,12 @@ fn test_stake_authorize_with_fee_payer() { config.signers = vec![&offline_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, - new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)], + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: payer_pubkey, + authority: 0, + new_authority_signer: None, + }], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), @@ -946,7 +1022,12 @@ fn test_stake_split() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -984,6 +1065,7 @@ fn test_stake_split() { seed: None, staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, @@ -1089,7 +1171,12 @@ fn test_stake_set_lockup() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -1134,6 +1221,7 @@ fn test_stake_set_lockup() { seed: None, staker: Some(offline_pubkey), withdrawer: Some(config.signers[0].pubkey()), + withdrawer_signer: None, lockup, amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, @@ -1162,6 +1250,7 @@ fn test_stake_set_lockup() { config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 0, sign_only: false, dump_transaction_message: false, @@ -1197,6 +1286,7 @@ fn test_stake_set_lockup() { config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 0, sign_only: false, dump_transaction_message: false, @@ -1217,6 +1307,7 @@ fn test_stake_set_lockup() { config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 1, sign_only: false, dump_transaction_message: false, @@ -1249,6 +1340,7 @@ fn test_stake_set_lockup() { config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 1, sign_only: false, dump_transaction_message: false, @@ -1296,6 +1388,7 @@ fn test_stake_set_lockup() { config_offline.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 0, sign_only: true, dump_transaction_message: false, @@ -1314,6 +1407,7 @@ fn test_stake_set_lockup() { config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, + new_custodian_signer: None, custodian: 0, sign_only: 
false, dump_transaction_message: false, @@ -1348,7 +1442,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -1408,6 +1503,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { seed: None, staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: true, @@ -1431,6 +1527,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { seed: None, staker: Some(offline_pubkey), withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -1520,6 +1617,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { seed: Some(seed.to_string()), staker: None, withdrawer: None, + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: true, @@ -1541,6 +1639,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { seed: Some(seed.to_string()), staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), + withdrawer_signer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, @@ -1557,6 +1656,232 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { }; process_command(&config).unwrap(); let seed_address = - Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap(); + Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap(); check_recent_balance(50_000, &rpc_client, &seed_address); } + +#[test] +fn test_stake_checked_instructions() { + solana_logger::setup(); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + let default_signer = Keypair::new(); + + let mut config = CliConfig::recent_for_tests(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&default_signer]; + + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) + .unwrap(); + + // Create stake account with withdrawer + let stake_keypair = Keypair::new(); + let stake_account_pubkey = stake_keypair.pubkey(); + let withdrawer_keypair = Keypair::new(); + let withdrawer_pubkey = withdrawer_keypair.pubkey(); + config.signers.push(&stake_keypair); + config.command = CliCommand::CreateStakeAccount { + stake_account: 1, + seed: None, + staker: None, + withdrawer: Some(withdrawer_pubkey), + withdrawer_signer: Some(1), + lockup: Lockup::default(), + amount: SpendAmount::Some(50_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + from: 0, + }; + process_command(&config).unwrap_err(); // unsigned authority should fail + + config.signers = vec![&default_signer, &stake_keypair, 
&withdrawer_keypair]; + config.command = CliCommand::CreateStakeAccount { + stake_account: 1, + seed: None, + staker: None, + withdrawer: Some(withdrawer_pubkey), + withdrawer_signer: Some(1), + lockup: Lockup::default(), + amount: SpendAmount::Some(50_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + from: 0, + }; + process_command(&config).unwrap(); + + // Re-authorize account, checking new authority + let staker_keypair = Keypair::new(); + let staker_pubkey = staker_keypair.pubkey(); + config.signers = vec![&default_signer]; + config.command = CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: staker_pubkey, + authority: 0, + new_authority_signer: Some(0), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }; + process_command(&config).unwrap_err(); // unsigned authority should fail + + config.signers = vec![&default_signer, &staker_keypair]; + config.command = CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Staker, + new_authority_pubkey: staker_pubkey, + authority: 0, + new_authority_signer: Some(1), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }; + process_command(&config).unwrap(); + let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); + let stake_state: StakeState = stake_account.state().unwrap(); + let current_authority = match stake_state { + StakeState::Initialized(meta) => meta.authorized.staker, + _ => panic!("Unexpected stake state!"), + }; + assert_eq!(current_authority, staker_pubkey); + + let new_withdrawer_keypair = Keypair::new(); + let new_withdrawer_pubkey = new_withdrawer_keypair.pubkey(); + config.signers = vec![&default_signer, &withdrawer_keypair]; + config.command = CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdrawer_pubkey, + authority: 1, + new_authority_signer: Some(1), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }; + process_command(&config).unwrap_err(); // unsigned authority should fail + + config.signers = vec![ + &default_signer, + &withdrawer_keypair, + &new_withdrawer_keypair, + ]; + config.command = CliCommand::StakeAuthorize { + stake_account_pubkey, + new_authorizations: vec![StakeAuthorizationIndexed { + authorization_type: StakeAuthorize::Withdrawer, + new_authority_pubkey: new_withdrawer_pubkey, + authority: 1, + new_authority_signer: Some(2), + }], + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + custodian: None, + no_wait: false, + }; + 
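+    // All required signers are now present: the current withdrawer at signer
+    // index 1 and the new withdrawer at index 2, so the checked authorize
+    // below is expected to succeed.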
process_command(&config).unwrap(); + let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); + let stake_state: StakeState = stake_account.state().unwrap(); + let current_authority = match stake_state { + StakeState::Initialized(meta) => meta.authorized.withdrawer, + _ => panic!("Unexpected stake state!"), + }; + assert_eq!(current_authority, new_withdrawer_pubkey); + + // Set lockup, checking new custodian + let custodian = Keypair::new(); + let custodian_pubkey = custodian.pubkey(); + let lockup = LockupArgs { + unix_timestamp: Some(1_581_534_570), + epoch: Some(200), + custodian: Some(custodian_pubkey), + }; + config.signers = vec![&default_signer, &new_withdrawer_keypair]; + config.command = CliCommand::StakeSetLockup { + stake_account_pubkey, + lockup, + new_custodian_signer: Some(1), + custodian: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config).unwrap_err(); // unsigned new custodian should fail + + config.signers = vec![&default_signer, &new_withdrawer_keypair, &custodian]; + config.command = CliCommand::StakeSetLockup { + stake_account_pubkey, + lockup, + new_custodian_signer: Some(2), + custodian: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config).unwrap(); + let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); + let stake_state: StakeState = stake_account.state().unwrap(); + let current_lockup = match stake_state { + StakeState::Initialized(meta) => meta.lockup, + _ => panic!("Unexpected stake state!"), + }; + assert_eq!( + current_lockup.unix_timestamp, + lockup.unix_timestamp.unwrap() + ); + assert_eq!(current_lockup.epoch, lockup.epoch.unwrap()); + assert_eq!(current_lockup.custodian, custodian_pubkey); +} diff --git a/cli/tests/transfer.rs b/cli/tests/transfer.rs index 3d971588305578..ad457846147701 100644 --- a/cli/tests/transfer.rs +++ b/cli/tests/transfer.rs @@ -16,7 +16,9 @@ use solana_sdk::{ nonce::State as NonceState, pubkey::Pubkey, signature::{keypair_from_seed, Keypair, NullSigner, Signer}, + stake, }; +use solana_streamer::socket::SocketAddrSpace; #[test] fn test_transfer() { @@ -24,7 +26,12 @@ fn test_transfer() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -276,7 +283,12 @@ fn test_transfer_multisession_signing() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let to_pubkey = Pubkey::new(&[1u8; 32]); let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap(); @@ -403,7 +415,12 @@ fn test_transfer_all() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); 
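    // run_local_faucet starts an in-process faucet for the test and returns its
    // socket address, which the test validator below uses to serve airdrops.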
let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -451,7 +468,12 @@ fn test_transfer_unfunded_recipient() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -499,7 +521,12 @@ fn test_transfer_with_seed() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + let test_validator = TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -513,7 +540,7 @@ fn test_transfer_with_seed() { let sender_pubkey = config.signers[0].pubkey(); let recipient_pubkey = Pubkey::new(&[1u8; 32]); let derived_address_seed = "seed".to_string(); - let derived_address_program_id = solana_stake_program::id(); + let derived_address_program_id = stake::program::id(); let derived_address = Pubkey::create_with_seed( &sender_pubkey, &derived_address_seed, diff --git a/cli/tests/vote.rs b/cli/tests/vote.rs index 64c67a1d0b8c1c..6026bdde355792 100644 --- a/cli/tests/vote.rs +++ b/cli/tests/vote.rs @@ -14,6 +14,7 @@ use solana_sdk::{ commitment_config::CommitmentConfig, signature::{Keypair, Signer}, }; +use solana_streamer::socket::SocketAddrSpace; use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions}; #[test] @@ -21,7 +22,8 @@ fn test_vote_authorize_and_withdraw() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); @@ -83,13 +85,48 @@ fn test_vote_authorize_and_withdraw() { check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey); // Authorize vote account withdrawal to another signer - let withdraw_authority = Keypair::new(); + let first_withdraw_authority = Keypair::new(); config.signers = vec![&default_signer]; + config.command = CliCommand::VoteAuthorize { + vote_account_pubkey, + new_authorized_pubkey: first_withdraw_authority.pubkey(), + vote_authorize: VoteAuthorize::Withdrawer, + memo: None, + authorized: 0, + new_authorized: None, + }; + process_command(&config).unwrap(); + let vote_account = rpc_client + .get_account(&vote_account_keypair.pubkey()) + .unwrap(); + let vote_state: VoteStateVersions = vote_account.state().unwrap(); + let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + 
assert_eq!(authorized_withdrawer, first_withdraw_authority.pubkey()); + + // Authorize vote account withdrawal to another signer with checked instruction + let withdraw_authority = Keypair::new(); + config.signers = vec![&default_signer, &first_withdraw_authority]; + config.command = CliCommand::VoteAuthorize { + vote_account_pubkey, + new_authorized_pubkey: withdraw_authority.pubkey(), + vote_authorize: VoteAuthorize::Withdrawer, + memo: None, + authorized: 1, + new_authorized: Some(1), + }; + process_command(&config).unwrap_err(); // unsigned by new authority should fail + config.signers = vec![ + &default_signer, + &first_withdraw_authority, + &withdraw_authority, + ]; config.command = CliCommand::VoteAuthorize { vote_account_pubkey, new_authorized_pubkey: withdraw_authority.pubkey(), vote_authorize: VoteAuthorize::Withdrawer, memo: None, + authorized: 1, + new_authorized: Some(2), }; process_command(&config).unwrap(); let vote_account = rpc_client diff --git a/client/Cargo.toml b/client/Cargo.toml index 4ed8a9db0b6fde..1260b2fa09f837 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client" -version = "1.7.0" +version = "1.7.11" description = "Solana Client" authors = ["Solana Maintainers <maintainers@solana.foundation>"] repository = "https://github.com/solana-labs/solana" @@ -24,14 +24,14 @@ semver = "0.11.0" serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-faucet = { path = "../faucet", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } thiserror = "1.0" tokio = { version = "1", features = ["full"] } tungstenite = "0.10.1" url = "2.1.1" [dev-dependencies] assert_matches = "1.3.0" jsonrpc-http-server = "17.0.0" -solana-logger = { path = "../logger", version = "=1.7.0" } +solana-logger = { path = "../logger", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/src/blockhash_query.rs b/client/src/blockhash_query.rs index ced886b04bf280..78dd26e39118a8 100644 --- a/client/src/blockhash_query.rs +++ b/client/src/blockhash_query.rs @@ -122,10 +122,10 @@ mod tests { use crate::{ blockhash_query, rpc_request::RpcRequest, - rpc_response::{Response, RpcFeeCalculator, RpcResponseContext}, + rpc_response::{Response, RpcFeeCalculator, RpcFees, RpcResponseContext}, }; use clap::App; - use serde_json::{self, json, Value}; + use serde_json::{self, json}; use solana_account_decoder::{UiAccount, UiAccountEncoding}; use solana_sdk::{account::Account, hash::hash, nonce, system_program}; use
std::collections::HashMap; @@ -288,10 +288,12 @@ mod tests { let rpc_fee_calc = FeeCalculator::new(42); let get_recent_blockhash_response = json!(Response { context: RpcResponseContext { slot: 1 }, - value: json!(( - Value::String(rpc_blockhash.to_string()), - serde_json::to_value(rpc_fee_calc.clone()).unwrap() - )), + value: json!(RpcFees { + blockhash: rpc_blockhash.to_string(), + fee_calculator: rpc_fee_calc.clone(), + last_valid_slot: 42, + last_valid_block_height: 42, + }), }); let get_fee_calculator_for_blockhash_response = json!(Response { context: RpcResponseContext { slot: 1 }, @@ -300,10 +302,7 @@ mod tests { }), }); let mut mocks = HashMap::new(); - mocks.insert( - RpcRequest::GetRecentBlockhash, - get_recent_blockhash_response.clone(), - ); + mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response.clone()); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::default() @@ -312,10 +311,7 @@ mod tests { (rpc_blockhash, rpc_fee_calc.clone()), ); let mut mocks = HashMap::new(); - mocks.insert( - RpcRequest::GetRecentBlockhash, - get_recent_blockhash_response.clone(), - ); + mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response.clone()); mocks.insert( RpcRequest::GetFeeCalculatorForBlockhash, get_fee_calculator_for_blockhash_response, @@ -328,10 +324,7 @@ mod tests { (test_blockhash, rpc_fee_calc), ); let mut mocks = HashMap::new(); - mocks.insert( - RpcRequest::GetRecentBlockhash, - get_recent_blockhash_response, - ); + mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert_eq!( BlockhashQuery::None(test_blockhash) diff --git a/client/src/client_error.rs b/client/src/client_error.rs index fec5d047c304f3..365d70941c4b4d 100644 --- a/client/src/client_error.rs +++ b/client/src/client_error.rs @@ -1,5 +1,5 @@ use { - crate::rpc_request, + crate::{rpc_request, rpc_response}, solana_faucet::faucet::FaucetError, solana_sdk::{ signature::SignerError, transaction::TransactionError, transport::TransportError, @@ -30,6 +30,24 @@ pub enum ClientErrorKind { Custom(String), } +impl ClientErrorKind { + pub fn get_transaction_error(&self) -> Option<TransactionError> { + match self { + Self::RpcError(rpc_request::RpcError::RpcResponseError { + data: + rpc_request::RpcResponseErrorData::SendTransactionPreflightFailure( + rpc_response::RpcSimulateTransactionResult { + err: Some(tx_err), .. + }, + ), + .. + }) => Some(tx_err.clone()), + Self::TransactionError(tx_err) => Some(tx_err.clone()), + _ => None, + } + } +} + impl From<TransportError> for ClientErrorKind { fn from(err: TransportError) -> Self { match err { @@ -86,6 +104,10 @@ impl ClientError { pub fn kind(&self) -> &ClientErrorKind { &self.kind } + + pub fn get_transaction_error(&self) -> Option<TransactionError> { + self.kind.get_transaction_error() + } } impl From<ClientErrorKind> for ClientError { diff --git a/client/src/http_sender.rs b/client/src/http_sender.rs index 9c3f98a9b5b253..79d5c4ed852409 100644 --- a/client/src/http_sender.rs +++ b/client/src/http_sender.rs @@ -1,3 +1,5 @@ +//! The standard [`RpcSender`] over HTTP. + use { crate::{ client_error::Result, @@ -28,11 +30,19 @@ pub struct HttpSender { request_id: AtomicU64, } +/// The standard [`RpcSender`] over HTTP. impl HttpSender { + /// Create an HTTP RPC sender. + /// + /// The URL is an HTTP URL, usually for port 8899, as in + /// "http://localhost:8899". The sender has a default timeout of 30 seconds.
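+    ///
+    /// A minimal construction sketch (illustrative only; the URL is just the
+    /// conventional local test-validator endpoint):
+    ///
+    /// ```
+    /// use solana_client::http_sender::HttpSender;
+    ///
+    /// let sender = HttpSender::new("http://localhost:8899".to_string());
+    /// ```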
pub fn new(url: String) -> Self { Self::new_with_timeout(url, Duration::from_secs(30)) } + /// Create an HTTP RPC sender. + /// + /// The URL is an HTTP URL, usually for port 8899. pub fn new_with_timeout(url: String, timeout: Duration) -> Self { // `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the // request to a different tokio thread to avoid this @@ -57,7 +67,6 @@ impl HttpSender { struct RpcErrorObject { code: i64, message: String, - data: serde_json::Value, } impl RpcSender for HttpSender { diff --git a/client/src/mock_sender.rs b/client/src/mock_sender.rs index e40e456745546e..b5f478df5331a3 100644 --- a/client/src/mock_sender.rs +++ b/client/src/mock_sender.rs @@ -1,8 +1,15 @@ +//! An [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient). + use { crate::{ client_error::Result, + rpc_config::RpcBlockProductionConfig, rpc_request::RpcRequest, - rpc_response::{Response, RpcResponseContext, RpcVersionInfo}, + rpc_response::{ + Response, RpcAccountBalance, RpcBlockProduction, RpcBlockProductionRange, RpcFees, + RpcResponseContext, RpcSimulateTransactionResult, RpcStakeActivation, RpcSupply, + RpcVersionInfo, RpcVoteAccountStatus, StakeActivationState, + }, rpc_sender::RpcSender, }, serde_json::{json, Number, Value}, @@ -28,6 +35,31 @@ pub struct MockSender { url: String, } +/// An [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient). +/// +/// This is primarily for internal use. +/// +/// Unless directed otherwise, it will generally return a reasonable default +/// response, at least for [`RpcRequest`] values for which responses have been +/// implemented. +/// +/// The behavior can be customized in two ways: +/// +/// 1) The `url` constructor argument is not actually a URL, but a simple string +/// directive that changes `MockSender`'s behavior in specific scenarios. +/// +/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`. +/// +/// It is customary to set the `url` to "succeeds" for mocks that should +/// return successfully, though this value is not actually interpreted. +/// +/// Other possible values of `url` are specific to different `RpcRequest` +/// values. Read the implementation for specifics. +/// +/// 2) Custom responses can be configured by providing [`Mocks`] to the +/// [`MockSender::new_with_mocks`] constructor. This type is a [`HashMap`] +/// from [`RpcRequest`] to a JSON [`Value`] response. Any entries in this map +/// override the default behavior for the given request.
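+///
+/// A sketch of the second approach; the mocked response value here is an
+/// illustrative placeholder, not a default of this crate:
+///
+/// ```
+/// use serde_json::json;
+/// use solana_client::{mock_sender::Mocks, rpc_client::RpcClient, rpc_request::RpcRequest};
+///
+/// let mut mocks = Mocks::default();
+/// // Override only `getBalance`; other requests keep the default mock behavior.
+/// mocks.insert(RpcRequest::GetBalance, json!({ "context": { "slot": 1 }, "value": 50 }));
+/// let client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
+/// ```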
impl MockSender { pub fn new(url: String) -> Self { Self::new_with_mocks(url, Mocks::default()) @@ -49,23 +81,26 @@ impl RpcSender for MockSender { if self.url == "fails" { return Ok(Value::Null); } - let val = match request { - RpcRequest::GetAccountInfo => serde_json::to_value(Response { + + let method = &request.build_request_json(42, params.clone())["method"]; + + let val = match method.as_str().unwrap() { + "getAccountInfo" => serde_json::to_value(Response { context: RpcResponseContext { slot: 1 }, value: Value::Null, })?, - RpcRequest::GetBalance => serde_json::to_value(Response { + "getBalance" => serde_json::to_value(Response { context: RpcResponseContext { slot: 1 }, value: Value::Number(Number::from(50)), })?, - RpcRequest::GetRecentBlockhash => serde_json::to_value(Response { + "getRecentBlockhash" => serde_json::to_value(Response { context: RpcResponseContext { slot: 1 }, value: ( Value::String(PUBKEY.to_string()), serde_json::to_value(FeeCalculator::default()).unwrap(), ), })?, - RpcRequest::GetEpochInfo => serde_json::to_value(EpochInfo { + "getEpochInfo" => serde_json::to_value(EpochInfo { epoch: 1, slot_index: 2, slots_in_epoch: 32, @@ -73,7 +108,7 @@ impl RpcSender for MockSender { block_height: 34, transaction_count: Some(123), })?, - RpcRequest::GetFeeCalculatorForBlockhash => { + "getFeeCalculatorForBlockhash" => { let value = if self.url == "blockhash_expired" { Value::Null } else { @@ -84,11 +119,21 @@ impl RpcSender for MockSender { value, })? } - RpcRequest::GetFeeRateGovernor => serde_json::to_value(Response { + "getFeeRateGovernor" => serde_json::to_value(Response { context: RpcResponseContext { slot: 1 }, value: serde_json::to_value(FeeRateGovernor::default()).unwrap(), })?, - RpcRequest::GetSignatureStatuses => { + "getFees" => serde_json::to_value(Response { + context: RpcResponseContext { slot: 1 }, + value: serde_json::to_value(RpcFees { + blockhash: PUBKEY.to_string(), + fee_calculator: FeeCalculator::default(), + last_valid_slot: 42, + last_valid_block_height: 42, + }) + .unwrap(), + })?, + "getSignatureStatuses" => { let status: transaction::Result<()> = if self.url == "account_in_use" { Err(TransactionError::AccountInUse) } else if self.url == "instruction_error" { @@ -122,11 +167,83 @@ impl RpcSender for MockSender { value: statuses, })? 
} - RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)), - RpcRequest::GetSlot => Value::Number(Number::from(0)), - RpcRequest::GetMaxShredInsertSlot => Value::Number(Number::from(0)), - RpcRequest::RequestAirdrop => Value::String(Signature::new(&[8; 64]).to_string()), - RpcRequest::SendTransaction => { + "getTransactionCount" => json![1234], + "getSlot" => json![0], + "getMaxShredInsertSlot" => json![0], + "requestAirdrop" => Value::String(Signature::new(&[8; 64]).to_string()), + "getSnapshotSlot" => Value::Number(Number::from(0)), + "getBlockHeight" => Value::Number(Number::from(1234)), + "getSlotLeaders" => json!([PUBKEY]), + "getBlockProduction" => { + if params.is_null() { + json!(Response { + context: RpcResponseContext { slot: 1 }, + value: RpcBlockProduction { + by_identity: HashMap::new(), + range: RpcBlockProductionRange { + first_slot: 1, + last_slot: 2, + }, + }, + }) + } else { + let config: Vec<RpcBlockProductionConfig> = + serde_json::from_value(params).unwrap(); + let config = config[0].clone(); + let mut by_identity = HashMap::new(); + by_identity.insert(config.identity.unwrap(), (1, 123)); + let config_range = config.range.unwrap_or_default(); + + json!(Response { + context: RpcResponseContext { slot: 1 }, + value: RpcBlockProduction { + by_identity, + range: RpcBlockProductionRange { + first_slot: config_range.first_slot, + last_slot: { + if let Some(last_slot) = config_range.last_slot { + last_slot + } else { + 2 + } + }, + }, + }, + }) + } + } + "getStakeActivation" => json!(RpcStakeActivation { + state: StakeActivationState::Active, + active: 123, + inactive: 12, + }), + "getSupply" => json!(Response { + context: RpcResponseContext { slot: 1 }, + value: RpcSupply { + total: 100000000, + circulating: 50000, + non_circulating: 20000, + non_circulating_accounts: vec![PUBKEY.to_string()], + }, + }), + "getLargestAccounts" => { + let rpc_account_balance = RpcAccountBalance { + address: PUBKEY.to_string(), + lamports: 10000, + }; + + json!(Response { + context: RpcResponseContext { slot: 1 }, + value: vec![rpc_account_balance], + }) + } + "getVoteAccounts" => { + json!(RpcVoteAccountStatus { + current: vec![], + delinquent: vec![], + }) + } + "sendTransaction" => { let signature = if self.url == "malicious" { Signature::new(&[8; 64]).to_string() } else { @@ -137,8 +254,16 @@ impl RpcSender for MockSender { }; Value::String(signature) } - RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(20)), - RpcRequest::GetVersion => { + "simulateTransaction" => serde_json::to_value(Response { + context: RpcResponseContext { slot: 1 }, + value: RpcSimulateTransactionResult { + err: None, + logs: None, + accounts: None, + }, + })?, + "getMinimumBalanceForRentExemption" => json![20], + "getVersion" => { let version = Version::default(); json!(RpcVersionInfo { solana_core: version.to_string(), diff --git a/client/src/rpc_cache.rs b/client/src/rpc_cache.rs index 38dbba582562dd..4207d3ce36752b 100644 --- a/client/src/rpc_cache.rs +++ b/client/src/rpc_cache.rs @@ -31,7 +31,7 @@ impl LargestAccountsCache { &self, filter: &Option<RpcLargestAccountsFilter>, ) -> Option<(u64, Vec<RpcAccountBalance>)> { - self.cache.get(&filter).and_then(|value| { + self.cache.get(filter).and_then(|value| { if let Ok(elapsed) = value.cached_time.elapsed() { if elapsed < Duration::from_secs(self.duration) { return Some((value.slot, value.accounts.clone())); diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index c54ef4ad94f5bf..5221a5cde2fe90 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -1,3 +1,11 @@
+//! Communication with a Solana node over RPC.
+//!
+//! Software that interacts with the Solana blockchain, whether querying its
+//! state or submitting transactions, communicates with a Solana node over
+//! [JSON-RPC], using the [`RpcClient`] type.
+//!
+//! [JSON-RPC]: https://www.jsonrpc.org/specification
+
 #[allow(deprecated)]
 use crate::rpc_deprecated_config::{
     RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
@@ -49,59 +57,198 @@ use {
     },
 };
 
-pub struct RpcClient {
-    sender: Box<dyn RpcSender + Send + Sync + 'static>,
+#[derive(Default)]
+pub struct RpcClientConfig {
     commitment_config: CommitmentConfig,
-    node_version: RwLock<Option<semver::Version>>,
+    confirm_transaction_initial_timeout: Option<Duration>,
 }
 
-fn serialize_encode_transaction(
-    transaction: &Transaction,
-    encoding: UiTransactionEncoding,
-) -> ClientResult<String> {
-    let serialized = serialize(transaction)
-        .map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
-    let encoded = match encoding {
-        UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
-        UiTransactionEncoding::Base64 => base64::encode(serialized),
-        _ => {
-            return Err(ClientErrorKind::Custom(format!(
-                "unsupported transaction encoding: {}. Supported encodings: base58, base64",
-                encoding
-            ))
-            .into())
+impl RpcClientConfig {
+    fn with_commitment(commitment_config: CommitmentConfig) -> Self {
+        RpcClientConfig {
+            commitment_config,
+            ..Self::default()
        }
-    };
-    Ok(encoded)
+    }
+}
+
+/// A client of a remote Solana node.
+///
+/// `RpcClient` communicates with a Solana node over [JSON-RPC], following the
+/// [Solana JSON-RPC protocol][jsonprot]. It is the primary Rust interface for
+/// querying and transacting with the network from external programs.
+///
+/// This type builds on the underlying RPC protocol, adding extra features such
+/// as timeout handling, retries, and waiting on transaction commitment levels.
+/// Some methods simply pass through to the underlying RPC protocol. Not all RPC
+/// methods are encapsulated by this type, but `RpcClient` does expose a generic
+/// [`send`](RpcClient::send) method for making any [`RpcRequest`].
+///
+/// The documentation for most `RpcClient` methods contains an "RPC Reference"
+/// section that links to the documentation for the underlying JSON-RPC method.
+/// The documentation for `RpcClient` does not reproduce the documentation for
+/// the underlying JSON-RPC methods, so reading both is necessary for a complete
+/// understanding.
+///
+/// `RpcClient`s generally communicate over HTTP on port 8899, a typical server
+/// URL being "http://localhost:8899".
+///
+/// By default, requests to confirm transactions only succeed once those
+/// transactions are finalized, meaning they are definitely permanently
+/// committed. Transactions can be confirmed with less finality by creating an
+/// `RpcClient` with an explicit [`CommitmentConfig`], or by calling the various
+/// `_with_commitment` methods, like
+/// [`RpcClient::confirm_transaction_with_commitment`].
+///
+/// [jsonprot]: https://docs.solana.com/developing/clients/jsonrpc-api
+/// [JSON-RPC]: https://www.jsonrpc.org/specification
+///
+/// # Errors
+///
+/// Methods on `RpcClient` return
+/// [`client_error::Result`][crate::client_error::Result], and many of them
+/// return the [`RpcResult`][crate::rpc_response::RpcResult] typedef, which
+/// contains [`Response`][crate::rpc_response::Response] on `Ok`. Both
+/// `client_error::Result` and [`RpcResult`] contain `ClientError` on error.
In
+/// the case of `RpcResult`, the actual return value is in the
+/// [`value`][crate::rpc_response::Response::value] field, with RPC contextual
+/// information in the [`context`][crate::rpc_response::Response::context]
+/// field, so it is common for the value to be accessed with `?.value`, as in
+///
+/// ```
+/// # use solana_sdk::system_transaction;
+/// # use solana_client::rpc_client::RpcClient;
+/// # use solana_client::client_error::ClientError;
+/// # use solana_sdk::signature::{Keypair, Signer};
+/// # use solana_sdk::hash::Hash;
+/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
+/// # let key = Keypair::new();
+/// # let to = solana_sdk::pubkey::new_rand();
+/// # let lamports = 50;
+/// # let recent_blockhash = Hash::default();
+/// # let tx = system_transaction::transfer(&key, &to, lamports, recent_blockhash);
+/// let signature = rpc_client.send_transaction(&tx)?;
+/// let statuses = rpc_client.get_signature_statuses(&[signature])?.value;
+/// # Ok::<(), ClientError>(())
+/// ```
+///
+/// Requests may time out, in which case they return a [`ClientError`] where the
+/// [`ClientErrorKind`] is [`ClientErrorKind::Reqwest`], and where the interior
+/// [`reqwest::Error`](crate::client_error::reqwest::Error)'s
+/// [`is_timeout`](crate::client_error::reqwest::Error::is_timeout) method
+/// returns `true`. The default timeout is 30 seconds, and may be changed by
+/// calling an appropriate constructor with a `timeout` parameter.
+pub struct RpcClient {
+    sender: Box<dyn RpcSender + Send + Sync + 'static>,
+    config: RpcClientConfig,
+    node_version: RwLock<Option<semver::Version>>,
+}
 
 impl RpcClient {
+    /// Create an `RpcClient` from an [`RpcSender`] and an [`RpcClientConfig`].
+    ///
+    /// This is the basic constructor, allowing construction with any type of
+    /// `RpcSender`. Most applications should use one of the other constructors,
+    /// such as [`new`] and [`new_mock`], which create an `RpcClient`
+    /// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
     fn new_sender<T: RpcSender + Send + Sync + 'static>(
         sender: T,
-        commitment_config: CommitmentConfig,
+        config: RpcClientConfig,
     ) -> Self {
         Self {
             sender: Box::new(sender),
             node_version: RwLock::new(None),
-            commitment_config,
+            config,
         }
     }
 
+    /// Create an HTTP `RpcClient`.
+    ///
+    /// The URL is an HTTP URL, usually for port 8899, as in
+    /// "http://localhost:8899".
+    ///
+    /// The client has a default timeout of 30 seconds, and a default commitment
+    /// level of [`Finalized`](CommitmentLevel::Finalized).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use solana_client::rpc_client::RpcClient;
+    /// let url = "http://localhost:8899".to_string();
+    /// let client = RpcClient::new(url);
+    /// ```
     pub fn new(url: String) -> Self {
         Self::new_with_commitment(url, CommitmentConfig::default())
     }
 
+    /// Create an HTTP `RpcClient` with specified commitment level.
+    ///
+    /// The URL is an HTTP URL, usually for port 8899, as in
+    /// "http://localhost:8899".
+    ///
+    /// The client has a default timeout of 30 seconds, and a user-specified
+    /// [`CommitmentLevel`] via [`CommitmentConfig`].
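To complement the timeout behavior documented above, here is a hedged sketch of classifying a timed-out request. It assumes `ClientError` exposes its kind through a `kind()` accessor, as in this crate:

```rust
use {
    solana_client::{
        client_error::{ClientError, ClientErrorKind},
        rpc_client::RpcClient,
    },
    solana_sdk::pubkey::Pubkey,
};

/// True when an RPC failure was a transport timeout, per the
/// `ClientErrorKind::Reqwest` / `is_timeout` contract described above.
fn is_timeout(err: &ClientError) -> bool {
    matches!(err.kind(), ClientErrorKind::Reqwest(e) if e.is_timeout())
}

fn print_balance(client: &RpcClient, pubkey: &Pubkey) {
    match client.get_balance(pubkey) {
        Ok(lamports) => println!("balance: {} lamports", lamports),
        Err(err) if is_timeout(&err) => eprintln!("request timed out; worth retrying"),
        Err(err) => eprintln!("client error: {}", err),
    }
}
```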
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use solana_sdk::commitment_config::CommitmentConfig;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// let url = "http://localhost:8899".to_string();
+    /// let commitment_config = CommitmentConfig::processed();
+    /// let client = RpcClient::new_with_commitment(url, commitment_config);
+    /// ```
     pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
-        Self::new_sender(HttpSender::new(url), commitment_config)
+        Self::new_sender(
+            HttpSender::new(url),
+            RpcClientConfig::with_commitment(commitment_config),
+        )
     }
 
+    /// Create an HTTP `RpcClient` with specified timeout.
+    ///
+    /// The URL is an HTTP URL, usually for port 8899, as in
+    /// "http://localhost:8899".
+    ///
+    /// The client has a default commitment level of
+    /// [`Finalized`](CommitmentLevel::Finalized).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::time::Duration;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// let url = "http://localhost:8899".to_string();
+    /// let timeout = Duration::from_secs(1);
+    /// let client = RpcClient::new_with_timeout(url, timeout);
+    /// ```
     pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
         Self::new_sender(
             HttpSender::new_with_timeout(url, timeout),
-            CommitmentConfig::default(),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
         )
     }
 
+    /// Create an HTTP `RpcClient` with specified timeout and commitment level.
+    ///
+    /// The URL is an HTTP URL, usually for port 8899, as in
+    /// "http://localhost:8899".
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::time::Duration;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// # use solana_sdk::commitment_config::CommitmentConfig;
+    /// let url = "http://localhost:8899".to_string();
+    /// let timeout = Duration::from_secs(1);
+    /// let commitment_config = CommitmentConfig::processed();
+    /// let client = RpcClient::new_with_timeout_and_commitment(
+    ///     url,
+    ///     timeout,
+    ///     commitment_config,
+    /// );
+    /// ```
     pub fn new_with_timeout_and_commitment(
         url: String,
         timeout: Duration,
@@ -109,25 +256,151 @@ impl RpcClient {
     ) -> Self {
         Self::new_sender(
             HttpSender::new_with_timeout(url, timeout),
-            commitment_config,
+            RpcClientConfig::with_commitment(commitment_config),
         )
     }
 
+    /// Create an HTTP `RpcClient` with specified timeouts and commitment level.
+    ///
+    /// The URL is an HTTP URL, usually for port 8899, as in
+    /// "http://localhost:8899".
+    ///
+    /// The `confirm_transaction_initial_timeout` argument specifies, when
+    /// confirming a transaction via one of the `_with_spinner` methods, like
+    /// [`RpcClient::send_and_confirm_transaction_with_spinner`], the amount of
+    /// time to allow for the server to initially process a transaction. In
+    /// other words, setting `confirm_transaction_initial_timeout` to > 0 allows
+    /// `RpcClient` to wait for confirmation of a transaction that the server
+    /// has not "seen" yet.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::time::Duration;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// # use solana_sdk::commitment_config::CommitmentConfig;
+    /// let url = "http://localhost:8899".to_string();
+    /// let timeout = Duration::from_secs(1);
+    /// let commitment_config = CommitmentConfig::processed();
+    /// let confirm_transaction_initial_timeout = Duration::from_secs(10);
+    /// let client = RpcClient::new_with_timeouts_and_commitment(
+    ///     url,
+    ///     timeout,
+    ///     commitment_config,
+    ///     confirm_transaction_initial_timeout,
+    /// );
+    /// ```
+    pub fn new_with_timeouts_and_commitment(
+        url: String,
+        timeout: Duration,
+        commitment_config: CommitmentConfig,
+        confirm_transaction_initial_timeout: Duration,
+    ) -> Self {
+        Self::new_sender(
+            HttpSender::new_with_timeout(url, timeout),
+            RpcClientConfig {
+                commitment_config,
+                confirm_transaction_initial_timeout: Some(confirm_transaction_initial_timeout),
+            },
+        )
+    }
+
+    /// Create a mock `RpcClient`.
+    ///
+    /// See the [`MockSender`] documentation for an explanation of
+    /// how it treats the `url` argument.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use solana_client::rpc_client::RpcClient;
+    /// // Create an `RpcClient` that always succeeds
+    /// let url = "succeeds".to_string();
+    /// let successful_client = RpcClient::new_mock(url);
+    /// ```
+    ///
+    /// ```
+    /// # use solana_client::rpc_client::RpcClient;
+    /// // Create an `RpcClient` that always fails
+    /// let url = "fails".to_string();
+    /// let failing_client = RpcClient::new_mock(url);
+    /// ```
     pub fn new_mock(url: String) -> Self {
-        Self::new_sender(MockSender::new(url), CommitmentConfig::default())
+        Self::new_sender(
+            MockSender::new(url),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
+        )
     }
 
+    /// Create a mock `RpcClient` with preloaded responses.
+    ///
+    /// See the [`MockSender`] documentation for an explanation of how it treats
+    /// the `url` argument.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use solana_client::{
+    /// #     rpc_client::RpcClient,
+    /// #     rpc_request::RpcRequest,
+    /// #     rpc_response::{Response, RpcResponseContext},
+    /// # };
+    /// # use std::collections::HashMap;
+    /// # use serde_json::json;
+    /// // Create a mock with a custom response to the `GetBalance` request
+    /// let account_balance = 50;
+    /// let account_balance_response = json!(Response {
+    ///     context: RpcResponseContext { slot: 1 },
+    ///     value: json!(account_balance),
+    /// });
+    ///
+    /// let mut mocks = HashMap::new();
+    /// mocks.insert(RpcRequest::GetBalance, account_balance_response);
+    /// let url = "succeeds".to_string();
+    /// let client = RpcClient::new_mock_with_mocks(url, mocks);
+    /// ```
     pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
         Self::new_sender(
             MockSender::new_with_mocks(url, mocks),
-            CommitmentConfig::default(),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
         )
     }
 
+    /// Create an HTTP `RpcClient` from a [`SocketAddr`].
+    ///
+    /// The client has a default timeout of 30 seconds, and a default commitment
+    /// level of [`Finalized`](CommitmentLevel::Finalized).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::net::SocketAddr;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
+    /// let client = RpcClient::new_socket(addr);
+    /// ```
     pub fn new_socket(addr: SocketAddr) -> Self {
         Self::new(get_rpc_request_str(addr, false))
     }
 
+    /// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified commitment level.
+    ///
+    /// The client has a default timeout of 30 seconds, and a user-specified
+    /// [`CommitmentLevel`] via [`CommitmentConfig`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::net::SocketAddr;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// # use solana_sdk::commitment_config::CommitmentConfig;
+    /// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
+    /// let commitment_config = CommitmentConfig::processed();
+    /// let client = RpcClient::new_socket_with_commitment(
+    ///     addr,
+    ///     commitment_config
+    /// );
+    /// ```
     pub fn new_socket_with_commitment(
         addr: SocketAddr,
         commitment_config: CommitmentConfig,
@@ -135,6 +408,20 @@ impl RpcClient {
         Self::new_with_commitment(get_rpc_request_str(addr, false), commitment_config)
     }
 
+    /// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified timeout.
+    ///
+    /// The client has a default commitment level of [`Finalized`](CommitmentLevel::Finalized).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::net::SocketAddr;
+    /// # use std::time::Duration;
+    /// # use solana_client::rpc_client::RpcClient;
+    /// let addr = SocketAddr::from(([127, 0, 0, 1], 8899));
+    /// let timeout = Duration::from_secs(1);
+    /// let client = RpcClient::new_socket_with_timeout(addr, timeout);
+    /// ```
     pub fn new_socket_with_timeout(addr: SocketAddr, timeout: Duration) -> Self {
         let url = get_rpc_request_str(addr, false);
         Self::new_with_timeout(url, timeout)
@@ -158,8 +445,19 @@ impl RpcClient {
         }
     }
 
+    /// Get the configured default commitment level.
+    ///
+    /// The commitment config may be specified during construction, and
+    /// determines how thoroughly committed a transaction must be when waiting
+    /// for its confirmation or otherwise checking for confirmation. If not
+    /// specified, the default commitment level is
+    /// [`Finalized`](CommitmentLevel::Finalized).
+    ///
+    /// The default commitment level is overridden when calling methods that
+    /// explicitly provide a [`CommitmentConfig`], like
+    /// [`RpcClient::confirm_transaction_with_commitment`].
     pub fn commitment(&self) -> CommitmentConfig {
-        self.commitment_config
+        self.config.commitment_config
     }
 
     fn use_deprecated_commitment(&self) -> Result<bool, RpcError> {
@@ -199,12 +497,119 @@ impl RpcClient {
         Ok(request)
     }
 
+    /// Check the confirmation status of a transaction.
+    ///
+    /// Returns `true` if the given transaction succeeded and has been committed
+    /// with the configured commitment level, which can be retrieved with
+    /// the [`commitment`](RpcClient::commitment) method.
+    ///
+    /// Note that this method does not wait for a transaction to be confirmed;
+    /// it only checks whether a transaction has been confirmed. To
+    /// submit a transaction and wait for it to confirm, use
+    /// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction].
+    ///
+    /// _This method returns `false` if the transaction failed, even if it has
+    /// been confirmed._
+    ///
+    /// # RPC Reference
+    ///
+    /// This method is built on the [`getSignatureStatuses`] RPC method.
+ /// + /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # rpc_config::RpcSimulateTransactionConfig, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # use std::time::Duration; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob and wait for confirmation + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// + /// loop { + /// let confirmed = rpc_client.confirm_transaction(&signature)?; + /// if confirmed { + /// break; + /// } + /// } + /// # Ok::<(), ClientError>(()) + /// ``` pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult { Ok(self - .confirm_transaction_with_commitment(signature, self.commitment_config)? + .confirm_transaction_with_commitment(signature, self.commitment())? .value) } + /// Check the confirmation status of a transaction. + /// + /// Returns an [`RpcResult`] with value `true` if the given transaction + /// succeeded and has been committed with the given commitment level. + /// + /// Note that this method does not wait for a transaction to be confirmed + /// — it only checks whether a transaction has been confirmed. To + /// submit a transaction and wait for it to confirm, use + /// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction]. + /// + /// _This method returns an [`RpcResult`] with value `false` if the + /// transaction failed, even if it has been confirmed._ + /// + /// # RPC Reference + /// + /// This method is built on the [`getSignatureStatuses`] RPC method. 
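The confirmation examples above poll in a tight loop for brevity. A more realistic caller bounds the wait and sleeps between polls; a minimal sketch, using only methods shown in this file:

```rust
use {
    solana_client::{client_error::ClientError, rpc_client::RpcClient},
    solana_sdk::signature::Signature,
    std::{
        thread,
        time::{Duration, Instant},
    },
};

// Poll for confirmation with a sleep between iterations and an overall
// deadline, instead of spinning on the RPC server.
fn wait_for_confirmation(
    client: &RpcClient,
    signature: &Signature,
    deadline: Duration,
) -> Result<bool, ClientError> {
    let started = Instant::now();
    while started.elapsed() < deadline {
        if client.confirm_transaction(signature)? {
            return Ok(true);
        }
        thread::sleep(Duration::from_millis(500));
    }
    Ok(false) // not confirmed before the deadline
}
```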
+ /// + /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # rpc_config::RpcSimulateTransactionConfig, + /// # }; + /// # use solana_sdk::{ + /// # commitment_config::CommitmentConfig, + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # use std::time::Duration; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob and wait for confirmation + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// + /// loop { + /// let commitment_config = CommitmentConfig::processed(); + /// let confirmed = rpc_client.confirm_transaction_with_commitment(&signature, commitment_config)?; + /// if confirmed.value { + /// break; + /// } + /// } + /// # Ok::<(), ClientError>(()) + /// ``` pub fn confirm_transaction_with_commitment( &self, signature: &Signature, @@ -222,13 +627,36 @@ impl RpcClient { }) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult { self.send_transaction_with_config( transaction, RpcSendTransactionConfig { preflight_commitment: Some( - self.maybe_map_commitment(self.commitment_config)? - .commitment, + self.maybe_map_commitment(self.commitment())?.commitment, ), ..RpcSendTransactionConfig::default() }, @@ -243,6 +671,38 @@ impl RpcClient { } } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # rpc_config::RpcSendTransactionConfig, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let config = RpcSendTransactionConfig { + /// skip_preflight: true, + /// .. 
RpcSendTransactionConfig::default() + /// }; + /// let signature = rpc_client.send_transaction_with_config( + /// &tx, + /// config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn send_transaction_with_config( &self, transaction: &Transaction, @@ -310,6 +770,32 @@ impl RpcClient { } } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # rpc_response::RpcSimulateTransactionResult, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let result = rpc_client.simulate_transaction(&tx)?; + /// assert!(result.value.err.is_none()); + /// # Ok::<(), ClientError>(()) + /// ``` pub fn simulate_transaction( &self, transaction: &Transaction, @@ -317,12 +803,45 @@ impl RpcClient { self.simulate_transaction_with_config( transaction, RpcSimulateTransactionConfig { - commitment: Some(self.commitment_config), + commitment: Some(self.commitment()), ..RpcSimulateTransactionConfig::default() }, ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # client_error::ClientError, + /// # rpc_client::RpcClient, + /// # rpc_config::RpcSimulateTransactionConfig, + /// # rpc_response::RpcSimulateTransactionResult, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// // Transfer lamports from Alice to Bob + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let config = RpcSimulateTransactionConfig { + /// sig_verify: false, + /// .. 
RpcSimulateTransactionConfig::default() + /// }; + /// let result = rpc_client.simulate_transaction_with_config( + /// &tx, + /// config, + /// )?; + /// assert!(result.value.err.is_none()); + /// # Ok::<(), ClientError>(()) + /// ``` pub fn simulate_transaction_with_config( &self, transaction: &Transaction, @@ -347,17 +866,123 @@ impl RpcClient { ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let slot = rpc_client.get_snapshot_slot()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_snapshot_slot(&self) -> ClientResult { self.send(RpcRequest::GetSnapshotSlot, Value::Null) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// # let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// # let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// let status = rpc_client.get_signature_status(&signature)?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_signature_status( &self, signature: &Signature, ) -> ClientResult>> { - self.get_signature_status_with_commitment(signature, self.commitment_config) - } - + self.get_signature_status_with_commitment(signature, self.commitment()) + } + + /// Gets the statuses of a list of transaction signatures. + /// + /// The returned vector of [`TransactionStatus`] has the same length as the + /// input slice. + /// + /// For any transaction that has not been processed by the network, the + /// value of the corresponding entry in the returned vector is `None`. As a + /// result, a transaction that has recently been submitted will not have a + /// status immediately. + /// + /// To submit a transaction and wait for it to confirm, use + /// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction]. + /// + /// This function ignores the configured confirmation level, and returns the + /// transaction status whatever it is. It does not wait for transactions to + /// be processed. + /// + /// This function only searches a node's recent history, including all + /// recent slots, plus up to + /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] + /// rooted slots. To search the full transaction history use the + /// [`get_signature_statuses_with_history`][RpcClient::get_signature_statuses_with_history] + /// method. + /// + /// # Errors + /// + /// Any individual `TransactionStatus` may have triggered an error during + /// processing, in which case its [`err`][`TransactionStatus::err`] field + /// will be `Some`. + /// + /// # RPC Reference + /// + /// This method corresponds directly to the [`getSignatureStatuses`] RPC method. 
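Since the status vector returned by `get_signature_statuses` has the same length as, and is index-aligned with, the input slice, callers typically zip the two back together. A short sketch of that pattern:

```rust
use {
    solana_client::{client_error::ClientError, rpc_client::RpcClient},
    solana_sdk::signature::Signature,
};

fn report_statuses(client: &RpcClient, signatures: &[Signature]) -> Result<(), ClientError> {
    let statuses = client.get_signature_statuses(signatures)?.value;
    // The response is index-aligned with the request, so zip them together.
    for (signature, status) in signatures.iter().zip(statuses) {
        match status {
            Some(status) if status.err.is_none() => println!("{}: succeeded", signature),
            Some(status) => println!("{}: failed: {:?}", signature, status.err),
            None => println!("{}: unknown (not processed, or outside recent history)", signature),
        }
    }
    Ok(())
}
```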
+ /// + /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # use std::time::Duration; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let alice = Keypair::new(); + /// // Send lamports from Alice to Bob and wait for the transaction to be processed + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// + /// let status = loop { + /// let statuses = rpc_client.get_signature_statuses(&[signature])?.value; + /// if let Some(status) = statuses[0].clone() { + /// break status; + /// } + /// std::thread::sleep(Duration::from_millis(100)); + /// }; + /// + /// assert!(status.err.is_none()); + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_signature_statuses( &self, signatures: &[Signature], @@ -366,6 +991,67 @@ impl RpcClient { self.send(RpcRequest::GetSignatureStatuses, json!([signatures])) } + /// Gets the statuses of a list of transaction signatures. + /// + /// The returned vector of [`TransactionStatus`] has the same length as the + /// input slice. + /// + /// For any transaction that has not been processed by the network, the + /// value of the corresponding entry in the returned vector is `None`. As a + /// result, a transaction that has recently been submitted will not have a + /// status immediately. + /// + /// To submit a transaction and wait for it to confirm, use + /// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction]. + /// + /// This function ignores the configured confirmation level, and returns the + /// transaction status whatever it is. It does not wait for transactions to + /// be processed. + /// + /// This function searches a node's full ledger history and (if implemented) long-term storage. To search for + /// transactions in recent slots only use the + /// [`get_signature_statuses`][RpcClient::get_signature_statuses] method. + /// + /// # Errors + /// + /// Any individual `TransactionStatus` may have triggered an error during + /// processing, in which case its [`err`][`TransactionStatus::err`] field + /// will be `Some`. + /// + /// # RPC Reference + /// + /// This method corresponds directly to the [`getSignatureStatuses`] RPC + /// method, with the `searchTransactionHistory` configuration option set to + /// `true`. 
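The recent-versus-archival distinction described above suggests a two-step lookup: consult recent status first, and fall back to the more expensive history search only when needed. A hedged sketch (`TransactionStatus` comes from the `solana_transaction_status` crate):

```rust
use {
    solana_client::{client_error::ClientError, rpc_client::RpcClient},
    solana_sdk::signature::Signature,
    solana_transaction_status::TransactionStatus,
};

fn find_status(
    client: &RpcClient,
    signature: Signature,
) -> Result<Option<TransactionStatus>, ClientError> {
    // Fast path: recent slots only.
    let mut statuses = client.get_signature_statuses(&[signature])?.value;
    if statuses[0].is_none() {
        // Slow path: also search the node's long-term transaction history.
        statuses = client
            .get_signature_statuses_with_history(&[signature])?
            .value;
    }
    Ok(statuses.remove(0))
}
```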
+ /// + /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let alice = Keypair::new(); + /// # fn get_old_transaction_signature() -> Signature { Signature::default() } + /// // Check if an old transaction exists + /// let signature = get_old_transaction_signature(); + /// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// let statuses = rpc_client.get_signature_statuses_with_history(&[signature])?.value; + /// if statuses[0].is_none() { + /// println!("old transaction does not exist"); + /// } + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_signature_statuses_with_history( &self, signatures: &[Signature], @@ -379,6 +1065,35 @@ impl RpcClient { ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # commitment_config::CommitmentConfig, + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// # let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// # let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// let commitment_config = CommitmentConfig::processed(); + /// let status = rpc_client.get_signature_status_with_commitment( + /// &signature, + /// commitment_config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_signature_status_with_commitment( &self, signature: &Signature, @@ -394,6 +1109,37 @@ impl RpcClient { .map(|status_meta| status_meta.status)) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # commitment_config::CommitmentConfig, + /// # signature::Signer, + /// # signature::Signature, + /// # signer::keypair::Keypair, + /// # hash::Hash, + /// # system_transaction, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let alice = Keypair::new(); + /// # let bob = Keypair::new(); + /// # let lamports = 50; + /// # let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + /// # let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + /// let signature = rpc_client.send_transaction(&tx)?; + /// let commitment_config = CommitmentConfig::processed(); + /// let search_transaction_history = true; + /// let status = rpc_client.get_signature_status_with_commitment_and_history( + /// &signature, + /// commitment_config, + /// search_transaction_history, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_signature_status_with_commitment_and_history( &self, signature: &Signature, @@ -412,10 +1158,34 @@ impl RpcClient { .map(|status_meta| status_meta.status)) } + /// # Examples + /// + /// ``` + /// # 
use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let slot = rpc_client.get_slot()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_slot(&self) -> ClientResult { - self.get_slot_with_commitment(self.commitment_config) - } - + self.get_slot_with_commitment(self.commitment()) + } + + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::commitment_config::CommitmentConfig; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let commitment_config = CommitmentConfig::processed(); + /// let slot = rpc_client.get_slot_with_commitment(commitment_config)?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_slot_with_commitment( &self, commitment_config: CommitmentConfig, @@ -426,10 +1196,36 @@ impl RpcClient { ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let block_height = rpc_client.get_block_height()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_block_height(&self) -> ClientResult { - self.get_block_height_with_commitment(self.commitment_config) - } - + self.get_block_height_with_commitment(self.commitment()) + } + + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::commitment_config::CommitmentConfig; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let commitment_config = CommitmentConfig::processed(); + /// let block_height = rpc_client.get_block_height_with_commitment( + /// commitment_config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_block_height_with_commitment( &self, commitment_config: CommitmentConfig, @@ -440,6 +1236,20 @@ impl RpcClient { ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::slot_history::Slot; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let start_slot = 1; + /// let limit = 3; + /// let leaders = rpc_client.get_slot_leaders(start_slot, limit)?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_slot_leaders(&self, start_slot: Slot, limit: u64) -> ClientResult> { self.send(RpcRequest::GetSlotLeaders, json!([start_slot, limit])) .and_then(|slot_leaders: Vec| { @@ -458,18 +1268,84 @@ impl RpcClient { }) } - /// Get block production for the current epoch + /// Get block production for the current epoch. 
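One point worth keeping straight when reading the slot and block-height getters above: slots can be skipped without producing a block, so on a live cluster the block height is at most the current slot (the mock's canned values do not preserve this invariant). A small sketch:

```rust
use solana_client::{client_error::ClientError, rpc_client::RpcClient};

fn show_chain_position(client: &RpcClient) -> Result<(), ClientError> {
    let slot = client.get_slot()?;
    let block_height = client.get_block_height()?;
    // Skipped slots produce no blocks, so against a real node
    // block_height <= slot holds.
    println!("slot {} / block height {}", slot, block_height);
    Ok(())
}
```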
+ /// + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let production = rpc_client.get_block_production()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_block_production(&self) -> RpcResult { self.send(RpcRequest::GetBlockProduction, Value::Null) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # rpc_config::RpcBlockProductionConfig, + /// # rpc_config::RpcBlockProductionConfigRange, + /// # }; + /// # use solana_sdk::{ + /// # signature::Signer, + /// # signer::keypair::Keypair, + /// # commitment_config::CommitmentConfig, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let start_slot = 1; + /// # let limit = 3; + /// let leader = rpc_client.get_slot_leaders(start_slot, limit)?; + /// let leader = leader[0]; + /// let range = RpcBlockProductionConfigRange { + /// first_slot: 0, + /// last_slot: Some(0), + /// }; + /// let config = RpcBlockProductionConfig { + /// identity: Some(leader.to_string()), + /// range: Some(range), + /// commitment: Some(CommitmentConfig::processed()), + /// }; + /// let production = rpc_client.get_block_production_with_config( + /// config + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_block_production_with_config( &self, config: RpcBlockProductionConfig, ) -> RpcResult { - self.send(RpcRequest::GetBlockProduction, json!(config)) - } - + self.send(RpcRequest::GetBlockProduction, json!([config])) + } + + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::{ + /// # signer::keypair::Keypair, + /// # signature::Signer, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let stake_account_keypair = Keypair::new(); + /// let stake_account = stake_account_keypair.pubkey(); + /// let epoch = rpc_client.get_epoch_info()?; + /// let activation = rpc_client.get_stake_activation( + /// stake_account, + /// Some(epoch.epoch), + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_stake_activation( &self, stake_account: Pubkey, @@ -481,16 +1357,42 @@ impl RpcClient { stake_account.to_string(), RpcEpochConfig { epoch, - commitment: Some(self.commitment_config), + commitment: Some(self.commitment()), } ]), ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let supply = rpc_client.supply()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn supply(&self) -> RpcResult { - self.supply_with_commitment(self.commitment_config) - } - + self.supply_with_commitment(self.commitment()) + } + + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # use solana_sdk::commitment_config::CommitmentConfig; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let commitment_config = CommitmentConfig::processed(); + /// let supply = rpc_client.supply_with_commitment( + /// commitment_config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn supply_with_commitment( &self, commitment_config: CommitmentConfig, @@ -501,6 +1403,27 @@ impl 
RpcClient { ) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # rpc_config::RpcLargestAccountsConfig, + /// # rpc_config::RpcLargestAccountsFilter, + /// # }; + /// # use solana_sdk::commitment_config::CommitmentConfig; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let commitment_config = CommitmentConfig::processed(); + /// let config = RpcLargestAccountsConfig { + /// commitment: Some(commitment_config), + /// filter: Some(RpcLargestAccountsFilter::Circulating), + /// }; + /// let accounts = rpc_client.get_largest_accounts_with_config( + /// config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_largest_accounts_with_config( &self, config: RpcLargestAccountsConfig, @@ -514,10 +1437,36 @@ impl RpcClient { self.send(RpcRequest::GetLargestAccounts, json!([config])) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let accounts = rpc_client.get_vote_accounts()?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_vote_accounts(&self) -> ClientResult { - self.get_vote_accounts_with_commitment(self.commitment_config) - } - + self.get_vote_accounts_with_commitment(self.commitment()) + } + + /// # Examples + /// + /// ``` + /// # use solana_sdk::commitment_config::CommitmentConfig; + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// let commitment_config = CommitmentConfig::processed(); + /// let accounts = rpc_client.get_vote_accounts_with_commitment( + /// commitment_config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_vote_accounts_with_commitment( &self, commitment_config: CommitmentConfig, @@ -528,6 +1477,34 @@ impl RpcClient { }) } + /// # Examples + /// + /// ``` + /// # use solana_client::{ + /// # rpc_client::RpcClient, + /// # client_error::ClientError, + /// # rpc_config::RpcGetVoteAccountsConfig, + /// # }; + /// # use solana_sdk::{ + /// # signer::keypair::Keypair, + /// # signature::Signer, + /// # commitment_config::CommitmentConfig, + /// # }; + /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); + /// # let vote_keypair = Keypair::new(); + /// let vote_pubkey = vote_keypair.pubkey(); + /// let commitment = CommitmentConfig::processed(); + /// let config = RpcGetVoteAccountsConfig { + /// vote_pubkey: Some(vote_pubkey.to_string()), + /// commitment: Some(commitment), + /// keep_unstaked_delinquents: Some(true), + /// delinquent_slot_distance: Some(10), + /// }; + /// let accounts = rpc_client.get_vote_accounts_with_config( + /// config, + /// )?; + /// # Ok::<(), ClientError>(()) + /// ``` pub fn get_vote_accounts_with_config( &self, config: RpcGetVoteAccountsConfig, @@ -892,7 +1869,7 @@ impl RpcClient { } pub fn get_epoch_info(&self) -> ClientResult { - self.get_epoch_info_with_commitment(self.commitment_config) + self.get_epoch_info_with_commitment(self.commitment()) } pub fn get_epoch_info_with_commitment( @@ -909,7 +1886,7 @@ impl RpcClient { &self, slot: Option, ) -> ClientResult> { - self.get_leader_schedule_with_commitment(slot, self.commitment_config) + self.get_leader_schedule_with_commitment(slot, self.commitment()) } pub fn get_leader_schedule_with_commitment( @@ -979,7 +1956,7 @@ impl RpcClient { 
                addresses,
                RpcEpochConfig {
                    epoch,
-                    commitment: Some(self.commitment_config),
+                    commitment: Some(self.commitment()),
                }
            ]),
        )
@@ -997,55 +1974,61 @@ impl RpcClient {
         &self,
         transaction: &Transaction,
     ) -> ClientResult<Signature> {
-        let signature = self.send_transaction(transaction)?;
-        let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
-            self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
-                .value
-                .0
-        } else {
-            transaction.message.recent_blockhash
-        };
-        let status = loop {
-            let status = self.get_signature_status(&signature)?;
-            if status.is_none() {
-                if self
-                    .get_fee_calculator_for_blockhash_with_commitment(
-                        &recent_blockhash,
-                        CommitmentConfig::processed(),
-                    )?
-                    .value
-                    .is_none()
-                {
-                    break status;
-                }
+        const SEND_RETRIES: usize = 1;
+        const GET_STATUS_RETRIES: usize = usize::MAX;
+
+        'sending: for _ in 0..SEND_RETRIES {
+            let signature = self.send_transaction(transaction)?;
+
+            let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
+                let (recent_blockhash, ..) = self
+                    .get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
+                    .value;
+                recent_blockhash
             } else {
-                break status;
-            }
-            if cfg!(not(test)) {
-                // Retry twice a second
-                sleep(Duration::from_millis(500));
-            }
-        };
-        if let Some(result) = status {
-            match result {
-                Ok(_) => Ok(signature),
-                Err(err) => Err(err.into()),
+                transaction.message.recent_blockhash
+            };
+
+            for status_retry in 0..GET_STATUS_RETRIES {
+                match self.get_signature_status(&signature)? {
+                    Some(Ok(_)) => return Ok(signature),
+                    Some(Err(e)) => return Err(e.into()),
+                    None => {
+                        let fee_calculator = self
+                            .get_fee_calculator_for_blockhash_with_commitment(
+                                &recent_blockhash,
+                                CommitmentConfig::processed(),
+                            )?
+                            .value;
+                        if fee_calculator.is_none() {
+                            // The blockhash could not be found; it has likely
+                            // expired, so give up on this send attempt.
+                            break 'sending;
+                        } else if cfg!(not(test))
+                            // Skip the sleep after the final status check.
+                            && status_retry < GET_STATUS_RETRIES
+                        {
+                            // Retry twice a second
+                            sleep(Duration::from_millis(500));
+                            continue;
+                        }
+                    }
+                }
             }
         }
-        } else {
-            Err(RpcError::ForUser(
-                "unable to confirm transaction. \
-                 This can happen in situations such as transaction expiration \
-                 and insufficient fee-payer funds"
-                    .to_string(),
-            )
-            .into())
-        }
+
+        Err(RpcError::ForUser(
+            "unable to confirm transaction. \
+             This can happen in situations such as transaction expiration \
+             and insufficient fee-payer funds"
+                .to_string(),
+        )
+        .into())
     }
 
     /// Note that `get_account` returns `Err(..)` if the account does not exist whereas
     /// `get_account_with_commitment` returns `Ok(None)` in that case.
     pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
-        self.get_account_with_commitment(pubkey, self.commitment_config)?
+        self.get_account_with_commitment(pubkey, self.commitment())?
             .value
             .ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
     }
@@ -1101,7 +2084,7 @@ impl RpcClient {
 
     pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
         Ok(self
-            .get_multiple_accounts_with_commitment(pubkeys, self.commitment_config)?
+            .get_multiple_accounts_with_commitment(pubkeys, self.commitment())?
             .value)
     }
@@ -1155,7 +2138,7 @@ impl RpcClient {
 
     /// Request the balance of the account `pubkey`.
     pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
         Ok(self
-            .get_balance_with_commitment(pubkey, self.commitment_config)?
+            .get_balance_with_commitment(pubkey, self.commitment())?
             .value)
     }
@@ -1213,7 +2196,7 @@ impl RpcClient {
 
     /// Request the transaction count.
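The rewritten confirmation loop above is easier to audit when reduced to its skeleton: one send attempt, then status polls until the transaction resolves or its blockhash can no longer be found (i.e., it expired). A simplified sketch of the same control flow, with the RPC calls stubbed out as hypothetical helpers:

```rust
// Hypothetical stand-ins for the RPC calls used by the real loop.
fn send() -> u64 { 42 }
fn status(_sig: u64) -> Option<Result<(), String>> { Some(Ok(())) }
fn blockhash_still_valid() -> bool { true }

fn send_and_confirm() -> Result<u64, String> {
    const SEND_RETRIES: usize = 1;

    'sending: for _ in 0..SEND_RETRIES {
        let signature = send();
        loop {
            match status(signature) {
                Some(Ok(())) => return Ok(signature), // confirmed
                Some(Err(e)) => return Err(e),        // failed on chain
                None if !blockhash_still_valid() => break 'sending, // expired
                None => std::thread::sleep(std::time::Duration::from_millis(500)),
            }
        }
    }
    Err("unable to confirm transaction".to_string())
}
```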
pub fn get_transaction_count(&self) -> ClientResult { - self.get_transaction_count_with_commitment(self.commitment_config) + self.get_transaction_count_with_commitment(self.commitment()) } pub fn get_transaction_count_with_commitment( @@ -1226,9 +2209,37 @@ impl RpcClient { ) } + pub fn get_fees(&self) -> ClientResult { + Ok(self.get_fees_with_commitment(self.commitment())?.value) + } + + pub fn get_fees_with_commitment(&self, commitment_config: CommitmentConfig) -> RpcResult { + let Response { + context, + value: fees, + } = self.send::>( + RpcRequest::GetFees, + json!([self.maybe_map_commitment(commitment_config)?]), + )?; + let blockhash = fees.blockhash.parse().map_err(|_| { + ClientError::new_with_request( + RpcError::ParseError("Hash".to_string()).into(), + RpcRequest::GetFees, + ) + })?; + Ok(Response { + context, + value: Fees { + blockhash, + fee_calculator: fees.fee_calculator, + last_valid_block_height: fees.last_valid_block_height, + }, + }) + } + pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> { let (blockhash, fee_calculator, _last_valid_slot) = self - .get_recent_blockhash_with_commitment(self.commitment_config)? + .get_recent_blockhash_with_commitment(self.commitment())? .value; Ok((blockhash, fee_calculator)) } @@ -1301,7 +2312,7 @@ impl RpcClient { blockhash: &Hash, ) -> ClientResult> { Ok(self - .get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment_config)? + .get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment())? .value) } @@ -1383,7 +2394,7 @@ impl RpcClient { pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult> { Ok(self - .get_token_account_with_commitment(pubkey, self.commitment_config)? + .get_token_account_with_commitment(pubkey, self.commitment())? .value) } @@ -1444,7 +2455,7 @@ impl RpcClient { pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult { Ok(self - .get_token_account_balance_with_commitment(pubkey, self.commitment_config)? + .get_token_account_balance_with_commitment(pubkey, self.commitment())? .value) } @@ -1471,7 +2482,7 @@ impl RpcClient { .get_token_accounts_by_delegate_with_commitment( delegate, token_account_filter, - self.commitment_config, + self.commitment(), )? .value) } @@ -1510,7 +2521,7 @@ impl RpcClient { .get_token_accounts_by_owner_with_commitment( owner, token_account_filter, - self.commitment_config, + self.commitment(), )? .value) } @@ -1542,7 +2553,7 @@ impl RpcClient { pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult { Ok(self - .get_token_supply_with_commitment(mint, self.commitment_config)? + .get_token_supply_with_commitment(mint, self.commitment())? .value) } @@ -1565,7 +2576,7 @@ impl RpcClient { pubkey, lamports, RpcRequestAirdropConfig { - commitment: Some(self.commitment_config), + commitment: Some(self.commitment()), ..RpcRequestAirdropConfig::default() }, ) @@ -1581,7 +2592,7 @@ impl RpcClient { pubkey, lamports, RpcRequestAirdropConfig { - commitment: Some(self.commitment_config), + commitment: Some(self.commitment()), recent_blockhash: Some(recent_blockhash.to_string()), }, ) @@ -1627,7 +2638,7 @@ impl RpcClient { ) -> ClientResult { let now = Instant::now(); loop { - match self.get_balance_with_commitment(&pubkey, commitment_config) { + match self.get_balance_with_commitment(pubkey, commitment_config) { Ok(bal) => { return Ok(bal.value); } @@ -1684,7 +2695,7 @@ impl RpcClient { /// Poll the server to confirm a transaction. 
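`get_fees_with_commitment` above turns the wire-format base58 blockhash string into a typed `Hash` via `FromStr`, mapping failures to `RpcError::ParseError`. The same conversion in isolation:

```rust
use solana_sdk::hash::Hash;

fn main() {
    // `Hash` implements `FromStr` for base58 strings, so the wire format
    // parses directly; 32 zero bytes encode as thirty-two '1' characters.
    let blockhash: Hash = "11111111111111111111111111111111".parse().unwrap();
    assert_eq!(blockhash, Hash::default());

    // A malformed string is rejected rather than panicking.
    assert!("not-a-base58-hash".parse::<Hash>().is_err());
}
```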
pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> { - self.poll_for_signature_with_commitment(signature, self.commitment_config) + self.poll_for_signature_with_commitment(signature, self.commitment()) } /// Poll the server to confirm a transaction. @@ -1696,7 +2707,7 @@ impl RpcClient { let now = Instant::now(); loop { if let Ok(Some(_)) = - self.get_signature_status_with_commitment(&signature, commitment_config) + self.get_signature_status_with_commitment(signature, commitment_config) { break; } @@ -1794,7 +2805,7 @@ impl RpcClient { ) -> ClientResult { self.send_and_confirm_transaction_with_spinner_and_commitment( transaction, - self.commitment_config, + self.commitment(), ) } @@ -1850,19 +2861,25 @@ impl RpcClient { "[{}/{}] Finalizing transaction {}", confirmations, desired_confirmations, signature, )); + + let now = Instant::now(); + let confirm_transaction_initial_timeout = self + .config + .confirm_transaction_initial_timeout + .unwrap_or_default(); let (signature, status) = loop { // Get recent commitment in order to count confirmations for successful transactions let status = self - .get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?; + .get_signature_status_with_commitment(signature, CommitmentConfig::processed())?; if status.is_none() { - if self + let blockhash_not_found = self .get_fee_calculator_for_blockhash_with_commitment( - &recent_blockhash, + recent_blockhash, CommitmentConfig::processed(), )? .value - .is_none() - { + .is_none(); + if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout { break (signature, status); } } else { @@ -1891,7 +2908,7 @@ impl RpcClient { // Return when specified commitment is reached // Failed transactions have already been eliminated, `is_some` check is sufficient if self - .get_signature_status_with_commitment(&signature, commitment)? + .get_signature_status_with_commitment(signature, commitment)? .is_some() { progress_bar.set_message("Transaction confirmed"); @@ -1907,7 +2924,7 @@ impl RpcClient { )); sleep(Duration::from_millis(500)); confirmations = self - .get_num_blocks_since_signature_confirmation(&signature) + .get_num_blocks_since_signature_confirmation(signature) .unwrap_or(confirmations); if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 { return Err( @@ -1924,6 +2941,7 @@ impl RpcClient { T: serde::de::DeserializeOwned, { assert!(params.is_array() || params.is_null()); + let response = self .sender .send(request, params) @@ -1933,6 +2951,26 @@ impl RpcClient { } } +fn serialize_encode_transaction( + transaction: &Transaction, + encoding: UiTransactionEncoding, +) -> ClientResult { + let serialized = serialize(transaction) + .map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?; + let encoded = match encoding { + UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(), + UiTransactionEncoding::Base64 => base64::encode(serialized), + _ => { + return Err(ClientErrorKind::Custom(format!( + "unsupported transaction encoding: {}. 
Supported encodings: base58, base64", + encoding + )) + .into()) + } + }; + Ok(encoded) +} + #[derive(Debug, Default)] pub struct GetConfirmedSignaturesForAddress2Config { pub before: Option, @@ -1991,7 +3029,9 @@ mod tests { use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder}; use serde_json::Number; use solana_sdk::{ - instruction::InstructionError, signature::Keypair, system_transaction, + instruction::InstructionError, + signature::{Keypair, Signer}, + system_transaction, transaction::TransactionError, }; use std::{io, sync::mpsc::channel, thread}; @@ -2087,6 +3127,7 @@ mod tests { let signature = rpc_client.send_transaction(&tx); assert!(signature.is_err()); } + #[test] fn test_get_recent_blockhash() { let rpc_client = RpcClient::new_mock("succeeds".to_string()); @@ -2101,6 +3142,20 @@ mod tests { assert!(rpc_client.get_recent_blockhash().is_err()); } + #[test] + fn test_custom_request() { + let rpc_client = RpcClient::new_mock("succeeds".to_string()); + + let slot = rpc_client.get_slot().unwrap(); + assert_eq!(slot, 0); + + let custom_slot = rpc_client + .send::(RpcRequest::Custom { method: "getSlot" }, Value::Null) + .unwrap(); + + assert_eq!(slot, custom_slot); + } + #[test] fn test_get_signature_status() { let signature = Signature::default(); @@ -2155,4 +3210,23 @@ mod tests { let rpc_client = RpcClient::new_mock("succeeds".to_string()); thread::spawn(move || rpc_client); } + + // Regression test that the get_block_production_with_config + // method internally creates the json params array correctly. + #[test] + fn get_block_production_with_config_no_error() -> ClientResult<()> { + let rpc_client = RpcClient::new_mock("succeeds".to_string()); + + let config = RpcBlockProductionConfig { + identity: Some(Keypair::new().pubkey().to_string()), + range: None, + commitment: None, + }; + + let prod = rpc_client.get_block_production_with_config(config)?.value; + + assert!(!prod.by_identity.is_empty()); + + Ok(()) + } } diff --git a/client/src/rpc_config.rs b/client/src/rpc_config.rs index 5597384e3081dc..cd9e2fa11dc4d0 100644 --- a/client/src/rpc_config.rs +++ b/client/src/rpc_config.rs @@ -81,6 +81,8 @@ pub struct RpcGetVoteAccountsConfig { pub vote_pubkey: Option, // validator vote address, as a base-58 encoded string #[serde(flatten)] pub commitment: Option, + pub keep_unstaked_delinquents: Option, + pub delinquent_slot_distance: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -114,6 +116,15 @@ pub struct RpcLargestAccountsConfig { pub filter: Option, } +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcSupplyConfig { + #[serde(flatten)] + pub commitment: Option, + #[serde(default)] + pub exclude_non_circulating_accounts_list: bool, +} + #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcEpochConfig { diff --git a/client/src/rpc_custom_error.rs b/client/src/rpc_custom_error.rs index 400947dcd6e828..82487e3a986e6f 100644 --- a/client/src/rpc_custom_error.rs +++ b/client/src/rpc_custom_error.rs @@ -1,5 +1,5 @@ //! 
Implementation defined RPC server errors - +use thiserror::Error; use { crate::rpc_response::RpcSimulateTransactionResult, jsonrpc_core::{Error, ErrorCode}, @@ -17,35 +17,43 @@ pub const JSON_RPC_SERVER_ERROR_NO_SNAPSHOT: i64 = -32008; pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009; pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010; pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011; +pub const JSON_RPC_SCAN_ERROR: i64 = -32012; +pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013; +#[derive(Error, Debug)] pub enum RpcCustomError { + #[error("BlockCleanedUp")] BlockCleanedUp { slot: Slot, first_available_block: Slot, }, + #[error("SendTransactionPreflightFailure")] SendTransactionPreflightFailure { message: String, result: RpcSimulateTransactionResult, }, + #[error("TransactionSignatureVerificationFailure")] TransactionSignatureVerificationFailure, - BlockNotAvailable { - slot: Slot, - }, - NodeUnhealthy { - num_slots_behind: Option, - }, + #[error("BlockNotAvailable")] + BlockNotAvailable { slot: Slot }, + #[error("NodeUnhealthy")] + NodeUnhealthy { num_slots_behind: Option }, + #[error("TransactionPrecompileVerificationFailure")] TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError), - SlotSkipped { - slot: Slot, - }, + #[error("SlotSkipped")] + SlotSkipped { slot: Slot }, + #[error("NoSnapshot")] NoSnapshot, - LongTermStorageSlotSkipped { - slot: Slot, - }, - KeyExcludedFromSecondaryIndex { - index_key: String, - }, + #[error("LongTermStorageSlotSkipped")] + LongTermStorageSlotSkipped { slot: Slot }, + #[error("KeyExcludedFromSecondaryIndex")] + KeyExcludedFromSecondaryIndex { index_key: String }, + #[error("TransactionHistoryNotAvailable")] TransactionHistoryNotAvailable, + #[error("ScanError")] + ScanError { message: String }, + #[error("TransactionSignatureLenMismatch")] + TransactionSignatureLenMismatch, } #[derive(Debug, Serialize, Deserialize)] @@ -141,6 +149,18 @@ impl From for Error { message: "Transaction history is not available from this node".to_string(), data: None, }, + RpcCustomError::ScanError { message } => Self { + code: ErrorCode::ServerError(JSON_RPC_SCAN_ERROR), + message, + data: None, + }, + RpcCustomError::TransactionSignatureLenMismatch => Self { + code: ErrorCode::ServerError( + JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH, + ), + message: "Transaction signature length mismatch".to_string(), + data: None, + }, } } } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index 25d76ff28404be..52bae09c161024 100644 --- a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -86,6 +86,9 @@ pub enum RpcRequest { SendTransaction, SimulateTransaction, SignVote, + Custom { + method: &'static str, + }, } #[allow(deprecated)] @@ -154,6 +157,7 @@ impl fmt::Display for RpcRequest { RpcRequest::SendTransaction => "sendTransaction", RpcRequest::SimulateTransaction => "simulateTransaction", RpcRequest::SignVote => "signVote", + RpcRequest::Custom { method } => method, }; write!(f, "{}", method) diff --git a/client/src/rpc_response.rs b/client/src/rpc_response.rs index a712739e60d14c..ddbc41ef209a40 100644 --- a/client/src/rpc_response.rs +++ b/client/src/rpc_response.rs @@ -4,6 +4,7 @@ use { solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, fee_calculator::{FeeCalculator, FeeRateGovernor}, + hash::Hash, inflation::Inflation, transaction::{Result, TransactionError}, }, @@ -57,6 +58,14 @@ pub struct 
DeprecatedRpcFees { pub last_valid_slot: Slot, } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Fees { + pub blockhash: Hash, + pub fee_calculator: FeeCalculator, + pub last_valid_block_height: u64, +} + #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] pub struct RpcFeeCalculator { @@ -394,8 +403,9 @@ pub struct RpcPerfSample { pub struct RpcInflationReward { pub epoch: Epoch, pub effective_slot: Slot, - pub amount: u64, // lamports - pub post_balance: u64, // lamports + pub amount: u64, // lamports + pub post_balance: u64, // lamports + pub commission: Option<u8>, // Vote account commission when the reward was credited } impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature { diff --git a/client/src/rpc_sender.rs b/client/src/rpc_sender.rs index 6574637b0a2ba7..75e5aab0e0c099 100644 --- a/client/src/rpc_sender.rs +++ b/client/src/rpc_sender.rs @@ -1,5 +1,18 @@ +//! A transport for RPC calls. + use crate::{client_error::Result, rpc_request::RpcRequest}; +/// A transport for RPC calls. +/// +/// `RpcSender` implements the underlying transport of requests to, and +/// responses from, a Solana node, and is used primarily by [`RpcClient`]. +/// +/// It is typically implemented by [`HttpSender`] in production, and +/// [`MockSender`] in unit tests. +/// +/// [`RpcClient`]: crate::rpc_client::RpcClient +/// [`HttpSender`]: crate::http_sender::HttpSender +/// [`MockSender`]: crate::mock_sender::MockSender pub trait RpcSender { fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value>; } diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 8b3d0830840c6e..3988e8e5d086b3 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -451,7 +451,7 @@ impl SyncClient for ThinClient { ) -> TransportResult<Option<transaction::Result<()>>> { let status = self .rpc_client() - .get_signature_status(&signature) + .get_signature_status(signature) .map_err(|err| { io::Error::new( io::ErrorKind::Other, @@ -468,7 +468,7 @@ impl SyncClient for ThinClient { ) -> TransportResult<Option<transaction::Result<()>>> { let status = self .rpc_client() - .get_signature_status_with_commitment(&signature, commitment_config) + .get_signature_status_with_commitment(signature, commitment_config) .map_err(|err| { io::Error::new( io::ErrorKind::Other, diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index ae264f9875fe18..01c902af12a009 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -121,7 +121,7 @@ struct LeaderTpuCache { impl LeaderTpuCache { fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self { let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default(); - let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default(); + let leader_tpu_map = Self::fetch_cluster_tpu_sockets(rpc_client).unwrap_or_default(); Self { first_slot, leaders, diff --git a/core/Cargo.toml b/core/Cargo.toml index c75e7cf30baa81..aa6ade53440c1c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-core" description = "Blockchain, Rebuilt for Scale" -version = "1.7.0" +version = "1.7.11" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-core" readme = "../README.md" @@ -22,17 +22,12 @@ bv = { version = "0.11.1", features = ["serde"] } bs58 = "0.3.1" byteorder = "1.3.4" chrono = { version = "0.4.11", features = ["serde"] } -core_affinity = "0.5.10" crossbeam-channel = "0.4" ed25519-dalek = "=1.0.1" fs_extra = "1.2.0" flate2 = "1.0" indexmap
= { version = "1.5", features = ["rayon"] } itertools = "0.9.0" -jsonrpc-core = "17.0.0" -jsonrpc-core-client = { version = "17.0.0", features = ["ipc", "ws"] } -jsonrpc-derive = "17.0.0" -jsonrpc-http-server = "17.0.0" libc = "0.2.81" log = "0.4.11" lru = "0.6.1" @@ -44,54 +39,52 @@ rand_chacha = "0.2.2" rand_core = "0.6.2" raptorq = "1.4.2" rayon = "1.5.0" -regex = "1.3.9" retain_mut = "0.1.2" serde = "1.0.122" serde_bytes = "0.11" serde_derive = "1.0.103" -serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" } -solana-banks-server = { path = "../banks-server", version = "=1.7.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-faucet = { path = "../faucet", version = "=1.7.0" } -solana-gossip = { path = "../gossip", version = "=1.7.0" } -solana-ledger = { path = "../ledger", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-perf = { path = "../perf", version = "=1.7.0" } -solana-program-test = { path = "../program-test", version = "=1.7.0" } -solana-rpc = { path = "../rpc", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.7.0" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.0" } -solana-streamer = { path = "../streamer", version = "=1.7.0" } -solana-sys-tuner = { path = "../sys-tuner", version = "=1.7.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" } +solana-banks-server = { path = "../banks-server", version = "=1.7.11" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-gossip = { path = "../gossip", version = "=1.7.11" } +solana-ledger = { path = "../ledger", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-perf = { path = "../perf", version = "=1.7.11" } +solana-poh = { path = "../poh", version = "=1.7.11" } +solana-program-test = { path = "../program-test", version = "=1.7.11" } +solana-rpc = { path = "../rpc", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.11" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.11" } 
+solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } +spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] } tempfile = "3.1.0" thiserror = "1.0" -tokio = { version = "1", features = ["full"] } -tokio_02 = { version = "0.2", package = "tokio", features = ["full"] } -tokio-util = { version = "0.3", features = ["codec"] } # This crate needs to stay in sync with tokio_02, until that dependency can be removed -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.11" } trees = "0.2.1" [dev-dependencies] +jsonrpc-core = "17.1.0" +jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] } matches = "0.1.6" num_cpus = "1.13.0" reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +serde_json = "1.0.56" serial_test = "0.4.0" +solana-stake-program = { path = "../programs/stake", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } symlink = "0.1.0" systemstat = "0.1.5" +tokio_02 = { version = "0.2", package = "tokio", features = ["full"] } [build-dependencies] rustc_version = "0.2" @@ -111,9 +104,6 @@ name = "gen_keys" [[bench]] name = "sigverify_stage" -[[bench]] -name = "poh" - [[bench]] name = "retransmit_stage" diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 13e2d088c532b9..0d411e554d8624 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -7,8 +7,7 @@ use crossbeam_channel::unbounded; use log::*; use rand::{thread_rng, Rng}; use rayon::prelude::*; -use solana_core::banking_stage::{create_test_recorder, BankingStage, BankingStageStats}; -use solana_core::poh_recorder::WorkingBankEntry; +use solana_core::banking_stage::{BankingStage, BankingStageStats}; use solana_gossip::cluster_info::ClusterInfo; use solana_gossip::cluster_info::Node; use solana_ledger::blockstore_processor::process_entries; @@ -17,6 +16,7 @@ use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_perf::packet::to_packets_chunked; use solana_perf::test_tx::test_tx; +use solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry}; use solana_runtime::bank::Bank; use solana_sdk::genesis_config::GenesisConfig; use solana_sdk::hash::Hash; @@ -29,6 +29,7 @@ use solana_sdk::system_instruction; use solana_sdk::system_transaction; use solana_sdk::timing::{duration_as_us, timestamp}; use solana_sdk::transaction::Transaction; +use solana_streamer::socket::SocketAddrSpace; use std::collections::VecDeque; use std::sync::atomic::Ordering; use std::sync::mpsc::Receiver; @@ -183,7 +184,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { }); //sanity check, make sure all the transactions can execute sequentially transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions"); }); bank.clear_signatures(); @@ -201,7 +202,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { ); let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank, 
&blockstore, None); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = ClusterInfo::new( + Node::new_localhost().info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let cluster_info = Arc::new(cluster_info); let (s, _r) = unbounded(); let _banking_stage = BankingStage::new( diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs index 54750a249704f1..c3665998487311 100644 --- a/core/benches/cluster_info.rs +++ b/core/benches/cluster_info.rs @@ -3,13 +3,17 @@ extern crate test; use rand::{thread_rng, Rng}; -use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats; -use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers}; -use solana_gossip::cluster_info::{ClusterInfo, Node}; -use solana_gossip::contact_info::ContactInfo; +use solana_core::{ + broadcast_stage::{broadcast_metrics::TransmitShredsStats, broadcast_shreds, BroadcastStage}, + cluster_nodes::ClusterNodes, +}; +use solana_gossip::{ + cluster_info::{ClusterInfo, Node}, + contact_info::ContactInfo, +}; use solana_ledger::shred::Shred; -use solana_sdk::pubkey; -use solana_sdk::timing::timestamp; +use solana_sdk::{pubkey, signature::Keypair, timing::timestamp}; +use solana_streamer::socket::SocketAddrSpace; use std::{ collections::HashMap, net::UdpSocket, @@ -22,7 +26,11 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { solana_logger::setup(); let leader_pubkey = pubkey::new_rand(); let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey); - let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info); + let cluster_info = ClusterInfo::new( + leader_info.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); const NUM_SHREDS: usize = 32; @@ -36,7 +44,7 @@ stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64); } let cluster_info = Arc::new(cluster_info); - let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(&stakes)); + let cluster_nodes = ClusterNodes::<BroadcastStage>::new(&cluster_info, &stakes); let shreds = Arc::new(shreds); let last_datapoint = Arc::new(AtomicU64::new(0)); bencher.iter(move || { @@ -44,10 +52,10 @@ broadcast_shreds( &socket, &shreds, - &peers_and_stakes, - &peers, + &cluster_nodes, &last_datapoint, &mut TransmitShredsStats::default(), + &SocketAddrSpace::Unspecified, ) .unwrap(); }); diff --git a/core/benches/consensus.rs b/core/benches/consensus.rs index 64035f4c3af177..280ee08c13e46e 100644 --- a/core/benches/consensus.rs +++ b/core/benches/consensus.rs @@ -24,10 +24,10 @@ fn bench_save_tower(bench: &mut Bencher) { let heaviest_bank = BankForks::new(Bank::default()).working_bank(); let tower = Tower::new( &node_keypair.pubkey(), - &vote_account_pubkey, + vote_account_pubkey, 0, &heaviest_bank, - &path, + path, ); bench.iter(move || { diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index 5d225560718def..d5816a3d49e948 100644 --- a/core/benches/retransmit_stage.rs +++ b/core/benches/retransmit_stage.rs @@ -21,6 +21,7 @@ use solana_sdk::pubkey; use solana_sdk::signature::{Keypair, Signer}; use solana_sdk::system_transaction; use solana_sdk::timing::timestamp; +use solana_streamer::socket::SocketAddrSpace; use std::net::UdpSocket; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; @@ -35,11 +36,20 @@ use test::Bencher;
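The `SocketAddrSpace` threaded through these call sites gates which peer addresses a node will transmit to. A minimal sketch of the semantics the benches rely on, assuming only what this diff itself exercises (`Unspecified` filters nothing, and `check` is the same method `broadcast_shreds` calls on each peer's `tvu` address):

let space = SocketAddrSpace::Unspecified;
let addr: std::net::SocketAddr = "127.0.0.1:8001".parse().unwrap();
// Unspecified performs no filtering, so the loopback peers used in benches pass.
assert!(space.check(&addr));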
#[allow(clippy::same_item_push)] fn bench_retransmitter(bencher: &mut Bencher) { solana_logger::setup(); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = ClusterInfo::new( + Node::new_localhost().info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); const NUM_PEERS: usize = 4; let mut peer_sockets = Vec::new(); for _ in 0..NUM_PEERS { - let id = pubkey::new_rand(); + // This ensures that cluster_info.id() is the root of turbine + // retransmit tree and so the shreds are retransmitted to all other + // nodes in the cluster. + let id = std::iter::repeat_with(pubkey::new_rand) + .find(|pk| cluster_info.id() < *pk) + .unwrap(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let mut contact_info = ContactInfo::new_localhost(&id, timestamp()); contact_info.tvu = socket.local_addr().unwrap(); diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 07f115ac3047bd..c320a6c47c6a63 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -1,7 +1,7 @@ // Service to verify accounts hashes with other trusted validator nodes. // // Each interval, publish the snapshot hash which is the full accounts state -// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators +// hash on gossip. Monitor gossip for messages from validators in the `--known-validator`s // set and halt the node if a mismatch is detected. use crate::snapshot_packager_service::PendingSnapshotPackage; @@ -148,7 +148,7 @@ impl AccountsHashVerifier { for (slot, hash) in hashes.iter() { slot_to_hash.insert(*slot, *hash); } - if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) { + if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) { exit.store(true, Ordering::Relaxed); } } @@ -223,13 +223,22 @@ mod tests { hash::hash, signature::{Keypair, Signer}, }; + use solana_streamer::socket::SocketAddrSpace; + + fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo { + ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ) + } #[test] fn test_should_halt() { let keypair = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = new_test_cluster_info(contact_info); let cluster_info = Arc::new(cluster_info); let mut trusted_validators = HashSet::new(); @@ -265,7 +274,7 @@ mod tests { let keypair = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = new_test_cluster_info(contact_info); let cluster_info = Arc::new(cluster_info); let trusted_validators = HashSet::new(); diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 092cbd9b83024d..386d41024e0d99 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1,20 +1,13 @@ //! The `banking_stage` processes Transaction messages. It is intended to be used //! to construct a software pipeline. The stage uses all available CPU cores and //! can do its processing in parallel with signature verification on the GPU.
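For orientation, each banking thread loops roughly as sketched below. This is a simplified, hypothetical outline rather than the diffed code: `process_batch` stands in for the stage's execute-and-record path, and the real loop also buffers, forwards, and checks leader schedules:

loop {
    // sigverified packet batches arrive from the upstream verification stage
    let batches = verified_receiver.recv_timeout(Duration::from_millis(100))?;
    // a working bank is only present while this node is the leader
    if let Some(bank) = poh_recorder.lock().unwrap().bank() {
        // execute the transactions, then record their hashes into PoH
        process_batch(&bank, &recorder, batches)?;
    }
}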
-use crate::{ - packet_hasher::PacketHasher, - poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder, WorkingBankEntry}, - poh_service::{self, PohService}, -}; +use crate::packet_hasher::PacketHasher; use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError}; use itertools::Itertools; use lru::LruCache; use retain_mut::RetainMut; use solana_gossip::cluster_info::ClusterInfo; -use solana_ledger::{ - blockstore::Blockstore, blockstore_processor::TransactionStatusSender, - entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache, -}; +use solana_ledger::{blockstore_processor::TransactionStatusSender, entry::hash_transactions}; use solana_measure::measure::Measure; use solana_metrics::{inc_new_counter_debug, inc_new_counter_info}; use solana_perf::{ @@ -22,6 +15,7 @@ use solana_perf::{ packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH}, perf_libs, }; +use solana_poh::poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder}; use solana_runtime::{ accounts_db::ErrorCounters, bank::{ @@ -39,7 +33,6 @@ use solana_sdk::{ MAX_TRANSACTION_FORWARDING_DELAY_GPU, }, message::Message, - poh_config::PohConfig, pubkey::Pubkey, short_vec::decode_shortu16_len, signature::Signature, @@ -57,8 +50,7 @@ use std::{ mem::size_of, net::UdpSocket, ops::DerefMut, - sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - sync::mpsc::Receiver, + sync::atomic::{AtomicU64, AtomicUsize, Ordering}, sync::{Arc, Mutex}, thread::{self, Builder, JoinHandle}, time::Duration, @@ -360,9 +352,9 @@ impl BankingStage { // We've hit the end of this slot, no need to perform more processing, // just filter the remaining packets for the invalid (e.g. too old) ones let new_unprocessed_indexes = Self::filter_unprocessed_packets( - &bank, - &msgs, - &original_unprocessed_indexes, + bank, + msgs, + original_unprocessed_indexes, my_pubkey, *next_leader, ); @@ -377,8 +369,8 @@ impl BankingStage { Self::process_packets_transactions( &bank, &bank_creation_time, - &recorder, - &msgs, + recorder, + msgs, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), gossip_vote_sender, @@ -411,7 +403,7 @@ impl BankingStage { // `original_unprocessed_indexes` must have remaining packets to process // if not yet processed. 
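// NOTE: the borrow dropped just below is a clippy::needless_borrow cleanup;
// `original_unprocessed_indexes` is already a `&[usize]`, so the extra `&`
// only added a layer of indirection for the compiler to peel off. The same
// mechanical one-character fix recurs throughout this PR.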
assert!(Self::packet_has_more_unprocessed_transactions( - &original_unprocessed_indexes + original_unprocessed_indexes )); true } @@ -605,7 +597,7 @@ impl BankingStage { let decision = Self::process_buffered_packets( &my_pubkey, &socket, - &poh_recorder, + poh_recorder, cluster_info, &mut buffered_packets, enable_forwarding, @@ -635,8 +627,8 @@ impl BankingStage { match Self::process_packets( &my_pubkey, - &verified_receiver, - &poh_recorder, + verified_receiver, + poh_recorder, recv_start, recv_timeout, id, @@ -746,7 +738,7 @@ impl BankingStage { let mut mint_decimals: HashMap = HashMap::new(); let pre_token_balances = if transaction_status_sender.is_some() { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -806,7 +798,7 @@ impl BankingStage { if let Some(transaction_status_sender) = transaction_status_sender { let txs = batch.transactions_iter().cloned().collect(); let post_balances = bank.collect_balances(batch); - let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals); transaction_status_sender.send_transaction_status_batch( bank.clone(), txs, @@ -984,16 +976,15 @@ impl BankingStage { fn transactions_from_packets( msgs: &Packets, transaction_indexes: &[usize], - secp256k1_program_enabled: bool, + libsecp256k1_0_5_upgrade_enabled: bool, ) -> (Vec>, Vec) { transaction_indexes .iter() .filter_map(|tx_index| { let p = &msgs.packets[*tx_index]; let tx: Transaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?; - if secp256k1_program_enabled { - tx.verify_precompiles().ok()?; - } + tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) + .ok()?; let message_bytes = Self::packet_message(p)?; let message_hash = Message::hash_raw_message(message_bytes); Some(( @@ -1057,7 +1048,7 @@ impl BankingStage { let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( msgs, &packet_indexes, - bank.secp256k1_program_enabled(), + bank.libsecp256k1_0_5_upgrade_enabled(), ); packet_conversion_time.stop(); @@ -1128,7 +1119,7 @@ impl BankingStage { let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( msgs, &transaction_indexes, - bank.secp256k1_program_enabled(), + bank.libsecp256k1_0_5_upgrade_enabled(), ); let tx_count = transaction_to_packet_indexes.len(); @@ -1257,7 +1248,7 @@ impl BankingStage { &bank, &msgs, &packet_indexes, - &my_pubkey, + my_pubkey, next_leader, ); Self::push_unprocessed( @@ -1392,79 +1383,54 @@ fn next_leader_tpu_forwards( } } -pub fn create_test_recorder( - bank: &Arc, - blockstore: &Arc, - poh_config: Option, -) -> ( - Arc, - Arc>, - PohService, - Receiver, -) { - let exit = Arc::new(AtomicBool::new(false)); - let poh_config = Arc::new(poh_config.unwrap_or_default()); - let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( - bank.tick_height(), - bank.last_blockhash(), - bank.slot(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - blockstore, - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &poh_config, - exit.clone(), - ); - poh_recorder.set_bank(&bank); - - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); - let poh_service = PohService::new( - poh_recorder.clone(), - &poh_config, - &exit, - bank.ticks_per_slot(), - poh_service::DEFAULT_PINNED_CPU_CORE, - poh_service::DEFAULT_HASHES_PER_BATCH, - record_receiver, - ); - - (exit, poh_recorder, poh_service, 
entry_receiver) -} - #[cfg(test)] mod tests { use super::*; - use crate::{ - poh_recorder::Record, poh_recorder::WorkingBank, - transaction_status_service::TransactionStatusService, - }; use crossbeam_channel::unbounded; use itertools::Itertools; - use solana_gossip::cluster_info::Node; + use solana_gossip::{cluster_info::Node, contact_info::ContactInfo}; use solana_ledger::{ - blockstore::entries_to_test_shreds, + blockstore::{entries_to_test_shreds, Blockstore}, entry::{next_entry, Entry, EntrySlice}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, + leader_schedule_cache::LeaderScheduleCache, }; use solana_perf::packet::to_packets_chunked; + use solana_poh::{ + poh_recorder::{create_test_recorder, Record, WorkingBank, WorkingBankEntry}, + poh_service::PohService, + }; + use solana_rpc::transaction_status_service::TransactionStatusService; use solana_sdk::{ hash::Hash, instruction::InstructionError, + poh_config::PohConfig, signature::{Keypair, Signer}, system_instruction::SystemError, system_transaction, transaction::TransactionError, }; + use solana_streamer::socket::SocketAddrSpace; use solana_transaction_status::TransactionWithStatusMeta; use std::{ net::SocketAddr, path::Path, - sync::atomic::{AtomicBool, Ordering}, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc::Receiver, + }, thread::sleep, }; + fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo { + ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ) + } + #[test] fn test_banking_stage_shutdown1() { let genesis_config = create_genesis_config(2).genesis_config; @@ -1480,7 +1446,7 @@ mod tests { ); let (exit, poh_recorder, poh_service, _entry_receiever) = create_test_recorder(&bank, &blockstore, None); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = new_test_cluster_info(Node::new_localhost().info); let cluster_info = Arc::new(cluster_info); let banking_stage = BankingStage::new( &cluster_info, @@ -1523,7 +1489,7 @@ mod tests { }; let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank, &blockstore, Some(poh_config)); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = new_test_cluster_info(Node::new_localhost().info); let cluster_info = Arc::new(cluster_info); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); @@ -1592,7 +1558,7 @@ mod tests { }; let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank, &blockstore, Some(poh_config)); - let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = new_test_cluster_info(Node::new_localhost().info); let cluster_info = Arc::new(cluster_info); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); @@ -1740,8 +1706,7 @@ mod tests { }; let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank, &blockstore, Some(poh_config)); - let cluster_info = - ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); + let cluster_info = new_test_cluster_info(Node::new_localhost().info); let cluster_info = Arc::new(cluster_info); let _banking_stage = BankingStage::new_num_threads( &cluster_info, @@ -2422,7 +2387,7 @@ mod tests { let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0); blockstore.insert_shreds(shreds, None, false).unwrap(); - blockstore.set_roots(&[bank.slot()]).unwrap(); + 
blockstore.set_roots(std::iter::once(&bank.slot())).unwrap(); let (transaction_status_sender, transaction_status_receiver) = unbounded(); let transaction_status_service = TransactionStatusService::new( @@ -2491,7 +2456,7 @@ mod tests { Receiver, JoinHandle<()>, ) { - Blockstore::destroy(&ledger_path).unwrap(); + Blockstore::destroy(ledger_path).unwrap(); let genesis_config_info = create_slow_genesis_config(10_000); let GenesisConfigInfo { genesis_config, @@ -2499,8 +2464,8 @@ mod tests { .. } = &genesis_config_info; let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); - let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); + Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); + let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config)); let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -2521,9 +2486,9 @@ mod tests { let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let transactions = vec![ - system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()), ]; let poh_simulator = simulate_poh(record_receiver, &poh_recorder); diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index d03b9dd65db2f8..668ebee6c623a9 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -1,31 +1,28 @@ //! 
A stage to broadcast data from a leader node to validators #![allow(clippy::rc_buffer)] use self::{ + broadcast_duplicates_run::BroadcastDuplicatesRun, broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*, fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun, standard_broadcast_run::StandardBroadcastRun, }; use crate::{ - poh_recorder::WorkingBankEntry, + cluster_nodes::ClusterNodes, result::{Error, Result}, }; use crossbeam_channel::{ Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError, Sender as CrossbeamSender, }; -use solana_gossip::{ - cluster_info::{self, ClusterInfo, ClusterInfoError}, - contact_info::ContactInfo, - crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, - weighted_shuffle::weighted_best, -}; +use solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError}; use solana_ledger::{blockstore::Blockstore, shred::Shred}; use solana_measure::measure::Measure; use solana_metrics::{inc_new_counter_error, inc_new_counter_info}; +use solana_poh::poh_recorder::WorkingBankEntry; use solana_runtime::bank::Bank; use solana_sdk::timing::timestamp; use solana_sdk::{clock::Slot, pubkey::Pubkey}; -use solana_streamer::sendmmsg::send_mmsg; +use solana_streamer::{sendmmsg::send_mmsg, socket::SocketAddrSpace}; use std::sync::atomic::AtomicU64; use std::{ collections::HashMap, @@ -37,6 +34,7 @@ use std::{ time::{Duration, Instant}, }; +mod broadcast_duplicates_run; mod broadcast_fake_shreds_run; pub mod broadcast_metrics; pub(crate) mod broadcast_utils; @@ -54,11 +52,20 @@ pub enum BroadcastStageReturnType { ChannelDisconnected, } +#[derive(PartialEq, Clone, Debug)] +pub struct BroadcastDuplicatesConfig { + /// Percentage of stake to send different version of slots to + pub stake_partition: u8, + /// Number of slots to wait before sending duplicate shreds + pub duplicate_send_delay: usize, +} + #[derive(PartialEq, Clone, Debug)] pub enum BroadcastStageType { Standard, FailEntryVerification, BroadcastFakeShreds, + BroadcastDuplicates(BroadcastDuplicatesConfig), } impl BroadcastStageType { @@ -103,6 +110,16 @@ impl BroadcastStageType { blockstore, BroadcastFakeShredsRun::new(keypair, 0, shred_version), ), + + BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new( + sock, + cluster_info, + receiver, + retransmit_slots_receiver, + exit_sender, + blockstore, + BroadcastDuplicatesRun::new(keypair, shred_version, config.clone()), + ), } } } @@ -172,15 +189,15 @@ impl BroadcastStage { fn handle_error(r: Result<()>, name: &str) -> Option { if let Err(e) = r { match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) - | Error::SendError - | Error::RecvError(RecvError) - | Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Disconnected) => { + Error::RecvTimeout(RecvTimeoutError::Disconnected) + | Error::Send + | Error::Recv(RecvError) + | Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Disconnected) => { return Some(BroadcastStageReturnType::ChannelDisconnected); } - Error::RecvTimeoutError(RecvTimeoutError::Timeout) - | Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Timeout) => (), - Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these? + Error::RecvTimeout(RecvTimeoutError::Timeout) + | Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Timeout) => (), + Error::ClusterInfo(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these? 
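// NOTE: the arm renames here (`RecvTimeoutError` -> `RecvTimeout`,
// `SendError` -> `Send`, `ClusterInfoError` -> `ClusterInfo`) track variant
// renames on the crate's `result::Error` enum made elsewhere in this PR;
// the match logic itself is unchanged.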
_ => { inc_new_counter_error!("streamer-broadcaster-error", 1, 1); error!("{} broadcaster error: {:?}", name, e); @@ -361,26 +378,17 @@ fn update_peer_stats( } } -pub fn get_broadcast_peers( - cluster_info: &ClusterInfo, - stakes: Option<&HashMap>, -) -> (Vec, Vec<(u64, usize)>) { - let mut peers = cluster_info.tvu_peers(); - let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes); - (peers, peers_and_stakes) -} - /// broadcast messages from the leader to layer 1 nodes /// # Remarks pub fn broadcast_shreds( s: &UdpSocket, shreds: &[Shred], - peers_and_stakes: &[(u64, usize)], - peers: &[ContactInfo], + cluster_nodes: &ClusterNodes, last_datapoint_submit: &Arc, transmit_stats: &mut TransmitShredsStats, + socket_addr_space: &SocketAddrSpace, ) -> Result<()> { - let broadcast_len = peers_and_stakes.len(); + let broadcast_len = cluster_nodes.num_peers(); if broadcast_len == 0 { update_peer_stats(1, 1, last_datapoint_submit); return Ok(()); @@ -388,10 +396,13 @@ pub fn broadcast_shreds( let mut shred_select = Measure::start("shred_select"); let packets: Vec<_> = shreds .iter() - .map(|shred| { - let broadcast_index = weighted_best(&peers_and_stakes, shred.seed()); - - (&shred.payload, &peers[broadcast_index].tvu) + .filter_map(|shred| { + let node = cluster_nodes.get_broadcast_peer(shred.seed())?; + if socket_addr_space.check(&node.tvu) { + Some((&shred.payload, &node.tvu)) + } else { + None + } }) .collect(); shred_select.stop(); @@ -410,7 +421,7 @@ pub fn broadcast_shreds( send_mmsg_time.stop(); transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us(); - let num_live_peers = num_live_peers(&peers); + let num_live_peers = cluster_nodes.num_peers_live(timestamp()) as i64; update_peer_stats( num_live_peers, broadcast_len as i64 + 1, @@ -419,25 +430,6 @@ pub fn broadcast_shreds( Ok(()) } -fn distance(a: u64, b: u64) -> u64 { - if a > b { - a - b - } else { - b - a - } -} - -fn num_live_peers(peers: &[ContactInfo]) -> i64 { - let mut num_live_peers = 1i64; - peers.iter().for_each(|p| { - // A peer is considered live if they generated their contact info recently - if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS { - num_live_peers += 1; - } - }); - num_live_peers -} - #[cfg(test)] pub mod test { use super::*; @@ -521,19 +513,6 @@ pub mod test { assert_eq!(num_expected_coding_shreds, coding_index); } - #[test] - fn test_num_live_peers() { - let mut ci = ContactInfo { - wallclock: std::u64::MAX, - ..ContactInfo::default() - }; - assert_eq!(num_live_peers(&[ci.clone()]), 1); - ci.wallclock = timestamp() - 1; - assert_eq!(num_live_peers(&[ci.clone()]), 2); - ci.wallclock = timestamp() - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - 1; - assert_eq!(num_live_peers(&[ci]), 1); - } - #[test] fn test_duplicate_retransmit_signal() { // Setup @@ -607,7 +586,11 @@ pub mod test { let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey()); // Fill the cluster_info with the buddy's info - let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone()); + let cluster_info = ClusterInfo::new( + leader_info.info.clone(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); cluster_info.insert_info(broadcast_buddy.info); let cluster_info = Arc::new(cluster_info); diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs new file mode 100644 index 00000000000000..cbe7a3861e95af --- /dev/null +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -0,0 +1,338 @@ 
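The new module that follows is an adversarial `BroadcastRun` implementation for exercising duplicate-block handling: it partitions the cluster by stake and, after a delay, ships an equivocating shred stream to one side. A hypothetical instantiation using the `BroadcastDuplicatesConfig` fields defined in broadcast_stage.rs above (the values here are illustrative only):

let config = BroadcastDuplicatesConfig {
    stake_partition: 25,     // send the duplicate version to ~25% of stake
    duplicate_send_delay: 2, // hold duplicate shreds back for 2 slots
};
let stage_type = BroadcastStageType::BroadcastDuplicates(config);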
+use super::broadcast_utils::ReceiveResults; +use super::*; +use log::*; +use solana_ledger::entry::{create_ticks, Entry, EntrySlice}; +use solana_ledger::shred::Shredder; +use solana_runtime::blockhash_queue::BlockhashQueue; +use solana_sdk::clock::Slot; +use solana_sdk::fee_calculator::FeeCalculator; +use solana_sdk::hash::Hash; +use solana_sdk::signature::{Keypair, Signer}; +use solana_sdk::transaction::Transaction; +use std::collections::VecDeque; +use std::sync::Mutex; + +// Queue which facilitates delivering shreds with a delay +type DelayedQueue = VecDeque<(Option<Pubkey>, Option<Vec<Shred>>)>; + +#[derive(Clone)] +pub(super) struct BroadcastDuplicatesRun { + config: BroadcastDuplicatesConfig, + // Local queue for broadcast to track which duplicate blockhashes we've sent + duplicate_queue: BlockhashQueue, + // Shared queue between broadcast and transmit threads + delayed_queue: Arc<Mutex<DelayedQueue>>, + // Buffer for duplicate entries + duplicate_entries_buffer: Vec<Entry>, + last_duplicate_entry_hash: Hash, + last_broadcast_slot: Slot, + next_shred_index: u32, + shred_version: u16, + keypair: Arc<Keypair>, +} + +impl BroadcastDuplicatesRun { + pub(super) fn new( + keypair: Arc<Keypair>, + shred_version: u16, + config: BroadcastDuplicatesConfig, + ) -> Self { + let mut delayed_queue = DelayedQueue::new(); + delayed_queue.resize(config.duplicate_send_delay, (None, None)); + Self { + config, + delayed_queue: Arc::new(Mutex::new(delayed_queue)), + duplicate_queue: BlockhashQueue::default(), + duplicate_entries_buffer: vec![], + next_shred_index: u32::MAX, + last_broadcast_slot: 0, + last_duplicate_entry_hash: Hash::default(), + shred_version, + keypair, + } + } + + fn queue_or_create_duplicate_entries( + &mut self, + bank: &Arc<Bank>, + receive_results: &ReceiveResults, + ) -> (Vec<Entry>, u32) { + // If the last entry hash is default, grab the last blockhash from the parent bank + if self.last_duplicate_entry_hash == Hash::default() { + self.last_duplicate_entry_hash = bank.last_blockhash(); + } + + // Create duplicate entries by.. + // 1) rearranging real entries so that all transaction entries are moved to + // the front and tick entries are moved to the back. + // 2) setting all transaction entries to zero hashes and all tick entries to `hashes_per_tick`. + // 3) removing any transactions which reference blockhashes which aren't in the + // duplicate blockhash queue. + let (duplicate_entries, next_shred_index) = if bank.slot() > MINIMUM_DUPLICATE_SLOT { + let mut tx_entries: Vec<Entry> = receive_results + .entries + .iter() + .filter_map(|entry| { + if entry.is_tick() { + return None; + } + + let transactions: Vec<Transaction> = entry + .transactions + .iter() + .filter(|tx| { + self.duplicate_queue + .get_hash_age(&tx.message.recent_blockhash) + .is_some() + }) + .cloned() + .collect(); + if !transactions.is_empty() { + Some(Entry::new_mut( + &mut self.last_duplicate_entry_hash, + &mut 0, + transactions, + )) + } else { + None + } + }) + .collect(); + let mut tick_entries = create_ticks( + receive_results.entries.tick_count(), + bank.hashes_per_tick().unwrap_or_default(), + self.last_duplicate_entry_hash, + ); + self.duplicate_entries_buffer.append(&mut tx_entries); + self.duplicate_entries_buffer.append(&mut tick_entries); + + // Only send out duplicate entries when the block is finished otherwise the + // recipient will start repairing for shreds they haven't received yet and + // hit duplicate slot issues before we want them to.
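+        // E.g. with 64 ticks per slot, batches received at tick heights 16,
+        // 32 and 48 are only buffered; the buffer drains in one batch when
+        // last_tick_height reaches bank.max_tick_height() just below.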
+ let entries = if receive_results.last_tick_height == bank.max_tick_height() { + self.duplicate_entries_buffer.drain(..).collect() + } else { + vec![] + }; + + // Set next shred index to 0 since we are sending the full slot + (entries, 0) + } else { + // Send real entries until we hit min duplicate slot + (receive_results.entries.clone(), self.next_shred_index) + }; + + // Save last duplicate entry hash to avoid invalid entry hash errors + if let Some(last_duplicate_entry) = duplicate_entries.last() { + self.last_duplicate_entry_hash = last_duplicate_entry.hash; + } + + (duplicate_entries, next_shred_index) + } +} + +/// Duplicate slots should only be sent once all validators have started. +/// This constant is intended to be used as a buffer so that all validators +/// are live before sending duplicate slots. +pub const MINIMUM_DUPLICATE_SLOT: Slot = 20; + +impl BroadcastRun for BroadcastDuplicatesRun { + fn run( + &mut self, + blockstore: &Arc, + receiver: &Receiver, + socket_sender: &Sender<(TransmitShreds, Option)>, + blockstore_sender: &Sender<(Arc>, Option)>, + ) -> Result<()> { + // 1) Pull entries from banking stage + let receive_results = broadcast_utils::recv_slot_entries(receiver)?; + let bank = receive_results.bank.clone(); + let last_tick_height = receive_results.last_tick_height; + + if self.next_shred_index == u32::MAX { + self.next_shred_index = blockstore + .meta(bank.slot()) + .expect("Database error") + .map(|meta| meta.consumed) + .unwrap_or(0) as u32 + } + + // We were not the leader, but just became leader again + if bank.slot() > self.last_broadcast_slot + 1 { + self.last_duplicate_entry_hash = Hash::default(); + } + self.last_broadcast_slot = bank.slot(); + + let shredder = Shredder::new( + bank.slot(), + bank.parent().unwrap().slot(), + self.keypair.clone(), + (bank.tick_height() % bank.ticks_per_slot()) as u8, + self.shred_version, + ) + .expect("Expected to create a new shredder"); + + let (data_shreds, coding_shreds, last_shred_index) = shredder.entries_to_shreds( + &receive_results.entries, + last_tick_height == bank.max_tick_height(), + self.next_shred_index, + ); + + let (duplicate_entries, next_duplicate_shred_index) = + self.queue_or_create_duplicate_entries(&bank, &receive_results); + let (duplicate_data_shreds, duplicate_coding_shreds, _) = if !duplicate_entries.is_empty() { + shredder.entries_to_shreds( + &duplicate_entries, + last_tick_height == bank.max_tick_height(), + next_duplicate_shred_index, + ) + } else { + (vec![], vec![], 0) + }; + + // Manually track the shred index because relying on slot meta consumed is racy + if last_tick_height == bank.max_tick_height() { + self.next_shred_index = 0; + self.duplicate_queue + .register_hash(&self.last_duplicate_entry_hash, &FeeCalculator::default()); + } else { + self.next_shred_index = last_shred_index; + } + + // Partition network with duplicate and real shreds based on stake + let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); + let mut duplicate_recipients = HashMap::new(); + let mut real_recipients = HashMap::new(); + + let mut stakes: Vec<(Pubkey, u64)> = bank + .epoch_staked_nodes(bank_epoch) + .unwrap() + .into_iter() + .filter(|(pubkey, _)| *pubkey != self.keypair.pubkey()) + .collect(); + stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| { + if r_stake == l_stake { + l_key.cmp(r_key) + } else { + r_stake.cmp(l_stake) + } + }); + + let highest_staked_node = stakes.first().cloned().map(|x| x.0); + let stake_total: u64 = stakes.iter().map(|(_, stake)| *stake).sum(); + let mut 
cumulative_stake: u64 = 0; + for (pubkey, stake) in stakes.into_iter().rev() { + cumulative_stake += stake; + if (100 * cumulative_stake / stake_total) as u8 <= self.config.stake_partition { + duplicate_recipients.insert(pubkey, stake); + } else { + real_recipients.insert(pubkey, stake); + } + } + + if let Some(highest_staked_node) = highest_staked_node { + if bank.slot() > MINIMUM_DUPLICATE_SLOT && last_tick_height == bank.max_tick_height() { + warn!( + "{} sent duplicate slot {} to nodes: {:?}", + self.keypair.pubkey(), + bank.slot(), + &duplicate_recipients, + ); + warn!( + "Duplicate shreds for slot {} will be broadcast in {} slot(s)", + bank.slot(), + self.config.duplicate_send_delay + ); + + let delayed_shreds: Option> = vec![ + duplicate_data_shreds.last().cloned(), + data_shreds.last().cloned(), + ] + .into_iter() + .collect(); + self.delayed_queue + .lock() + .unwrap() + .push_back((Some(highest_staked_node), delayed_shreds)); + } + } + + let duplicate_recipients = Arc::new(duplicate_recipients); + let real_recipients = Arc::new(real_recipients); + + let data_shreds = Arc::new(data_shreds); + blockstore_sender.send((data_shreds.clone(), None))?; + + // 3) Start broadcast step + socket_sender.send(( + ( + Some(duplicate_recipients.clone()), + Arc::new(duplicate_data_shreds), + ), + None, + ))?; + socket_sender.send(( + ( + Some(duplicate_recipients), + Arc::new(duplicate_coding_shreds), + ), + None, + ))?; + socket_sender.send(((Some(real_recipients.clone()), data_shreds), None))?; + socket_sender.send(((Some(real_recipients), Arc::new(coding_shreds)), None))?; + + Ok(()) + } + fn transmit( + &mut self, + receiver: &Arc>, + cluster_info: &ClusterInfo, + sock: &UdpSocket, + ) -> Result<()> { + // Check the delay queue for shreds that are ready to be sent + let (delayed_recipient, delayed_shreds) = { + let mut delayed_deque = self.delayed_queue.lock().unwrap(); + if delayed_deque.len() > self.config.duplicate_send_delay { + delayed_deque.pop_front().unwrap() + } else { + (None, None) + } + }; + + let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?; + let stakes = stakes.unwrap(); + let socket_addr_space = cluster_info.socket_addr_space(); + for peer in cluster_info.tvu_peers() { + // Forward shreds to circumvent gossip + if stakes.get(&peer.id).is_some() { + shreds.iter().for_each(|shred| { + if socket_addr_space.check(&peer.tvu_forwards) { + sock.send_to(&shred.payload, &peer.tvu_forwards).unwrap(); + } + }); + } + + // After a delay, broadcast duplicate shreds to a single node + if let Some(shreds) = delayed_shreds.as_ref() { + if Some(peer.id) == delayed_recipient { + shreds.iter().for_each(|shred| { + if socket_addr_space.check(&peer.tvu) { + sock.send_to(&shred.payload, &peer.tvu).unwrap(); + } + }); + } + } + } + + Ok(()) + } + fn record( + &mut self, + receiver: &Arc>, + blockstore: &Arc, + ) -> Result<()> { + let (data_shreds, _) = receiver.lock().unwrap().recv()?; + blockstore.insert_shreds(data_shreds.to_vec(), None, true)?; + Ok(()) + } +} diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index 55983fd35e202d..6461f350ac6b77 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -139,14 +139,16 @@ impl BroadcastRun for BroadcastFakeShredsRun { mod tests { use super::*; use solana_gossip::contact_info::ContactInfo; + use solana_streamer::socket::SocketAddrSpace; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; #[test] fn 
test_tvu_peers_ordering() { - let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - )); + let cluster = ClusterInfo::new( + ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new( IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080, diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs index b421a2a5245d24..08a9d40738bb52 100644 --- a/core/src/broadcast_stage/broadcast_utils.rs +++ b/core/src/broadcast_stage/broadcast_utils.rs @@ -1,6 +1,6 @@ -use crate::poh_recorder::WorkingBankEntry; use crate::result::Result; use solana_ledger::{entry::Entry, shred::Shred}; +use solana_poh::poh_recorder::WorkingBankEntry; use solana_runtime::bank::Bank; use solana_sdk::clock::Slot; use std::{ diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index b66681786d6f7d..4fb06e416cc3af 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -1,4 +1,5 @@ use super::*; +use crate::cluster_nodes::ClusterNodes; use solana_ledger::shred::Shredder; use solana_sdk::hash::Hash; use solana_sdk::signature::Keypair; @@ -134,15 +135,17 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { ) -> Result<()> { let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?; // Broadcast data - let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes.as_deref()); - + let cluster_nodes = ClusterNodes::<BroadcastStage>::new( + cluster_info, + stakes.as_deref().unwrap_or(&HashMap::default()), + ); broadcast_shreds( sock, &shreds, - &peers_and_stakes, - &peers, + &cluster_nodes, &Arc::new(AtomicU64::new(0)), &mut TransmitShredsStats::default(), + cluster_info.socket_addr_space(), )?; Ok(()) diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 6908d5dd1b9908..27caca8eabfdf8 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -4,7 +4,7 @@ use super::{ broadcast_utils::{self, ReceiveResults}, *, }; -use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo; +use crate::{broadcast_stage::broadcast_utils::UnfinishedSlotInfo, cluster_nodes::ClusterNodes}; use solana_ledger::{ entry::Entry, shred::{ @@ -27,16 +27,10 @@ pub struct StandardBroadcastRun { shred_version: u16, last_datapoint_submit: Arc<AtomicU64>, num_batches: usize, - broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>, + cluster_nodes: Arc<RwLock<ClusterNodes<BroadcastStage>>>, last_peer_update: Arc<AtomicU64>, } -#[derive(Default)] -struct BroadcastPeerCache { - peers: Vec<ContactInfo>, - peers_and_stakes: Vec<(u64, usize)>, -} - impl StandardBroadcastRun { pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self { Self { @@ -50,7 +44,7 @@ shred_version, last_datapoint_submit: Arc::default(), num_batches: 0, - broadcast_peer_cache: Arc::default(), + cluster_nodes: Arc::default(), last_peer_update: Arc::default(), } } @@ -161,7 +155,7 @@ ) -> Result<()> { let (bsend, brecv) = channel(); let (ssend, srecv) = channel(); - self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?; + self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?; let srecv = Arc::new(Mutex::new(srecv)); let
brecv = Arc::new(Mutex::new(brecv)); //data @@ -351,16 +345,16 @@ if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS && self .last_peer_update - .compare_and_swap(now, last, Ordering::Relaxed) + .compare_and_swap(last, now, Ordering::Relaxed) == last { - let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap(); - let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes); - w_broadcast_peer_cache.peers = peers; - w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes; + *self.cluster_nodes.write().unwrap() = ClusterNodes::<BroadcastStage>::new( + cluster_info, + stakes.unwrap_or(&HashMap::default()), + ); } get_peers_time.stop(); - let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap(); + let cluster_nodes = self.cluster_nodes.read().unwrap(); let mut transmit_stats = TransmitShredsStats::default(); // Broadcast the shreds @@ -368,12 +362,12 @@ broadcast_shreds( sock, &shreds, - &r_broadcast_peer_cache.peers_and_stakes, - &r_broadcast_peer_cache.peers, + &cluster_nodes, &self.last_datapoint_submit, &mut transmit_stats, + cluster_info.socket_addr_space(), )?; - drop(r_broadcast_peer_cache); + drop(cluster_nodes); transmit_time.stop(); transmit_stats.transmit_elapsed = transmit_time.as_us(); @@ -505,6 +499,7 @@ mod test { genesis_config::GenesisConfig, signature::{Keypair, Signer}, }; + use solana_streamer::socket::SocketAddrSpace; use std::sync::Arc; use std::time::Duration; @@ -526,7 +521,11 @@ let leader_keypair = Arc::new(Keypair::new()); let leader_pubkey = leader_keypair.pubkey(); let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(leader_info.info)); + let cluster_info = Arc::new(ClusterInfo::new( + leader_info.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let mut genesis_config = create_genesis_config(10_000).genesis_config; genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1; diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index be3e410a62b92c..b0afdd586fc1c7 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -1,6 +1,5 @@ use crate::{ optimistic_confirmation_verifier::OptimisticConfirmationVerifier, - poh_recorder::PohRecorder, replay_stage::DUPLICATE_THRESHOLD, result::{Error, Result}, sigverify, @@ -20,6 +19,7 @@ use solana_gossip::{ use solana_ledger::blockstore::Blockstore; use solana_metrics::inc_new_counter_debug; use solana_perf::packet::{self, Packets}; +use solana_poh::poh_recorder::PohRecorder; use solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, rpc_subscriptions::RpcSubscriptions, @@ -33,7 +33,7 @@ use solana_runtime::{ vote_sender_types::{ReplayVoteReceiver, ReplayedVote}, }; use solana_sdk::{ - clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT}, + clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey, @@ -110,7 +110,7 @@ epoch_schedule: *root_bank.epoch_schedule(), ..VoteTracker::default() }; - vote_tracker.progress_with_new_root_bank(&root_bank); + vote_tracker.progress_with_new_root_bank(root_bank); assert_eq!( *vote_tracker.leader_schedule_epoch.read().unwrap(), root_bank.get_leader_schedule_epoch(root_bank.slot()) @@ -384,15 +384,20 @@ impl
ClusterInfoVoteListener { return Ok(()); } + let would_be_leader = poh_recorder + .lock() + .unwrap() + .would_be_leader(20 * DEFAULT_TICKS_PER_SLOT); if let Err(e) = verified_vote_packets.receive_and_process_vote_packets( &verified_vote_label_packets_receiver, &mut update_version, + would_be_leader, ) { match e { - Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => { + Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => { return Ok(()); } - Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => (), + Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (), _ => { error!("thread {:?} error {:?}", thread::current().name(), e); } @@ -474,8 +479,8 @@ .add_new_optimistic_confirmed_slots(confirmed_slots.clone()); } Err(e) => match e { - Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) - | Error::ReadyTimeoutError => (), + Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) + | Error::ReadyTimeout => (), _ => { error!("thread {:?} error {:?}", thread::current().name(), e); } @@ -598,7 +603,7 @@ if slot == last_vote_slot { let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes()); let stake = vote_accounts - .get(&vote_pubkey) + .get(vote_pubkey) .map(|(stake, _)| *stake) .unwrap_or_default(); let total_stake = epoch_stakes.total_stake(); @@ -687,7 +692,7 @@ // voters trying to make votes for slots earlier than the epoch for // which they are authorized let actual_authorized_voter = - vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot); + vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot); if actual_authorized_voter.is_none() { return false; @@ -695,7 +700,7 @@ // Voting without the correct authorized pubkey, dump the vote if !VoteTracker::vote_contains_authorized_voter( - &gossip_tx, + gossip_tx, &actual_authorized_voter.unwrap(), ) { return false; @@ -733,7 +738,7 @@ Self::track_new_votes_and_notify_confirmations( vote, &vote_pubkey, - &vote_tracker, + vote_tracker, root_bank, subscriptions, verified_vote_sender, diff --git a/core/src/cluster_nodes.rs b/core/src/cluster_nodes.rs new file mode 100644 index 00000000000000..ca0a82d18f2a7c --- /dev/null +++ b/core/src/cluster_nodes.rs @@ -0,0 +1,446 @@ +use { + crate::{broadcast_stage::BroadcastStage, retransmit_stage::RetransmitStage}, + itertools::Itertools, + solana_gossip::{ + cluster_info::{compute_retransmit_peers, ClusterInfo}, + contact_info::ContactInfo, + crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, + weighted_shuffle::{weighted_best, weighted_shuffle}, + }, + solana_sdk::pubkey::Pubkey, + std::{any::TypeId, cmp::Reverse, collections::HashMap, marker::PhantomData}, +}; + +enum NodeId { + // TVU node obtained through gossip (staked or not). + ContactInfo(ContactInfo), + // Staked node with no contact-info in gossip table. + Pubkey(Pubkey), +} + +struct Node { + node: NodeId, + stake: u64, +} + +pub struct ClusterNodes<T> { + pubkey: Pubkey, // The local node itself. + // All staked nodes + other known tvu-peers + the node itself; + // sorted by (stake, pubkey) in descending order. + nodes: Vec<Node>, + // Weights and indices for sampling peers. weighted_{shuffle,best} expect + // weights >= 1. For backward compatibility we use max(1, stake) for + // weights and exclude nodes with no contact-info.
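+    // E.g. stakes of 9, 5 and 0 (all with contact-info) yield index entries
+    // [(9, _), (5, _), (1, _)]: the unstaked node is floored to weight 1 so
+    // the weighted sampling helpers never see a zero weight.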
+    index: Vec<(/*weight:*/ u64, /*index:*/ usize)>,
+    _phantom: PhantomData<T>,
+}
+
+impl Node {
+    #[inline]
+    fn pubkey(&self) -> Pubkey {
+        match &self.node {
+            NodeId::Pubkey(pubkey) => *pubkey,
+            NodeId::ContactInfo(node) => node.id,
+        }
+    }
+
+    #[inline]
+    fn contact_info(&self) -> Option<&ContactInfo> {
+        match &self.node {
+            NodeId::Pubkey(_) => None,
+            NodeId::ContactInfo(node) => Some(node),
+        }
+    }
+}
+
+impl<T> ClusterNodes<T> {
+    pub fn num_peers(&self) -> usize {
+        self.index.len()
+    }
+
+    // A peer is considered live if they generated their contact info recently.
+    pub fn num_peers_live(&self, now: u64) -> usize {
+        self.index
+            .iter()
+            .filter_map(|(_, index)| self.nodes[*index].contact_info())
+            .filter(|node| {
+                let elapsed = if node.wallclock < now {
+                    now - node.wallclock
+                } else {
+                    node.wallclock - now
+                };
+                elapsed < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
+            })
+            .count()
+    }
+}
+
+impl ClusterNodes<BroadcastStage> {
+    pub fn new(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Self {
+        new_cluster_nodes(cluster_info, stakes)
+    }
+
+    /// Returns the root of turbine broadcast tree, which the leader sends the
+    /// shred to.
+    pub fn get_broadcast_peer(&self, shred_seed: [u8; 32]) -> Option<&ContactInfo> {
+        if self.index.is_empty() {
+            None
+        } else {
+            let index = weighted_best(&self.index, shred_seed);
+            match &self.nodes[index].node {
+                NodeId::ContactInfo(node) => Some(node),
+                NodeId::Pubkey(_) => panic!("this should not happen!"),
+            }
+        }
+    }
+}
+
+impl ClusterNodes<RetransmitStage> {
+    pub fn new(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Self {
+        new_cluster_nodes(cluster_info, stakes)
+    }
+
+    pub fn get_retransmit_peers(
+        &self,
+        shred_seed: [u8; 32],
+        fanout: usize,
+        slot_leader: Option<Pubkey>,
+    ) -> (
+        Vec<&ContactInfo>, // neighbors
+        Vec<&ContactInfo>, // children
+    ) {
+        // Exclude leader from list of nodes.
+        let index = self.index.iter().copied();
+        let (weights, index): (Vec<u64>, Vec<usize>) = match slot_leader {
+            None => {
+                error!("unknown leader for shred slot");
+                index.unzip()
+            }
+            Some(slot_leader) if slot_leader == self.pubkey => {
+                error!("retransmit from slot leader: {}", slot_leader);
+                index.unzip()
+            }
+            Some(slot_leader) => index
+                .filter(|(_, i)| self.nodes[*i].pubkey() != slot_leader)
+                .unzip(),
+        };
+        let index: Vec<_> = {
+            let shuffle = weighted_shuffle(&weights, shred_seed);
+            shuffle.into_iter().map(|i| index[i]).collect()
+        };
+        let self_index = index
+            .iter()
+            .position(|i| self.nodes[*i].pubkey() == self.pubkey)
+            .unwrap();
+        let (neighbors, children) = compute_retransmit_peers(fanout, self_index, &index);
+        // Assert that the node itself is included in the set of neighbors, at
+        // the right offset.
+        debug_assert_eq!(
+            self.nodes[neighbors[self_index % fanout]].pubkey(),
+            self.pubkey
+        );
+        let get_contact_infos = |index: Vec<usize>| -> Vec<&ContactInfo> {
+            index
+                .into_iter()
+                .map(|i| self.nodes[i].contact_info().unwrap())
+                .collect()
+        };
+        (get_contact_infos(neighbors), get_contact_infos(children))
+    }
+}
+
+fn new_cluster_nodes<T: 'static>(
+    cluster_info: &ClusterInfo,
+    stakes: &HashMap<Pubkey, u64>,
+) -> ClusterNodes<T> {
+    let self_pubkey = cluster_info.id();
+    let nodes = get_nodes(cluster_info, stakes);
+    let broadcast = TypeId::of::<T>() == TypeId::of::<BroadcastStage>();
+    // For backward compatibility:
+    // * nodes which do not have contact-info are excluded.
+    // * stakes are floored at 1.
+    // The sorting key here should be equivalent to
+    // solana_gossip::deprecated::sorted_stakes_with_index.
+    // Leader itself is excluded when sampling broadcast peers.
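
// [Illustration only, not part of the patch; the patch's own construction of
// `index` continues right below this aside.] A minimal, std-only sketch of the
// (weight, index) table described above: weights are floored at 1 because
// weighted_shuffle/weighted_best reject zero weights, and entries are ordered
// descending by (stake, id) for parity with the deprecated
// sorted_stakes_with_index. `build_sampling_index` is a hypothetical stand-in
// for the iterator chain in new_cluster_nodes; u8 ids stand in for pubkeys.
fn build_sampling_index(stake_and_id: &[(u64, u8)]) -> Vec<(/*weight:*/ u64, /*index:*/ usize)> {
    let mut order: Vec<usize> = (0..stake_and_id.len()).collect();
    // Descending by (stake, id); mirrors Reverse((node.stake.max(1), node.pubkey())).
    order.sort_by_key(|&i| std::cmp::Reverse((stake_and_id[i].0.max(1), stake_and_id[i].1)));
    order
        .into_iter()
        .map(|i| (stake_and_id[i].0.max(1), i)) // floor the weight at 1
        .collect()
}

#[test]
fn sampling_index_floors_zero_stake() {
    let index = build_sampling_index(&[(0, 3), (5, 1), (5, 2)]);
    // Highest (stake, id) first; the zero-stake node keeps index 0 but gets weight 1.
    assert_eq!(index, vec![(5, 2), (5, 1), (1, 0)]);
}
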
+    let index = nodes
+        .iter()
+        .enumerate()
+        .filter(|(_, node)| node.contact_info().is_some())
+        .filter(|(_, node)| !broadcast || node.pubkey() != self_pubkey)
+        .sorted_by_key(|(_, node)| Reverse((node.stake.max(1), node.pubkey())))
+        .map(|(index, node)| (node.stake.max(1), index))
+        .collect();
+    ClusterNodes {
+        pubkey: self_pubkey,
+        nodes,
+        index,
+        _phantom: PhantomData::default(),
+    }
+}
+
+// All staked nodes + other known tvu-peers + the node itself;
+// sorted by (stake, pubkey) in descending order.
+fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<Node> {
+    let self_pubkey = cluster_info.id();
+    // The local node itself.
+    std::iter::once({
+        let stake = stakes.get(&self_pubkey).copied().unwrap_or_default();
+        let node = NodeId::from(cluster_info.my_contact_info());
+        Node { node, stake }
+    })
+    // All known tvu-peers from gossip.
+    .chain(cluster_info.tvu_peers().into_iter().map(|node| {
+        let stake = stakes.get(&node.id).copied().unwrap_or_default();
+        let node = NodeId::from(node);
+        Node { node, stake }
+    }))
+    // All staked nodes.
+    .chain(
+        stakes
+            .iter()
+            .filter(|(_, stake)| **stake > 0)
+            .map(|(&pubkey, &stake)| Node {
+                node: NodeId::from(pubkey),
+                stake,
+            }),
+    )
+    .sorted_by_key(|node| Reverse((node.stake, node.pubkey())))
+    // Since sorted_by_key is stable, in case of duplicates, this
+    // will keep nodes with contact-info.
+    .dedup_by(|a, b| a.pubkey() == b.pubkey())
+    .collect()
+}
+
+impl From<ContactInfo> for NodeId {
+    fn from(node: ContactInfo) -> Self {
+        NodeId::ContactInfo(node)
+    }
+}
+
+impl From<Pubkey> for NodeId {
+    fn from(pubkey: Pubkey) -> Self {
+        NodeId::Pubkey(pubkey)
+    }
+}
+
+impl<T> Default for ClusterNodes<T> {
+    fn default() -> Self {
+        Self {
+            pubkey: Pubkey::default(),
+            nodes: Vec::default(),
+            index: Vec::default(),
+            _phantom: PhantomData::default(),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        rand::{seq::SliceRandom, Rng},
+        solana_gossip::{
+            crds_value::{CrdsData, CrdsValue},
+            deprecated::{
+                shuffle_peers_and_index, sorted_retransmit_peers_and_stakes,
+                sorted_stakes_with_index,
+            },
+        },
+        solana_sdk::{signature::Keypair, timing::timestamp},
+        solana_streamer::socket::SocketAddrSpace,
+        std::{iter::repeat_with, sync::Arc},
+    };
+
+    // Legacy methods copied for testing backward compatibility.
+
+    fn get_broadcast_peers(
+        cluster_info: &ClusterInfo,
+        stakes: Option<&HashMap<Pubkey, u64>>,
+    ) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
+        let mut peers = cluster_info.tvu_peers();
+        let peers_and_stakes = stake_weight_peers(&mut peers, stakes);
+        (peers, peers_and_stakes)
+    }
+
+    fn stake_weight_peers(
+        peers: &mut Vec<ContactInfo>,
+        stakes: Option<&HashMap<Pubkey, u64>>,
+    ) -> Vec<(u64, usize)> {
+        peers.dedup();
+        sorted_stakes_with_index(peers, stakes)
+    }
+
+    fn make_cluster<R: Rng>(
+        rng: &mut R,
+    ) -> (
+        Vec<ContactInfo>,
+        HashMap<Pubkey, u64>, // stakes
+        ClusterInfo,
+    ) {
+        let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
+            .take(1000)
+            .collect();
+        nodes.shuffle(rng);
+        let this_node = nodes[0].clone();
+        let mut stakes: HashMap<Pubkey, u64> = nodes
+            .iter()
+            .filter_map(|node| {
+                if rng.gen_ratio(1, 7) {
+                    None // No stake for some of the nodes.
+                } else {
+                    Some((node.id, rng.gen_range(0, 20)))
+                }
+            })
+            .collect();
+        // Add some staked nodes with no contact-info.
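
// [Illustration only, not part of the patch; make_cluster continues below.]
// Why get_nodes (above) keeps the contact-info entry when a pubkey appears
// twice: entries carrying contact-info are emitted before the stake-only
// entries, the sort is stable, so for an equal (stake, id) key the
// contact-info entry stays first and a keep-first dedup drops the bare-pubkey
// duplicate. Std-only sketch; `Entry` and `merge` are hypothetical stand-ins
// for `Node` and the iterator chain above.
#[derive(Debug, PartialEq)]
struct Entry {
    id: u8, // stands in for the pubkey
    stake: u64,
    has_contact_info: bool,
}

fn merge(mut entries: Vec<Entry>) -> Vec<Entry> {
    // Stable sort, descending by (stake, id): ties keep emission order.
    entries.sort_by_key(|e| std::cmp::Reverse((e.stake, e.id)));
    // Vec::dedup_by removes all but the first of consecutive equal ids.
    entries.dedup_by(|a, b| a.id == b.id);
    entries
}

#[test]
fn merge_prefers_contact_info() {
    let merged = merge(vec![
        Entry { id: 7, stake: 9, has_contact_info: true },  // from gossip
        Entry { id: 7, stake: 9, has_contact_info: false }, // from stakes only
    ]);
    assert_eq!(merged.len(), 1);
    assert!(merged[0].has_contact_info);
}
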
+ stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100)); + let cluster_info = ClusterInfo::new( + this_node, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); + { + let now = timestamp(); + let mut gossip = cluster_info.gossip.write().unwrap(); + // First node is pushed to crds table by ClusterInfo constructor. + for node in nodes.iter().skip(1) { + let node = CrdsData::ContactInfo(node.clone()); + let node = CrdsValue::new_unsigned(node); + assert_eq!(gossip.crds.insert(node, now), Ok(())); + } + } + (nodes, stakes, cluster_info) + } + + #[test] + fn test_cluster_nodes_retransmit() { + let mut rng = rand::thread_rng(); + let (nodes, stakes, cluster_info) = make_cluster(&mut rng); + let this_node = cluster_info.my_contact_info(); + // ClusterInfo::tvu_peers excludes the node itself. + assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1); + let cluster_nodes = ClusterNodes::::new(&cluster_info, &stakes); + // All nodes with contact-info should be in the index. + assert_eq!(cluster_nodes.index.len(), nodes.len()); + // Staked nodes with no contact-info should be included. + assert!(cluster_nodes.nodes.len() > nodes.len()); + // Assert that all nodes keep their contact-info. + // and, all staked nodes are also included. + { + let cluster_nodes: HashMap<_, _> = cluster_nodes + .nodes + .iter() + .map(|node| (node.pubkey(), node)) + .collect(); + for node in &nodes { + assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id); + } + for (pubkey, stake) in &stakes { + if *stake > 0 { + assert_eq!(cluster_nodes[pubkey].stake, *stake); + } + } + } + let (peers, stakes_and_index) = + sorted_retransmit_peers_and_stakes(&cluster_info, Some(&stakes)); + assert_eq!(stakes_and_index.len(), peers.len()); + assert_eq!(cluster_nodes.index.len(), peers.len()); + for (i, node) in cluster_nodes + .index + .iter() + .map(|(_, i)| &cluster_nodes.nodes[*i]) + .enumerate() + { + let (stake, index) = stakes_and_index[i]; + // Wallclock may be update by ClusterInfo::push_self. + if node.pubkey() == this_node.id { + assert_eq!(this_node.id, peers[index].id) + } else { + assert_eq!(node.contact_info().unwrap(), &peers[index]); + } + assert_eq!(node.stake.max(1), stake); + } + let slot_leader = nodes[1..].choose(&mut rng).unwrap().id; + // Remove slot leader from peers indices. 
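
// [Illustration only, not part of the patch; the test resumes below.] The
// index math this test cross-checks: in the shuffled order, layer k of the
// turbine tree holds fanout^k nodes, a node shares a neighborhood with the
// rest of its fanout-sized chunk, and its children sit one layer down at a
// fixed stride. A simplified, std-only reading of compute_retransmit_peers
// over positions 0..n (the real function maps these positions to peers):
fn retransmit_positions(fanout: usize, i: usize, n: usize) -> (Vec<usize>, Vec<usize>) {
    let offset = i % fanout; // position within the neighborhood
    let anchor = i - offset; // first position of the neighborhood
    let neighbors = (anchor..n.min(anchor + fanout)).collect();
    let children = ((anchor + 1) * fanout + offset..n)
        .step_by(fanout)
        .take(fanout)
        .collect();
    (neighbors, children)
}

fn main() {
    // Fanout 2 over 7 positions: node 1 shares neighborhood {0, 1} and feeds
    // positions {3, 5} in the next layer.
    assert_eq!(retransmit_positions(2, 1, 7), (vec![0, 1], vec![3, 5]));
}
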
+ let stakes_and_index: Vec<_> = stakes_and_index + .into_iter() + .filter(|(_stake, index)| peers[*index].id != slot_leader) + .collect(); + assert_eq!(peers.len(), stakes_and_index.len() + 1); + let mut shred_seed = [0u8; 32]; + rng.fill(&mut shred_seed[..]); + let (self_index, shuffled_peers_and_stakes) = + shuffle_peers_and_index(&this_node.id, &peers, &stakes_and_index, shred_seed); + let shuffled_index: Vec<_> = shuffled_peers_and_stakes + .into_iter() + .map(|(_, index)| index) + .collect(); + assert_eq!(this_node.id, peers[shuffled_index[self_index]].id); + for fanout in 1..200 { + let (neighbors_indices, children_indices) = + compute_retransmit_peers(fanout, self_index, &shuffled_index); + let (neighbors, children) = + cluster_nodes.get_retransmit_peers(shred_seed, fanout, Some(slot_leader)); + assert_eq!(children.len(), children_indices.len()); + for (node, index) in children.into_iter().zip(children_indices) { + assert_eq!(*node, peers[index]); + } + assert_eq!(neighbors.len(), neighbors_indices.len()); + assert_eq!(neighbors[0].id, peers[neighbors_indices[0]].id); + for (node, index) in neighbors.into_iter().zip(neighbors_indices).skip(1) { + assert_eq!(*node, peers[index]); + } + } + } + + #[test] + fn test_cluster_nodes_broadcast() { + let mut rng = rand::thread_rng(); + let (nodes, stakes, cluster_info) = make_cluster(&mut rng); + // ClusterInfo::tvu_peers excludes the node itself. + assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1); + let cluster_nodes = ClusterNodes::::new(&cluster_info, &stakes); + // All nodes with contact-info should be in the index. + // Excluding this node itself. + assert_eq!(cluster_nodes.index.len() + 1, nodes.len()); + // Staked nodes with no contact-info should be included. + assert!(cluster_nodes.nodes.len() > nodes.len()); + // Assert that all nodes keep their contact-info. + // and, all staked nodes are also included. 
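
// [Illustration only, not part of the patch; the test body continues below.]
// What the weighted_best round-trip at the end of this test depends on: the
// 32-byte shred seed must pick the same stake-weighted peer deterministically
// on every node. A sketch of seeded weighted selection with the rand and
// rand_chacha crates (assumed available; solana_gossip's weighted_best
// differs in implementation detail):
use rand::distributions::{Distribution, WeightedIndex};
use rand::SeedableRng;

fn pick_weighted(weights: &[u64], seed: [u8; 32]) -> usize {
    let mut rng = rand_chacha::ChaChaRng::from_seed(seed);
    WeightedIndex::new(weights.iter().copied())
        .expect("weights must be non-empty with a non-zero sum")
        .sample(&mut rng)
}

fn main() {
    let seed = [42u8; 32];
    // Same seed, same choice, every time and on every node.
    assert_eq!(pick_weighted(&[1, 10, 100], seed), pick_weighted(&[1, 10, 100], seed));
}
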
+ { + let cluster_nodes: HashMap<_, _> = cluster_nodes + .nodes + .iter() + .map(|node| (node.pubkey(), node)) + .collect(); + for node in &nodes { + assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id); + } + for (pubkey, stake) in &stakes { + if *stake > 0 { + assert_eq!(cluster_nodes[pubkey].stake, *stake); + } + } + } + let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(&stakes)); + assert_eq!(peers_and_stakes.len(), peers.len()); + assert_eq!(cluster_nodes.index.len(), peers.len()); + for (i, node) in cluster_nodes + .index + .iter() + .map(|(_, i)| &cluster_nodes.nodes[*i]) + .enumerate() + { + let (stake, index) = peers_and_stakes[i]; + assert_eq!(node.contact_info().unwrap(), &peers[index]); + assert_eq!(node.stake.max(1), stake); + } + for _ in 0..100 { + let mut shred_seed = [0u8; 32]; + rng.fill(&mut shred_seed[..]); + let index = weighted_best(&peers_and_stakes, shred_seed); + let peer = cluster_nodes.get_broadcast_peer(shred_seed).unwrap(); + assert_eq!(*peer, peers[index]); + } + } +} diff --git a/core/src/cluster_slot_state_verifier.rs b/core/src/cluster_slot_state_verifier.rs index 2bc47e2a75ffd9..2ad5090ce5c5e8 100644 --- a/core/src/cluster_slot_state_verifier.rs +++ b/core/src/cluster_slot_state_verifier.rs @@ -3,8 +3,9 @@ use crate::{ progress_map::ProgressMap, }; use solana_sdk::{clock::Slot, hash::Hash}; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; +pub(crate) type DuplicateSlotsTracker = BTreeSet; pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap; type SlotStateHandler = fn(Slot, &Hash, Option<&Hash>, bool, bool) -> Vec; @@ -191,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>( slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash ); } - Some(&local_frozen_hash) + Some(local_frozen_hash) } (Some(local_frozen_hash), None) => Some(local_frozen_hash), _ => gossip_duplicate_confirmed_hash, @@ -200,19 +201,12 @@ fn get_cluster_duplicate_confirmed_hash<'a>( fn apply_state_changes( slot: Slot, - progress: &mut ProgressMap, fork_choice: &mut HeaviestSubtreeForkChoice, - ancestors: &HashMap>, - descendants: &HashMap>, state_changes: Vec, ) { for state_change in state_changes { match state_change { ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash) => { - progress.set_unconfirmed_duplicate_slot( - slot, - descendants.get(&slot).unwrap_or(&HashSet::default()), - ); fork_choice.mark_fork_invalid_candidate(&(slot, bank_frozen_hash)); } ResultingStateChange::RepairDuplicateConfirmedVersion( @@ -223,25 +217,20 @@ fn apply_state_changes( repair_correct_version(slot, &cluster_duplicate_confirmed_hash); } ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(bank_frozen_hash) => { - progress.set_confirmed_duplicate_slot( - slot, - ancestors.get(&slot).unwrap_or(&HashSet::default()), - descendants.get(&slot).unwrap_or(&HashSet::default()), - ); fork_choice.mark_fork_valid_candidate(&(slot, bank_frozen_hash)); } } } } +#[allow(clippy::too_many_arguments)] pub(crate) fn check_slot_agrees_with_cluster( slot: Slot, root: Slot, frozen_hash: Option, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, - ancestors: &HashMap>, - descendants: &HashMap>, - progress: &mut ProgressMap, + progress: &ProgressMap, fork_choice: &mut HeaviestSubtreeForkChoice, slot_state_update: SlotStateUpdate, ) { @@ -258,6 +247,15 @@ pub(crate) fn check_slot_agrees_with_cluster( return; } + // Needs to happen before 
the frozen_hash.is_none() check below to account for duplicate + // signals arriving before the bank is constructed in replay. + if matches!(slot_state_update, SlotStateUpdate::Duplicate) { + // If this slot has already been processed before, return + if !duplicate_slots_tracker.insert(slot) { + return; + } + } + if frozen_hash.is_none() { // If the bank doesn't even exist in BankForks yet, // then there's nothing to do as replay of the slot @@ -268,25 +266,18 @@ pub(crate) fn check_slot_agrees_with_cluster( let frozen_hash = frozen_hash.unwrap(); let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot); - let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map"); + // If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version + // this slot through replay yet. + let is_local_replay_duplicate_confirmed = fork_choice + .is_duplicate_confirmed(&(slot, frozen_hash)) + .unwrap_or(false); let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash( slot, gossip_duplicate_confirmed_hash, &frozen_hash, is_local_replay_duplicate_confirmed, ); - let mut is_slot_duplicate = - progress.is_unconfirmed_duplicate(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map"); - if matches!(slot_state_update, SlotStateUpdate::Duplicate) { - if is_slot_duplicate { - // Already processed duplicate signal for this slot, no need to continue - return; - } else { - // Otherwise, mark the slot as duplicate so the appropriate state changes - // will trigger - is_slot_duplicate = true; - } - } + let is_slot_duplicate = duplicate_slots_tracker.contains(&slot); let is_dead = progress.is_dead(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map"); info!( @@ -309,14 +300,7 @@ pub(crate) fn check_slot_agrees_with_cluster( is_slot_duplicate, is_dead, ); - apply_state_changes( - slot, - progress, - fork_choice, - ancestors, - descendants, - state_changes, - ); + apply_state_changes(slot, fork_choice, state_changes); } #[cfg(test)] @@ -324,15 +308,16 @@ mod test { use super::*; use crate::consensus::test::VoteSimulator; use solana_runtime::bank_forks::BankForks; - use std::sync::RwLock; + use std::{ + collections::{HashMap, HashSet}, + sync::RwLock, + }; use trees::tr; struct InitialState { heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice, progress: ProgressMap, - ancestors: HashMap>, descendants: HashMap>, - slot: Slot, bank_forks: RwLock, } @@ -341,7 +326,6 @@ mod test { let forks = tr(0) / (tr(1) / (tr(2) / tr(3))); let mut vote_simulator = VoteSimulator::new(1); vote_simulator.fill_bank_forks(forks, &HashMap::new()); - let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); let descendants = vote_simulator .bank_forks @@ -353,9 +337,7 @@ mod test { InitialState { heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice, progress: vote_simulator.progress, - ancestors, descendants, - slot: 0, bank_forks: vote_simulator.bank_forks, } } @@ -626,75 +608,159 @@ mod test { // Common state let InitialState { mut heaviest_subtree_fork_choice, - mut progress, - ancestors, descendants, - slot, bank_forks, + .. 
} = setup(); // MarkSlotDuplicate should mark progress map and remove // the slot from fork choice - let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + let duplicate_slot = bank_forks.read().unwrap().root() + 1; + let duplicate_slot_hash = bank_forks + .read() + .unwrap() + .get(duplicate_slot) + .unwrap() + .hash(); apply_state_changes( - slot, - &mut progress, + duplicate_slot, &mut heaviest_subtree_fork_choice, - &ancestors, - &descendants, - vec![ResultingStateChange::MarkSlotDuplicate(slot_hash)], + vec![ResultingStateChange::MarkSlotDuplicate(duplicate_slot_hash)], ); assert!(!heaviest_subtree_fork_choice - .is_candidate_slot(&(slot, slot_hash)) + .is_candidate(&(duplicate_slot, duplicate_slot_hash)) .unwrap()); for child_slot in descendants - .get(&slot) + .get(&duplicate_slot) .unwrap() .iter() - .chain(std::iter::once(&slot)) + .chain(std::iter::once(&duplicate_slot)) { assert_eq!( - progress - .latest_unconfirmed_duplicate_ancestor(*child_slot) + heaviest_subtree_fork_choice + .latest_invalid_ancestor(&( + *child_slot, + bank_forks.read().unwrap().get(*child_slot).unwrap().hash() + )) .unwrap(), - slot + duplicate_slot ); } // DuplicateConfirmedSlotMatchesCluster should re-enable fork choice apply_state_changes( - slot, - &mut progress, + duplicate_slot, &mut heaviest_subtree_fork_choice, - &ancestors, - &descendants, vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster( - slot_hash, + duplicate_slot_hash, )], ); for child_slot in descendants - .get(&slot) + .get(&duplicate_slot) .unwrap() .iter() - .chain(std::iter::once(&slot)) + .chain(std::iter::once(&duplicate_slot)) { - assert!(progress - .latest_unconfirmed_duplicate_ancestor(*child_slot) + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&( + *child_slot, + bank_forks.read().unwrap().get(*child_slot).unwrap().hash() + )) .is_none()); } assert!(heaviest_subtree_fork_choice - .is_candidate_slot(&(slot, slot_hash)) + .is_candidate(&(duplicate_slot, duplicate_slot_hash)) .unwrap()); } + fn run_test_state_duplicate_then_bank_frozen(initial_bank_hash: Option) { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + progress, + bank_forks, + .. + } = setup(); + + // Setup a duplicate slot state transition with the initial bank state of the duplicate slot + // determined by `initial_bank_hash`, which can be: + // 1) A default hash (unfrozen bank), + // 2) None (a slot that hasn't even started replay yet). 
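
// [Illustration only, not part of the patch; the test body continues below.]
// The dedup this test exercises: DuplicateSlotsTracker is a BTreeSet<Slot>,
// and BTreeSet::insert returns false for a key that is already present, so a
// repeated Duplicate signal for the same slot returns early instead of
// re-applying state changes. Std-only:
use std::collections::BTreeSet;

fn on_duplicate_signal(tracker: &mut BTreeSet<u64>, slot: u64) -> bool {
    // true: first signal for this slot, proceed; false: caller returns early.
    tracker.insert(slot)
}

fn main() {
    let mut tracker = BTreeSet::new();
    assert!(on_duplicate_signal(&mut tracker, 2)); // process
    assert!(!on_duplicate_signal(&mut tracker, 2)); // already seen, skip
}
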
+ let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let duplicate_slot = 2; + check_slot_agrees_with_cluster( + duplicate_slot, + root, + initial_bank_hash, + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + assert!(duplicate_slots_tracker.contains(&duplicate_slot)); + // Nothing should be applied yet to fork choice, since bank was not yet frozen + for slot in 2..=3 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .is_none()); + } + + // Now freeze the bank + let frozen_duplicate_slot_hash = bank_forks + .read() + .unwrap() + .get(duplicate_slot) + .unwrap() + .hash(); + check_slot_agrees_with_cluster( + duplicate_slot, + root, + Some(frozen_duplicate_slot_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Frozen, + ); + + // Progress map should have the correct updates, fork choice should mark duplicate + // as unvotable + assert!(heaviest_subtree_fork_choice + .is_unconfirmed_duplicate(&(duplicate_slot, frozen_duplicate_slot_hash)) + .unwrap()); + + // The ancestor of the duplicate slot should be the best slot now + let (duplicate_ancestor, duplicate_parent_hash) = { + let r_bank_forks = bank_forks.read().unwrap(); + let parent_bank = r_bank_forks.get(duplicate_slot).unwrap().parent().unwrap(); + (parent_bank.slot(), parent_bank.hash()) + }; + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (duplicate_ancestor, duplicate_parent_hash) + ); + } + + #[test] + fn test_state_unfrozen_bank_duplicate_then_bank_frozen() { + run_test_state_duplicate_then_bank_frozen(Some(Hash::default())); + } + + #[test] + fn test_state_unreplayed_bank_duplicate_then_bank_frozen() { + run_test_state_duplicate_then_bank_frozen(None); + } + #[test] fn test_state_ancestor_confirmed_descendant_duplicate() { // Common state let InitialState { mut heaviest_subtree_fork_choice, - mut progress, - ancestors, - descendants, + progress, bank_forks, .. 
} = setup(); @@ -705,6 +771,7 @@ mod test { (3, slot3_hash) ); let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); // Mark slot 2 as duplicate confirmed @@ -714,36 +781,67 @@ mod test { 2, root, Some(slot2_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut heaviest_subtree_fork_choice, SlotStateUpdate::DuplicateConfirmed, ); - + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(2, slot2_hash)) + .unwrap()); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), (3, slot3_hash) ); + for slot in 0..=2 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(slot, slot_hash)) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .is_none()); + } - // Mark 3 as duplicate, should not remove slot 2 from fork choice + // Mark 3 as duplicate, should not remove the duplicate confirmed slot 2 from + // fork choice check_slot_agrees_with_cluster( 3, root, Some(slot3_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut heaviest_subtree_fork_choice, SlotStateUpdate::Duplicate, ); - + assert!(duplicate_slots_tracker.contains(&3)); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), (2, slot2_hash) ); + for slot in 0..=3 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + if slot <= 2 { + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(slot, slot_hash)) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .is_none()); + } else { + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(slot, slot_hash)) + .unwrap()); + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .unwrap(), + 3 + ); + } + } } #[test] @@ -751,9 +849,7 @@ mod test { // Common state let InitialState { mut heaviest_subtree_fork_choice, - mut progress, - ancestors, - descendants, + progress, bank_forks, .. 
} = setup(); @@ -764,19 +860,30 @@ mod test { (3, slot3_hash) ); let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); - // Mark 2 as duplicate confirmed + + // Mark 2 as duplicate check_slot_agrees_with_cluster( 2, root, Some(bank_forks.read().unwrap().get(2).unwrap().hash()), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut heaviest_subtree_fork_choice, SlotStateUpdate::Duplicate, ); + assert!(duplicate_slots_tracker.contains(&2)); + for slot in 2..=3 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .unwrap(), + 2 + ); + } let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash(); assert_eq!( @@ -790,14 +897,93 @@ mod test { 3, root, Some(slot3_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut heaviest_subtree_fork_choice, SlotStateUpdate::DuplicateConfirmed, ); + for slot in 0..=3 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(slot, slot_hash)) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .is_none()); + } + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + } + #[test] + fn test_state_descendant_confirmed_ancestor_duplicate() { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + progress, + bank_forks, + .. + } = setup(); + + let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + + // Mark 3 as duplicate confirmed + gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + check_slot_agrees_with_cluster( + 3, + root, + Some(slot3_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + let verify_all_slots_duplicate_confirmed = + |bank_forks: &RwLock, + heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| { + for slot in 0..=3 { + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(&(slot, slot_hash)) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(&(slot, slot_hash)) + .is_none()); + } + }; + verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + + // Mark ancestor 1 as duplicate, fork choice should be unaffected since + // slot 1 was duplicate confirmed by the confirmation on its + // descendant, 3. 
+ let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash(); + check_slot_agrees_with_cluster( + 1, + root, + Some(slot1_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + assert!(duplicate_slots_tracker.contains(&1)); + verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), (3, slot3_hash) diff --git a/core/src/cluster_slots.rs b/core/src/cluster_slots.rs index 7a9ca5fd3ddc91..1eee4ff29e8ace 100644 --- a/core/src/cluster_slots.rs +++ b/core/src/cluster_slots.rs @@ -1,20 +1,24 @@ -use crate::serve_repair::RepairType; -use itertools::Itertools; -use solana_gossip::{ - cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots, -}; -use solana_runtime::{bank_forks::BankForks, epoch_stakes::NodeIdToVoteAccounts}; -use solana_sdk::{clock::Slot, pubkey::Pubkey}; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - sync::{Arc, Mutex, RwLock}, +use { + itertools::Itertools, + solana_gossip::{ + cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots, + }, + solana_runtime::{bank::Bank, epoch_stakes::NodeIdToVoteAccounts}, + solana_sdk::{ + clock::{Slot, DEFAULT_SLOTS_PER_EPOCH}, + pubkey::Pubkey, + }, + std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex, RwLock}, + }, }; // Limit the size of cluster-slots map in case // of receiving bogus epoch slots values. const CLUSTER_SLOTS_TRIM_SIZE: usize = 524_288; // 512K -pub type SlotPubkeys = HashMap; +pub(crate) type SlotPubkeys = HashMap; #[derive(Default)] pub struct ClusterSlots { @@ -25,20 +29,21 @@ pub struct ClusterSlots { } impl ClusterSlots { - pub fn lookup(&self, slot: Slot) -> Option>> { + pub(crate) fn lookup(&self, slot: Slot) -> Option>> { self.cluster_slots.read().unwrap().get(&slot).cloned() } - pub fn update(&self, root: Slot, cluster_info: &ClusterInfo, bank_forks: &RwLock) { - self.update_peers(bank_forks); + pub(crate) fn update(&self, root_bank: &Bank, cluster_info: &ClusterInfo) { + self.update_peers(root_bank); let epoch_slots = { let mut cursor = self.cursor.lock().unwrap(); cluster_info.get_epoch_slots(&mut cursor) }; - self.update_internal(root, epoch_slots); + let num_epoch_slots = root_bank.get_slots_in_epoch(root_bank.epoch()); + self.update_internal(root_bank.slot(), epoch_slots, num_epoch_slots); } - fn update_internal(&self, root: Slot, epoch_slots_list: Vec) { + fn update_internal(&self, root: Slot, epoch_slots_list: Vec, num_epoch_slots: u64) { // Attach validator's total stake. let epoch_slots_list: Vec<_> = { let validator_stakes = self.validator_stakes.read().unwrap(); @@ -53,13 +58,20 @@ impl ClusterSlots { }) .collect() }; + // Discard slots at or before current root or epochs ahead. 
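
// [Illustration only, not part of the patch; the slot_range code the comment
// above describes follows right below.] The clamp in miniature: accept only
// slots strictly after the root and within roughly two epochs of it, so bogus
// EpochSlots values cannot bloat the cluster-slots map. Std-only, with
// solana_sdk's default epoch length inlined:
fn accepted(root: u64, num_epoch_slots: u64, slot: u64) -> bool {
    const DEFAULT_SLOTS_PER_EPOCH: u64 = 432_000;
    let slot_range = (root + 1)
        ..root.saturating_add(num_epoch_slots.max(DEFAULT_SLOTS_PER_EPOCH).saturating_mul(2));
    slot_range.contains(&slot)
}

fn main() {
    assert!(!accepted(100, 432_000, 100)); // at the root: discarded
    assert!(accepted(100, 432_000, 101)); // immediately after: kept
    assert!(!accepted(100, 432_000, u64::MAX)); // epochs ahead: discarded
}
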
+ let slot_range = (root + 1) + ..root.saturating_add( + num_epoch_slots + .max(DEFAULT_SLOTS_PER_EPOCH) + .saturating_mul(2), + ); let slot_nodes_stakes = epoch_slots_list .into_iter() .flat_map(|(epoch_slots, stake)| { epoch_slots .to_slots(root) .into_iter() - .filter(|slot| *slot > root) + .filter(|slot| slot_range.contains(slot)) .zip(std::iter::repeat((epoch_slots.from, stake))) }) .into_group_map(); @@ -89,16 +101,6 @@ impl ClusterSlots { } } - pub fn collect(&self, id: &Pubkey) -> HashSet { - self.cluster_slots - .read() - .unwrap() - .iter() - .filter(|(_, keys)| keys.read().unwrap().contains_key(id)) - .map(|(slot, _)| *slot) - .collect() - } - #[cfg(test)] pub(crate) fn insert_node_id(&self, slot: Slot, node_id: Pubkey) { let balance = self @@ -118,8 +120,7 @@ impl ClusterSlots { slot_pubkeys.write().unwrap().insert(node_id, balance); } - fn update_peers(&self, bank_forks: &RwLock) { - let root_bank = bank_forks.read().unwrap().root_bank(); + fn update_peers(&self, root_bank: &Bank) { let root_epoch = root_bank.epoch(); let my_epoch = *self.epoch.read().unwrap(); @@ -135,7 +136,10 @@ impl ClusterSlots { } } - pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec { + pub(crate) fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec { + if repair_peers.is_empty() { + return Vec::default(); + } let stakes = { let validator_stakes = self.validator_stakes.read().unwrap(); repair_peers @@ -162,7 +166,7 @@ impl ClusterSlots { .collect() } - pub fn compute_weights_exclude_noncomplete( + pub(crate) fn compute_weights_exclude_noncomplete( &self, slot: Slot, repair_peers: &[ContactInfo], @@ -178,21 +182,6 @@ impl ClusterSlots { }) .collect() } - - pub fn generate_repairs_for_missing_slots( - &self, - self_id: &Pubkey, - root: Slot, - ) -> Vec { - let my_slots = self.collect(self_id); - self.cluster_slots - .read() - .unwrap() - .keys() - .filter(|x| **x > root && !my_slots.contains(*x)) - .map(|x| RepairType::HighestShred(*x, 0)) - .collect() - } } #[cfg(test)] @@ -209,7 +198,7 @@ mod tests { #[test] fn test_update_noop() { let cs = ClusterSlots::default(); - cs.update_internal(0, vec![]); + cs.update_internal(0, vec![], DEFAULT_SLOTS_PER_EPOCH); assert!(cs.cluster_slots.read().unwrap().is_empty()); } @@ -217,7 +206,7 @@ mod tests { fn test_update_empty() { let cs = ClusterSlots::default(); let epoch_slot = EpochSlots::default(); - cs.update_internal(0, vec![epoch_slot]); + cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH); assert!(cs.lookup(0).is_none()); } @@ -227,7 +216,7 @@ mod tests { let cs = ClusterSlots::default(); let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[0], 0); - cs.update_internal(0, vec![epoch_slot]); + cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH); assert!(cs.lookup(0).is_none()); } @@ -236,7 +225,7 @@ mod tests { let cs = ClusterSlots::default(); let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[1], 0); - cs.update_internal(0, vec![epoch_slot]); + cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH); assert!(cs.lookup(0).is_none()); assert!(cs.lookup(1).is_some()); assert_eq!( @@ -366,7 +355,7 @@ mod tests { ); *cs.validator_stakes.write().unwrap() = map; - cs.update_internal(0, vec![epoch_slot]); + cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH); assert!(cs.lookup(1).is_some()); assert_eq!( cs.lookup(1) @@ -377,40 +366,4 @@ mod tests { Some(&1) ); } - - #[test] - fn test_generate_repairs() { - let cs = ClusterSlots::default(); - let mut 
epoch_slot = EpochSlots::default(); - epoch_slot.fill(&[1], 0); - cs.update_internal(0, vec![epoch_slot]); - let self_id = solana_sdk::pubkey::new_rand(); - assert_eq!( - cs.generate_repairs_for_missing_slots(&self_id, 0), - vec![RepairType::HighestShred(1, 0)] - ) - } - - #[test] - fn test_collect_my_slots() { - let cs = ClusterSlots::default(); - let mut epoch_slot = EpochSlots::default(); - epoch_slot.fill(&[1], 0); - let self_id = epoch_slot.from; - cs.update_internal(0, vec![epoch_slot]); - let slots: Vec = cs.collect(&self_id).into_iter().collect(); - assert_eq!(slots, vec![1]); - } - - #[test] - fn test_generate_repairs_existing() { - let cs = ClusterSlots::default(); - let mut epoch_slot = EpochSlots::default(); - epoch_slot.fill(&[1], 0); - let self_id = epoch_slot.from; - cs.update_internal(0, vec![epoch_slot]); - assert!(cs - .generate_repairs_for_missing_slots(&self_id, 0) - .is_empty()); - } } diff --git a/core/src/cluster_slots_service.rs b/core/src/cluster_slots_service.rs index 1f059521b67c57..d2f7e7a295f513 100644 --- a/core/src/cluster_slots_service.rs +++ b/core/src/cluster_slots_service.rs @@ -1,29 +1,32 @@ use crate::cluster_slots::ClusterSlots; +use crossbeam_channel::{Receiver, RecvTimeoutError, Sender}; use solana_gossip::cluster_info::ClusterInfo; -use solana_ledger::blockstore::{Blockstore, CompletedSlotsReceiver}; +use solana_ledger::blockstore::Blockstore; use solana_measure::measure::Measure; use solana_runtime::bank_forks::BankForks; -use solana_sdk::{clock::Slot, pubkey::Pubkey}; +use solana_sdk::clock::Slot; use std::{ sync::{ atomic::{AtomicBool, Ordering}, - mpsc::RecvTimeoutError, {Arc, RwLock}, }, thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, }; +pub type ClusterSlotsUpdateReceiver = Receiver>; +pub type ClusterSlotsUpdateSender = Sender>; + #[derive(Default, Debug)] struct ClusterSlotsServiceTiming { pub lowest_slot_elapsed: u64, - pub update_completed_slots_elapsed: u64, + pub process_cluster_slots_updates_elapsed: u64, } impl ClusterSlotsServiceTiming { - fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) { + fn update(&mut self, lowest_slot_elapsed: u64, process_cluster_slots_updates_elapsed: u64) { self.lowest_slot_elapsed += lowest_slot_elapsed; - self.update_completed_slots_elapsed += update_completed_slots_elapsed; + self.process_cluster_slots_updates_elapsed += process_cluster_slots_updates_elapsed; } } @@ -37,12 +40,11 @@ impl ClusterSlotsService { cluster_slots: Arc, bank_forks: Arc>, cluster_info: Arc, - completed_slots_receiver: CompletedSlotsReceiver, + cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, exit: Arc, ) -> Self { - let id = cluster_info.id(); - Self::initialize_lowest_slot(id, &blockstore, &cluster_info); - Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver); + Self::initialize_lowest_slot(&blockstore, &cluster_info); + Self::initialize_epoch_slots(&bank_forks, &cluster_info); let t_cluster_slots_service = Builder::new() .name("solana-cluster-slots-service".to_string()) .spawn(move || { @@ -51,7 +53,7 @@ impl ClusterSlotsService { cluster_slots, bank_forks, cluster_info, - completed_slots_receiver, + cluster_slots_update_receiver, exit, ) }) @@ -71,7 +73,7 @@ impl ClusterSlotsService { cluster_slots: Arc, bank_forks: Arc>, cluster_info: Arc, - completed_slots_receiver: CompletedSlotsReceiver, + cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, exit: Arc, ) { let mut cluster_slots_service_timing = 
ClusterSlotsServiceTiming::default(); @@ -80,7 +82,8 @@ impl ClusterSlotsService { if exit.load(Ordering::Relaxed) { break; } - let slots = match completed_slots_receiver.recv_timeout(Duration::from_millis(200)) { + let slots = match cluster_slots_update_receiver.recv_timeout(Duration::from_millis(200)) + { Ok(slots) => Some(slots), Err(RecvTimeoutError::Timeout) => None, Err(RecvTimeoutError::Disconnected) => { @@ -88,23 +91,26 @@ impl ClusterSlotsService { break; } }; - let new_root = bank_forks.read().unwrap().root(); - let id = cluster_info.id(); let mut lowest_slot_elapsed = Measure::start("lowest_slot_elapsed"); let lowest_slot = blockstore.lowest_slot(); - Self::update_lowest_slot(&id, lowest_slot, &cluster_info); + Self::update_lowest_slot(lowest_slot, &cluster_info); lowest_slot_elapsed.stop(); - let mut update_completed_slots_elapsed = - Measure::start("update_completed_slots_elapsed"); + let mut process_cluster_slots_updates_elapsed = + Measure::start("process_cluster_slots_updates_elapsed"); if let Some(slots) = slots { - Self::update_completed_slots(slots, &completed_slots_receiver, &cluster_info); + Self::process_cluster_slots_updates( + slots, + &cluster_slots_update_receiver, + &cluster_info, + ); } - cluster_slots.update(new_root, &cluster_info, &bank_forks); - update_completed_slots_elapsed.stop(); + let root_bank = bank_forks.read().unwrap().root_bank(); + cluster_slots.update(&root_bank, &cluster_info); + process_cluster_slots_updates_elapsed.stop(); cluster_slots_service_timing.update( lowest_slot_elapsed.as_us(), - update_completed_slots_elapsed.as_us(), + process_cluster_slots_updates_elapsed.as_us(), ); if last_stats.elapsed().as_secs() > 2 { @@ -116,8 +122,8 @@ impl ClusterSlotsService { i64 ), ( - "update_completed_slots_elapsed", - cluster_slots_service_timing.update_completed_slots_elapsed, + "process_cluster_slots_updates_elapsed", + cluster_slots_service_timing.process_cluster_slots_updates_elapsed, i64 ), ); @@ -127,12 +133,12 @@ impl ClusterSlotsService { } } - fn update_completed_slots( + fn process_cluster_slots_updates( mut slots: Vec, - completed_slots_receiver: &CompletedSlotsReceiver, + cluster_slots_update_receiver: &ClusterSlotsUpdateReceiver, cluster_info: &ClusterInfo, ) { - while let Ok(mut more) = completed_slots_receiver.try_recv() { + while let Ok(mut more) = cluster_slots_update_receiver.try_recv() { slots.append(&mut more); } #[allow(clippy::stable_sort_primitive)] @@ -143,57 +149,51 @@ impl ClusterSlotsService { } } - fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) { + fn initialize_lowest_slot(blockstore: &Blockstore, cluster_info: &ClusterInfo) { // Safe to set into gossip because by this time, the leader schedule cache should // also be updated with the latest root (done in blockstore_processor) and thus // will provide a schedule to window_service for any incoming shreds up to the // last_confirmed_epoch. 
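
// [Illustration only, not part of the patch; initialize_lowest_slot's body
// continues below.] The receive pattern of the service loop above: block
// briefly on recv_timeout, then drain whatever else is already queued with
// try_recv, so a burst of slot updates is coalesced into one deduplicated
// batch per iteration. A sketch against crossbeam-channel, as used by the patch:
use crossbeam_channel::{Receiver, RecvTimeoutError};
use std::time::Duration;

fn recv_slot_batch(receiver: &Receiver<Vec<u64>>) -> Result<Vec<u64>, RecvTimeoutError> {
    let mut slots = receiver.recv_timeout(Duration::from_millis(200))?;
    while let Ok(mut more) = receiver.try_recv() {
        slots.append(&mut more); // coalesce everything already queued
    }
    slots.sort_unstable();
    slots.dedup();
    Ok(slots)
}
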
- cluster_info.push_lowest_slot(id, blockstore.lowest_slot()); + cluster_info.push_lowest_slot(blockstore.lowest_slot()); } - fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) { - cluster_info.push_lowest_slot(*id, lowest_slot); + fn update_lowest_slot(lowest_slot: Slot, cluster_info: &ClusterInfo) { + cluster_info.push_lowest_slot(lowest_slot); } - fn initialize_epoch_slots( - blockstore: &Blockstore, - cluster_info: &ClusterInfo, - completed_slots_receiver: &CompletedSlotsReceiver, - ) { - let root = blockstore.last_root(); - let mut slots: Vec<_> = blockstore - .live_slots_iterator(root) - .filter_map(|(slot, slot_meta)| { - if slot_meta.is_full() { - Some(slot) - } else { - None - } - }) - .collect(); + fn initialize_epoch_slots(bank_forks: &RwLock, cluster_info: &ClusterInfo) { + // TODO: Should probably incorporate slots that were replayed on startup, + // and maybe some that were frozen < snapshot root in case validators restart + // from newer snapshots and lose history. + let frozen_banks = bank_forks.read().unwrap().frozen_banks(); + let mut frozen_bank_slots: Vec = frozen_banks.keys().cloned().collect(); + frozen_bank_slots.sort_unstable(); - while let Ok(mut more) = completed_slots_receiver.try_recv() { - slots.append(&mut more); - } - slots.sort_unstable(); - slots.dedup(); - if !slots.is_empty() { - cluster_info.push_epoch_slots(&slots); + if !frozen_bank_slots.is_empty() { + cluster_info.push_epoch_slots(&frozen_bank_slots); } } } #[cfg(test)] mod test { - use super::*; - use solana_gossip::{cluster_info::Node, crds_value::CrdsValueLabel}; + use { + super::*, + solana_gossip::{cluster_info::Node, crds_value::CrdsValueLabel}, + solana_sdk::{pubkey::Pubkey, signature::Keypair}, + solana_streamer::socket::SocketAddrSpace, + }; #[test] pub fn test_update_lowest_slot() { let pubkey = Pubkey::new_unique(); let node_info = Node::new_localhost_with_pubkey(&pubkey); - let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info); - ClusterSlotsService::update_lowest_slot(&pubkey, 5, &cluster_info); + let cluster_info = ClusterInfo::new( + node_info.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); + ClusterSlotsService::update_lowest_slot(5, &cluster_info); cluster_info.flush_push_queue(); let lowest = { let label = CrdsValueLabel::LowestSlot(pubkey); diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 02cb4732c651aa..fe10848b755704 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -352,15 +352,15 @@ mod tests { if *a <= root { let mut expected = BlockCommitment::default(); expected.increase_rooted_stake(lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 4 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(2, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 6 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(1, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } } assert_eq!(rooted_stake[0], (root, lamports)); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 8462ea54a82651..f4643afdd7533c 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1,4 +1,5 @@ use crate::{ + heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, 
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::{LockoutIntervals, ProgressMap}, }; @@ -103,13 +104,13 @@ pub(crate) struct ComputedBankState { pub my_latest_landed_vote: Option, } -#[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")] +#[frozen_abi(digest = "GMs1FxKteU7K4ZFRofMBqNhBpM4xkPVxfYod6R8DQmpT")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { node_pubkey: Pubkey, threshold_depth: usize, threshold_size: f64, - lockouts: VoteState, + vote_state: VoteState, last_vote: Vote, #[serde(skip)] // The blockhash used in the last vote transaction, may or may not equal the @@ -140,7 +141,7 @@ impl Default for Tower { node_pubkey: Pubkey::default(), threshold_depth: VOTE_THRESHOLD_DEPTH, threshold_size: VOTE_THRESHOLD_SIZE, - lockouts: VoteState::default(), + vote_state: VoteState::default(), last_vote: Vote::default(), last_timestamp: BlockTimestamp::default(), last_vote_tx_blockhash: Hash::default(), @@ -150,7 +151,7 @@ impl Default for Tower { last_switch_threshold_check: Option::default(), }; // VoteState::root_slot is ensured to be Some in Tower - tower.lockouts.root_slot = Some(Slot::default()); + tower.vote_state.root_slot = Some(Slot::default()); tower } } @@ -163,7 +164,7 @@ impl Tower { bank: &Bank, path: &Path, ) -> Self { - let path = Self::get_filename(&path, node_pubkey); + let path = Self::get_filename(path, node_pubkey); let tmp_path = Self::get_tmp_filename(&path); let mut tower = Self { node_pubkey: *node_pubkey, @@ -204,8 +205,8 @@ impl Tower { crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice( root_bank.deref(), bank_forks.frozen_banks().values().cloned().collect(), - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, ); let root = root_bank.slot(); @@ -217,13 +218,7 @@ impl Tower { ) .clone(); - Self::new( - &my_pubkey, - &vote_account, - root, - &heaviest_bank, - &ledger_path, - ) + Self::new(my_pubkey, vote_account, root, &heaviest_bank, ledger_path) } pub(crate) fn collect_vote_lockouts( @@ -378,7 +373,7 @@ impl Tower { } pub fn tower_slots(&self) -> Vec { - self.lockouts.tower() + self.vote_state.tower() } pub fn last_vote_tx_blockhash(&self) -> Hash { @@ -426,7 +421,7 @@ impl Tower { let last_voted_slot_in_bank = Self::last_voted_slot_in_bank(bank, vote_account_pubkey); // Returns the new root if one is made after applying a vote for the given bank to - // `self.lockouts` + // `self.vote_state` self.record_bank_vote_and_update_lockouts(bank.slot(), bank.hash(), last_voted_slot_in_bank) } @@ -439,7 +434,7 @@ impl Tower { trace!("{} record_vote for {}", self.node_pubkey, vote_slot); let old_root = self.root(); let mut new_vote = Self::apply_vote_and_generate_vote_diff( - &mut self.lockouts, + &mut self.vote_state, vote_slot, vote_hash, last_voted_slot_in_bank, @@ -507,12 +502,12 @@ impl Tower { // snapshot (slot N). In other words, there should be no possibility a Tower doesn't have // root, unlike young vote accounts. 
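
// [Illustration only, not part of the patch; Tower's methods continue below.]
// Background for the vote_state checks that follow: under Tower BFT a vote's
// lockout doubles with its confirmation count, so a vote on `slot` with
// confirmation count n cannot be abandoned for a conflicting fork until after
// slot + 2^n. Std-only, with solana_vote_program's INITIAL_LOCKOUT inlined:
fn lockout(confirmation_count: u32) -> u64 {
    const INITIAL_LOCKOUT: u64 = 2;
    INITIAL_LOCKOUT.pow(confirmation_count)
}

fn is_locked_out_at(vote_slot: u64, confirmation_count: u32, candidate_slot: u64) -> bool {
    // A conflicting candidate is rejected until the vote's lockout expires.
    candidate_slot <= vote_slot + lockout(confirmation_count)
}

fn main() {
    assert!(is_locked_out_at(10, 3, 18)); // locked out through slot 10 + 8
    assert!(!is_locked_out_at(10, 3, 19)); // expired, switching is possible
}
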
pub fn root(&self) -> Slot { - self.lockouts.root_slot.unwrap() + self.vote_state.root_slot.unwrap() } // a slot is recent if it's newer than the last vote we have pub fn is_recent(&self, slot: Slot) -> bool { - if let Some(last_voted_slot) = self.lockouts.last_voted_slot() { + if let Some(last_voted_slot) = self.vote_state.last_voted_slot() { if slot <= last_voted_slot { return false; } @@ -521,7 +516,7 @@ impl Tower { } pub fn has_voted(&self, slot: Slot) -> bool { - for vote in &self.lockouts.votes { + for vote in &self.vote_state.votes { if slot == vote.slot { return true; } @@ -538,15 +533,15 @@ impl Tower { // slot to the current lockouts to pop any expired votes. If any of the // remaining voted slots are on a different fork from the checked slot, // it's still locked out. - let mut lockouts = self.lockouts.clone(); - lockouts.process_slot_vote_unchecked(slot); - for vote in &lockouts.votes { + let mut vote_state = self.vote_state.clone(); + vote_state.process_slot_vote_unchecked(slot); + for vote in &vote_state.votes { if slot != vote.slot && !ancestors.contains(&vote.slot) { return true; } } - if let Some(root_slot) = lockouts.root_slot { + if let Some(root_slot) = vote_state.root_slot { if slot != root_slot { // This case should never happen because bank forks purges all // non-descendants of the root every time root is set @@ -573,6 +568,7 @@ impl Tower { .map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&last_voted_slot)) } + #[allow(clippy::too_many_arguments)] fn make_check_switch_threshold_decision( &self, switch_slot: u64, @@ -582,9 +578,10 @@ impl Tower { total_stake: u64, epoch_vote_accounts: &HashMap, latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks, + heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice, ) -> SwitchForkDecision { - self.last_voted_slot() - .map(|last_voted_slot| { + self.last_voted_slot_hash() + .map(|(last_voted_slot, last_voted_hash)| { let root = self.root(); let empty_ancestors = HashSet::default(); let empty_ancestors_due_to_minor_unsynced_ledger = || { @@ -673,7 +670,7 @@ impl Tower { if last_vote_ancestors.contains(&switch_slot) { if self.is_stray_last_vote() { return suspended_decision_due_to_major_unsynced_ledger(); - } else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) { + } else if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice.latest_invalid_ancestor(&(last_voted_slot, last_voted_hash)) { // We're rolling back because one of the ancestors of the last vote was a duplicate. In this // case, it's acceptable if the switch candidate is one of ancestors of the previous vote, // just fail the switch check because there's no point in voting on an ancestor. ReplayStage @@ -733,7 +730,7 @@ impl Tower { // finding any lockout intervals in the `lockout_intervals` tree // for this bank that contain `last_vote`. 
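
// [Illustration only, not part of the patch; the lockout_intervals walk
// continues below.] The accumulation being set up here, reduced to its
// arithmetic: sum the stake of voters whose lockout interval on another fork
// contains `last_vote`, and permit the switch once that stake exceeds
// SWITCH_FORK_THRESHOLD of total stake. Intervals are (start, end, stake)
// triples in this sketch; the real code also dedups voters across intervals.
fn switch_allowed(intervals: &[(u64, u64, u64)], last_vote: u64, total_stake: u64) -> bool {
    const SWITCH_FORK_THRESHOLD: f64 = 0.38; // consensus.rs constant
    let locked_out_stake: u64 = intervals
        .iter()
        .filter(|(start, end, _)| *start <= last_vote && last_vote <= *end)
        .map(|(_, _, stake)| stake)
        .sum();
    (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD
}

fn main() {
    assert!(switch_allowed(&[(5, 20, 39)], 10, 100)); // 39% > 38%
    assert!(!switch_allowed(&[(5, 20, 38)], 10, 100)); // exactly 38% is not enough
}
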
let lockout_intervals = &progress - .get(&candidate_slot) + .get(candidate_slot) .unwrap() .fork_stats .lockout_intervals; @@ -821,6 +818,7 @@ impl Tower { .unwrap_or(SwitchForkDecision::SameFork) } + #[allow(clippy::too_many_arguments)] pub(crate) fn check_switch_threshold( &mut self, switch_slot: u64, @@ -830,6 +828,7 @@ impl Tower { total_stake: u64, epoch_vote_accounts: &HashMap, latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks, + heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice, ) -> SwitchForkDecision { let decision = self.make_check_switch_threshold_decision( switch_slot, @@ -839,6 +838,7 @@ impl Tower { total_stake, epoch_vote_accounts, latest_validator_votes_for_frozen_banks, + heaviest_subtree_fork_choice, ); let new_check = Some((switch_slot, decision.clone())); if new_check != self.last_switch_threshold_check { @@ -862,9 +862,9 @@ impl Tower { voted_stakes: &VotedStakes, total_stake: Stake, ) -> bool { - let mut lockouts = self.lockouts.clone(); - lockouts.process_slot_vote_unchecked(slot); - let vote = lockouts.nth_recent_vote(self.threshold_depth); + let mut vote_state = self.vote_state.clone(); + vote_state.process_slot_vote_unchecked(slot); + let vote = vote_state.nth_recent_vote(self.threshold_depth); if let Some(vote) = vote { if let Some(fork_stake) = voted_stakes.get(&vote.slot) { let lockout = *fork_stake as f64 / total_stake as f64; @@ -873,7 +873,7 @@ impl Tower { slot, vote.slot, lockout, fork_stake, total_stake ); if vote.confirmation_count as usize > self.threshold_depth { - for old_vote in &self.lockouts.votes { + for old_vote in &self.vote_state.votes { if old_vote.slot == vote.slot && old_vote.confirmation_count == vote.confirmation_count { @@ -928,7 +928,7 @@ impl Tower { } fn voted_slots(&self) -> Vec { - self.lockouts + self.vote_state .votes .iter() .map(|lockout| lockout.slot) @@ -964,11 +964,11 @@ impl Tower { assert_eq!(slot_history.check(replayed_root), Check::Found); assert!( - self.last_vote == Vote::default() && self.lockouts.votes.is_empty() - || self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(), - "last vote: {:?} lockouts.votes: {:?}", + self.last_vote == Vote::default() && self.vote_state.votes.is_empty() + || self.last_vote != Vote::default() && !self.vote_state.votes.is_empty(), + "last vote: {:?} vote_state.votes: {:?}", self.last_vote, - self.lockouts.votes + self.vote_state.votes ); if let Some(last_voted_slot) = self.last_voted_slot() { @@ -1034,7 +1034,7 @@ impl Tower { let tower_root = self.root(); // retained slots will be consisted only from divergent slots let mut retain_flags_for_each_vote_in_reverse: Vec<_> = - Vec::with_capacity(self.lockouts.votes.len()); + Vec::with_capacity(self.vote_state.votes.len()); let mut still_in_future = true; let mut past_outside_history = false; @@ -1112,10 +1112,10 @@ impl Tower { let mut retain_flags_for_each_vote = retain_flags_for_each_vote_in_reverse.into_iter().rev(); - let original_votes_len = self.lockouts.votes.len(); + let original_votes_len = self.vote_state.votes.len(); self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap()); - if self.lockouts.votes.is_empty() { + if self.vote_state.votes.is_empty() { info!("All restored votes were behind; resetting root_slot and last_vote in tower!"); // we might not have banks for those votes so just reset. 
// That's because the votes may well past replayed_root @@ -1145,7 +1145,7 @@ impl Tower { bank: &Bank, ) { if let Some((_stake, vote_account)) = bank.get_vote_account(vote_account_pubkey) { - self.lockouts = vote_account + self.vote_state = vote_account .vote_state() .as_ref() .expect("vote_account isn't a VoteState?") @@ -1158,7 +1158,7 @@ impl Tower { bank.slot(), ); assert_eq!( - self.lockouts.node_pubkey, self.node_pubkey, + self.vote_state.node_pubkey, self.node_pubkey, "vote account's node_pubkey doesn't match", ); } else { @@ -1172,13 +1172,13 @@ impl Tower { } fn initialize_lockouts bool>(&mut self, should_retain: F) { - self.lockouts.votes.retain(should_retain); + self.vote_state.votes.retain(should_retain); } // Updating root is needed to correctly restore from newly-saved tower for the next // boot fn initialize_root(&mut self, root: Slot) { - self.lockouts.root_slot = Some(root); + self.vote_state.root_slot = Some(root); } pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf { @@ -1322,7 +1322,7 @@ pub fn reconcile_blockstore_roots_with_tower( if last_blockstore_root < tower_root { // Ensure tower_root itself to exist and be marked as rooted in the blockstore // in addition to its ancestors. - let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore) + let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore) .take_while(|current| match current.cmp(&last_blockstore_root) { Ordering::Greater => true, Ordering::Equal => false, @@ -1337,7 +1337,7 @@ pub fn reconcile_blockstore_roots_with_tower( "Reconciling slots as root based on tower root: {:?} ({}..{}) ", new_roots, tower_root, last_blockstore_root ); - blockstore.set_roots(&new_roots)?; + blockstore.set_roots(new_roots.iter())?; } else { // This indicates we're in bad state; but still don't panic here. 
// That's because we might have a chance of recovering properly with @@ -1358,11 +1358,11 @@ pub mod test { use super::*; use crate::{ cluster_info_vote_listener::VoteTracker, - cluster_slot_state_verifier::GossipDuplicateConfirmedSlots, + cluster_slot_state_verifier::{DuplicateSlotsTracker, GossipDuplicateConfirmedSlots}, cluster_slots::ClusterSlots, - fork_choice::SelectVoteAndResetForkResult, - heaviest_subtree_fork_choice::{HeaviestSubtreeForkChoice, SlotHashKey}, - progress_map::{DuplicateStats, ForkProgress}, + fork_choice::{ForkChoice, SelectVoteAndResetForkResult}, + heaviest_subtree_fork_choice::SlotHashKey, + progress_map::ForkProgress, replay_stage::{HeaviestForkFailures, ReplayStage}, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, }; @@ -1439,9 +1439,6 @@ pub mod test { while let Some(visit) = walk.get() { let slot = visit.node().data; - self.progress.entry(slot).or_insert_with(|| { - ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0) - }); if self.bank_forks.read().unwrap().get(slot).is_some() { walk.forward(); continue; @@ -1449,6 +1446,9 @@ pub mod test { let parent = walk.get_parent().unwrap().data; let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap().clone(); let new_bank = Bank::new_from_parent(&parent_bank, &Pubkey::default(), slot); + self.progress + .entry(slot) + .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)); for (pubkey, vote) in cluster_votes.iter() { if vote.contains(&parent) { let keypairs = self.validator_keypairs.get(pubkey).unwrap(); @@ -1484,7 +1484,7 @@ pub mod test { tower: &mut Tower, ) -> Vec { // Try to simulate the vote - let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap(); + let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap(); let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); let ancestors = self.bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = self @@ -1497,7 +1497,7 @@ pub mod test { .collect(); let _ = ReplayStage::compute_bank_stats( - &my_pubkey, + my_pubkey, &ancestors, &mut frozen_banks, tower, @@ -1530,6 +1530,7 @@ pub mod test { &self.progress, tower, &self.latest_validator_votes_for_frozen_banks, + &self.heaviest_subtree_fork_choice, ); // Make sure this slot isn't locked out or failing threshold @@ -1554,6 +1555,7 @@ pub mod test { &AbsRequestSender::default(), None, &mut self.heaviest_subtree_fork_choice, + &mut DuplicateSlotsTracker::default(), &mut GossipDuplicateConfirmedSlots::default(), &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, @@ -1574,9 +1576,9 @@ pub mod test { .filter_map(|slot| { let mut fork_tip_parent = tr(slot - 1); fork_tip_parent.push_front(tr(slot)); - self.fill_bank_forks(fork_tip_parent, &cluster_votes); + self.fill_bank_forks(fork_tip_parent, cluster_votes); if votes_to_simulate.contains(&slot) { - Some((slot, self.simulate_vote(slot, &my_pubkey, tower))) + Some((slot, self.simulate_vote(slot, my_pubkey, tower))) } else { None } @@ -1592,9 +1594,7 @@ pub mod test { ) { self.progress .entry(slot) - .or_insert_with(|| { - ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0) - }) + .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)) .fork_stats .lockout_intervals .entry(lockout_interval.1) @@ -1621,7 +1621,7 @@ pub mod test { fork_tip_parent.push_front(tr(start_slot + i)); self.fill_bank_forks(fork_tip_parent, cluster_votes); if self - .simulate_vote(i + start_slot, &my_pubkey, tower) + .simulate_vote(i + 
start_slot, my_pubkey, tower) .is_empty() { cluster_votes @@ -1701,10 +1701,10 @@ pub mod test { let mut progress = ProgressMap::default(); progress.insert( 0, - ForkProgress::new( - bank0.last_blockhash(), - None, - DuplicateStats::default(), + ForkProgress::new_from_bank( + &bank0, + bank0.collector_id(), + &Pubkey::default(), None, 0, 0, @@ -1800,8 +1800,8 @@ pub mod test { } for i in 0..5 { - assert_eq!(tower.lockouts.votes[i].slot as usize, i); - assert_eq!(tower.lockouts.votes[i].confirmation_count as usize, 6 - i); + assert_eq!(tower.vote_state.votes[i].slot as usize, i); + assert_eq!(tower.vote_state.votes[i].confirmation_count as usize, 6 - i); } } @@ -1867,21 +1867,46 @@ pub mod test { let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]); // Last vote is 47 - tower.record_vote(47, Hash::default()); + tower.record_vote( + 47, + vote_simulator + .bank_forks + .read() + .unwrap() + .get(47) + .unwrap() + .hash(), + ); // Trying to switch to an ancestor of last vote should only not panic // if the current vote has a duplicate ancestor let ancestor_of_voted_slot = 43; let duplicate_ancestor1 = 44; let duplicate_ancestor2 = 45; - vote_simulator.progress.set_unconfirmed_duplicate_slot( - duplicate_ancestor1, - &descendants.get(&duplicate_ancestor1).unwrap(), - ); - vote_simulator.progress.set_unconfirmed_duplicate_slot( - duplicate_ancestor2, - &descendants.get(&duplicate_ancestor2).unwrap(), - ); + vote_simulator + .heaviest_subtree_fork_choice + .mark_fork_invalid_candidate(&( + duplicate_ancestor1, + vote_simulator + .bank_forks + .read() + .unwrap() + .get(duplicate_ancestor1) + .unwrap() + .hash(), + )); + vote_simulator + .heaviest_subtree_fork_choice + .mark_fork_invalid_candidate(&( + duplicate_ancestor2, + vote_simulator + .bank_forks + .read() + .unwrap() + .get(duplicate_ancestor2) + .unwrap() + .hash(), + )); assert_eq!( tower.check_switch_threshold( ancestor_of_voted_slot, @@ -1890,7 +1915,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2) ); @@ -1903,11 +1929,18 @@ pub mod test { confirm_ancestors.push(duplicate_ancestor2); } for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() { - vote_simulator.progress.set_confirmed_duplicate_slot( - duplicate_ancestor, - ancestors.get(&duplicate_ancestor).unwrap(), - &descendants.get(&duplicate_ancestor).unwrap(), - ); + vote_simulator + .heaviest_subtree_fork_choice + .mark_fork_valid_candidate(&( + duplicate_ancestor, + vote_simulator + .bank_forks + .read() + .unwrap() + .get(duplicate_ancestor) + .unwrap() + .hash(), + )); let res = tower.check_switch_threshold( ancestor_of_voted_slot, &ancestors, @@ -1916,6 +1949,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ); if i == 0 { assert_eq!( @@ -1951,7 +1985,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SameFork ); @@ -1965,7 +2000,8 @@ pub mod test { &vote_simulator.progress, total_stake, 
bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -1981,7 +2017,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -1997,7 +2034,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2013,7 +2051,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2031,7 +2070,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2047,7 +2087,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2064,7 +2105,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2072,7 +2114,7 @@ pub mod test { // If we set a root, then any lockout intervals below the root shouldn't // count toward the switch threshold. This means the other validator's // vote lockout no longer counts - tower.lockouts.root_slot = Some(43); + tower.vote_state.root_slot = Some(43); // Refresh ancestors and descendants for new root. 
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); let descendants = vote_simulator @@ -2090,7 +2132,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2122,7 +2165,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000) ); @@ -2137,7 +2181,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2169,7 +2214,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2193,7 +2239,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2307,7 +2354,7 @@ pub mod test { .is_empty()); } - info!("local tower: {:#?}", tower.lockouts.votes); + info!("local tower: {:#?}", tower.vote_state.votes); let observed = vote_simulator .bank_forks .read() @@ -2399,14 +2446,14 @@ pub mod test { }; let root_weight = root.lockout() as u128; let vote_account_expected_weight = tower - .lockouts + .vote_state .votes .iter() .map(|v| v.lockout() as u128) .sum::<u128>() + root_weight; let expected_bank_weight = 2 * vote_account_expected_weight; - assert_eq!(tower.lockouts.root_slot, Some(0)); + assert_eq!(tower.vote_state.root_slot, Some(0)); let mut latest_validator_votes_for_frozen_banks = LatestValidatorVotesForFrozenBanks::default(); let ComputedBankState { @@ -2483,7 +2530,7 @@ pub mod test { fn test_is_locked_out_root_slot_child_pass() { let mut tower = Tower::new_for_tests(0, 0.67); let ancestors: HashSet<u64> = vec![0].into_iter().collect(); - tower.lockouts.root_slot = Some(0); + tower.vote_state.root_slot = Some(0); assert!(!tower.is_locked_out(1, &ancestors)); } @@ -2491,7 +2538,7 @@ pub mod test { fn test_is_locked_out_root_slot_sibling_fail() { let mut tower = Tower::new_for_tests(0, 0.67); let ancestors: HashSet<u64> = vec![0].into_iter().collect(); - tower.lockouts.root_slot = Some(0); + tower.vote_state.root_slot = Some(0); tower.record_vote(1, Hash::default()); assert!(tower.is_locked_out(2, &ancestors)); } @@ -2552,10 +2599,10 @@ pub mod test { tower.record_vote(1, Hash::default()); assert!(!tower.is_locked_out(4, &ancestors)); tower.record_vote(4, Hash::default()); - assert_eq!(tower.lockouts.votes[0].slot, 0); - assert_eq!(tower.lockouts.votes[0].confirmation_count, 2); - assert_eq!(tower.lockouts.votes[1].slot, 4); - 
assert_eq!(tower.lockouts.votes[1].confirmation_count, 1); + assert_eq!(tower.vote_state.votes[0].slot, 0); + assert_eq!(tower.vote_state.votes[0].confirmation_count, 2); + assert_eq!(tower.vote_state.votes[1].slot, 4); + assert_eq!(tower.vote_state.votes[1].confirmation_count, 1); } #[test] @@ -2804,7 +2851,7 @@ pub mod test { tower.save(&identity_keypair).unwrap(); modify_serialized(&tower.path); - let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey()); + let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey()); (tower, loaded) } @@ -2872,7 +2919,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SameFork ); @@ -2886,7 +2934,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2901,7 +2950,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2912,7 +2962,7 @@ pub mod test { tower.record_vote(110, Hash::default()); tower.record_vote(111, Hash::default()); assert_eq!(tower.voted_slots(), vec![43, 110, 111]); - assert_eq!(tower.lockouts.root_slot, Some(0)); + assert_eq!(tower.vote_state.root_slot, Some(0)); } // Prepare simulated validator restart! 
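Aside: the `confirmation_count` assertions in these tests rest on tower lockout doubling, where a vote with confirmation count n cannot be abandoned for 2^n slots after the voted slot. Below is a minimal, self-contained sketch of that arithmetic under the assumption INITIAL_LOCKOUT = 2; the toy `Lockout` type only mirrors, and is not, the solana_vote_program struct:

// Toy model of tower lockout doubling; standalone, not the VoteState types.
const INITIAL_LOCKOUT: u64 = 2;

struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

impl Lockout {
    // Each additional confirmation doubles the lockout period.
    fn lockout(&self) -> u64 {
        INITIAL_LOCKOUT.pow(self.confirmation_count)
    }

    // Last slot through which a conflicting fork may not be voted on.
    fn expiration_slot(&self) -> u64 {
        self.slot + self.lockout()
    }
}

fn main() {
    // Mirrors the tower state asserted above: votes on slots 0 and 4, where
    // the vote on 0 has been confirmed twice and the vote on 4 once.
    let votes = [
        Lockout { slot: 0, confirmation_count: 2 },
        Lockout { slot: 4, confirmation_count: 1 },
    ];
    for v in &votes {
        println!(
            "slot {}: lockout {}, locked out through slot {}",
            v.slot,
            v.lockout(),
            v.expiration_slot()
        );
    }
}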
@@ -2971,7 +3021,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2986,7 +3037,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -3001,7 +3053,8 @@ pub mod test { &vote_simulator.progress, total_stake, bank0.epoch_vote_accounts(0).unwrap(), - &vote_simulator.latest_validator_votes_for_frozen_banks + &vote_simulator.latest_validator_votes_for_frozen_banks, + &vote_simulator.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -3009,7 +3062,7 @@ pub mod test { tower.record_vote(110, Hash::default()); tower.record_vote(111, Hash::default()); assert_eq!(tower.voted_slots(), vec![110, 111]); - assert_eq!(tower.lockouts.root_slot, Some(replayed_root_slot)); + assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot)); } #[test] @@ -3099,7 +3152,7 @@ pub mod test { assert!(!blockstore.is_root(4)); let mut tower = Tower::new_with_key(&Pubkey::default()); - tower.lockouts.root_slot = Some(4); + tower.vote_state.root_slot = Some(4); reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap(); assert!(!blockstore.is_root(0)); @@ -3124,14 +3177,14 @@ pub mod test { blockstore.insert_shreds(shreds, None, false).unwrap(); let (shreds, _) = make_slot_entries(4, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - blockstore.set_roots(&[3]).unwrap(); + blockstore.set_roots(std::iter::once(&3)).unwrap(); assert!(!blockstore.is_root(0)); assert!(!blockstore.is_root(1)); assert!(blockstore.is_root(3)); assert!(!blockstore.is_root(4)); let mut tower = Tower::new_with_key(&Pubkey::default()); - tower.lockouts.root_slot = Some(4); + tower.vote_state.root_slot = Some(4); reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap(); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); @@ -3153,7 +3206,7 @@ pub mod test { assert!(!blockstore.is_root(3)); let mut tower = Tower::new_with_key(&Pubkey::default()); - tower.lockouts.root_slot = Some(4); + tower.vote_state.root_slot = Some(4); assert_eq!(blockstore.last_root(), 0); reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap(); assert_eq!(blockstore.last_root(), 0); @@ -3305,7 +3358,7 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.root_slot = Some(4); + tower.vote_state.root_slot = Some(4); tower.record_vote(5, Hash::default()); tower.record_vote(6, Hash::default()); @@ -3327,7 +3380,7 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.root_slot = Some(2); + tower.vote_state.root_slot = Some(2); tower.record_vote(3, Hash::default()); tower.record_vote(4, Hash::default()); tower.record_vote(5, Hash::default()); @@ -3383,8 +3436,8 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_time_warped() { let mut tower = Tower::new_for_tests(10, 0.9); - 
tower.lockouts.votes.push_back(Lockout::new(1)); - tower.lockouts.votes.push_back(Lockout::new(0)); + tower.vote_state.votes.push_back(Lockout::new(1)); + tower.vote_state.votes.push_back(Lockout::new(0)); let vote = Vote::new(vec![0], Hash::default()); tower.last_vote = vote; @@ -3401,8 +3454,8 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_diverged_ancestor() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.votes.push_back(Lockout::new(1)); - tower.lockouts.votes.push_back(Lockout::new(2)); + tower.vote_state.votes.push_back(Lockout::new(1)); + tower.vote_state.votes.push_back(Lockout::new(2)); let vote = Vote::new(vec![2], Hash::default()); tower.last_vote = vote; @@ -3423,11 +3476,11 @@ pub mod test { let mut tower = Tower::new_for_tests(10, 0.9); tower - .lockouts + .vote_state .votes .push_back(Lockout::new(MAX_ENTRIES - 1)); - tower.lockouts.votes.push_back(Lockout::new(0)); - tower.lockouts.votes.push_back(Lockout::new(1)); + tower.vote_state.votes.push_back(Lockout::new(0)); + tower.vote_state.votes.push_back(Lockout::new(1)); let vote = Vote::new(vec![1], Hash::default()); tower.last_vote = vote; @@ -3445,8 +3498,8 @@ pub mod test { #[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")] fn test_adjust_lockouts_after_replay_reversed_votes() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.votes.push_back(Lockout::new(2)); - tower.lockouts.votes.push_back(Lockout::new(1)); + tower.vote_state.votes.push_back(Lockout::new(2)); + tower.vote_state.votes.push_back(Lockout::new(1)); let vote = Vote::new(vec![1], Hash::default()); tower.last_vote = vote; @@ -3463,9 +3516,9 @@ pub mod test { #[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")] fn test_adjust_lockouts_after_replay_repeated_non_root_votes() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.votes.push_back(Lockout::new(2)); - tower.lockouts.votes.push_back(Lockout::new(3)); - tower.lockouts.votes.push_back(Lockout::new(3)); + tower.vote_state.votes.push_back(Lockout::new(2)); + tower.vote_state.votes.push_back(Lockout::new(3)); + tower.vote_state.votes.push_back(Lockout::new(3)); let vote = Vote::new(vec![3], Hash::default()); tower.last_vote = vote; @@ -3481,10 +3534,10 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_vote_on_root() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.root_slot = Some(42); - tower.lockouts.votes.push_back(Lockout::new(42)); - tower.lockouts.votes.push_back(Lockout::new(43)); - tower.lockouts.votes.push_back(Lockout::new(44)); + tower.vote_state.root_slot = Some(42); + tower.vote_state.votes.push_back(Lockout::new(42)); + tower.vote_state.votes.push_back(Lockout::new(43)); + tower.vote_state.votes.push_back(Lockout::new(44)); let vote = Vote::new(vec![44], Hash::default()); tower.last_vote = vote; @@ -3498,7 +3551,7 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_vote_on_genesis() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.votes.push_back(Lockout::new(0)); + tower.vote_state.votes.push_back(Lockout::new(0)); let vote = Vote::new(vec![0], Hash::default()); tower.last_vote = vote; @@ -3511,8 +3564,8 @@ pub mod test { #[test] fn test_adjust_lockouts_after_replay_future_tower() { let mut tower = Tower::new_for_tests(10, 0.9); - tower.lockouts.votes.push_back(Lockout::new(13)); - tower.lockouts.votes.push_back(Lockout::new(14)); + tower.vote_state.votes.push_back(Lockout::new(13)); + tower.vote_state.votes.push_back(Lockout::new(14)); let vote = 
Vote::new(vec![14], Hash::default()); tower.last_vote = vote; tower.initialize_root(12); diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 418e748d6cf85d..8aea7fe25cc0a5 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -1,11 +1,11 @@ //! The `fetch_stage` batches input from a UDP socket and sends it to a channel. use crate::banking_stage::HOLD_TRANSACTIONS_SLOT_OFFSET; -use crate::poh_recorder::PohRecorder; use crate::result::{Error, Result}; use solana_metrics::{inc_new_counter_debug, inc_new_counter_info}; use solana_perf::packet::PacketsRecycler; use solana_perf::recycler::Recycler; +use solana_poh::poh_recorder::PohRecorder; use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; use solana_streamer::streamer::{self, PacketReceiver, PacketSender}; use std::net::UdpSocket; @@ -34,7 +34,7 @@ impl FetchStage { tpu_forwards_sockets, exit, &sender, - &poh_recorder, + poh_recorder, coalesce_ms, ), receiver, @@ -54,8 +54,8 @@ impl FetchStage { tx_sockets, tpu_forwards_sockets, exit, - &sender, - &poh_recorder, + sender, + poh_recorder, coalesce_ms, ) } @@ -85,7 +85,7 @@ impl FetchStage { inc_new_counter_debug!("fetch_stage-honor_forwards", len); for packets in batch { if sendr.send(packets).is_err() { - return Err(Error::SendError); + return Err(Error::Send); } } } else { @@ -108,11 +108,12 @@ impl FetchStage { let tpu_threads = sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, sender.clone(), recycler.clone(), "fetch_stage", coalesce_ms, + true, ) }); @@ -120,11 +121,12 @@ impl FetchStage { let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, forward_sender.clone(), recycler.clone(), "fetch_forward_stage", coalesce_ms, + true, ) }); @@ -138,10 +140,10 @@ impl FetchStage { Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder) { match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, - Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), - Error::RecvError(_) => break, - Error::SendError => break, + Error::RecvTimeout(RecvTimeoutError::Disconnected) => break, + Error::RecvTimeout(RecvTimeoutError::Timeout) => (), + Error::Recv(_) => break, + Error::Send => break, _ => error!("{:?}", e), } } diff --git a/core/src/heaviest_subtree_fork_choice.rs b/core/src/heaviest_subtree_fork_choice.rs index fb1f10eac0ab1c..df75eaae7269da 100644 --- a/core/src/heaviest_subtree_fork_choice.rs +++ b/core/src/heaviest_subtree_fork_choice.rs @@ -30,7 +30,17 @@ const MAX_ROOT_PRINT_SECONDS: u64 = 30; enum UpdateLabel { Aggregate, Add, - MarkValid, + + // Notify a fork in the tree that a particular slot in that fork is now + // marked as valid. If there are multiple MarkValid operations for + // a single node, should apply the one with the smaller slot first (hence + // why the actual slot is included here). + MarkValid(Slot), + // Notify a fork in the tree that a particular slot in that fork is now + // marked as invalid. If there are multiple MarkInvalid operations for + // a single node, should apply the one with the smaller slot first (hence + // why the actual slot is included here). + MarkInvalid(Slot), Subtract, } @@ -53,7 +63,10 @@ impl GetSlotHash for Slot { #[derive(PartialEq, Eq, Clone, Debug)] enum UpdateOperation { Add(u64), - MarkValid, + MarkValid(Slot), + // Notify a fork in the tree that a particular slot in that fork is now + // marked as invalid. 
+ MarkInvalid(Slot), Subtract(u64), Aggregate, } @@ -63,7 +76,8 @@ impl UpdateOperation { match self { Self::Aggregate => panic!("Should not get here"), Self::Add(stake) => *stake += new_stake, - Self::MarkValid => panic!("Should not get here"), + Self::MarkValid(_slot) => panic!("Should not get here"), + Self::MarkInvalid(_slot) => panic!("Should not get here"), Self::Subtract(stake) => *stake += new_stake, } } @@ -80,9 +94,68 @@ struct ForkInfo { best_slot: SlotHashKey, parent: Option<SlotHashKey>, children: Vec<SlotHashKey>, - // Whether the fork rooted at this slot is a valid contender - // for the best fork - is_candidate: bool, + // The latest ancestor of this node that has been marked invalid. If the slot + // itself is a duplicate, this is set to the slot itself. + latest_invalid_ancestor: Option<Slot>, + // Set to true if this slot or a child node was duplicate confirmed. + is_duplicate_confirmed: bool, +} + +impl ForkInfo { + /// Returns if this node has been explicitly marked as a duplicate + /// slot + fn is_unconfirmed_duplicate(&self, my_slot: Slot) -> bool { + self.latest_invalid_ancestor + .map(|ancestor| ancestor == my_slot) + .unwrap_or(false) + } + + /// Returns if the fork rooted at this node is included in fork choice + fn is_candidate(&self) -> bool { + self.latest_invalid_ancestor.is_none() + } + + fn is_duplicate_confirmed(&self) -> bool { + self.is_duplicate_confirmed + } + + fn set_duplicate_confirmed(&mut self) { + self.is_duplicate_confirmed = true; + self.latest_invalid_ancestor = None; + } + + fn update_with_newly_valid_ancestor( + &mut self, + my_key: &SlotHashKey, + newly_valid_ancestor: Slot, + ) { + if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor { + if latest_invalid_ancestor <= newly_valid_ancestor { + info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor); + self.latest_invalid_ancestor = None; + } + } + } + + fn update_with_newly_invalid_ancestor( + &mut self, + my_key: &SlotHashKey, + newly_invalid_ancestor: Slot, + ) { + // Should not be marking a duplicate confirmed slot as invalid + assert!(!self.is_duplicate_confirmed); + if self + .latest_invalid_ancestor + .map(|latest_invalid_ancestor| newly_invalid_ancestor > latest_invalid_ancestor) + .unwrap_or(true) + { + info!( + "Fork choice for {:?} setting latest invalid ancestor from {:?} to {}", + my_key, self.latest_invalid_ancestor, newly_invalid_ancestor + ); + self.latest_invalid_ancestor = Some(newly_invalid_ancestor); + } + } } pub struct HeaviestSubtreeForkChoice { @@ -182,12 +255,6 @@ impl HeaviestSubtreeForkChoice { .map(|fork_info| fork_info.stake_voted_subtree) } - pub fn is_candidate_slot(&self, key: &SlotHashKey) -> Option<bool> { - self.fork_infos - .get(key) - .map(|fork_info| fork_info.is_candidate) - } - pub fn root(&self) -> SlotHashKey { self.root } @@ -252,35 +319,41 @@ impl HeaviestSubtreeForkChoice { best_slot: root_info.best_slot, children: vec![self.root], parent: None, - is_candidate: true, + latest_invalid_ancestor: None, + is_duplicate_confirmed: root_info.is_duplicate_confirmed, }; self.fork_infos.insert(root_parent, root_parent_info); self.root = root_parent; } - pub fn add_new_leaf_slot(&mut self, slot: SlotHashKey, parent: Option<SlotHashKey>) { + pub fn add_new_leaf_slot(&mut self, slot_hash_key: SlotHashKey, parent: Option<SlotHashKey>) { if self.last_root_time.elapsed().as_secs() > MAX_ROOT_PRINT_SECONDS { self.print_state(); self.last_root_time = Instant::now(); } - if self.fork_infos.contains_key(&slot) { 
+ if self.fork_infos.contains_key(&slot_hash_key) { // Can potentially happen if we repair the same version of the duplicate slot, after // dumping the original version return; } + let parent_latest_invalid_ancestor = + parent.and_then(|parent| self.latest_invalid_ancestor(&parent)); self.fork_infos - .entry(slot) - .and_modify(|slot_info| slot_info.parent = parent) + .entry(slot_hash_key) + .and_modify(|fork_info| fork_info.parent = parent) .or_insert(ForkInfo { stake_voted_at: 0, stake_voted_subtree: 0, // The `best_slot` of a leaf is itself - best_slot: slot, + best_slot: slot_hash_key, children: vec![], parent, - is_candidate: true, + latest_invalid_ancestor: parent_latest_invalid_ancestor, + // If the parent is none, then this is the root, which implies this must + // have reached the duplicate confirmed threshold + is_duplicate_confirmed: parent.is_none(), }); if parent.is_none() { @@ -294,11 +367,11 @@ impl HeaviestSubtreeForkChoice { .get_mut(&parent) .unwrap() .children - .push(slot); + .push(slot_hash_key); // Propagate leaf up the tree to any ancestors who considered the previous leaf // the `best_slot` - self.propagate_new_leaf(&slot, &parent) + self.propagate_new_leaf(&slot_hash_key, &parent) } // Returns if the given `maybe_best_child` is the heaviest among the children @@ -316,10 +389,7 @@ impl HeaviestSubtreeForkChoice { .expect("child must exist in `self.fork_infos`"); // Don't count children currently marked as invalid - if !self - .is_candidate_slot(child) - .expect("child must exist in tree") - { + if !self.is_candidate(child).expect("child must exist in tree") { continue; } @@ -378,6 +448,34 @@ impl HeaviestSubtreeForkChoice { .map(|fork_info| fork_info.stake_voted_at) } + pub fn latest_invalid_ancestor(&self, slot_hash_key: &SlotHashKey) -> Option<Slot> { + self.fork_infos + .get(slot_hash_key) + .map(|fork_info| fork_info.latest_invalid_ancestor) + .unwrap_or(None) + } + + pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> { + self.fork_infos + .get(slot_hash_key) + .map(|fork_info| fork_info.is_duplicate_confirmed()) + } + + /// Returns if the exact node with the specified key has been explicitly marked as a duplicate + /// slot (doesn't count ancestors being marked as duplicate). 
+ pub fn is_unconfirmed_duplicate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> { + self.fork_infos + .get(slot_hash_key) + .map(|fork_info| fork_info.is_unconfirmed_duplicate(slot_hash_key.0)) + } + + /// Returns false if the node or any of its ancestors have been marked as duplicate + pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> { + self.fork_infos + .get(slot_hash_key) + .map(|fork_info| fork_info.is_candidate()) + } + fn propagate_new_leaf( &mut self, slot_hash_key: &SlotHashKey, @@ -406,42 +504,69 @@ impl HeaviestSubtreeForkChoice { } } - fn insert_mark_valid_aggregate_operations( + fn insert_aggregate_operations( &self, update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, slot_hash_key: SlotHashKey, ) { - self.do_insert_aggregate_operations(update_operations, true, slot_hash_key); + self.do_insert_aggregate_operations_across_ancestors( + update_operations, + None, + slot_hash_key, + ); } - fn insert_aggregate_operations( + #[allow(clippy::map_entry)] + fn do_insert_aggregate_operations_across_ancestors( + &self, + update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, + modify_fork_validity: Option<UpdateOperation>, + slot_hash_key: SlotHashKey, + ) { - self.do_insert_aggregate_operations(update_operations, false, slot_hash_key); + for parent_slot_hash_key in self.ancestor_iterator(slot_hash_key) { + if !self.do_insert_aggregate_operation( + update_operations, + &modify_fork_validity, + parent_slot_hash_key, + ) { + // If this parent was already inserted, we assume all the other parents have also + // already been inserted. This is to prevent iterating over the parents multiple times + // when we are aggregating leaves that have a lot of shared ancestors + break; + } + } } #[allow(clippy::map_entry)] - fn do_insert_aggregate_operations( + fn do_insert_aggregate_operation( &self, update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, - should_mark_valid: bool, + modify_fork_validity: &Option<UpdateOperation>, slot_hash_key: SlotHashKey, - ) { - for parent_slot_hash_key in self.ancestor_iterator(slot_hash_key) { - let aggregate_label = (parent_slot_hash_key, UpdateLabel::Aggregate); - if update_operations.contains_key(&aggregate_label) { - break; - } else { - if should_mark_valid { - update_operations.insert( - (parent_slot_hash_key, UpdateLabel::MarkValid), - UpdateOperation::MarkValid, - ); + ) -> bool { + let aggregate_label = (slot_hash_key, UpdateLabel::Aggregate); + if update_operations.contains_key(&aggregate_label) { + false + } else { + if let Some(mark_fork_validity) = modify_fork_validity { + match mark_fork_validity { + UpdateOperation::MarkValid(slot) => { + update_operations.insert( + (slot_hash_key, UpdateLabel::MarkValid(*slot)), + UpdateOperation::MarkValid(*slot), + ); + } + UpdateOperation::MarkInvalid(slot) => { + update_operations.insert( + (slot_hash_key, UpdateLabel::MarkInvalid(*slot)), + UpdateOperation::MarkInvalid(*slot), + ); + } + _ => (), } - update_operations.insert(aggregate_label, UpdateOperation::Aggregate); } + update_operations.insert(aggregate_label, UpdateOperation::Aggregate); + true } } @@ -452,12 +577,18 @@ impl HeaviestSubtreeForkChoice { fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) { let mut stake_voted_subtree; let mut best_slot_hash_key = slot_hash_key; + let mut is_duplicate_confirmed = false; if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) { stake_voted_subtree = fork_info.stake_voted_at; let mut best_child_stake_voted_subtree = 0; - let mut best_child_slot = 
slot_hash_key; - for child in &fork_info.children { - let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap(); + let mut best_child_slot_key = slot_hash_key; + for child_key in &fork_info.children { + let child_fork_info = self + .fork_infos + .get(child_key) + .expect("Child must exist in fork_info map"); + let child_stake_voted_subtree = child_fork_info.stake_voted_subtree; + is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed; // Child forks that are not candidates still contribute to the weight // of the subtree rooted at `slot_hash_key`. For instance: @@ -482,19 +613,15 @@ impl HeaviestSubtreeForkChoice { // Note: If there's no valid children, then the best slot should default to the // input `slot` itself. - if self - .is_candidate_slot(child) - .expect("Child must exist in fork_info map") - && (best_child_slot == slot_hash_key || + if child_fork_info.is_candidate() + && (best_child_slot_key == slot_hash_key || child_stake_voted_subtree > best_child_stake_voted_subtree || // tiebreaker by slot height, prioritize earlier slot - (child_stake_voted_subtree == best_child_stake_voted_subtree && child < &best_child_slot)) + (child_stake_voted_subtree == best_child_stake_voted_subtree && child_key < &best_child_slot_key)) { best_child_stake_voted_subtree = child_stake_voted_subtree; - best_child_slot = *child; - best_slot_hash_key = self - .best_slot(child) - .expect("`child` must exist in `self.fork_infos`"); + best_child_slot_key = *child_key; + best_slot_hash_key = child_fork_info.best_slot; } } } else { @@ -502,19 +629,38 @@ impl HeaviestSubtreeForkChoice { } let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap(); + if is_duplicate_confirmed { + if !fork_info.is_duplicate_confirmed { + info!( + "Fork choice setting {:?} to duplicate confirmed", + slot_hash_key + ); + } + fork_info.set_duplicate_confirmed(); + } fork_info.stake_voted_subtree = stake_voted_subtree; fork_info.best_slot = best_slot_hash_key; } - fn mark_slot_valid(&mut self, valid_slot_hash_key: (Slot, Hash)) { - if let Some(fork_info) = self.fork_infos.get_mut(&valid_slot_hash_key) { - if !fork_info.is_candidate { - info!( - "marked previously invalid fork starting at slot: {:?} as valid", - valid_slot_hash_key - ); + /// Mark that `valid_slot` on the fork starting at `fork_to_modify` has been marked + /// valid. Note we don't need the hash for `valid_slot` because slot number uniquely + /// identifies a node on a single fork. + fn mark_fork_valid(&mut self, fork_to_modify_key: SlotHashKey, valid_slot: Slot) { + if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) { + fork_info_to_modify.update_with_newly_valid_ancestor(&fork_to_modify_key, valid_slot); + if fork_to_modify_key.0 == valid_slot { + fork_info_to_modify.is_duplicate_confirmed = true; } - fork_info.is_candidate = true; + } + } + + /// Mark that `invalid_slot` on the fork starting at `fork_to_modify` has been marked + /// invalid. Note we don't need the hash for `invalid_slot` because slot number uniquely + /// identifies a node on a single fork. 
+ fn mark_fork_invalid(&mut self, fork_to_modify_key: SlotHashKey, invalid_slot: Slot) { + if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) { + fork_info_to_modify + .update_with_newly_invalid_ancestor(&fork_to_modify_key, invalid_slot); } } @@ -624,7 +770,7 @@ impl HeaviestSubtreeForkChoice { let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0); let stake_update = epoch_stakes .get(&epoch) - .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey)) + .map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey)) .unwrap_or(0); update_operations @@ -641,7 +787,12 @@ impl HeaviestSubtreeForkChoice { // Iterate through the update operations from greatest to smallest slot for ((slot_hash_key, _), operation) in update_operations.into_iter().rev() { match operation { - UpdateOperation::MarkValid => self.mark_slot_valid(slot_hash_key), + UpdateOperation::MarkValid(valid_slot) => { + self.mark_fork_valid(slot_hash_key, valid_slot) + } + UpdateOperation::MarkInvalid(invalid_slot) => { + self.mark_fork_invalid(slot_hash_key, invalid_slot) + } UpdateOperation::Aggregate => self.aggregate_slot(slot_hash_key), UpdateOperation::Add(stake) => self.add_slot_stake(&slot_hash_key, stake), UpdateOperation::Subtract(stake) => self.subtract_slot_stake(&slot_hash_key, stake), @@ -745,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice { fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| &fork_info.children[..]) } } @@ -810,35 +961,48 @@ impl ForkChoice for HeaviestSubtreeForkChoice { fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) { info!( - "marking fork starting at slot: {:?} invalid candidate", + "marking fork starting at: {:?} invalid candidate", invalid_slot_hash_key ); let fork_info = self.fork_infos.get_mut(invalid_slot_hash_key); if let Some(fork_info) = fork_info { - if fork_info.is_candidate { - fork_info.is_candidate = false; - // Aggregate to find the new best slots excluding this fork - let mut update_operations = UpdateOperations::default(); - self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key); - self.process_update_operations(update_operations); + // Should not be marking duplicate confirmed blocks as invalid candidates + assert!(!fork_info.is_duplicate_confirmed); + let mut update_operations = UpdateOperations::default(); + + // Notify all the children of this node that a parent was marked as invalid + for child_hash_key in self.subtree_diff(*invalid_slot_hash_key, SlotHashKey::default()) + { + self.do_insert_aggregate_operation( + &mut update_operations, + &Some(UpdateOperation::MarkInvalid(invalid_slot_hash_key.0)), + child_hash_key, + ); } + + // Aggregate across all ancestors to find the new best slots excluding this fork + self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key); + self.process_update_operations(update_operations); } } fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) { + info!( + "marking fork starting at: {:?} valid candidate", + valid_slot_hash_key + ); let mut update_operations = UpdateOperations::default(); - let fork_info = self.fork_infos.get_mut(valid_slot_hash_key); - if let Some(fork_info) = fork_info { - // If a bunch of slots on the same fork are confirmed at once, then only the latest - // slot will incur this aggregation operation - fork_info.is_candidate = true; - 
self.insert_mark_valid_aggregate_operations( + // Notify all the children of this node that a parent was marked as valid + for child_hash_key in self.subtree_diff(*valid_slot_hash_key, SlotHashKey::default()) { + self.do_insert_aggregate_operation( &mut update_operations, - *valid_slot_hash_key, + &Some(UpdateOperation::MarkValid(valid_slot_hash_key.0)), + child_hash_key, ); } - // Aggregate to find the new best slots including this fork + // Aggregate across all ancestors to find the new best slots including this fork + self.insert_aggregate_operations(&mut update_operations, *valid_slot_hash_key); self.process_update_operations(update_operations); } } @@ -1333,7 +1497,7 @@ mod test { .chain(std::iter::once(&duplicate_leaves_descended_from_4[1])) { assert!(heaviest_subtree_fork_choice - .children(&duplicate_leaf) + .children(duplicate_leaf) .unwrap() .is_empty(),); } @@ -2740,34 +2904,50 @@ mod test { (expected_best_slot, Hash::default()), ); - // Mark slot 5 as invalid, the best fork should be its ancestor 3, - // not the other for at 4. - let invalid_candidate = (5, Hash::default()); + // Simulate a vote on slot 5 + let last_voted_slot_hash = (5, Hash::default()); + let mut tower = Tower::new_for_tests(10, 0.9); + tower.record_vote(last_voted_slot_hash.0, last_voted_slot_hash.1); + + // The heaviest_slot_on_same_voted_fork() should be 6, descended from 5. + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + (6, Hash::default()) + ); + + // Mark slot 5 as invalid + let invalid_candidate = last_voted_slot_hash; heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3); assert!(!heaviest_subtree_fork_choice - .is_candidate_slot(&invalid_candidate) + .is_candidate(&invalid_candidate) .unwrap()); - // The ancestor is still a candidate + // The ancestor 3 is still a candidate assert!(heaviest_subtree_fork_choice - .is_candidate_slot(&(3, Hash::default())) + .is_candidate(&(3, Hash::default())) .unwrap()); + // The best fork should be its ancestor 3, not the other fork at 4. 
+ assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3); + + // After marking the last vote in the tower as invalid, `heaviest_slot_on_same_voted_fork()` + // should disregard all descendants of that invalid vote + assert!(heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .is_none()); + // Adding another descendant to the invalid candidate won't // update the best slot, even if it contains votes - let new_leaf_slot7 = 7; - heaviest_subtree_fork_choice.add_new_leaf_slot( - (new_leaf_slot7, Hash::default()), - Some((6, Hash::default())), - ); + let new_leaf7 = (7, Hash::default()); + heaviest_subtree_fork_choice.add_new_leaf_slot(new_leaf7, Some((6, Hash::default()))); let invalid_slot_ancestor = 3; assert_eq!( heaviest_subtree_fork_choice.best_overall_slot().0, invalid_slot_ancestor ); - let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = - vec![(vote_pubkeys[0], (new_leaf_slot7, Hash::default()))]; + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![(vote_pubkeys[0], new_leaf7)]; assert_eq!( heaviest_subtree_fork_choice.add_votes( pubkey_votes.iter(), @@ -2777,34 +2957,51 @@ mod test { (invalid_slot_ancestor, Hash::default()), ); + // This shouldn't update the `heaviest_slot_on_same_voted_fork` either + assert!(heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .is_none()); + // Adding a descendant to the ancestor of the invalid candidate *should* update // the best slot though, since the ancestor is on the heaviest fork - let new_leaf_slot8 = 8; - heaviest_subtree_fork_choice.add_new_leaf_slot( - (new_leaf_slot8, Hash::default()), - Some((invalid_slot_ancestor, Hash::default())), - ); - assert_eq!( - heaviest_subtree_fork_choice.best_overall_slot().0, - new_leaf_slot8, - ); + let new_leaf8 = (8, Hash::default()); + heaviest_subtree_fork_choice + .add_new_leaf_slot(new_leaf8, Some((invalid_slot_ancestor, Hash::default()))); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), new_leaf8,); + // Should not update the `heaviest_slot_on_same_voted_fork` because the new leaf + // is not descended from the last vote + assert!(heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .is_none()); // If we mark a descendant of `invalid_candidate` as valid, then that // should also mark `invalid_candidate` as valid, and the best slot should // be the leaf of the heaviest fork, `new_leaf_slot`. 
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&invalid_candidate); assert!(heaviest_subtree_fork_choice - .is_candidate_slot(&invalid_candidate) + .is_candidate(&invalid_candidate) .unwrap()); assert_eq!( - heaviest_subtree_fork_choice.best_overall_slot().0, + heaviest_subtree_fork_choice.best_overall_slot(), // Should pick the smaller slot of the two new equally weighted leaves - new_leaf_slot7 + new_leaf7 + ); + // Should update the `heaviest_slot_on_same_voted_fork` as well + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + new_leaf7 ); } - #[test] - fn test_mark_valid_invalid_forks_duplicate() { + fn setup_mark_invalid_forks_duplicate_tests() -> ( + HeaviestSubtreeForkChoice, + Vec<SlotHashKey>, + SlotHashKey, + Bank, + Vec<Pubkey>, + ) { let ( mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, @@ -2832,11 +3029,27 @@ mod test { // the other branch at slot 5 let invalid_candidate = (4, Hash::default()); heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); - assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), (2, Hash::default()) ); + ( + heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + invalid_candidate, + bank, + vote_pubkeys, + ) + } + + #[test] + fn test_mark_invalid_then_valid_duplicate() { + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + invalid_candidate, + .., + ) = setup_mark_invalid_forks_duplicate_tests(); // Marking candidate as valid again will choose the heaviest leaf of // the newly valid branch @@ -2851,16 +3064,26 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), duplicate_descendant ); + } - // Mark the current heaviest branch as invalid again - heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); + #[test] + fn test_mark_invalid_then_add_new_heavier_duplicate_slot() { + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _invalid_candidate, + bank, + vote_pubkeys, + ) = setup_mark_invalid_forks_duplicate_tests(); // If we add a new version of the duplicate slot that is not descended from the invalid // candidate and votes for that duplicate slot, the new duplicate slot should be picked // once it has more weight let new_duplicate_hash = Hash::default(); + // The hash has to be smaller in order for the votes to be counted assert!(new_duplicate_hash < duplicate_leaves_descended_from_4[0].1); + let duplicate_slot = duplicate_leaves_descended_from_4[0].0; let new_duplicate = (duplicate_slot, new_duplicate_hash); heaviest_subtree_fork_choice.add_new_leaf_slot(new_duplicate, Some((3, Hash::default())));
.latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + + // Mark a later descendant invalid + let invalid_descendant_slot = 5; + let invalid_descendant_key = invalid_descendant_slot.slot_hash(); + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_descendant_key); + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + if slot <= duplicate_confirmed_slot { + // All ancestors of the duplicate confirmed slot should: + // 1) Be duplicate confirmed + // 2) Have no invalid ancestors + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } else if slot >= invalid_descendant_slot { + // Anything descended from the invalid slot should: + // 1) Not be duplicate confirmed + // 2) Should have an invalid ancestor == `invalid_descendant_slot` + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .unwrap(), + invalid_descendant_slot + ); + } else { + // Anything in between the duplicate confirmed slot and the invalid slot should: + // 1) Not be duplicate confirmed + // 2) Should not have an invalid ancestor + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + } + + // Mark later descendant `d` duplicate confirmed where `duplicate_confirmed_slot < d < invalid_descendant_slot`. + let later_duplicate_confirmed_slot = 4; + assert!( + later_duplicate_confirmed_slot > duplicate_confirmed_slot + && later_duplicate_confirmed_slot < invalid_descendant_slot + ); + let later_duplicate_confirmed_key = later_duplicate_confirmed_slot.slot_hash(); + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&later_duplicate_confirmed_key); + + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + if slot <= later_duplicate_confirmed_slot { + // All ancestors of the later_duplicate_confirmed_slot should: + // 1) Be duplicate confirmed + // 2) Have no invalid ancestors + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } else if slot >= invalid_descendant_slot { + // Anything descended from the invalid slot should: + // 1) Not be duplicate confirmed + // 2) Should have an invalid ancestor == `invalid_descendant_slot` + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .unwrap(), + invalid_descendant_slot + ); + } else { + // Anything in between the duplicate confirmed slot and the invalid slot should: + // 1) Not be duplicate confirmed + // 2) Should not have an invalid ancestor + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + } + + // Mark all slots duplicate confirmed + let last_duplicate_confirmed_slot = 6; + let last_duplicate_confirmed_key = last_duplicate_confirmed_slot.slot_hash(); + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key); + for 
slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + } + + #[test] + #[should_panic] + fn test_mark_valid_then_ancestor_invalid() { + let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / (tr(5) / tr(6)))))); + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); + let duplicate_confirmed_slot: Slot = 4; + let duplicate_confirmed_key = duplicate_confirmed_slot.slot_hash(); + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&duplicate_confirmed_key); + + // Now mark an ancestor of this fork invalid, should panic since this ancestor + // was duplicate confirmed by its descendant 4 already + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&3.slot_hash()); + } + + fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot: Slot, + larger_duplicate_slot: Slot, + ) -> HeaviestSubtreeForkChoice { + // Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5 + let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5))))); + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); + + // Mark the slots as unconfirmed duplicates + heaviest_subtree_fork_choice + .mark_fork_invalid_candidate(&smaller_duplicate_slot.slot_hash()); + heaviest_subtree_fork_choice + .mark_fork_invalid_candidate(&larger_duplicate_slot.slot_hash()); + + // Correctness checks + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + if slot < smaller_duplicate_slot { + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } else if slot < larger_duplicate_slot { + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .unwrap(), + smaller_duplicate_slot + ); + } else { + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .unwrap(), + larger_duplicate_slot + ); + } + } + + heaviest_subtree_fork_choice + } + + #[test] + fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() { + let smaller_duplicate_slot = 1; + let larger_duplicate_slot = 4; + let mut heaviest_subtree_fork_choice = + setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot, + larger_duplicate_slot, + ); + + // Mark the smaller duplicate slot as confirmed + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash()); + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + if slot < larger_duplicate_slot { + // Only slots <= smaller_duplicate_slot have been duplicate confirmed + if slot <= smaller_duplicate_slot { + assert!(heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + } else { + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap()); + } + // The unconfirmed duplicate flag has been cleared on the smaller + // descendants because their most recent duplicate ancestor has + // been confirmed + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } else { + assert!(!heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap(),); + // The unconfirmed duplicate flag has not been cleared on the smaller + // descendants because their most recent duplicate 
ancestor, + // `larger_duplicate_slot` has not yet been confirmed + assert_eq!( + heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .unwrap(), + larger_duplicate_slot + ); + } + } + + // Mark the larger duplicate slot as confirmed, all slots should no longer + // have any unconfirmed duplicate ancestors, and should be marked as duplciate confirmed + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash()); + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + // All slots <= the latest duplciate confirmed slot are ancestors of + // that slot, so they should all be marked duplicate confirmed + assert_eq!( + heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap(), + slot <= larger_duplicate_slot + ); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + } + + #[test] + fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() { + let smaller_duplicate_slot = 1; + let larger_duplicate_slot = 4; + let mut heaviest_subtree_fork_choice = + setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot, + larger_duplicate_slot, + ); + + // Mark the larger duplicate slot as confirmed + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash()); + + // All slots should no longer have any unconfirmed duplicate ancestors + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash()); + for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { + let slot = slot_hash_key.0; + // All slots <= the latest duplciate confirmed slot are ancestors of + // that slot, so they should all be marked duplicate confirmed + assert_eq!( + heaviest_subtree_fork_choice + .is_duplicate_confirmed(slot_hash_key) + .unwrap(), + slot <= larger_duplicate_slot + ); + assert!(heaviest_subtree_fork_choice + .latest_invalid_ancestor(slot_hash_key) + .is_none()); + } + } + fn setup_forks() -> HeaviestSubtreeForkChoice { /* Build fork structure: diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 9d081b277f58f6..195601e873f698 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -187,7 +187,7 @@ impl LedgerCleanupService { *last_purge_slot = root; let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) = - Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds); + Self::find_slots_to_clean(blockstore, root, max_ledger_shreds); if slots_to_clean { let purge_complete = Arc::new(AtomicBool::new(false)); @@ -207,11 +207,25 @@ impl LedgerCleanupService { ); let mut purge_time = Measure::start("purge_slots"); + blockstore.purge_slots( purge_first_slot, lowest_cleanup_slot, - PurgeType::PrimaryIndex, + PurgeType::CompactionFilter, ); + // Update only after purge operation. + // Safety: This value can be used by compaction_filters shared via Arc. + // Compactions are async and run as a multi-threaded background job. However, this + // shouldn't cause consistency issues for iterators and getters because we have + // already expired all affected keys (older than or equal to lowest_cleanup_slot) + // by the above `purge_slots`. According to the general RocksDB design where SST + // files are immutable, even running iterators aren't affected; the database grabs + // a snapshot of the live set of sst files at iterator's creation. 
+ // Also, we passed the PurgeType::CompactionFilter, meaning no delete_range for + // transaction_status and address_signatures CFs. These are fine because they + // don't require strong consistent view for their operation. + blockstore.set_max_expired_slot(lowest_cleanup_slot); + purge_time.stop(); info!("{}", purge_time); diff --git a/core/src/lib.rs b/core/src/lib.rs index 89c3aab3f708f5..88fa14dd07a125 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -9,10 +9,10 @@ pub mod accounts_hash_verifier; pub mod banking_stage; -pub mod bigtable_upload_service; pub mod broadcast_stage; pub mod cache_block_meta_service; pub mod cluster_info_vote_listener; +pub mod cluster_nodes; pub mod cluster_slot_state_verifier; pub mod cluster_slots; pub mod cluster_slots_service; @@ -28,8 +28,6 @@ pub mod ledger_cleanup_service; pub mod optimistic_confirmation_verifier; pub mod outstanding_requests; pub mod packet_hasher; -pub mod poh_recorder; -pub mod poh_service; pub mod progress_map; pub mod repair_response; pub mod repair_service; @@ -40,11 +38,7 @@ pub mod request_response; mod result; pub mod retransmit_stage; pub mod rewards_recorder_service; -pub mod rpc; -pub mod rpc_health; -pub mod rpc_service; pub mod sample_performance_service; -pub mod send_transaction_service; pub mod serve_repair; pub mod serve_repair_service; pub mod shred_fetch_stage; @@ -54,13 +48,13 @@ pub mod sigverify_stage; pub mod snapshot_packager_service; pub mod test_validator; pub mod tpu; -pub mod transaction_status_service; pub mod tree_diff; pub mod tvu; pub mod unfrozen_gossip_verified_vote_hashes; pub mod validator; pub mod verified_vote_packets; pub mod vote_stake_tracker; +pub mod voting_service; pub mod window_service; #[macro_use] @@ -69,10 +63,6 @@ extern crate log; #[macro_use] extern crate serde_derive; -#[cfg(test)] -#[macro_use] -extern crate serde_json; - #[macro_use] extern crate solana_metrics; diff --git a/core/src/optimistic_confirmation_verifier.rs b/core/src/optimistic_confirmation_verifier.rs index 2f27bc2b785cb0..5e53abb342b2c3 100644 --- a/core/src/optimistic_confirmation_verifier.rs +++ b/core/src/optimistic_confirmation_verifier.rs @@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier { .into_iter() .filter(|(optimistic_slot, optimistic_hash)| { (*optimistic_slot == root && *optimistic_hash != root_bank.hash()) - || (!root_ancestors.contains_key(&optimistic_slot) && + || (!root_ancestors.contains_key(optimistic_slot) && // In this second part of the `and`, we account for the possibility that // there was some other root `rootX` set in BankForks where: // @@ -316,7 +316,7 @@ mod test { assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty()); // If we know set the root in blockstore, should return nothing - blockstore.set_roots(&[1, 3]).unwrap(); + blockstore.set_roots(vec![1, 3].iter()).unwrap(); optimistic_confirmation_verifier.add_new_optimistic_confirmed_slots(optimistic_slots); assert!(optimistic_confirmation_verifier .verify_for_unrooted_optimistic_slots(&bank7, &blockstore) diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index af29f1fb5f0988..36348ca534090a 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -63,6 +63,16 @@ impl ReplaySlotStats { ("load_us", self.execute_timings.load_us, i64), ("execute_us", self.execute_timings.execute_us, i64), ("store_us", self.execute_timings.store_us, i64), + ( + "total_batches_len", + self.execute_timings.total_batches_len, + i64 + ), + ( + "num_execute_batches", + 
self.execute_timings.num_execute_batches, + i64 + ), ( "serialize_us", self.execute_timings.details.serialize_us, @@ -140,7 +150,6 @@ pub(crate) struct ForkProgress { pub(crate) propagated_stats: PropagatedStats, pub(crate) replay_stats: ReplaySlotStats, pub(crate) replay_progress: ConfirmationProgress, - pub(crate) duplicate_stats: DuplicateStats, // Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only // count new blocks replayed since last restart, which won't include // blocks already existing in the ledger/before snapshot at start, @@ -153,7 +162,6 @@ impl ForkProgress { pub fn new( last_entry: Hash, prev_leader_slot: Option, - duplicate_stats: DuplicateStats, validator_stake_info: Option, num_blocks_on_fork: u64, num_dropped_blocks_on_fork: u64, @@ -187,7 +195,6 @@ impl ForkProgress { fork_stats: ForkStats::default(), replay_stats: ReplaySlotStats::default(), replay_progress: ConfirmationProgress::new(last_entry), - duplicate_stats, num_blocks_on_fork, num_dropped_blocks_on_fork, propagated_stats: PropagatedStats { @@ -207,16 +214,14 @@ impl ForkProgress { my_pubkey: &Pubkey, voting_pubkey: &Pubkey, prev_leader_slot: Option, - duplicate_stats: DuplicateStats, num_blocks_on_fork: u64, num_dropped_blocks_on_fork: u64, ) -> Self { - let validator_fork_info = { + let validator_stake_info = { if bank.collector_id() == my_pubkey { - let stake = bank.epoch_vote_account_stake(voting_pubkey); Some(ValidatorStakeInfo::new( *voting_pubkey, - stake, + bank.epoch_vote_account_stake(voting_pubkey), bank.total_epoch_stake(), )) } else { @@ -227,20 +232,11 @@ impl ForkProgress { Self::new( bank.last_blockhash(), prev_leader_slot, - duplicate_stats, - validator_fork_info, + validator_stake_info, num_blocks_on_fork, num_dropped_blocks_on_fork, ) } - - pub fn is_duplicate_confirmed(&self) -> bool { - self.duplicate_stats.is_duplicate_confirmed - } - - pub fn set_duplicate_confirmed(&mut self) { - self.duplicate_stats.set_duplicate_confirmed(); - } } #[derive(Debug, Clone, Default)] @@ -275,38 +271,6 @@ pub(crate) struct PropagatedStats { pub(crate) total_epoch_stake: u64, } -#[derive(Clone, Default)] -pub(crate) struct DuplicateStats { - latest_unconfirmed_duplicate_ancestor: Option, - is_duplicate_confirmed: bool, -} - -impl DuplicateStats { - pub fn new_with_unconfirmed_duplicate_ancestor( - latest_unconfirmed_duplicate_ancestor: Option, - ) -> Self { - Self { - latest_unconfirmed_duplicate_ancestor, - is_duplicate_confirmed: false, - } - } - - fn set_duplicate_confirmed(&mut self) { - self.is_duplicate_confirmed = true; - self.latest_unconfirmed_duplicate_ancestor = None; - } - - fn update_with_newly_confirmed_duplicate_ancestor(&mut self, newly_confirmed_ancestor: Slot) { - if let Some(latest_unconfirmed_duplicate_ancestor) = - self.latest_unconfirmed_duplicate_ancestor - { - if latest_unconfirmed_duplicate_ancestor <= newly_confirmed_ancestor { - self.latest_unconfirmed_duplicate_ancestor = None; - } - } - } -} - impl PropagatedStats { pub fn add_vote_pubkey(&mut self, vote_pubkey: Pubkey, stake: u64) { if self.propagated_validators.insert(vote_pubkey) { @@ -317,7 +281,7 @@ impl PropagatedStats { pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) { if !self.propagated_node_ids.contains(node_pubkey) { let node_vote_accounts = bank - .epoch_vote_accounts_for_node_id(&node_pubkey) + .epoch_vote_accounts_for_node_id(node_pubkey) .map(|v| &v.vote_accounts); if let Some(node_vote_accounts) = node_vote_accounts { @@ -438,101 +402,6 @@ impl ProgressMap { } } - pub fn 
is_unconfirmed_duplicate(&self, slot: Slot) -> Option { - self.get(&slot).map(|p| { - p.duplicate_stats - .latest_unconfirmed_duplicate_ancestor - .map(|ancestor| ancestor == slot) - .unwrap_or(false) - }) - } - - pub fn latest_unconfirmed_duplicate_ancestor(&self, slot: Slot) -> Option { - self.get(&slot) - .map(|p| p.duplicate_stats.latest_unconfirmed_duplicate_ancestor) - .unwrap_or(None) - } - - pub fn set_unconfirmed_duplicate_slot(&mut self, slot: Slot, descendants: &HashSet) { - if let Some(fork_progress) = self.get_mut(&slot) { - if fork_progress.is_duplicate_confirmed() { - assert!(fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor - .is_none()); - return; - } - - if fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor - == Some(slot) - { - // Already been marked - return; - } - fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor = Some(slot); - - for d in descendants { - if let Some(fork_progress) = self.get_mut(&d) { - fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor = Some(std::cmp::max( - fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor - .unwrap_or(0), - slot, - )); - } - } - } - } - - pub fn set_confirmed_duplicate_slot( - &mut self, - slot: Slot, - ancestors: &HashSet, - descendants: &HashSet, - ) { - for a in ancestors { - if let Some(fork_progress) = self.get_mut(&a) { - fork_progress.set_duplicate_confirmed(); - } - } - - if let Some(slot_fork_progress) = self.get_mut(&slot) { - // Setting the fields here is only correct and necessary if the loop above didn't - // already do this, so check with an assert. - assert!(!ancestors.contains(&slot)); - let slot_had_unconfirmed_duplicate_ancestor = slot_fork_progress - .duplicate_stats - .latest_unconfirmed_duplicate_ancestor - .is_some(); - slot_fork_progress.set_duplicate_confirmed(); - - if slot_had_unconfirmed_duplicate_ancestor { - for d in descendants { - if let Some(descendant_fork_progress) = self.get_mut(&d) { - descendant_fork_progress - .duplicate_stats - .update_with_newly_confirmed_duplicate_ancestor(slot); - } - } - } else { - // Neither this slot `S` nor earlier ancestors were marked as duplicate, - // so this means all descendants either: - // 1) Have no duplicate ancestors - // 2) Have a duplicate ancestor > `S` - - // In both cases, there's no need to iterate through descendants because - // this confirmation on `S` is irrelevant to them.
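The `ProgressMap` duplicate-tracking methods deleted in this hunk have replacements on `HeaviestSubtreeForkChoice` (see the tests at the top of this diff). The important shift is the key type: queries now take a `(Slot, Hash)` pair instead of a bare slot, so two competing versions of the same slot are tracked independently. A hypothetical call-site sketch inside the solana-core crate, with signatures inferred from those tests:

```rust
use solana_sdk::{clock::Slot, hash::Hash};
// Crate-internal type, as imported by replay_stage.rs later in this diff;
// this snippet assumes it runs inside the solana-core crate.
use crate::heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice;

// Hypothetical helper. Per the tests above, the outer Option is None when the
// (slot, hash) key is unknown to fork choice, and `latest_invalid_ancestor`
// yields None when the fork has no unconfirmed duplicate ancestor.
fn duplicate_state(fork_choice: &HeaviestSubtreeForkChoice, slot: Slot, bank_hash: Hash) {
    let key = (slot, bank_hash);
    let _is_confirmed: Option<bool> = fork_choice.is_duplicate_confirmed(&key);
    let _invalid_ancestor: Option<Slot> = fork_choice.latest_invalid_ancestor(&key);
}
```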
- } - } - } - pub fn my_latest_landed_vote(&self, slot: Slot) -> Option { self.progress_map .get(&slot) @@ -550,12 +419,6 @@ impl ProgressMap { .map(|s| s.fork_stats.is_supermajority_confirmed) } - pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option { - self.progress_map - .get(&slot) - .map(|s| s.is_duplicate_confirmed()) - } - pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option { let parent_slot = bank.parent_slot(); self.get_propagated_stats(parent_slot) @@ -598,8 +461,6 @@ impl ProgressMap { #[cfg(test)] mod test { use super::*; - use crate::consensus::test::VoteSimulator; - use trees::tr; #[test] fn test_add_vote_pubkey() { @@ -690,21 +551,13 @@ mod test { fn test_is_propagated_status_on_construction() { // If the given ValidatorStakeInfo == None, then this is not // a leader slot and is_propagated == false - let progress = ForkProgress::new( - Hash::default(), - Some(9), - DuplicateStats::default(), - None, - 0, - 0, - ); + let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0); assert!(!progress.propagated_stats.is_propagated); // If the stake is zero, then threshold is always achieved let progress = ForkProgress::new( Hash::default(), Some(9), - DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake: 0, ..ValidatorStakeInfo::default() @@ -719,7 +572,6 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), - DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake: 2, ..ValidatorStakeInfo::default() @@ -733,7 +585,6 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), - DuplicateStats::default(), Some(ValidatorStakeInfo { stake: 1, total_epoch_stake: 2, @@ -750,7 +601,6 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -764,23 +614,12 @@ mod test { // Insert new ForkProgress for slot 10 (not a leader slot) and its // previous leader slot 9 (leader slot) - progress_map.insert( - 10, - ForkProgress::new( - Hash::default(), - Some(9), - DuplicateStats::default(), - None, - 0, - 0, - ), - ); + progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0)); progress_map.insert( 9, ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -795,17 +634,7 @@ mod test { // The previous leader before 8, slot 7, does not exist in // progress map, so is_propagated(8) should return true as // this implies the parent is rooted - progress_map.insert( - 8, - ForkProgress::new( - Hash::default(), - Some(7), - DuplicateStats::default(), - None, - 0, - 0, - ), - ); + progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0)); assert!(progress_map.is_propagated(8)); // If we set the is_propagated = true, is_propagated should return true @@ -828,157 +657,4 @@ mod test { .is_leader_slot = true; assert!(!progress_map.is_propagated(10)); } - - fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( - smaller_duplicate_slot: Slot, - larger_duplicate_slot: Slot, - ) -> (ProgressMap, RwLock) { - // Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5 - let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5))))); - let mut vote_simulator = VoteSimulator::new(1); - vote_simulator.fill_bank_forks(forks, &HashMap::new()); - let VoteSimulator { - mut progress, - bank_forks, - .. 
- } = vote_simulator; - let descendants = bank_forks.read().unwrap().descendants().clone(); - - // Mark the slots as unconfirmed duplicates - progress.set_unconfirmed_duplicate_slot( - smaller_duplicate_slot, - &descendants.get(&smaller_duplicate_slot).unwrap(), - ); - progress.set_unconfirmed_duplicate_slot( - larger_duplicate_slot, - &descendants.get(&larger_duplicate_slot).unwrap(), - ); - - // Correctness checks - for slot in bank_forks.read().unwrap().banks().keys() { - if *slot < smaller_duplicate_slot { - assert!(progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .is_none()); - } else if *slot < larger_duplicate_slot { - assert_eq!( - progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .unwrap(), - smaller_duplicate_slot - ); - } else { - assert_eq!( - progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .unwrap(), - larger_duplicate_slot - ); - } - } - - (progress, bank_forks) - } - - #[test] - fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() { - let smaller_duplicate_slot = 1; - let larger_duplicate_slot = 4; - let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( - smaller_duplicate_slot, - larger_duplicate_slot, - ); - let descendants = bank_forks.read().unwrap().descendants().clone(); - let ancestors = bank_forks.read().unwrap().ancestors(); - - // Mark the smaller duplicate slot as confirmed - progress.set_confirmed_duplicate_slot( - smaller_duplicate_slot, - &ancestors.get(&smaller_duplicate_slot).unwrap(), - &descendants.get(&smaller_duplicate_slot).unwrap(), - ); - for slot in bank_forks.read().unwrap().banks().keys() { - if *slot < larger_duplicate_slot { - // Only slots <= smaller_duplicate_slot have been duplicate confirmed - if *slot <= smaller_duplicate_slot { - assert!(progress.is_duplicate_confirmed(*slot).unwrap()); - } else { - assert!(!progress.is_duplicate_confirmed(*slot).unwrap()); - } - // The unconfirmed duplicate flag has been cleared on the smaller - // descendants because their most recent duplicate ancestor has - // been confirmed - assert!(progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .is_none()); - } else { - assert!(!progress.is_duplicate_confirmed(*slot).unwrap(),); - // The unconfirmed duplicate flag has not been cleared on the smaller - // descendants because their most recent duplicate ancestor, - // `larger_duplicate_slot` has not yet been confirmed - assert_eq!( - progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .unwrap(), - larger_duplicate_slot - ); - } - } - - // Mark the larger duplicate slot as confirmed; all slots should no longer - // have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed - progress.set_confirmed_duplicate_slot( - larger_duplicate_slot, - &ancestors.get(&larger_duplicate_slot).unwrap(), - &descendants.get(&larger_duplicate_slot).unwrap(), - ); - for slot in bank_forks.read().unwrap().banks().keys() { - // All slots <= the latest duplicate confirmed slot are ancestors of - // that slot, so they should all be marked duplicate confirmed - assert_eq!( - progress.is_duplicate_confirmed(*slot).unwrap(), - *slot <= larger_duplicate_slot - ); - assert!(progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .is_none()); - } - } - - #[test] - fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() { - let smaller_duplicate_slot = 1; - let larger_duplicate_slot = 4; - let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( - smaller_duplicate_slot, -
larger_duplicate_slot, - ); - let descendants = bank_forks.read().unwrap().descendants().clone(); - let ancestors = bank_forks.read().unwrap().ancestors(); - - // Mark the larger duplicate slot as confirmed - progress.set_confirmed_duplicate_slot( - larger_duplicate_slot, - &ancestors.get(&larger_duplicate_slot).unwrap(), - &descendants.get(&larger_duplicate_slot).unwrap(), - ); - - // All slots should no longer have any unconfirmed duplicate ancestors - progress.set_confirmed_duplicate_slot( - larger_duplicate_slot, - &ancestors.get(&larger_duplicate_slot).unwrap(), - &descendants.get(&larger_duplicate_slot).unwrap(), - ); - for slot in bank_forks.read().unwrap().banks().keys() { - // All slots <= the latest duplicate confirmed slot are ancestors of - // that slot, so they should all be marked duplicate confirmed - assert_eq!( - progress.is_duplicate_confirmed(*slot).unwrap(), - *slot <= larger_duplicate_slot - ); - assert!(progress - .latest_unconfirmed_duplicate_ancestor(*slot) - .is_none()); - } - } } diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 0bb96983bf2ae1..236499de38fdd5 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -5,20 +5,25 @@ use crate::{ cluster_slots::ClusterSlots, outstanding_requests::OutstandingRequests, repair_weight::RepairWeight, + replay_stage::DUPLICATE_THRESHOLD, result::Result, - serve_repair::{RepairType, ServeRepair}, + serve_repair::{RepairType, ServeRepair, REPAIR_PEERS_CACHE_CAPACITY}, }; use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use lru::LruCache; use solana_gossip::cluster_info::ClusterInfo; use solana_ledger::{ blockstore::{Blockstore, SlotMeta}, shred::Nonce, }; use solana_measure::measure::Measure; -use solana_runtime::{ - bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, contains::Contains, +use solana_runtime::{bank::Bank, bank_forks::BankForks, contains::Contains}; +use solana_sdk::{ + clock::{BankId, Slot}, + epoch_schedule::EpochSchedule, + pubkey::Pubkey, + timing::timestamp, }; -use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp}; use std::{ collections::{HashMap, HashSet}, iter::Iterator, @@ -33,6 +38,8 @@ use std::{ pub type DuplicateSlotsResetSender = CrossbeamSender; pub type DuplicateSlotsResetReceiver = CrossbeamReceiver; +pub type ConfirmedSlotsSender = CrossbeamSender>; +pub type ConfirmedSlotsReceiver = CrossbeamReceiver>; pub type OutstandingRepairs = OutstandingRequests; @@ -187,6 +194,7 @@ impl RepairService { let mut last_stats = Instant::now(); let duplicate_slot_repair_statuses: HashMap = HashMap::new(); + let mut peers_cache = LruCache::new(REPAIR_PEERS_CACHE_CAPACITY); loop { if exit.load(Ordering::Relaxed) { @@ -223,7 +231,7 @@ impl RepairService { add_votes_elapsed = Measure::start("add_votes"); repair_weight.add_votes( - &blockstore, + blockstore, slot_to_vote_pubkeys.into_iter(), root_bank.epoch_stakes_map(), root_bank.epoch_schedule(), @@ -266,14 +274,13 @@ impl RepairService { ) }; - let mut cache = HashMap::new(); let mut send_repairs_elapsed = Measure::start("send_repairs_elapsed"); let mut outstanding_requests = outstanding_requests.write().unwrap(); repairs.into_iter().for_each(|repair_request| { if let Ok((to, req)) = serve_repair.repair_request( - &cluster_slots, + cluster_slots, repair_request, - &mut cache, + &mut peers_cache, + &mut repair_stats, &repair_info.repair_validators, &mut outstanding_requests, ) { @@ -487,7 +494,7 @@ impl RepairService {
repair_validators, ); if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr { - let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot); + let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot); if let Some(repairs) = repairs { let mut outstanding_requests = outstanding_requests.write().unwrap(); @@ -529,7 +536,7 @@ impl RepairService { nonce: Nonce, ) -> Result<()> { let req = - serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?; + serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?; repair_socket.send_to(&req, to)?; Ok(()) } @@ -558,7 +565,7 @@ impl RepairService { #[allow(dead_code)] fn process_new_duplicate_slots( - new_duplicate_slots: &[Slot], + new_duplicate_slots: &[(Slot, BankId)], duplicate_slot_repair_statuses: &mut HashMap, cluster_slots: &ClusterSlots, root_bank: &Bank, @@ -567,16 +574,16 @@ impl RepairService { duplicate_slots_reset_sender: &DuplicateSlotsResetSender, repair_validators: &Option>, ) { - for slot in new_duplicate_slots { + for (slot, bank_id) in new_duplicate_slots { warn!( - "Cluster completed slot: {}, dumping our current version and repairing", + "Cluster confirmed slot: {}, dumping our current version and repairing", slot ); // Clear the slot signatures from status cache for this slot root_bank.clear_slot_signatures(*slot); // Clear the accounts for this slot - root_bank.remove_unrooted_slot(*slot); + root_bank.remove_unrooted_slots(&[(*slot, *bank_id)]); // Clear the slot-related data in blockstore. This will: // 1) Clear old shreds allowing new ones to be inserted @@ -641,7 +648,7 @@ impl RepairService { }) .sum(); if total_completed_slot_stake as f64 / total_stake as f64 - > VOTE_THRESHOLD_SIZE + > DUPLICATE_THRESHOLD { Some(dead_slot) } else { @@ -669,17 +676,27 @@ impl RepairService { mod test { use super::*; use crossbeam_channel::unbounded; - use solana_gossip::cluster_info::Node; + use solana_gossip::{cluster_info::Node, contact_info::ContactInfo}; use solana_ledger::blockstore::{ make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, }; use solana_ledger::shred::max_ticks_per_n_shreds; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_runtime::genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}; + use solana_sdk::signature::Keypair; use solana_sdk::signature::Signer; + use solana_streamer::socket::SocketAddrSpace; use solana_vote_program::vote_transaction; use std::collections::HashSet; + fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo { + ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ) + } + #[test] pub fn test_repair_orphan() { let blockstore_path = get_tmp_ledger_path!(); @@ -971,7 +988,8 @@ mod test { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let cluster_slots = ClusterSlots::default(); - let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info); + let serve_repair = + ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info))); let mut duplicate_slot_repair_statuses = HashMap::new(); let dead_slot = 9; let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1053,13 +1071,11 @@ mod test { Pubkey::default(), UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(), )); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair( - Node::new_localhost().info, - )); + let 
cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info)); let serve_repair = ServeRepair::new(cluster_info.clone()); let valid_repair_peer = Node::new_localhost().info; - // Signal that this peer has completed the dead slot, and is thus + // Signal that this peer has confirmed the dead slot, and is thus // a valid target for repair let dead_slot = 9; let cluster_slots = ClusterSlots::default(); @@ -1115,7 +1131,8 @@ mod test { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let cluster_slots = ClusterSlots::default(); - let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info); + let serve_repair = + ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info))); let mut duplicate_slot_repair_statuses = HashMap::new(); let duplicate_slot = 9; @@ -1138,6 +1155,7 @@ mod test { ); let bank0 = Arc::new(Bank::new(&genesis_config)); let bank9 = Bank::new_from_parent(&bank0, &Pubkey::default(), duplicate_slot); + let duplicate_bank_id = bank9.bank_id(); let old_balance = bank9.get_balance(&keypairs.node_keypair.pubkey()); bank9 .transfer(10_000, &mint_keypair, &keypairs.node_keypair.pubkey()) @@ -1155,7 +1173,7 @@ mod test { assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_some()); RepairService::process_new_duplicate_slots( - &[duplicate_slot], + &[(duplicate_slot, duplicate_bank_id)], &mut duplicate_slot_repair_statuses, &cluster_slots, &bank9, diff --git a/core/src/repair_weight.rs b/core/src/repair_weight.rs index 26cce442e1519a..fe080518a50369 100644 --- a/core/src/repair_weight.rs +++ b/core/src/repair_weight.rs @@ -495,7 +495,7 @@ impl RepairWeight { for ((slot, _), _) in all_slots { *self .slot_to_tree - .get_mut(&slot) + .get_mut(slot) .expect("Nodes in tree must exist in `self.slot_to_tree`") = root2; } } @@ -521,9 +521,9 @@ impl RepairWeight { fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) { slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| { if stake_voted == stake_voted_ { - slot.cmp(&slot_) + slot.cmp(slot_) } else { - stake_voted.cmp(&stake_voted_).reverse() + stake_voted.cmp(stake_voted_).reverse() } }); } @@ -757,7 +757,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8); } for slot in 0..=1 { assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); @@ -772,7 +772,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0); } assert_eq!(repair_weight.trees.len(), 1); assert!(repair_weight.trees.contains_key(&0)); @@ -1088,10 +1088,10 @@ mod test { let purged_slots = vec![0, 1, 2, 4, 8, 10]; let mut expected_unrooted_len = 0; for purged_slot in &purged_slots { - assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot)); - assert!(!repair_weight.trees.contains_key(&purged_slot)); + assert!(!repair_weight.slot_to_tree.contains_key(purged_slot)); + assert!(!repair_weight.trees.contains_key(purged_slot)); if *purged_slot > 3 { - assert!(repair_weight.unrooted_slots.contains(&purged_slot)); + assert!(repair_weight.unrooted_slots.contains(purged_slot)); expected_unrooted_len += 1; } } diff --git a/core/src/repair_weighted_traversal.rs b/core/src/repair_weighted_traversal.rs index 534ef4841d16af..8b6cd0ceb4e8cc 100644 --- a/core/src/repair_weighted_traversal.rs +++ 
b/core/src/repair_weighted_traversal.rs @@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>( let new_repairs = RepairService::generate_repairs_for_slot( blockstore, slot, - &slot_meta, + slot_meta, max_repairs - repairs.len(), ); repairs.extend(new_repairs); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 401434f2b29562..45cd78306c3846 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -8,6 +8,7 @@ use crate::{ }, cluster_slot_state_verifier::*, cluster_slots::ClusterSlots, + cluster_slots_service::ClusterSlotsUpdateSender, commitment_service::{AggregateCommitmentService, CommitmentAggregationData}, consensus::{ ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes, SWITCH_FORK_THRESHOLD, @@ -15,12 +16,12 @@ use crate::{ fork_choice::{ForkChoice, SelectVoteAndResetForkResult}, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, - poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, - progress_map::{DuplicateStats, ForkProgress, ProgressMap, PropagatedStats}, + progress_map::{ForkProgress, ProgressMap, PropagatedStats}, repair_service::DuplicateSlotsResetReceiver, result::Result, rewards_recorder_service::RewardsRecorderSender, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, + voting_service::VoteOp, window_service::DuplicateSlotReceiver, }; use solana_client::rpc_response::SlotUpdate; @@ -34,6 +35,7 @@ use solana_ledger::{ }; use solana_measure::measure::Measure; use solana_metrics::inc_new_counter_info; +use solana_poh::poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}; use solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, rpc_subscriptions::RpcSubscriptions, @@ -137,6 +139,10 @@ pub struct ReplayTiming { start_leader_elapsed: u64, reset_bank_elapsed: u64, voting_elapsed: u64, + vote_push_us: u64, + vote_send_us: u64, + generate_vote_us: u64, + update_commitment_cache_us: u64, select_forks_elapsed: u64, compute_slot_stats_elapsed: u64, generate_new_bank_forks_elapsed: u64, @@ -190,6 +196,17 @@ impl ReplayTiming { let now = timestamp(); let elapsed_ms = now - self.last_print; if elapsed_ms > 1000 { + datapoint_info!( + "replay-loop-voting-stats", + ("vote_push_us", self.vote_push_us, i64), + ("vote_send_us", self.vote_send_us, i64), + ("generate_vote_us", self.generate_vote_us, i64), + ( + "update_commitment_cache_us", + self.update_commitment_cache_us, + i64 + ), + ); datapoint_info!( "replay-loop-timing-stats", ("total_elapsed_us", elapsed_ms * 1000, i64), @@ -292,6 +309,8 @@ impl ReplayStage { replay_vote_sender: ReplayVoteSender, gossip_duplicate_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver, + cluster_slots_update_sender: ClusterSlotsUpdateSender, + voting_sender: Sender, ) -> Self { let ReplayStageConfig { my_pubkey, @@ -337,9 +356,10 @@ impl ReplayStage { let mut partition_exists = false; let mut skipped_slots_info = SkippedSlotsInfo::default(); let mut replay_timing = ReplayTiming::default(); - let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots = GossipDuplicateConfirmedSlots::default(); - let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = UnfrozenGossipVerifiedVoteHashes::default(); - let mut latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks = 
LatestValidatorVotesForFrozenBanks::default(); + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); + let mut latest_validator_votes_for_frozen_banks = LatestValidatorVotesForFrozenBanks::default(); let mut voted_signatures = Vec::new(); let mut has_new_vote_been_rooted = !wait_for_vote_to_start_leader; let mut last_vote_refresh_time = LastVoteRefreshTime { @@ -382,11 +402,11 @@ impl ReplayStage { &bank_notification_sender, &rewards_recorder_sender, &subscriptions, + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, &mut unfrozen_gossip_verified_vote_hashes, &mut latest_validator_votes_for_frozen_banks, + &cluster_slots_update_sender, ); replay_active_banks_time.stop(); @@ -408,12 +428,11 @@ impl ReplayStage { let mut process_gossip_duplicate_confirmed_slots_time = Measure::start("process_gossip_duplicate_confirmed_slots"); Self::process_gossip_duplicate_confirmed_slots( &gossip_duplicate_confirmed_slots_receiver, + &mut duplicate_slots_tracker, &mut gossip_duplicate_confirmed_slots, &bank_forks, &mut progress, &mut heaviest_subtree_fork_choice, - &ancestors, - &descendants, ); process_gossip_duplicate_confirmed_slots_time.stop(); @@ -437,10 +456,9 @@ impl ReplayStage { if !tpu_has_bank { Self::process_duplicate_slots( &duplicate_slots_receiver, + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, &bank_forks, - &ancestors, - &descendants, &mut progress, &mut heaviest_subtree_fork_choice, ); @@ -484,7 +502,7 @@ impl ReplayStage { &bank_forks, ); - Self::mark_slots_confirmed(&confirmed_forks, &bank_forks, &mut progress, &ancestors, &descendants, &mut heaviest_subtree_fork_choice); + Self::mark_slots_confirmed(&confirmed_forks, &bank_forks, &mut progress, &mut duplicate_slots_tracker, &mut heaviest_subtree_fork_choice); } compute_slot_stats_time.stop(); @@ -495,7 +513,16 @@ impl ReplayStage { if let Some(heaviest_bank_on_same_voted_fork) = heaviest_bank_on_same_voted_fork.as_ref() { if let Some(my_latest_landed_vote) = progress.my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) { - Self::refresh_last_vote(&mut tower, &cluster_info, heaviest_bank_on_same_voted_fork, &poh_recorder, my_latest_landed_vote, &vote_account, &authorized_voter_keypairs.read().unwrap(), &mut voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time); + Self::refresh_last_vote(&mut tower, &cluster_info, + heaviest_bank_on_same_voted_fork, + my_latest_landed_vote, + &vote_account, + &authorized_voter_keypairs.read().unwrap(), + &mut voted_signatures, + has_new_vote_been_rooted, &mut + last_vote_refresh_time, + &voting_sender, + ); } } @@ -513,6 +540,7 @@ impl ReplayStage { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, + &heaviest_subtree_fork_choice, ); select_vote_and_reset_forks_time.stop(); @@ -551,8 +579,7 @@ impl ReplayStage { } Self::handle_votable_bank( - &vote_bank, - &poh_recorder, + vote_bank, switch_fork_decision, &bank_forks, &mut tower, @@ -569,10 +596,13 @@ impl ReplayStage { &block_commitment_cache, &mut heaviest_subtree_fork_choice, &bank_notification_sender, + &mut duplicate_slots_tracker, &mut gossip_duplicate_confirmed_slots, &mut unfrozen_gossip_verified_vote_hashes, &mut voted_signatures, &mut has_new_vote_been_rooted, + &mut replay_timing, + &voting_sender, ); }; voting_time.stop(); @@ -741,12 +771,7 
@@ impl ReplayStage { ) }; - Self::initialize_progress_and_fork_choice( - &root_bank, - frozen_banks, - &my_pubkey, - &vote_account, - ) + Self::initialize_progress_and_fork_choice(&root_bank, frozen_banks, my_pubkey, vote_account) } pub(crate) fn initialize_progress_and_fork_choice( @@ -762,20 +787,9 @@ impl ReplayStage { // Initialize progress map with any root banks for bank in &frozen_banks { let prev_leader_slot = progress.get_bank_prev_leader_slot(bank); - let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor( - progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()), - ); progress.insert( bank.slot(), - ForkProgress::new_from_bank( - bank, - &my_pubkey, - &vote_account, - prev_leader_slot, - duplicate_stats, - 0, - 0, - ), + ForkProgress::new_from_bank(bank, my_pubkey, vote_account, prev_leader_slot, 0, 0), ); } let root = root_bank.slot(); @@ -841,12 +855,6 @@ impl ReplayStage { // Clear the duplicate banks from BankForks { let mut w_bank_forks = bank_forks.write().unwrap(); - // Purging should have already been taken care of by logic - // in repair_service, so make sure drop implementation doesn't - // run - if let Some(b) = w_bank_forks.get(*d) { - b.skip_drop.store(true, Ordering::Relaxed) - } w_bank_forks.remove(*d); } } @@ -872,7 +880,7 @@ impl ReplayStage { .expect("must exist based on earlier check") { descendants - .get_mut(&a) + .get_mut(a) .expect("If exists in ancestor map must exist in descendants map") .retain(|d| *d != slot && !slot_descendants.contains(d)); } @@ -882,9 +890,9 @@ impl ReplayStage { // Purge all the descendants of this slot from both maps for descendant in slot_descendants { - ancestors.remove(&descendant).expect("must exist"); + ancestors.remove(descendant).expect("must exist"); descendants - .remove(&descendant) + .remove(descendant) .expect("must exist based on earlier check"); } descendants @@ -898,12 +906,11 @@ impl ReplayStage { // for duplicate slot recovery. fn process_gossip_duplicate_confirmed_slots( gossip_duplicate_confirmed_slots_receiver: &GossipDuplicateConfirmedSlotsReceiver, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, bank_forks: &RwLock, progress: &mut ProgressMap, fork_choice: &mut HeaviestSubtreeForkChoice, - ancestors: &HashMap>, - descendants: &HashMap>, ) { let root = bank_forks.read().unwrap().root(); for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver.try_iter() { @@ -926,9 +933,8 @@ impl ReplayStage { .unwrap() .get(confirmed_slot) .map(|b| b.hash()), + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, - ancestors, - descendants, progress, fork_choice, SlotStateUpdate::DuplicateConfirmed, @@ -959,34 +965,32 @@ impl ReplayStage { // Checks for and handles forks with duplicate slots.
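Throughout these replay_stage hunks, every slot state transition is funneled through `check_slot_agrees_with_cluster(...)`, which now also receives the new `DuplicateSlotsTracker`. Four variants appear at the call sites in this patch; the enum itself lives in `cluster_slot_state_verifier`, which is not part of this diff, so the definition below is a reconstruction, not the real one:

```rust
// Assumed shape, reconstructed from the call sites in this patch; the real
// definition is in core/src/cluster_slot_state_verifier.rs.
enum SlotStateUpdate {
    Frozen,             // replay finished and the bank was frozen
    Dead,               // replay failed and the slot was marked dead
    Duplicate,          // WindowService signaled a duplicate proof for the slot
    DuplicateConfirmed, // the cluster duplicate-confirmed the slot's hash
}
```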
fn process_duplicate_slots( duplicate_slots_receiver: &DuplicateSlotReceiver, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, bank_forks: &RwLock, - ancestors: &HashMap>, - descendants: &HashMap>, progress: &mut ProgressMap, fork_choice: &mut HeaviestSubtreeForkChoice, ) { - let duplicate_slots: Vec = duplicate_slots_receiver.try_iter().collect(); + let new_duplicate_slots: Vec = duplicate_slots_receiver.try_iter().collect(); let (root_slot, bank_hashes) = { let r_bank_forks = bank_forks.read().unwrap(); - let bank_hashes: Vec> = duplicate_slots + let bank_hashes: Vec> = new_duplicate_slots .iter() - .map(|slot| r_bank_forks.get(*slot).map(|bank| bank.hash())) + .map(|duplicate_slot| r_bank_forks.get(*duplicate_slot).map(|bank| bank.hash())) .collect(); (r_bank_forks.root(), bank_hashes) }; - - for (duplicate_slot, bank_hash) in duplicate_slots.into_iter().zip(bank_hashes.into_iter()) + for (duplicate_slot, bank_hash) in + new_duplicate_slots.into_iter().zip(bank_hashes.into_iter()) { // WindowService should only send the signal once per slot check_slot_agrees_with_cluster( duplicate_slot, root_slot, bank_hash, + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, - ancestors, - descendants, progress, fork_choice, SlotStateUpdate::Duplicate, @@ -1135,7 +1139,8 @@ impl ReplayStage { ); if !Self::check_propagation_for_start_leader(poh_slot, parent_slot, progress_map) { - let latest_unconfirmed_leader_slot = progress_map.get_latest_leader_slot(parent_slot).expect("In order for propagated check to fail, latest leader must exist in progress map"); + let latest_unconfirmed_leader_slot = progress_map.get_latest_leader_slot(parent_slot) + .expect("In order for propagated check to fail, latest leader must exist in progress map"); if poh_slot != skipped_slots_info.last_skipped_slot { datapoint_info!( "replay_stage-skip_leader_slot", @@ -1150,15 +1155,21 @@ impl ReplayStage { progress_map.log_propagated_stats(latest_unconfirmed_leader_slot, bank_forks); skipped_slots_info.last_skipped_slot = poh_slot; } - let bank = bank_forks.read().unwrap().get(latest_unconfirmed_leader_slot) - .expect("In order for propagated check to fail, latest leader must exist in progress map, and thus also in BankForks").clone(); + let bank = bank_forks + .read() + .unwrap() + .get(latest_unconfirmed_leader_slot) + .expect( + "In order for propagated check to fail, \ + latest leader must exist in progress map, and thus also in BankForks", + ) + .clone(); // Signal retransmit if Self::should_retransmit(poh_slot, &mut skipped_slots_info.last_retransmit_slot) { datapoint_info!("replay_stage-retransmit", ("slot", bank.slot(), i64),); - retransmit_slots_sender - .send(vec![(bank.slot(), bank.clone())].into_iter().collect()) - .unwrap(); + let _ = retransmit_slots_sender + .send(vec![(bank.slot(), bank.clone())].into_iter().collect()); } return; } @@ -1225,9 +1236,8 @@ impl ReplayStage { root: Slot, err: &BlockstoreProcessorError, subscriptions: &Arc, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, - ancestors: &HashMap>, - descendants: &HashMap>, progress: &mut ProgressMap, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, ) { @@ -1268,9 +1278,8 @@ impl ReplayStage { slot, root, Some(bank.hash()), + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, - ancestors, - descendants, progress, heaviest_subtree_fork_choice, SlotStateUpdate::Dead, @@ -1280,7 +1289,6 @@ 
impl ReplayStage { #[allow(clippy::too_many_arguments)] fn handle_votable_bank( bank: &Arc, - poh_recorder: &Arc>, switch_fork_decision: &SwitchForkDecision, bank_forks: &Arc>, tower: &mut Tower, @@ -1297,10 +1305,13 @@ impl ReplayStage { block_commitment_cache: &Arc>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, vote_signatures: &mut Vec, has_new_vote_been_rooted: &mut bool, + replay_timing: &mut ReplayTiming, + voting_sender: &Sender, ) { if bank.is_empty() { inc_new_counter_info!("replay_stage-voted_empty_bank", 1); @@ -1330,7 +1341,7 @@ impl ReplayStage { // get dropped. leader_schedule_cache.set_root(rooted_banks.last().unwrap()); blockstore - .set_roots(&rooted_slots) + .set_roots(rooted_slots.iter()) .expect("Ledger set roots failed"); let highest_confirmed_root = Some( block_commitment_cache @@ -1340,11 +1351,12 @@ impl ReplayStage { ); Self::handle_new_root( new_root, - &bank_forks, + bank_forks, progress, accounts_background_request_sender, highest_confirmed_root, heaviest_subtree_fork_choice, + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, unfrozen_gossip_verified_vote_hashes, has_new_vote_been_rooted, @@ -1364,22 +1376,27 @@ impl ReplayStage { info!("new root {}", new_root); } + let mut update_commitment_cache_time = Measure::start("update_commitment_cache"); Self::update_commitment_cache( bank.clone(), bank_forks.read().unwrap().root(), progress.get_fork_stats(bank.slot()).unwrap().total_stake, lockouts_sender, ); + update_commitment_cache_time.stop(); + replay_timing.update_commitment_cache_us += update_commitment_cache_time.as_us(); + Self::push_vote( cluster_info, bank, - poh_recorder, vote_account_pubkey, authorized_voter_keypairs, tower, switch_fork_decision, vote_signatures, *has_new_vote_been_rooted, + replay_timing, + voting_sender, ); } @@ -1445,7 +1462,7 @@ impl ReplayStage { let vote_ix = switch_fork_decision .to_vote_instruction( vote, - &vote_account_pubkey, + vote_account_pubkey, &authorized_voter_keypair.pubkey(), ) .expect("Switch threshold failure should not lead to voting"); @@ -1473,13 +1490,13 @@ impl ReplayStage { tower: &mut Tower, cluster_info: &ClusterInfo, heaviest_bank_on_same_fork: &Bank, - poh_recorder: &Mutex, my_latest_landed_vote: Slot, vote_account_pubkey: &Pubkey, authorized_voter_keypairs: &[Arc], vote_signatures: &mut Vec, has_new_vote_been_rooted: bool, last_vote_refresh_time: &mut LastVoteRefreshTime, + voting_sender: &Sender, ) { let last_voted_slot = tower.last_voted_slot(); if last_voted_slot.is_none() { @@ -1493,7 +1510,12 @@ impl ReplayStage { && last_vote_refresh_time.last_print_time.elapsed().as_secs() >= 1 { last_vote_refresh_time.last_print_time = Instant::now(); - info!("Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower", my_latest_landed_vote, heaviest_bank_on_same_fork.slot(), last_voted_slot); + info!( + "Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower", + my_latest_landed_vote, + heaviest_bank_on_same_fork.slot(), + last_voted_slot + ); } if my_latest_landed_vote >= last_voted_slot || heaviest_bank_on_same_fork @@ -1531,26 +1553,30 @@ impl ReplayStage { ("target_bank_slot", heaviest_bank_on_same_fork.slot(), i64), ("target_bank_hash", hash_string, 
String), ); - let _ = cluster_info.send_vote( - &vote_tx, - crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder), - ); - cluster_info.refresh_vote(vote_tx, last_voted_slot); + voting_sender + .send(VoteOp::RefreshVote { + tx: vote_tx, + last_voted_slot, + }) + .unwrap_or_else(|err| warn!("Error: {:?}", err)); last_vote_refresh_time.last_refresh_time = Instant::now(); } } + #[allow(clippy::too_many_arguments)] fn push_vote( cluster_info: &ClusterInfo, bank: &Bank, - poh_recorder: &Mutex, vote_account_pubkey: &Pubkey, authorized_voter_keypairs: &[Arc], tower: &mut Tower, switch_fork_decision: &SwitchForkDecision, vote_signatures: &mut Vec, has_new_vote_been_rooted: bool, + replay_timing: &mut ReplayTiming, + voting_sender: &Sender, ) { + let mut generate_time = Measure::start("generate_vote"); let vote_tx = Self::generate_vote_tx( &cluster_info.keypair, bank, @@ -1561,13 +1587,18 @@ impl ReplayStage { vote_signatures, has_new_vote_been_rooted, ); + generate_time.stop(); + replay_timing.generate_vote_us += generate_time.as_us(); if let Some(vote_tx) = vote_tx { tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash); - let _ = cluster_info.send_vote( - &vote_tx, - crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder), - ); - cluster_info.push_vote(&tower.tower_slots(), vote_tx); + + let tower_slots = tower.tower_slots(); + voting_sender + .send(VoteOp::PushVote { + tx: vote_tx, + tower_slots, + }) + .unwrap_or_else(|err| warn!("Error: {:?}", err)); } } @@ -1592,9 +1623,9 @@ impl ReplayStage { leader_schedule_cache: &LeaderScheduleCache, ) { let next_leader_slot = leader_schedule_cache.next_leader_slot( - &my_pubkey, + my_pubkey, bank.slot(), - &bank, + bank, Some(blockstore), GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); @@ -1633,11 +1664,11 @@ impl ReplayStage { bank_notification_sender: &Option, rewards_recorder_sender: &Option, subscriptions: &Arc, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, - ancestors: &HashMap>, - descendants: &HashMap>, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + cluster_slots_update_sender: &ClusterSlotsUpdateSender, ) -> bool { let mut did_complete_bank = false; let mut tx_count = 0; @@ -1665,21 +1696,15 @@ impl ReplayStage { (num_blocks_on_fork, num_dropped_blocks_on_fork) }; - // New children adopt the same latest duplicate ancestor as their parent. 
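These hunks also capture the other notable refactor in this file: replay no longer sends votes inline (the `poh_recorder`/`next_leader_tpu` path above is removed); it enqueues a `VoteOp` on `voting_sender`, and the new `voting_service` module registered in core/src/lib.rs drains the channel off the replay thread. A minimal sketch of that producer/consumer split, assuming only the two variants constructed in these hunks; the stand-in types and the loop body are illustrative, not the service's actual send logic:

```rust
use std::sync::mpsc::{channel, Receiver};

type Slot = u64; // solana_sdk::clock::Slot
struct Transaction; // stand-in for solana_sdk::transaction::Transaction

// Field names match the constructors used above; the real enum lives in the
// new core/src/voting_service.rs.
enum VoteOp {
    PushVote { tx: Transaction, tower_slots: Vec<Slot> },
    RefreshVote { tx: Transaction, last_voted_slot: Slot },
}

// Presumed shape of the VotingService loop: replay stays non-blocking because
// `push_vote`/`refresh_last_vote` only perform a channel send.
fn voting_loop(receiver: Receiver<VoteOp>) {
    for op in receiver {
        match op {
            VoteOp::PushVote { tx: _tx, tower_slots: _slots } => {
                // send the transaction toward the leader's TPU and push it to gossip
            }
            VoteOp::RefreshVote { tx: _tx, last_voted_slot: _slot } => {
                // replace the previously gossiped vote for this slot
            }
        }
    }
}

fn main() {
    let (sender, receiver) = channel::<VoteOp>();
    sender
        .send(VoteOp::PushVote { tx: Transaction, tower_slots: vec![1, 2, 3] })
        .unwrap_or_else(|err| eprintln!("Error: {:?}", err));
    drop(sender); // closing the channel lets the consumer loop exit
    voting_loop(receiver);
}
```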
- let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor( - progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()), - ); - // Insert a progress entry even for slots this node is the leader for, so that // 1) confirm_forks can report confirmation, 2) we can cache computations about // this bank in `select_forks()` let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| { ForkProgress::new_from_bank( &bank, - &my_pubkey, + my_pubkey, vote_account, prev_leader_slot, - duplicate_stats, num_blocks_on_fork, num_dropped_blocks_on_fork, ) @@ -1688,7 +1713,7 @@ impl ReplayStage { let root_slot = bank_forks.read().unwrap().root(); let replay_result = Self::replay_blockstore_into_bank( &bank, - &blockstore, + blockstore, bank_progress, transaction_status_sender, replay_vote_sender, @@ -1704,9 +1729,8 @@ impl ReplayStage { root_slot, &err, subscriptions, + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, - ancestors, - descendants, progress, heaviest_subtree_fork_choice, ); @@ -1725,12 +1749,16 @@ impl ReplayStage { ); did_complete_bank = true; info!("bank frozen: {}", bank.slot()); + let _ = cluster_slots_update_sender.send(vec![*bank_slot]); if let Some(transaction_status_sender) = transaction_status_sender { transaction_status_sender.send_transaction_status_freeze_message(&bank); } bank.freeze(); let bank_hash = bank.hash(); assert_ne!(bank_hash, Hash::default()); + // Needs to be updated before `check_slot_agrees_with_cluster()` so that + // any updates in `check_slot_agrees_with_cluster()` on fork choice take + // effect heaviest_subtree_fork_choice.add_new_leaf_slot( (bank.slot(), bank.hash()), Some((bank.parent_slot(), bank.parent_hash())), @@ -1739,9 +1767,8 @@ impl ReplayStage { bank.slot(), bank_forks.read().unwrap().root(), Some(bank.hash()), + duplicate_slots_tracker, gossip_duplicate_confirmed_slots, - ancestors, - descendants, progress, heaviest_subtree_fork_choice, SlotStateUpdate::Frozen, @@ -1766,7 +1793,7 @@ impl ReplayStage { ); } } - Self::record_rewards(&bank, &rewards_recorder_sender); + Self::record_rewards(&bank, rewards_recorder_sender); } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -1810,14 +1837,14 @@ impl ReplayStage { my_vote_pubkey, bank_slot, bank.vote_accounts().into_iter(), - &ancestors, + ancestors, |slot| progress.get_hash(slot), latest_validator_votes_for_frozen_banks, ); // Notify any listeners of the votes found in this newly computed // bank heaviest_subtree_fork_choice.compute_bank_stats( - &bank, + bank, tower, latest_validator_votes_for_frozen_banks, ); @@ -1955,6 +1982,7 @@ impl ReplayStage { progress: &ProgressMap, tower: &mut Tower, latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks, + fork_choice: &HeaviestSubtreeForkChoice, ) -> SelectVoteAndResetForkResult { // Try to vote on the actual heaviest fork. 
If the heaviest bank is // locked out or fails the threshold check, the validator will: @@ -1973,14 +2001,15 @@ impl ReplayStage { let selected_fork = { let switch_fork_decision = tower.check_switch_threshold( heaviest_bank.slot(), - &ancestors, - &descendants, - &progress, + ancestors, + descendants, + progress, heaviest_bank.total_epoch_stake(), heaviest_bank .epoch_vote_accounts(heaviest_bank.epoch()) .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), latest_validator_votes_for_frozen_banks, + fork_choice, ); match switch_fork_decision { @@ -2220,7 +2249,7 @@ impl ReplayStage { .contains(vote_pubkey); leader_propagated_stats.add_vote_pubkey( *vote_pubkey, - leader_bank.epoch_vote_account_stake(&vote_pubkey), + leader_bank.epoch_vote_account_stake(vote_pubkey), ); !exists }); @@ -2249,8 +2278,7 @@ impl ReplayStage { confirmed_forks: &[Slot], bank_forks: &RwLock, progress: &mut ProgressMap, - ancestors: &HashMap>, - descendants: &HashMap>, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, fork_choice: &mut HeaviestSubtreeForkChoice, ) { let (root_slot, bank_hashes) = { @@ -2265,19 +2293,18 @@ impl ReplayStage { for (slot, bank_hash) in confirmed_forks.iter().zip(bank_hashes.into_iter()) { // This case should be guaranteed as false by confirm_forks() if let Some(false) = progress.is_supermajority_confirmed(*slot) { - // Because supermajority confirmation will iterate through all ancestors/descendants - // in `check_slot_agrees_with_cluster`, only incur this cost if the slot wasn't already + // Because supermajority confirmation will iterate through and update the + // subtree in fork choice, only incur this cost if the slot wasn't already // confirmed progress.set_supermajority_confirmed_slot(*slot); check_slot_agrees_with_cluster( *slot, root_slot, bank_hash, + duplicate_slots_tracker, // Don't need to pass the gossip confirmed slots since `slot` // is already marked as confirmed in progress &BTreeMap::new(), - ancestors, - descendants, progress, fork_choice, SlotStateUpdate::DuplicateConfirmed, @@ -2328,6 +2355,7 @@ impl ReplayStage { accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, @@ -2353,6 +2381,10 @@ impl ReplayStage { } progress.handle_new_root(&r_bank_forks); heaviest_subtree_fork_choice.set_root((new_root, r_bank_forks.root_bank().hash())); + let mut slots_ge_root = duplicate_slots_tracker.split_off(&new_root); + // duplicate_slots_tracker now only contains entries >= `new_root` + std::mem::swap(duplicate_slots_tracker, &mut slots_ge_root); + let mut slots_ge_root = gossip_duplicate_confirmed_slots.split_off(&new_root); // gossip_confirmed_slots now only contains entries >= `new_root` std::mem::swap(gossip_duplicate_confirmed_slots, &mut slots_ge_root); @@ -2470,22 +2502,21 @@ impl ReplayStage { } #[cfg(test)] -pub(crate) mod tests { +mod tests { use super::*; use crate::{ consensus::test::{initialize_state, VoteSimulator}, consensus::Tower, progress_map::ValidatorStakeInfo, replay_stage::ReplayStage, - transaction_status_service::TransactionStatusService, }; use crossbeam_channel::unbounded; use solana_gossip::{cluster_info::Node, crds::Cursor}; use solana_ledger::{ blockstore::make_slot_entries, 
blockstore::{entries_to_test_shreds, BlockstoreError}, - blockstore_processor, create_new_tmp_ledger, - entry::{self, next_entry, Entry}, + create_new_tmp_ledger, + entry::{self, Entry}, genesis_utils::{create_genesis_config, create_genesis_config_with_leader}, get_tmp_ledger_path, shred::{ @@ -2493,11 +2524,14 @@ pub(crate) mod tests { SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADER, SIZE_OF_DATA_SHRED_PAYLOAD, }, }; - use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank; + use solana_rpc::{ + optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, + rpc::create_test_transactions_and_populate_blockstore, + }; use solana_runtime::{ accounts_background_service::AbsRequestSender, commitment::BlockCommitment, - genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, + genesis_utils::{GenesisConfigInfo, ValidatorVoteKeypairs}, }; use solana_sdk::{ clock::NUM_CONSECUTIVE_LEADER_SLOTS, @@ -2506,25 +2540,27 @@ pub(crate) mod tests { instruction::InstructionError, packet::PACKET_DATA_SIZE, poh_config::PohConfig, - signature::{Keypair, Signature, Signer}, + signature::{Keypair, Signer}, system_transaction, transaction::TransactionError, }; + use solana_streamer::socket::SocketAddrSpace; use solana_transaction_status::TransactionWithStatusMeta; use solana_vote_program::{ vote_state::{VoteState, VoteStateVersions}, vote_transaction, }; + use std::sync::mpsc::channel; use std::{ fs::remove_dir_all, iter, sync::{atomic::AtomicU64, Arc, RwLock}, }; - use trees::tr; + use trees::{tr, Tree}; #[test] fn test_is_partition_detected() { - let VoteSimulator { bank_forks, .. } = setup_forks(); + let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1); let ancestors = bank_forks.read().unwrap().ancestors(); // Last vote 1 is an ancestor of the heaviest slot 3, no partition assert!(!ReplayStage::is_partition_detected(&ancestors, 1, 3)); @@ -2541,8 +2577,8 @@ pub(crate) mod tests { struct ReplayBlockstoreComponents { blockstore: Arc, validator_node_to_vote_keys: HashMap, - validator_authorized_voter_keypairs: HashMap, - my_vote_pubkey: Pubkey, + validator_keypairs: HashMap, + my_pubkey: Pubkey, progress: ProgressMap, cluster_info: ClusterInfo, leader_schedule_cache: Arc, @@ -2552,64 +2588,53 @@ pub(crate) mod tests { rpc_subscriptions: Arc, } - fn replay_blockstore_components() -> ReplayBlockstoreComponents { + fn replay_blockstore_components(forks: Option>) -> ReplayBlockstoreComponents { // Setup blockstore - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), - ); - let validator_authorized_voter_keypairs: Vec<_> = - (0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); - - let validator_node_to_vote_keys: HashMap = - validator_authorized_voter_keypairs - .iter() - .map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey())) - .collect(); - let GenesisConfigInfo { genesis_config, .. } = - genesis_utils::create_genesis_config_with_vote_accounts( - 10_000, - &validator_authorized_voter_keypairs, - vec![100; validator_authorized_voter_keypairs.len()], - ); + let (vote_simulator, blockstore) = + setup_forks_from_tree(forks.unwrap_or_else(|| tr(0)), 20); - let bank0 = Bank::new(&genesis_config); + let VoteSimulator { + validator_keypairs, + progress, + bank_forks, + .. 
+ } = vote_simulator; - // ProgressMap - let mut progress = ProgressMap::default(); - progress.insert( - 0, - ForkProgress::new_from_bank( - &bank0, - bank0.collector_id(), - &Pubkey::default(), - None, - DuplicateStats::default(), - 0, - 0, - ), - ); + let blockstore = Arc::new(blockstore); + let bank_forks = Arc::new(bank_forks); + let validator_node_to_vote_keys: HashMap = validator_keypairs + .iter() + .map(|(_, keypairs)| { + ( + keypairs.node_keypair.pubkey(), + keypairs.vote_keypair.pubkey(), + ) + }) + .collect(); // ClusterInfo - let my_keypairs = &validator_authorized_voter_keypairs[0]; + let my_keypairs = validator_keypairs.values().next().unwrap(); let my_pubkey = my_keypairs.node_keypair.pubkey(); let cluster_info = ClusterInfo::new( Node::new_localhost_with_pubkey(&my_pubkey).info, Arc::new(Keypair::from_bytes(&my_keypairs.node_keypair.to_bytes()).unwrap()), + SocketAddrSpace::Unspecified, ); assert_eq!(my_pubkey, cluster_info.id()); // Leader schedule cache - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0)); + let root_bank = bank_forks.read().unwrap().root_bank(); + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&root_bank)); // PohRecorder + let working_bank = bank_forks.read().unwrap().working_bank(); let poh_recorder = Mutex::new( PohRecorder::new( - bank0.tick_height(), - bank0.last_blockhash(), - bank0.slot(), + working_bank.tick_height(), + working_bank.last_blockhash(), + working_bank.slot(), None, - bank0.ticks_per_slot(), + working_bank.ticks_per_slot(), &Pubkey::default(), &blockstore, &leader_schedule_cache, @@ -2619,14 +2644,11 @@ pub(crate) mod tests { .0, ); - // BankForks - let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0))); - // Tower let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); let tower = Tower::new_from_bankforks( &bank_forks.read().unwrap(), - &ledger_path, + blockstore.ledger_path(), &cluster_info.id(), &my_vote_pubkey, ); @@ -2642,17 +2664,11 @@ pub(crate) mod tests { optimistically_confirmed_bank, )); - let validator_authorized_voter_keypairs: HashMap = - validator_authorized_voter_keypairs - .into_iter() - .map(|keys| (keys.vote_keypair.pubkey(), keys)) - .collect(); - ReplayBlockstoreComponents { blockstore, validator_node_to_vote_keys, - validator_authorized_voter_keypairs, - my_vote_pubkey, + validator_keypairs, + my_pubkey, progress, cluster_info, leader_schedule_cache, @@ -2673,7 +2689,7 @@ pub(crate) mod tests { leader_schedule_cache, rpc_subscriptions, .. 
- } = replay_blockstore_components(); + } = replay_blockstore_components(None); // Insert a non-root bank so that the propagation logic will update this // bank @@ -2688,10 +2704,9 @@ pub(crate) mod tests { &bank1, bank1.collector_id(), validator_node_to_vote_keys - .get(&bank1.collector_id()) + .get(bank1.collector_id()) .unwrap(), Some(0), - DuplicateStats::default(), 0, 0, ), @@ -2788,11 +2803,11 @@ pub(crate) mod tests { let mut progress = ProgressMap::default(); for i in 0..=root { - progress.insert( - i, - ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), - ); + progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); } + + let mut duplicate_slots_tracker: DuplicateSlotsTracker = + vec![root - 1, root, root + 1].into_iter().collect(); let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots = vec![root - 1, root, root + 1] .into_iter() @@ -2812,6 +2827,7 @@ pub(crate) mod tests { &AbsRequestSender::default(), None, &mut heaviest_subtree_fork_choice, + &mut duplicate_slots_tracker, &mut gossip_duplicate_confirmed_slots, &mut unfrozen_gossip_verified_vote_hashes, &mut true, @@ -2821,6 +2837,10 @@ pub(crate) mod tests { assert_eq!(progress.len(), 1); assert!(progress.get(&root).is_some()); // root - 1 is filtered out + assert_eq!( + duplicate_slots_tracker.into_iter().collect::>(), + vec![root, root + 1] + ); assert_eq!( gossip_duplicate_confirmed_slots .keys() @@ -2869,10 +2889,7 @@ pub(crate) mod tests { let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); let mut progress = ProgressMap::default(); for i in 0..=root { - progress.insert( - i, - ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), - ); + progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); } ReplayStage::handle_new_root( root, @@ -2881,6 +2898,7 @@ pub(crate) mod tests { &AbsRequestSender::default(), Some(confirmed_root), &mut heaviest_subtree_fork_choice, + &mut DuplicateSlotsTracker::default(), &mut GossipDuplicateConfirmedSlots::default(), &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, @@ -2943,7 +2961,7 @@ pub(crate) mod tests { &bad_hash, hashes_per_tick.saturating_sub(1), vec![system_transaction::transfer( - &genesis_keypair, + genesis_keypair, &keypair2.pubkey(), 2, blockhash, @@ -3060,8 +3078,7 @@ pub(crate) mod tests { let mut entries = entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash); let last_entry_hash = entries.last().unwrap().hash; - let tx = - system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash); + let tx = system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash); let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]); entries.push(trailing_entry); entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0) @@ -3127,9 +3144,9 @@ pub(crate) mod tests { let bank0 = bank_forks.working_bank(); let mut progress = ProgressMap::default(); let last_blockhash = bank0.last_blockhash(); - let mut bank0_progress = progress.entry(bank0.slot()).or_insert_with(|| { - ForkProgress::new(last_blockhash, None, DuplicateStats::default(), None, 0, 0) - }); + let mut bank0_progress = progress + .entry(bank0.slot()) + .or_insert_with(|| ForkProgress::new(last_blockhash, None, None, 0, 0)); let shreds = shred_to_insert(&mint_keypair, bank0.clone()); blockstore.insert_shreds(shreds, None, false).unwrap(); let block_commitment_cache = 
Arc::new(RwLock::new(BlockCommitmentCache::default())); @@ -3141,7 +3158,7 @@ pub(crate) mod tests { &mut bank0_progress, None, &replay_vote_sender, - &&VerifyRecyclers::default(), + &VerifyRecyclers::default(), ); let subscriptions = Arc::new(RpcSubscriptions::new( @@ -3157,9 +3174,8 @@ pub(crate) mod tests { 0, err, &subscriptions, - &BTreeMap::new(), - &HashMap::new(), - &HashMap::new(), + &mut DuplicateSlotsTracker::default(), + &GossipDuplicateConfirmedSlots::default(), &mut progress, &mut HeaviestSubtreeForkChoice::new((0, Hash::default())), ); @@ -3182,12 +3198,12 @@ pub(crate) mod tests { #[test] fn test_replay_commitment_cache() { fn leader_vote(vote_slot: Slot, bank: &Arc, pubkey: &Pubkey) { - let mut leader_vote_account = bank.get_account(&pubkey).unwrap(); + let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = VoteState::from(&leader_vote_account).unwrap(); vote_state.process_slot_vote_unchecked(vote_slot); let versioned = VoteStateVersions::new_current(vote_state); VoteState::to(&versioned, &mut leader_vote_account).unwrap(); - bank.store_account(&pubkey, &leader_vote_account); + bank.store_account(pubkey, &leader_vote_account); } let leader_pubkey = solana_sdk::pubkey::new_rand(); @@ -3297,68 +3313,6 @@ pub(crate) mod tests { ); } - pub fn create_test_transactions_and_populate_blockstore( - keypairs: Vec<&Keypair>, - previous_slot: Slot, - bank: Arc, - blockstore: Arc, - max_complete_transaction_status_slot: Arc, - ) -> Vec { - let mint_keypair = keypairs[0]; - let keypair1 = keypairs[1]; - let keypair2 = keypairs[2]; - let keypair3 = keypairs[3]; - let slot = bank.slot(); - let blockhash = bank.confirmed_last_blockhash().0; - - // Generate transactions for processing - // Successful transaction - let success_tx = - system_transaction::transfer(&mint_keypair, &keypair1.pubkey(), 2, blockhash); - let success_signature = success_tx.signatures[0]; - let entry_1 = next_entry(&blockhash, 1, vec![success_tx]); - // Failed transaction, InstructionError - let ix_error_tx = - system_transaction::transfer(&keypair2, &keypair3.pubkey(), 10, blockhash); - let ix_error_signature = ix_error_tx.signatures[0]; - let entry_2 = next_entry(&entry_1.hash, 1, vec![ix_error_tx]); - // Failed transaction - let fail_tx = - system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 2, Hash::default()); - let entry_3 = next_entry(&entry_2.hash, 1, vec![fail_tx]); - let mut entries = vec![entry_1, entry_2, entry_3]; - - let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0); - blockstore.insert_shreds(shreds, None, false).unwrap(); - blockstore.set_roots(&[slot]).unwrap(); - - let (transaction_status_sender, transaction_status_receiver) = unbounded(); - let (replay_vote_sender, _replay_vote_receiver) = unbounded(); - let transaction_status_service = TransactionStatusService::new( - transaction_status_receiver, - max_complete_transaction_status_slot, - blockstore, - &Arc::new(AtomicBool::new(false)), - ); - - // Check that process_entries successfully writes can_commit transactions statuses, and - // that they are matched properly by get_rooted_block - let _result = blockstore_processor::process_entries( - &bank, - &mut entries, - true, - Some(&TransactionStatusSender { - sender: transaction_status_sender, - enable_cpi_and_log_storage: false, - }), - Some(&replay_vote_sender), - ); - - transaction_status_service.join().unwrap(); - - vec![success_signature, ix_error_signature] - } - #[test] fn test_write_persist_transaction_status() { let 
GenesisConfigInfo { @@ -3490,14 +3444,7 @@ pub(crate) mod tests { bank_forks.write().unwrap().insert(bank1); progress.insert( 1, - ForkProgress::new( - bank0.last_blockhash(), - None, - DuplicateStats::default(), - None, - 0, - 0, - ), + ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0), ); let ancestors = bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = bank_forks @@ -3793,7 +3740,7 @@ pub(crate) mod tests { success_index: usize, ) { let stake = 10_000; - let (bank_forks, _, _) = initialize_state(&all_keypairs, stake); + let (bank_forks, _, _) = initialize_state(all_keypairs, stake); let root_bank = bank_forks.root_bank(); let mut propagated_stats = PropagatedStats { total_epoch_stake: stake * all_keypairs.len() as u64, @@ -3924,7 +3871,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(9), - DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -3938,7 +3884,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(8), - DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -4021,7 +3966,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(prev_leader_slot), - DuplicateStats::default(), { if i % 2 == 0 { Some(ValidatorStakeInfo { @@ -4101,7 +4045,6 @@ pub(crate) mod tests { let mut fork_progress = ForkProgress::new( Hash::default(), Some(prev_leader_slot), - DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -4161,7 +4104,7 @@ pub(crate) mod tests { // should succeed progress_map.insert( parent_slot, - ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), + ForkProgress::new(Hash::default(), None, None, 0, 0), ); assert!(ReplayStage::check_propagation_for_start_leader( poh_slot, @@ -4177,7 +4120,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -4204,21 +4146,13 @@ pub(crate) mod tests { let previous_leader_slot = parent_slot - 1; progress_map.insert( parent_slot, - ForkProgress::new( - Hash::default(), - Some(previous_leader_slot), - DuplicateStats::default(), - None, - 0, - 0, - ), + ForkProgress::new(Hash::default(), Some(previous_leader_slot), None, 0, 0), ); progress_map.insert( previous_leader_slot, ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -4279,7 +4213,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -4315,7 +4248,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -4339,7 +4271,6 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, - DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -4354,11 +4285,12 @@ pub(crate) mod tests { #[test] fn test_purge_unconfirmed_duplicate_slot() { + let (vote_simulator, _) = setup_default_forks(2); let VoteSimulator { bank_forks, mut progress, .. - } = setup_forks(); + } = vote_simulator; let mut descendants = bank_forks.read().unwrap().descendants().clone(); let mut ancestors = bank_forks.read().unwrap().ancestors(); @@ -4418,7 +4350,7 @@ pub(crate) mod tests { #[test] fn test_purge_ancestors_descendants() { - let VoteSimulator { bank_forks, .. 
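The propagation tests above all feed a total_epoch_stake into PropagatedStats. The underlying check is, roughly, that voting stake observed on the leader's fork must exceed a fixed fraction of epoch stake before the leader may build on it; the sketch below assumes a one-third threshold and simplified fields, which are illustrative rather than the exact constants:

struct PropagatedStats {
    propagated_validators_stake: u64,
    total_epoch_stake: u64,
    is_propagated: bool,
}

impl PropagatedStats {
    fn update(&mut self, threshold: f64) {
        // Propagated once observed stake clears the threshold fraction.
        self.is_propagated = self.propagated_validators_stake as f64
            > threshold * self.total_epoch_stake as f64;
    }
}

fn main() {
    let mut stats = PropagatedStats {
        propagated_validators_stake: 4_000,
        total_epoch_stake: 10_000,
        is_propagated: false,
    };
    stats.update(1.0 / 3.0); // e.g. a one-third superminority threshold
    assert!(stats.is_propagated);
}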
} = setup_forks(); + let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1); // Purge branch rooted at slot 2 let mut descendants = bank_forks.read().unwrap().descendants().clone(); @@ -4443,7 +4375,7 @@ pub(crate) mod tests { )); assert!(check_map_eq( &descendants, - &bank_forks.read().unwrap().descendants() + bank_forks.read().unwrap().descendants() )); // Try to purge the root @@ -4476,7 +4408,7 @@ pub(crate) mod tests { bank_forks, leader_schedule_cache, .. - } = replay_blockstore_components(); + } = replay_blockstore_components(None); let root_bank = bank_forks.read().unwrap().root_bank(); let my_pubkey = leader_schedule_cache @@ -4582,26 +4514,24 @@ pub(crate) mod tests { // Record the vote for 4 tower.record_bank_vote( - &bank_forks.read().unwrap().get(4).unwrap(), + bank_forks.read().unwrap().get(4).unwrap(), &Pubkey::default(), ); // Mark 4 as duplicate, 3 should be the heaviest slot, but should not be votable // because of lockout blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap(); - let ancestors = bank_forks.read().unwrap().ancestors(); - let descendants = bank_forks.read().unwrap().descendants().clone(); - let mut gossip_duplicate_confirmed_slots = BTreeMap::new(); + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); let bank4_hash = bank_forks.read().unwrap().get(4).unwrap().hash(); assert_ne!(bank4_hash, Hash::default()); check_slot_agrees_with_cluster( 4, bank_forks.read().unwrap().root(), Some(bank4_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut vote_simulator.heaviest_subtree_fork_choice, SlotStateUpdate::Duplicate, ); @@ -4618,18 +4548,15 @@ pub(crate) mod tests { // Now mark 2, an ancestor of 4, as duplicate blockstore.store_duplicate_slot(2, vec![], vec![]).unwrap(); - let ancestors = bank_forks.read().unwrap().ancestors(); - let descendants = bank_forks.read().unwrap().descendants().clone(); let bank2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); assert_ne!(bank2_hash, Hash::default()); check_slot_agrees_with_cluster( 2, bank_forks.read().unwrap().root(), Some(bank2_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut vote_simulator.heaviest_subtree_fork_choice, SlotStateUpdate::Duplicate, ); @@ -4654,10 +4581,9 @@ pub(crate) mod tests { 4, bank_forks.read().unwrap().root(), Some(bank4_hash), + &mut duplicate_slots_tracker, &gossip_duplicate_confirmed_slots, - &ancestors, - &descendants, - &mut progress, + &progress, &mut vote_simulator.heaviest_subtree_fork_choice, SlotStateUpdate::DuplicateConfirmed, ); @@ -4676,13 +4602,16 @@ pub(crate) mod tests { #[test] fn test_gossip_vote_doesnt_affect_fork_choice() { - let VoteSimulator { - bank_forks, - mut heaviest_subtree_fork_choice, - mut latest_validator_votes_for_frozen_banks, - vote_pubkeys, - .. - } = setup_forks(); + let ( + VoteSimulator { + bank_forks, + mut heaviest_subtree_fork_choice, + mut latest_validator_votes_for_frozen_banks, + vote_pubkeys, + .. 
+ }, + _, + ) = setup_default_forks(1); let vote_pubkey = vote_pubkeys[0]; let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); @@ -4718,14 +4647,14 @@ pub(crate) mod tests { #[test] fn test_replay_stage_refresh_last_vote() { let ReplayBlockstoreComponents { - mut validator_authorized_voter_keypairs, + mut validator_keypairs, cluster_info, poh_recorder, bank_forks, mut tower, - my_vote_pubkey, + my_pubkey, .. - } = replay_blockstore_components(); + } = replay_blockstore_components(None); let mut last_vote_refresh_time = LastVoteRefreshTime { last_refresh_time: Instant::now(), @@ -4735,11 +4664,9 @@ pub(crate) mod tests { let mut voted_signatures = vec![]; let my_vote_keypair = vec![Arc::new( - validator_authorized_voter_keypairs - .remove(&my_vote_pubkey) - .unwrap() - .vote_keypair, + validator_keypairs.remove(&my_pubkey).unwrap().vote_keypair, )]; + let my_vote_pubkey = my_vote_keypair[0].pubkey(); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); fn fill_bank_with_ticks(bank: &Bank) { @@ -4751,6 +4678,7 @@ pub(crate) mod tests { } } } + let (voting_sender, voting_receiver) = channel(); // Simulate landing a vote for slot 0 landing in slot 1 let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); @@ -4759,14 +4687,20 @@ pub(crate) mod tests { ReplayStage::push_vote( &cluster_info, &bank0, - &poh_recorder, &my_vote_pubkey, &my_vote_keypair, &mut tower, &SwitchForkDecision::SameFork, &mut voted_signatures, has_new_vote_been_rooted, + &mut ReplayTiming::default(), + &voting_sender, ); + let vote_info = voting_receiver + .recv_timeout(Duration::from_secs(1)) + .unwrap(); + crate::voting_service::VotingService::handle_vote(&cluster_info, &poh_recorder, vote_info); + let mut cursor = Cursor::default(); let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes.len(), 1); @@ -4787,13 +4721,13 @@ pub(crate) mod tests { &mut tower, &cluster_info, refresh_bank, - &poh_recorder, - Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(), + Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time, + &voting_sender, ); // No new votes have been submitted to gossip @@ -4810,14 +4744,19 @@ pub(crate) mod tests { ReplayStage::push_vote( &cluster_info, &bank1, - &poh_recorder, &my_vote_pubkey, &my_vote_keypair, &mut tower, &SwitchForkDecision::SameFork, &mut voted_signatures, has_new_vote_been_rooted, + &mut ReplayTiming::default(), + &voting_sender, ); + let vote_info = voting_receiver + .recv_timeout(Duration::from_secs(1)) + .unwrap(); + crate::voting_service::VotingService::handle_vote(&cluster_info, &poh_recorder, vote_info); let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes.len(), 1); let vote_tx = &votes[0]; @@ -4831,14 +4770,15 @@ pub(crate) mod tests { &mut tower, &cluster_info, &bank2, - &poh_recorder, Tower::last_voted_slot_in_bank(&bank2, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time, + &voting_sender, ); + // No new votes have been submitted to gossip let (_, votes) = cluster_info.get_votes(&mut cursor); assert!(votes.is_empty()); @@ -4867,14 +4807,19 @@ pub(crate) mod tests { &mut tower, &cluster_info, &expired_bank, - &poh_recorder, Tower::last_voted_slot_in_bank(&expired_bank, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut 
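push_vote and refresh_last_vote in these hunks no longer touch the PohRecorder directly; they enqueue work on a voting_sender channel that a separate VotingService drains, and the tests pump that queue by hand via handle_vote. A minimal sketch of the decoupling, with stand-in types rather than the real solana-core API:

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

struct VoteOp {
    slot: u64,
}

fn push_vote(voting_sender: &Sender<VoteOp>, slot: u64) {
    // Replay decides what to vote on; transmission happens on the service side.
    voting_sender.send(VoteOp { slot }).unwrap();
}

fn run_voting_service(receiver: Receiver<VoteOp>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        // Stand-in for VotingService::handle_vote(): sign/submit each vote here.
        for op in receiver {
            println!("submitting vote for slot {}", op.slot);
        }
    })
}

fn main() {
    let (voting_sender, voting_receiver) = channel();
    let service = run_voting_service(voting_receiver);
    push_vote(&voting_sender, 0);
    drop(voting_sender); // closing the channel lets the service thread exit
    service.join().unwrap();
}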
voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time, + &voting_sender, ); + let vote_info = voting_receiver + .recv_timeout(Duration::from_secs(1)) + .unwrap(); + crate::voting_service::VotingService::handle_vote(&cluster_info, &poh_recorder, vote_info); + assert!(last_vote_refresh_time.last_refresh_time > clone_refresh_time); let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes.len(), 1); @@ -4923,14 +4868,15 @@ pub(crate) mod tests { &mut tower, &cluster_info, &expired_bank_sibling, - &poh_recorder, Tower::last_voted_slot_in_bank(&expired_bank_sibling, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time, + &voting_sender, ); + let (_, votes) = cluster_info.get_votes(&mut cursor); assert!(votes.is_empty()); assert_eq!( @@ -4968,12 +4914,12 @@ pub(crate) mod tests { progress, &VoteTracker::default(), &ClusterSlots::default(), - &bank_forks, + bank_forks, heaviest_subtree_fork_choice, latest_validator_votes_for_frozen_banks, ); let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice - .select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks); + .select_forks(&frozen_banks, tower, progress, ancestors, bank_forks); assert!(heaviest_bank_on_same_fork.is_none()); let SelectVoteAndResetForkResult { vote_bank, @@ -4982,11 +4928,12 @@ pub(crate) mod tests { } = ReplayStage::select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), - &ancestors, - &descendants, + ancestors, + descendants, progress, tower, latest_validator_votes_for_frozen_banks, + heaviest_subtree_fork_choice, ); ( vote_bank.map(|(b, _)| b.slot()), @@ -4994,7 +4941,16 @@ pub(crate) mod tests { ) } - fn setup_forks() -> VoteSimulator { + fn setup_forks_from_tree(tree: Tree, num_keys: usize) -> (VoteSimulator, Blockstore) { + let mut vote_simulator = VoteSimulator::new(num_keys); + vote_simulator.fill_bank_forks(tree.clone(), &HashMap::new()); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + blockstore.add_tree(tree, false, true, 2, Hash::default()); + (vote_simulator, blockstore) + } + + fn setup_default_forks(num_keys: usize) -> (VoteSimulator, Blockstore) { /* Build fork structure: @@ -5009,12 +4965,9 @@ pub(crate) mod tests { | slot 6 */ - let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6))))); - - let mut vote_simulator = VoteSimulator::new(1); - vote_simulator.fill_bank_forks(forks, &HashMap::new()); - vote_simulator + let tree = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6))))); + setup_forks_from_tree(tree, num_keys) } fn check_map_eq( diff --git a/core/src/result.rs b/core/src/result.rs index 8dbce74de77ded..5fafca2ee1bed7 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -6,18 +6,18 @@ use solana_ledger::blockstore; #[derive(Debug)] pub enum Error { Io(std::io::Error), - RecvError(std::sync::mpsc::RecvError), - CrossbeamRecvTimeoutError(crossbeam_channel::RecvTimeoutError), - ReadyTimeoutError, - RecvTimeoutError(std::sync::mpsc::RecvTimeoutError), - CrossbeamSendError, - TryCrossbeamSendError, + Recv(std::sync::mpsc::RecvError), + CrossbeamRecvTimeout(crossbeam_channel::RecvTimeoutError), + ReadyTimeout, + RecvTimeout(std::sync::mpsc::RecvTimeoutError), + CrossbeamSend, + TryCrossbeamSend, Serialize(std::boxed::Box), - ClusterInfoError(cluster_info::ClusterInfoError), - SendError, - BlockstoreError(blockstore::BlockstoreError), - 
WeightedIndexError(rand::distributions::weighted::WeightedError), - GossipError(GossipError), + ClusterInfo(cluster_info::ClusterInfoError), + Send, + Blockstore(blockstore::BlockstoreError), + WeightedIndex(rand::distributions::weighted::WeightedError), + Gossip(GossipError), } pub type Result = std::result::Result; @@ -32,42 +32,42 @@ impl std::error::Error for Error {} impl std::convert::From for Error { fn from(e: std::sync::mpsc::RecvError) -> Error { - Error::RecvError(e) + Error::Recv(e) } } impl std::convert::From for Error { fn from(e: crossbeam_channel::RecvTimeoutError) -> Error { - Error::CrossbeamRecvTimeoutError(e) + Error::CrossbeamRecvTimeout(e) } } impl std::convert::From for Error { fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error { - Error::ReadyTimeoutError + Error::ReadyTimeout } } impl std::convert::From for Error { fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error { - Error::RecvTimeoutError(e) + Error::RecvTimeout(e) } } impl std::convert::From for Error { fn from(e: cluster_info::ClusterInfoError) -> Error { - Error::ClusterInfoError(e) + Error::ClusterInfo(e) } } impl std::convert::From> for Error { fn from(_e: crossbeam_channel::SendError) -> Error { - Error::CrossbeamSendError + Error::CrossbeamSend } } impl std::convert::From> for Error { fn from(_e: crossbeam_channel::TrySendError) -> Error { - Error::TryCrossbeamSendError + Error::TryCrossbeamSend } } impl std::convert::From> for Error { fn from(_e: std::sync::mpsc::SendError) -> Error { - Error::SendError + Error::Send } } impl std::convert::From for Error { @@ -82,17 +82,17 @@ impl std::convert::From> for Error { } impl std::convert::From for Error { fn from(e: blockstore::BlockstoreError) -> Error { - Error::BlockstoreError(e) + Error::Blockstore(e) } } impl std::convert::From for Error { fn from(e: rand::distributions::weighted::WeightedError) -> Error { - Error::WeightedIndexError(e) + Error::WeightedIndex(e) } } impl std::convert::From for Error { fn from(e: GossipError) -> Error { - Error::GossipError(e) + Error::Gossip(e) } } @@ -116,12 +116,12 @@ mod tests { #[test] fn from_test() { - assert_matches!(Error::from(RecvError {}), Error::RecvError(_)); + assert_matches!(Error::from(RecvError {}), Error::Recv(_)); assert_matches!( Error::from(RecvTimeoutError::Timeout), - Error::RecvTimeoutError(_) + Error::RecvTimeout(_) ); - assert_matches!(send_error(), Err(Error::SendError)); + assert_matches!(send_error(), Err(Error::Send)); let ioe = io::Error::new(io::ErrorKind::NotFound, "hi"); assert_matches!(Error::from(ioe), Error::Io(_)); } diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 3ebfaf56a1eda4..50f837e1c1625b 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -3,21 +3,18 @@ use crate::{ cluster_info_vote_listener::VerifiedVoteReceiver, + cluster_nodes::ClusterNodes, cluster_slots::ClusterSlots, - cluster_slots_service::ClusterSlotsService, + cluster_slots_service::{ClusterSlotsService, ClusterSlotsUpdateReceiver}, completed_data_sets_service::CompletedDataSetsSender, - repair_service::DuplicateSlotsResetSender, - repair_service::RepairInfo, + repair_service::{DuplicateSlotsResetSender, RepairInfo}, result::{Error, Result}, window_service::{should_retransmit_and_persist, WindowService}, }; use crossbeam_channel::{Receiver, Sender}; use lru::LruCache; use solana_client::rpc_response::SlotUpdate; -use solana_gossip::{ - cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT}, - contact_info::ContactInfo, -}; +use 
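The core/src/result.rs hunk above is a mechanical rename: variants stop repeating the enum's name (RecvError becomes Recv, SendError becomes Send, and so on), which is the pattern clippy's enum_variant_names lint flags. In miniature:

use std::sync::mpsc::RecvError;

#[derive(Debug)]
enum Error {
    Recv(RecvError), // was: Error::RecvError(RecvError)
    Send,            // was: Error::SendError
}

impl From<RecvError> for Error {
    fn from(e: RecvError) -> Error {
        Error::Recv(e)
    }
}

fn main() {
    let err: Error = RecvError.into();
    // Call sites now match on the shorter names.
    assert!(matches!(err, Error::Recv(_)));
}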
solana_gossip::cluster_info::{ClusterInfo, DATA_PLANE_FANOUT}; use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats}; use solana_ledger::{ blockstore::{Blockstore, CompletedSlotsReceiver}, @@ -34,7 +31,6 @@ use solana_runtime::{bank::Bank, bank_forks::BankForks}; use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp}; use solana_streamer::streamer::PacketReceiver; use std::{ - cmp, collections::hash_set::HashSet, collections::{BTreeMap, BTreeSet, HashMap}, net::UdpSocket, @@ -218,12 +214,6 @@ fn update_retransmit_stats( } } -#[derive(Default)] -struct EpochStakesCache { - peers: Vec, - stakes_and_index: Vec<(u64, usize)>, -} - use crate::packet_hasher::PacketHasher; // Map of shred (slot, index, is_data) => list of hash values seen for that key. pub type ShredFilter = LruCache<(Slot, u32, bool), Vec>; @@ -284,33 +274,6 @@ fn check_if_first_shred_received( } } -// Drops shred slot leader from retransmit peers. -// TODO: decide which bank should be used here. -fn get_retransmit_peers( - self_pubkey: Pubkey, - shred_slot: Slot, - leader_schedule_cache: &LeaderScheduleCache, - bank: &Bank, - stakes_cache: &EpochStakesCache, -) -> Vec<(u64 /*stakes*/, usize /*index*/)> { - match leader_schedule_cache.slot_leader_at(shred_slot, Some(bank)) { - None => { - error!("unknown leader for shred slot"); - stakes_cache.stakes_and_index.clone() - } - Some(pubkey) if pubkey == self_pubkey => { - error!("retransmit from slot leader: {}", pubkey); - stakes_cache.stakes_and_index.clone() - } - Some(pubkey) => stakes_cache - .stakes_and_index - .iter() - .filter(|(_, i)| stakes_cache.peers[*i].id != pubkey) - .copied() - .collect(), - } -} - #[allow(clippy::too_many_arguments)] fn retransmit( bank_forks: &RwLock, @@ -320,7 +283,7 @@ fn retransmit( sock: &UdpSocket, id: u32, stats: &RetransmitStats, - epoch_stakes_cache: &RwLock, + cluster_nodes: &RwLock>, last_peer_update: &AtomicU64, shreds_received: &Mutex, max_slots: &MaxSlots, @@ -358,24 +321,22 @@ fn retransmit( && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last { let epoch_staked_nodes = r_bank.epoch_staked_nodes(bank_epoch); - let (peers, stakes_and_index) = - cluster_info.sorted_retransmit_peers_and_stakes(epoch_staked_nodes.as_ref()); - { - let mut epoch_stakes_cache = epoch_stakes_cache.write().unwrap(); - epoch_stakes_cache.peers = peers; - epoch_stakes_cache.stakes_and_index = stakes_and_index; - } + *cluster_nodes.write().unwrap() = ClusterNodes::::new( + cluster_info, + &epoch_staked_nodes.unwrap_or_default(), + ); { let mut sr = shreds_received.lock().unwrap(); sr.0.clear(); sr.1.reset(); } } - let r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap(); - let mut peers_len = 0; + let cluster_nodes = cluster_nodes.read().unwrap(); + let peers_len = cluster_nodes.num_peers(); epoch_cache_update.stop(); let my_id = cluster_info.id(); + let socket_addr_space = cluster_info.socket_addr_space(); let mut discard_total = 0; let mut repair_total = 0; let mut retransmit_total = 0; @@ -412,50 +373,18 @@ fn retransmit( } let mut compute_turbine_peers = Measure::start("turbine_start"); - let stakes_and_index = get_retransmit_peers( - my_id, - shred_slot, - leader_schedule_cache, - r_bank.deref(), - r_epoch_stakes_cache.deref(), - ); - let (my_index, shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index( - &my_id, - &r_epoch_stakes_cache.peers, - &stakes_and_index, - packet.meta.seed, - ); + let slot_leader = leader_schedule_cache.slot_leader_at(shred_slot, 
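get_retransmit_peers above hands back the (neighbors, children) split of the turbine tree for this node. A simplified reconstruction of that split over an already-shuffled peer list; this mirrors the shape of the computation, not the exact ClusterNodes code:

fn retransmit_peers(fanout: usize, index: usize, peers: &[u64]) -> (Vec<u64>, Vec<u64>) {
    let offset = index % fanout; // position inside this neighborhood
    let anchor = index - offset; // first node of the neighborhood
    // Neighbors: the fanout-sized group this node belongs to (includes itself).
    let neighbors: Vec<_> = (anchor..anchor + fanout)
        .take_while(|&i| i < peers.len())
        .map(|i| peers[i])
        .collect();
    // Children: this node's stripe of the next layer down the tree.
    let children: Vec<_> = ((anchor + 1) * fanout + offset..)
        .step_by(fanout)
        .take(fanout)
        .take_while(|&i| i < peers.len())
        .map(|i| peers[i])
        .collect();
    (neighbors, children)
}

fn main() {
    let peers: Vec<u64> = (0..30).collect();
    let (neighbors, children) = retransmit_peers(2, 0, &peers);
    assert_eq!(neighbors, [0, 1]); // first neighbor is the node itself
    assert_eq!(children, [2, 4]);
}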
Some(r_bank.deref())); + let (neighbors, children) = + cluster_nodes.get_retransmit_peers(packet.meta.seed, DATA_PLANE_FANOUT, slot_leader); // If the node is on the critical path (i.e. the first node in each // neighborhood), then we expect that the packet arrives at tvu socket // as opposed to tvu-forwards. If this is not the case, then the // turbine broadcast/retransmit tree is mismatched across nodes. - if packet.meta.forward == (my_index % DATA_PLANE_FANOUT == 0) { + let anchor_node = neighbors[0].id == my_id; + if packet.meta.forward == anchor_node { + // TODO: Consider forwarding the packet to the root node here. retransmit_tree_mismatch += 1; } - peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len()); - // split off the indexes, we don't need the stakes anymore - let indexes: Vec<_> = shuffled_stakes_and_index - .into_iter() - .map(|(_, index)| index) - .collect(); - debug_assert_eq!(my_id, r_epoch_stakes_cache.peers[indexes[my_index]].id); - - let (neighbors, children) = compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, &indexes); - let neighbors: Vec<_> = neighbors - .into_iter() - .filter_map(|index| { - let peer = &r_epoch_stakes_cache.peers[index]; - if peer.id == my_id { - None - } else { - Some(peer) - } - }) - .collect(); - let children: Vec<_> = children - .into_iter() - .map(|index| &r_epoch_stakes_cache.peers[index]) - .collect(); compute_turbine_peers.stop(); compute_turbine_peers_total += compute_turbine_peers.as_us(); @@ -465,10 +394,27 @@ fn retransmit( .or_default() += 1; let mut retransmit_time = Measure::start("retransmit_to"); - if !packet.meta.forward { - ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?; + // If the node is on the critical path (i.e. the first node in each + // neighborhood), it should send the packet to tvu socket of its + // children and also tvu_forward socket of its neighbors. Otherwise it + // should only forward to tvu_forward socket of its children. + if anchor_node { + // First neighbor is this node itself, so skip it. + ClusterInfo::retransmit_to( + &neighbors[1..], + packet, + sock, + /*forward socket=*/ true, + socket_addr_space, + ); } - ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?; + ClusterInfo::retransmit_to( + &children, + packet, + sock, + !anchor_node, // send to forward socket! 
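The branch above encodes the rewritten fan-out rule: an anchor node (first in its neighborhood; note neighbors[0] is the node itself) sends to its neighbors' tvu-forwards sockets and its children's tvu sockets, while a non-anchor node only forwards to its children's tvu-forwards sockets. As a small sketch of the destination selection:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Socket {
    Tvu,
    TvuForwards,
}

fn destinations(anchor_node: bool, neighbors: &[u64], children: &[u64]) -> Vec<(u64, Socket)> {
    let mut out = Vec::new();
    if anchor_node {
        // First neighbor is this node itself, so skip it (as in the hunk above).
        for &peer in &neighbors[1..] {
            out.push((peer, Socket::TvuForwards));
        }
    }
    // Anchor nodes hit children's tvu sockets; everyone else their forwards sockets.
    let child_socket = if anchor_node { Socket::Tvu } else { Socket::TvuForwards };
    for &child in children {
        out.push((child, child_socket));
    }
    out
}

fn main() {
    let out = destinations(true, &[0, 1], &[2, 4]);
    assert_eq!(out, [(1, Socket::TvuForwards), (2, Socket::Tvu), (4, Socket::Tvu)]);
}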
+ socket_addr_space, + ); retransmit_time.stop(); retransmit_total += retransmit_time.as_us(); } @@ -531,7 +477,7 @@ pub fn retransmitter( let r = r.clone(); let cluster_info = cluster_info.clone(); let stats = stats.clone(); - let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default())); + let cluster_nodes = Arc::default(); let last_peer_update = Arc::new(AtomicU64::new(0)); let shreds_received = shreds_received.clone(); let max_slots = max_slots.clone(); @@ -551,7 +497,7 @@ pub fn retransmitter( &sockets[s], s as u32, &stats, - &epoch_stakes_cache, + &cluster_nodes, &last_peer_update, &shreds_received, &max_slots, @@ -559,8 +505,8 @@ pub fn retransmitter( &rpc_subscriptions, ) { match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, - Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), + Error::RecvTimeout(RecvTimeoutError::Disconnected) => break, + Error::RecvTimeout(RecvTimeoutError::Timeout) => (), _ => { inc_new_counter_error!("streamer-retransmit-error", 1, 1); } @@ -592,7 +538,8 @@ impl RetransmitStage { repair_socket: Arc, verified_receiver: Receiver>, exit: &Arc, - completed_slots_receivers: [CompletedSlotsReceiver; 2], + rpc_completed_slots_receiver: CompletedSlotsReceiver, + cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, epoch_schedule: EpochSchedule, cfg: Option>, shred_version: u16, @@ -618,8 +565,6 @@ impl RetransmitStage { rpc_subscriptions.clone(), ); - let [rpc_completed_slots_receiver, cluster_completed_slots_receiver] = - completed_slots_receivers; let rpc_completed_slots_hdl = RpcCompletedSlotsService::spawn(rpc_completed_slots_receiver, rpc_subscriptions); let cluster_slots_service = ClusterSlotsService::new( @@ -627,7 +572,7 @@ impl RetransmitStage { cluster_slots.clone(), bank_forks.clone(), cluster_info.clone(), - cluster_completed_slots_receiver, + cluster_slots_update_receiver, exit.clone(), ); @@ -700,6 +645,8 @@ mod tests { use solana_ledger::shred::Shred; use solana_net_utils::find_available_port_in_range; use solana_perf::packet::{Packet, Packets}; + use solana_sdk::signature::Keypair; + use solana_streamer::socket::SocketAddrSpace; use std::net::{IpAddr, Ipv4Addr}; #[test] @@ -709,6 +656,7 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let blockstore = Blockstore::open(&ledger_path).unwrap(); let opts = ProcessOptions { + accounts_db_test_hash_calculation: true, full_leader_cache: true, ..ProcessOptions::default() }; @@ -728,9 +676,18 @@ mod tests { .unwrap() .local_addr() .unwrap(); - - let other = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(other); + // This fixes the order of nodes returned by shuffle_peers_and_index, + // and makes turbine retransmit tree deterministic for the purpose of + // the test. 
+ let other = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .find(|pk| me.id < *pk) + .unwrap(); + let other = ContactInfo::new_localhost(&other, 0); + let cluster_info = ClusterInfo::new( + other, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); cluster_info.insert_info(me); let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]); diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index ef0ad1795851e5..65c66fe49aa66c 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -55,6 +55,7 @@ impl RewardsRecorderService { lamports: reward_info.lamports, post_balance: reward_info.post_balance, reward_type: Some(reward_info.reward_type), + commission: reward_info.commission, }) .collect(); diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 03e8d3f91f9012..2a8acf75e80172 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -6,7 +6,11 @@ use crate::{ result::{Error, Result}, }; use bincode::serialize; -use rand::distributions::{Distribution, WeightedIndex}; +use lru::LruCache; +use rand::{ + distributions::{Distribution, WeightedError, WeightedIndex}, + Rng, +}; use solana_gossip::{ cluster_info::{ClusterInfo, ClusterInfoError}, contact_info::ContactInfo, @@ -27,7 +31,7 @@ use solana_sdk::{ }; use solana_streamer::streamer::{PacketReceiver, PacketSender}; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::HashSet, net::SocketAddr, sync::atomic::{AtomicBool, Ordering}, sync::{Arc, RwLock}, @@ -37,6 +41,10 @@ use std::{ /// the number of slots to respond with when responding to `Orphan` requests pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10; +// Number of slots to cache their respective repair peers and sampling weights. +pub(crate) const REPAIR_PEERS_CACHE_CAPACITY: usize = 128; +// Limit cache entries ttl in order to avoid re-using outdated data. +const REPAIR_PEERS_CACHE_TTL: Duration = Duration::from_secs(10); #[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum RepairType { @@ -107,16 +115,40 @@ pub struct ServeRepair { cluster_info: Arc, } -type RepairCache = HashMap, WeightedIndex)>; +// Cache entry for repair peers for a slot. +pub(crate) struct RepairPeers { + asof: Instant, + peers: Vec<(Pubkey, /*ContactInfo.serve_repair:*/ SocketAddr)>, + weighted_index: WeightedIndex, +} -impl ServeRepair { - /// Without a valid keypair gossip will not function. Only useful for tests. 
- pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self { - Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair( - contact_info, - ))) +impl RepairPeers { + fn new(asof: Instant, peers: &[ContactInfo], weights: &[u64]) -> Result { + if peers.is_empty() { + return Err(Error::from(ClusterInfoError::NoPeers)); + } + if peers.len() != weights.len() { + return Err(Error::from(WeightedError::InvalidWeight)); + } + let weighted_index = WeightedIndex::new(weights)?; + let peers = peers + .iter() + .map(|peer| (peer.id, peer.serve_repair)) + .collect(); + Ok(Self { + asof, + peers, + weighted_index, + }) } + fn sample(&self, rng: &mut R) -> (Pubkey, SocketAddr) { + let index = self.weighted_index.sample(rng); + self.peers[index] + } +} + +impl ServeRepair { pub fn new(cluster_info: Arc) -> Self { let (keypair, my_info) = { (cluster_info.keypair.clone(), cluster_info.my_contact_info()) }; Self { @@ -171,7 +203,7 @@ impl ServeRepair { Self::run_window_request( recycler, from, - &from_addr, + from_addr, blockstore, &me.read().unwrap().my_info, *slot, @@ -186,7 +218,7 @@ impl ServeRepair { ( Self::run_highest_window_request( recycler, - &from_addr, + from_addr, blockstore, *slot, *highest_index, @@ -200,7 +232,7 @@ impl ServeRepair { ( Self::run_orphan( recycler, - &from_addr, + from_addr, blockstore, *slot, MAX_ORPHAN_REPAIR_RESPONSES, @@ -256,7 +288,7 @@ impl ServeRepair { let mut time = Measure::start("repair::handle_packets"); for reqs in reqs_v { - Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats); + Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats); } time.stop(); if total_packets >= *max_packets { @@ -323,7 +355,7 @@ impl ServeRepair { &mut max_packets, ); match result { - Err(Error::RecvTimeoutError(_)) | Ok(_) => {} + Err(Error::RecvTimeout(_)) | Ok(_) => {} Err(err) => info!("repair listener error: {:?}", err), }; if exit.load(Ordering::Relaxed) { @@ -396,11 +428,11 @@ impl ServeRepair { Ok(out) } - pub fn repair_request( + pub(crate) fn repair_request( &self, cluster_slots: &ClusterSlots, repair_request: RepairType, - cache: &mut RepairCache, + peers_cache: &mut LruCache, repair_stats: &mut RepairStats, repair_validators: &Option>, outstanding_requests: &mut OutstandingRepairs, @@ -408,25 +440,21 @@ impl ServeRepair { // find a peer that appears to be accepting replication and has the desired slot, as indicated // by a valid tvu port location let slot = repair_request.slot(); - let (repair_peers, weighted_index) = match cache.entry(slot) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - let repair_peers = self.repair_peers(&repair_validators, slot); - if repair_peers.is_empty() { - return Err(Error::from(ClusterInfoError::NoPeers)); - } + let repair_peers = match peers_cache.get(&slot) { + Some(entry) if entry.asof.elapsed() < REPAIR_PEERS_CACHE_TTL => entry, + _ => { + peers_cache.pop(&slot); + let repair_peers = self.repair_peers(repair_validators, slot); let weights = cluster_slots.compute_weights(slot, &repair_peers); - debug_assert_eq!(weights.len(), repair_peers.len()); - let weighted_index = WeightedIndex::new(weights)?; - entry.insert((repair_peers, weighted_index)) + let repair_peers = RepairPeers::new(Instant::now(), &repair_peers, &weights)?; + peers_cache.put(slot, repair_peers); + peers_cache.get(&slot).unwrap() } }; - let n = weighted_index.sample(&mut rand::thread_rng()); - let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port + let 
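The repair_request rewrite above swaps the unbounded HashMap cache for a capacity-bounded LRU whose entries also expire after REPAIR_PEERS_CACHE_TTL, so sampling weights cannot go stale. A std-only sketch of the lookup-or-refresh policy (the real code uses lru::LruCache and samples peers through a WeightedIndex over stakes):

use std::collections::HashMap;
use std::time::{Duration, Instant};

const REPAIR_PEERS_CACHE_TTL: Duration = Duration::from_secs(10);

struct RepairPeers {
    asof: Instant,
    peers: Vec<(u64 /*pubkey*/, u16 /*serve_repair port*/)>,
}

fn get_repair_peers<'a>(
    cache: &'a mut HashMap<u64, RepairPeers>,
    slot: u64,
    refresh: impl FnOnce() -> Vec<(u64, u16)>,
) -> &'a RepairPeers {
    // Treat the entry as stale if it is missing or older than the TTL, then rebuild it.
    let stale = cache
        .get(&slot)
        .map_or(true, |entry| entry.asof.elapsed() >= REPAIR_PEERS_CACHE_TTL);
    if stale {
        cache.insert(
            slot,
            RepairPeers {
                asof: Instant::now(),
                peers: refresh(),
            },
        );
    }
    cache.get(&slot).unwrap()
}

fn main() {
    let mut cache = HashMap::new();
    let peers = get_repair_peers(&mut cache, 42, || vec![(1, 8000), (2, 8001)]);
    assert_eq!(peers.peers.len(), 2);
}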
(peer, addr) = repair_peers.sample(&mut rand::thread_rng()); let nonce = outstanding_requests.add_request(repair_request, solana_sdk::timing::timestamp()); - let repair_peer_id = repair_peers[n].id; - let out = self.map_repair_request(&repair_request, &repair_peer_id, repair_stats, nonce)?; + let out = self.map_repair_request(&repair_request, &peer, repair_stats, nonce)?; Ok((addr, out)) } @@ -515,7 +543,7 @@ impl ServeRepair { if let Some(packet) = packet { inc_new_counter_debug!("serve_repair-window-request-ledger", 1); - return Some(Packets::new_with_recycler_data( + return Some(Packets::new_unpinned_with_recycler_data( recycler, "run_window_request", vec![packet], @@ -555,7 +583,7 @@ impl ServeRepair { from_addr, nonce, )?; - return Some(Packets::new_with_recycler_data( + return Some(Packets::new_unpinned_with_recycler_data( recycler, "run_highest_window_request", vec![packet], @@ -572,7 +600,7 @@ impl ServeRepair { max_responses: usize, nonce: Nonce, ) -> Option { - let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan"); + let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan"); if let Some(blockstore) = blockstore { // Try to find the next "n" parent slots of the input slot while let Ok(Some(meta)) = blockstore.meta(slot) { @@ -617,7 +645,8 @@ mod tests { shred::{max_ticks_per_n_shreds, Shred}, }; use solana_perf::packet::Packet; - use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp}; + use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp}; + use solana_streamer::socket::SocketAddrSpace; #[test] fn test_run_highest_window_request() { @@ -762,22 +791,30 @@ mod tests { Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } + fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo { + ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ) + } + #[test] fn window_index_request() { let cluster_slots = ClusterSlots::default(); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me)); + let cluster_info = Arc::new(new_test_cluster_info(me)); let serve_repair = ServeRepair::new(cluster_info.clone()); let mut outstanding_requests = OutstandingRepairs::default(); let rv = serve_repair.repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &None, &mut outstanding_requests, ); - assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers))); + assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers))); let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243); let nxt = ContactInfo { @@ -800,7 +837,7 @@ mod tests { .repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &None, &mut outstanding_requests, @@ -834,7 +871,7 @@ mod tests { .repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &None, &mut outstanding_requests, @@ -993,7 +1030,7 @@ mod tests { fn test_repair_with_repair_validators() { let cluster_slots = ClusterSlots::default(); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone())); + let cluster_info = 
Arc::new(new_test_cluster_info(me.clone())); // Insert two peers on the network let contact_info2 = @@ -1015,7 +1052,7 @@ mod tests { .repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &trusted_validators, &mut OutstandingRepairs::default(), @@ -1032,7 +1069,7 @@ mod tests { .repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &trusted_validators, &mut OutstandingRepairs::default(), @@ -1053,7 +1090,7 @@ mod tests { .repair_request( &cluster_slots, RepairType::Shred(0, 0), - &mut HashMap::new(), + &mut LruCache::new(100), &mut RepairStats::default(), &None, &mut OutstandingRepairs::default(), diff --git a/core/src/serve_repair_service.rs b/core/src/serve_repair_service.rs index f2ef152e9c263e..70ce3f2e671502 100644 --- a/core/src/serve_repair_service.rs +++ b/core/src/serve_repair_service.rs @@ -1,7 +1,7 @@ use crate::serve_repair::ServeRepair; use solana_ledger::blockstore::Blockstore; use solana_perf::recycler::Recycler; -use solana_streamer::streamer; +use solana_streamer::{socket::SocketAddrSpace, streamer}; use std::net::UdpSocket; use std::sync::atomic::AtomicBool; use std::sync::mpsc::channel; @@ -17,6 +17,7 @@ impl ServeRepairService { serve_repair: &Arc>, blockstore: Option>, serve_repair_socket: UdpSocket, + socket_addr_space: SocketAddrSpace, exit: &Arc, ) -> Self { let (request_sender, request_receiver) = channel(); @@ -28,15 +29,20 @@ impl ServeRepairService { ); let t_receiver = streamer::receiver( serve_repair_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "serve_repair_receiver", 1, + false, ); let (response_sender, response_receiver) = channel(); - let t_responder = - streamer::responder("serve-repairs", serve_repair_socket, response_receiver); + let t_responder = streamer::responder( + "serve-repairs", + serve_repair_socket, + response_receiver, + socket_addr_space, + ); let t_listen = ServeRepair::listen( serve_repair.clone(), blockstore, diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index b7946b54d16a5b..50a53160743727 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -145,11 +145,12 @@ impl ShredFetchStage { .map(|s| { streamer::receiver( s, - &exit, + exit, packet_sender.clone(), recycler.clone(), "packet_modifier", 1, + true, ) }) .collect(); @@ -173,7 +174,7 @@ impl ShredFetchStage { let (mut tvu_threads, tvu_filter) = Self::packet_modifier( sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -183,7 +184,7 @@ impl ShredFetchStage { let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier( forward_sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -193,7 +194,7 @@ impl ShredFetchStage { let (repair_receiver, repair_handler) = Self::packet_modifier( vec![repair_socket], - &exit, + exit, sender.clone(), recycler, bank_forks, diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index ff153b0bbe445a..91354a843e3af3 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -24,10 +24,10 @@ const RECV_BATCH_MAX_GPU: usize = 5_000; #[derive(Error, Debug)] pub enum SigVerifyServiceError { #[error("send packets batch error")] - SendError(#[from] SendError>), + Send(#[from] SendError>), #[error("streamer error")] - StreamerError(#[from] StreamerError), + Streamer(#[from] StreamerError), } type Result = 
std::result::Result; @@ -126,13 +126,13 @@ impl SigVerifyStage { .spawn(move || loop { if let Err(e) = Self::verifier(&packet_receiver, &verified_sender, id, &verifier) { match e { - SigVerifyServiceError::StreamerError(StreamerError::RecvTimeoutError( + SigVerifyServiceError::Streamer(StreamerError::RecvTimeout( RecvTimeoutError::Disconnected, )) => break, - SigVerifyServiceError::StreamerError(StreamerError::RecvTimeoutError( + SigVerifyServiceError::Streamer(StreamerError::RecvTimeout( RecvTimeoutError::Timeout, )) => (), - SigVerifyServiceError::SendError(_) => { + SigVerifyServiceError::Send(_) => { break; } _ => error!("{:?}", e), diff --git a/core/src/test_validator.rs b/core/src/test_validator.rs index 9c6ec773e56ffb..86fcfd738e1b7e 100644 --- a/core/src/test_validator.rs +++ b/core/src/test_validator.rs @@ -1,12 +1,10 @@ use { - crate::{ - rpc::JsonRpcConfig, - validator::{Validator, ValidatorConfig, ValidatorExit, ValidatorStartProgress}, - }, + crate::validator::{Validator, ValidatorConfig, ValidatorStartProgress}, solana_client::rpc_client::RpcClient, solana_gossip::{cluster_info::Node, gossip_service::discover_cluster, socketaddr}, solana_ledger::{blockstore::create_new_ledger, create_new_tmp_ledger}, solana_net_utils::PortRange, + solana_rpc::rpc::JsonRpcConfig, solana_runtime::{ bank_forks::{ArchiveFormat, SnapshotConfig, SnapshotVersion}, genesis_utils::create_genesis_config_with_leader_ex, @@ -18,6 +16,7 @@ use { clock::{Slot, DEFAULT_MS_PER_SLOT}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, + exit::Exit, fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, native_token::sol_to_lamports, @@ -25,6 +24,7 @@ use { rent::Rent, signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, }, + solana_streamer::socket::SocketAddrSpace, std::{ collections::HashMap, fs::remove_dir_all, @@ -70,7 +70,7 @@ impl Default for TestValidatorNodeConfig { pub struct TestValidatorGenesis { fee_rate_governor: FeeRateGovernor, ledger_path: Option, - rent: Rent, + pub rent: Rent, rpc_config: JsonRpcConfig, rpc_ports: Option<(u16, u16)>, // (JsonRpc, JsonRpcPubSub), None == random ports warp_slot: Option, @@ -79,7 +79,7 @@ pub struct TestValidatorGenesis { programs: Vec, epoch_schedule: Option, node_config: TestValidatorNodeConfig, - pub validator_exit: Arc>, + pub validator_exit: Arc>, pub start_progress: Arc>, pub authorized_voter_keypairs: Arc>>>, pub max_ledger_shreds: Option, @@ -264,8 +264,9 @@ impl TestValidatorGenesis { pub fn start_with_mint_address( &self, mint_address: Pubkey, + socket_addr_space: SocketAddrSpace, ) -> Result> { - TestValidator::start(mint_address, self) + TestValidator::start(mint_address, self, socket_addr_space) } /// Start a test validator @@ -275,8 +276,21 @@ impl TestValidatorGenesis { /// /// This function panics on initialization failure. pub fn start(&self) -> (TestValidator, Keypair) { + self.start_with_socket_addr_space(SocketAddrSpace::new(/*allow_private_addr=*/ true)) + } + + /// Start a test validator with the given `SocketAddrSpace` + /// + /// Returns a new `TestValidator` as well as the keypair for the mint account that will receive tokens + /// created at genesis. + /// + /// This function panics on initialization failure. 
+ pub fn start_with_socket_addr_space( + &self, + socket_addr_space: SocketAddrSpace, + ) -> (TestValidator, Keypair) { let mint_keypair = Keypair::new(); - TestValidator::start(mint_keypair.pubkey(), self) + TestValidator::start(mint_keypair.pubkey(), self, socket_addr_space) .map(|test_validator| (test_validator, mint_keypair)) .expect("Test validator failed to start") } @@ -298,7 +312,11 @@ impl TestValidator { /// Faucet optional. /// /// This function panics on initialization failure. - pub fn with_no_fees(mint_address: Pubkey, faucet_addr: Option) -> Self { + pub fn with_no_fees( + mint_address: Pubkey, + faucet_addr: Option, + socket_addr_space: SocketAddrSpace, + ) -> Self { TestValidatorGenesis::default() .fee_rate_governor(FeeRateGovernor::new(0, 0)) .rent(Rent { @@ -307,7 +325,7 @@ impl TestValidator { ..Rent::default() }) .faucet_addr(faucet_addr) - .start_with_mint_address(mint_address) + .start_with_mint_address(mint_address, socket_addr_space) .expect("validator start failed") } @@ -319,6 +337,7 @@ impl TestValidator { mint_address: Pubkey, target_lamports_per_signature: u64, faucet_addr: Option, + socket_addr_space: SocketAddrSpace, ) -> Self { TestValidatorGenesis::default() .fee_rate_governor(FeeRateGovernor::new(target_lamports_per_signature, 0)) @@ -328,7 +347,7 @@ impl TestValidator { ..Rent::default() }) .faucet_addr(faucet_addr) - .start_with_mint_address(mint_address) + .start_with_mint_address(mint_address, socket_addr_space) .expect("validator start failed") } @@ -431,6 +450,7 @@ impl TestValidator { fn start( mint_address: Pubkey, config: &TestValidatorGenesis, + socket_addr_space: SocketAddrSpace, ) -> Result> { let preserve_ledger = config.ledger_path.is_some(); let ledger_path = TestValidator::initialize_ledger(mint_address, config)?; @@ -514,11 +534,12 @@ impl TestValidator { &validator_config, true, // should_check_duplicate_instance config.start_progress.clone(), + socket_addr_space, )); // Needed to avoid panics in `solana-responder-gossip` in tests that create a number of // test validators concurrently... 
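SocketAddrSpace, threaded through the TestValidator constructors and discover_cluster calls above, gates which peer addresses a node will accept. Only the Unspecified spelling appears in this patch; the sketch below invents a hypothetical PublicOnly variant to show the kind of check the parameter enables:

use std::net::{IpAddr, Ipv4Addr};

enum SocketAddrSpace {
    Unspecified, // accept anything, e.g. in tests and local clusters
    PublicOnly,  // hypothetical: reject private/loopback/unspecified ranges
}

impl SocketAddrSpace {
    fn check(&self, ip: &IpAddr) -> bool {
        match self {
            SocketAddrSpace::Unspecified => true,
            SocketAddrSpace::PublicOnly => match ip {
                IpAddr::V4(v4) => {
                    !(v4.is_private() || v4.is_loopback() || v4.is_unspecified())
                }
                IpAddr::V6(v6) => !v6.is_loopback(),
            },
        }
    }
}

fn main() {
    // Unspecified admits loopback, which is what the tests above rely on.
    let space = SocketAddrSpace::Unspecified;
    assert!(space.check(&IpAddr::V4(Ipv4Addr::LOCALHOST)));
}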
- discover_cluster(&gossip, 1) + discover_cluster(&gossip, 1, socket_addr_space) .map_err(|err| format!("TestValidator startup failed: {:?}", err))?; // This is a hack to delay until the fees are non-zero for test consistency diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 23ddea5490c6e6..dc97a0f595e8c6 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -9,13 +9,13 @@ use crate::{ VerifiedVoteSender, VoteTracker, }, fetch_stage::FetchStage, - poh_recorder::{PohRecorder, WorkingBankEntry}, sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage, }; use crossbeam_channel::unbounded; use solana_gossip::cluster_info::ClusterInfo; use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender}; +use solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}; use solana_rpc::{ optimistically_confirmed_bank_tracker::BankNotificationSender, rpc_subscriptions::RpcSubscriptions, @@ -74,9 +74,9 @@ impl Tpu { let fetch_stage = FetchStage::new_with_sender( transactions_sockets, tpu_forwards_sockets, - &exit, + exit, &packet_sender, - &poh_recorder, + poh_recorder, tpu_coalesce_ms, ); let (verified_sender, verified_receiver) = unbounded(); @@ -88,10 +88,10 @@ impl Tpu { let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( - &exit, + exit, cluster_info.clone(), verified_vote_packets_sender, - &poh_recorder, + poh_recorder, vote_tracker, bank_forks, subscriptions.clone(), @@ -104,7 +104,7 @@ impl Tpu { ); let banking_stage = BankingStage::new( - &cluster_info, + cluster_info, poh_recorder, verified_receiver, verified_vote_packets_receiver, @@ -117,7 +117,7 @@ impl Tpu { cluster_info.clone(), entry_receiver, retransmit_slots_receiver, - &exit, + exit, blockstore, shred_version, ); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 8f1bd381fd6b58..718095d10f9f26 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -13,7 +13,6 @@ use crate::{ completed_data_sets_service::CompletedDataSetsSender, consensus::Tower, ledger_cleanup_service::LedgerCleanupService, - poh_recorder::PohRecorder, replay_stage::{ReplayStage, ReplayStageConfig}, retransmit_stage::RetransmitStage, rewards_recorder_service::RewardsRecorderSender, @@ -21,6 +20,7 @@ use crate::{ sigverify_shreds::ShredSigVerifier, sigverify_stage::SigVerifyStage, snapshot_packager_service::PendingSnapshotPackage, + voting_service::VotingService, }; use crossbeam_channel::unbounded; use solana_gossip::cluster_info::ClusterInfo; @@ -29,6 +29,7 @@ use solana_ledger::{ blockstore_processor::TransactionStatusSender, leader_schedule_cache::LeaderScheduleCache, }; +use solana_poh::poh_recorder::PohRecorder; use solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::BankNotificationSender, rpc_subscriptions::RpcSubscriptions, @@ -37,6 +38,7 @@ use solana_runtime::{ accounts_background_service::{ AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, SnapshotRequestHandler, }, + accounts_db::AccountShrinkThreshold, bank_forks::{BankForks, SnapshotConfig}, commitment::BlockCommitmentCache, vote_sender_types::ReplayVoteSender, @@ -65,6 +67,7 @@ pub struct Tvu { ledger_cleanup_service: Option, accounts_background_service: AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, + voting_service: VotingService, } pub struct Sockets { @@ -88,6 +91,7 @@ pub struct TvuConfig { pub rocksdb_compaction_interval: Option, pub rocksdb_max_compaction_jitter: Option, pub 
wait_for_vote_to_start_leader: bool, + pub accounts_shrink_ratio: AccountShrinkThreshold, } impl Tvu { @@ -111,7 +115,7 @@ impl Tvu { tower: Tower, leader_schedule_cache: &Arc, exit: &Arc, - completed_slots_receivers: [CompletedSlotsReceiver; 2], + completed_slots_receiver: CompletedSlotsReceiver, block_commitment_cache: Arc>, cfg: Option>, transaction_status_sender: Option, @@ -150,7 +154,7 @@ impl Tvu { repair_socket.clone(), &fetch_sender, Some(bank_forks.clone()), - &exit, + exit, ); let (verified_sender, verified_receiver) = unbounded(); @@ -165,16 +169,18 @@ impl Tvu { let compaction_interval = tvu_config.rocksdb_compaction_interval; let max_compaction_jitter = tvu_config.rocksdb_max_compaction_jitter; let (duplicate_slots_sender, duplicate_slots_receiver) = unbounded(); + let (cluster_slots_update_sender, cluster_slots_update_receiver) = unbounded(); let retransmit_stage = RetransmitStage::new( bank_forks.clone(), leader_schedule_cache, blockstore.clone(), - &cluster_info, + cluster_info, Arc::new(retransmit_sockets), repair_socket, verified_receiver, &exit, - completed_slots_receivers, + completed_slots_receiver, + cluster_slots_update_receiver, *bank_forks.read().unwrap().working_bank().epoch_schedule(), cfg, tvu_config.shred_version, @@ -208,7 +214,7 @@ impl Tvu { accounts_hash_receiver, pending_snapshot_package, exit, - &cluster_info, + cluster_info, tvu_config.trusted_validators.clone(), tvu_config.halt_on_trusted_validators_accounts_hash_mismatch, tvu_config.accounts_hash_fault_injection_slots, @@ -272,6 +278,10 @@ impl Tvu { wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader, }; + let (voting_sender, voting_receiver) = channel(); + let voting_service = + VotingService::new(voting_receiver, cluster_info.clone(), poh_recorder.clone()); + let replay_stage = ReplayStage::new( replay_stage_config, blockstore.clone(), @@ -288,6 +298,8 @@ impl Tvu { replay_vote_sender, gossip_confirmed_slots_receiver, gossip_verified_vote_hash_receiver, + cluster_slots_update_sender, + voting_sender, ); let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { @@ -295,7 +307,7 @@ impl Tvu { ledger_cleanup_slot_receiver, blockstore.clone(), max_ledger_shreds, - &exit, + exit, compaction_interval, max_compaction_jitter, ) @@ -303,7 +315,7 @@ impl Tvu { let accounts_background_service = AccountsBackgroundService::new( bank_forks.clone(), - &exit, + exit, accounts_background_request_handler, tvu_config.accounts_db_caching_enabled, tvu_config.test_hash_calculation, @@ -318,6 +330,7 @@ impl Tvu { ledger_cleanup_service, accounts_background_service, accounts_hash_verifier, + voting_service, } } @@ -331,6 +344,7 @@ impl Tvu { self.accounts_background_service.join()?; self.replay_stage.join()?; self.accounts_hash_verifier.join()?; + self.voting_service.join()?; Ok(()) } } @@ -338,7 +352,6 @@ impl Tvu { #[cfg(test)] pub mod tests { use super::*; - use crate::banking_stage::create_test_recorder; use serial_test::serial; use solana_gossip::cluster_info::{ClusterInfo, Node}; use solana_ledger::{ @@ -346,8 +359,11 @@ pub mod tests { create_new_tmp_ledger, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }; + use solana_poh::poh_recorder::create_test_recorder; use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank; use solana_runtime::bank::Bank; + use solana_sdk::signature::{Keypair, Signer}; + use solana_streamer::socket::SocketAddrSpace; use std::sync::atomic::Ordering; #[ignore] @@ -365,7 +381,11 @@ pub mod tests { let 
bank_forks = BankForks::new(Bank::new(&genesis_config)); //start cluster_info1 - let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone()); + let cluster_info1 = ClusterInfo::new( + target1.info.clone(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); cluster_info1.insert_info(leader.info); let cref1 = Arc::new(cluster_info1); @@ -373,7 +393,7 @@ pub mod tests { let BlockstoreSignals { blockstore, ledger_signal_receiver, - completed_slots_receivers, + completed_slots_receiver, .. } = Blockstore::open_with_signal(&blockstore_path, None, true) .expect("Expected to successfully open ledger"); @@ -417,7 +437,7 @@ pub mod tests { tower, &leader_schedule_cache, &exit, - completed_slots_receivers, + completed_slots_receiver, block_commitment_cache, None, None, diff --git a/core/src/unfrozen_gossip_verified_vote_hashes.rs b/core/src/unfrozen_gossip_verified_vote_hashes.rs index 4640e01e72a2c4..30d944754c889a 100644 --- a/core/src/unfrozen_gossip_verified_vote_hashes.rs +++ b/core/src/unfrozen_gossip_verified_vote_hashes.rs @@ -116,7 +116,7 @@ mod tests { if *unfrozen_vote_slot >= frozen_vote_slot { let vote_hashes_map = unfrozen_gossip_verified_vote_hashes .votes_per_slot - .get(&unfrozen_vote_slot) + .get(unfrozen_vote_slot) .unwrap(); assert_eq!(vote_hashes_map.len(), num_duplicate_hashes); for pubkey_votes in vote_hashes_map.values() { diff --git a/core/src/validator.rs b/core/src/validator.rs index df80c95d1929d9..da83da7b472a56 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -6,18 +6,13 @@ use crate::{ cluster_info_vote_listener::VoteTracker, completed_data_sets_service::CompletedDataSetsService, consensus::{reconcile_blockstore_roots_with_tower, Tower}, - poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, - poh_service::{self, PohService}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, - rpc::JsonRpcConfig, - rpc_service::JsonRpcService, sample_performance_service::SamplePerformanceService, serve_repair::ServeRepair, serve_repair_service::ServeRepairService, sigverify, snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService}, tpu::{Tpu, DEFAULT_TPU_COALESCE_MS}, - transaction_status_service::TransactionStatusService, tvu::{Sockets, Tvu, TvuConfig}, }; use crossbeam_channel::{bounded, unbounded}; @@ -41,15 +36,23 @@ use solana_ledger::{ }; use solana_measure::measure::Measure; use solana_metrics::datapoint_info; +use solana_poh::{ + poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, + poh_service::{self, PohService}, +}; use solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker, }, + rpc::JsonRpcConfig, rpc_pubsub_service::{PubSubConfig, PubSubService}, + rpc_service::JsonRpcService, rpc_subscriptions::RpcSubscriptions, + transaction_status_service::TransactionStatusService, }; use solana_runtime::{ + accounts_db::AccountShrinkThreshold, accounts_index::AccountSecondaryIndexes, bank::Bank, bank_forks::{BankForks, SnapshotConfig}, @@ -59,6 +62,7 @@ use solana_runtime::{ use solana_sdk::{ clock::Slot, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, + exit::Exit, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, @@ -66,11 +70,10 @@ use solana_sdk::{ signature::{Keypair, Signer}, timing::timestamp, }; +use solana_streamer::socket::SocketAddrSpace; use solana_vote_program::vote_state::VoteState; -use std::time::Instant; use std::{ collections::HashSet, - fmt, 
net::SocketAddr, ops::Deref, path::{Path, PathBuf}, @@ -78,7 +81,7 @@ use std::{ sync::mpsc::Receiver, sync::{Arc, Mutex, RwLock}, thread::{sleep, Builder}, - time::Duration, + time::{Duration, Instant}, }; const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000; @@ -117,7 +120,6 @@ pub struct ValidatorConfig { pub max_genesis_archive_unpacked_size: u64, pub wal_recovery_mode: Option, pub poh_verify: bool, // Perform PoH verification during blockstore processing at boot - pub cuda: bool, pub require_tower: bool, pub tower_path: Option, pub debug_keys: Option>>, @@ -133,10 +135,12 @@ pub struct ValidatorConfig { pub accounts_db_caching_enabled: bool, pub warp_slot: Option, pub accounts_db_test_hash_calculation: bool, + pub accounts_db_skip_shrink: bool, pub accounts_db_use_index_hash_calculation: bool, pub tpu_coalesce_ms: u64, - pub validator_exit: Arc>, + pub validator_exit: Arc>, pub no_wait_for_vote_to_start_leader: bool, + pub accounts_shrink_ratio: AccountShrinkThreshold, } impl Default for ValidatorConfig { @@ -173,7 +177,6 @@ impl Default for ValidatorConfig { max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, wal_recovery_mode: None, poh_verify: true, - cuda: false, require_tower: false, tower_path: None, debug_keys: None, @@ -189,10 +192,12 @@ impl Default for ValidatorConfig { accounts_db_caching_enabled: false, warp_slot: None, accounts_db_test_hash_calculation: false, + accounts_db_skip_shrink: false, accounts_db_use_index_hash_calculation: true, tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS, - validator_exit: Arc::new(RwLock::new(ValidatorExit::default())), + validator_exit: Arc::new(RwLock::new(Exit::default())), no_wait_for_vote_to_start_leader: true, + accounts_shrink_ratio: AccountShrinkThreshold::default(), } } } @@ -223,35 +228,6 @@ impl Default for ValidatorStartProgress { } } -#[derive(Default)] -pub struct ValidatorExit { - exited: bool, - exits: Vec>, -} - -impl ValidatorExit { - pub fn register_exit(&mut self, exit: Box) { - if self.exited { - exit(); - } else { - self.exits.push(exit); - } - } - - pub fn exit(&mut self) { - self.exited = true; - for exit in self.exits.drain(..) { - exit(); - } - } -} - -impl fmt::Debug for ValidatorExit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} exits", self.exits.len()) - } -} - #[derive(Default)] struct TransactionHistoryServices { transaction_status_sender: Option, @@ -264,7 +240,7 @@ struct TransactionHistoryServices { } pub struct Validator { - validator_exit: Arc>, + validator_exit: Arc>, json_rpc_service: Option, pubsub_service: Option, optimistically_confirmed_bank_tracker: Option, @@ -298,6 +274,7 @@ pub(crate) fn abort() -> !
{ } impl Validator { + #[allow(clippy::too_many_arguments)] pub fn new( mut node: Node, identity_keypair: &Arc, @@ -308,6 +285,7 @@ impl Validator { config: &ValidatorConfig, should_check_duplicate_instance: bool, start_progress: Arc>, + socket_addr_space: SocketAddrSpace, ) -> Self { let id = identity_keypair.pubkey(); assert_eq!(id, node.info.id); @@ -324,8 +302,6 @@ impl Validator { } } - report_target_features(); - for cluster_entrypoint in &cluster_entrypoints { info!("entrypoint: {:?}", cluster_entrypoint); } @@ -387,7 +363,7 @@ impl Validator { bank_forks, blockstore, ledger_signal_receiver, - completed_slots_receivers, + completed_slots_receiver, leader_schedule_cache, snapshot_hash, TransactionHistoryServices { @@ -409,6 +385,7 @@ impl Validator { &exit, config.enforce_ulimit_nofile, &start_progress, + config.no_poh_speed_test, ); *start_progress.write().unwrap() = ValidatorStartProgress::StartingServices; @@ -457,7 +434,11 @@ impl Validator { } } - let mut cluster_info = ClusterInfo::new(node.info.clone(), identity_keypair.clone()); + let mut cluster_info = ClusterInfo::new( + node.info.clone(), + identity_keypair.clone(), + socket_addr_space, + ); cluster_info.set_contact_debug_interval(config.contact_debug_interval); cluster_info.set_entrypoints(cluster_entrypoints); cluster_info.restore_contact_info(ledger_path, config.contact_save_interval); @@ -530,10 +511,16 @@ impl Validator { optimistically_confirmed_bank_tracker, bank_notification_sender, ) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs { - if ContactInfo::is_valid_address(&node.info.rpc) { - assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub)); + if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) { + assert!(ContactInfo::is_valid_address( + &node.info.rpc_pubsub, + &socket_addr_space + )); } else { - assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub)); + assert!(!ContactInfo::is_valid_address( + &node.info.rpc_pubsub, + &socket_addr_space + )); } let (bank_notification_sender, bank_notification_receiver) = unbounded(); ( @@ -594,9 +581,13 @@ impl Validator { *start_progress.write().unwrap() = ValidatorStartProgress::Halted; std::thread::park(); } - - let ip_echo_server = node.sockets.ip_echo.map(solana_net_utils::ip_echo_server); - + let ip_echo_server = match node.sockets.ip_echo { + None => None, + Some(tcp_listener) => Some(solana_net_utils::ip_echo_server( + tcp_listener, + Some(node.info.shred_version), + )), + }; let gossip_service = GossipService::new( &cluster_info, Some(bank_forks.clone()), @@ -610,6 +601,7 @@ impl Validator { &serve_repair, Some(blockstore.clone()), node.sockets.serve_repair, + socket_addr_space, &exit, ); @@ -640,10 +632,6 @@ impl Validator { (None, None) }; - if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None); - } - let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority( config, &bank, @@ -719,7 +707,7 @@ impl Validator { tower, &leader_schedule_cache, &exit, - completed_slots_receivers, + completed_slots_receiver, block_commitment_cache, config.enable_partition.clone(), transaction_status_sender.clone(), @@ -748,6 +736,7 @@ impl Validator { rocksdb_compaction_interval: config.rocksdb_compaction_interval, rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval, wait_for_vote_to_start_leader, + accounts_shrink_ratio: config.accounts_shrink_ratio, }, &max_slots, ); @@ -986,7 +975,7 @@ fn post_process_restored_tower( }) .unwrap_or_else(|err| { let voting_has_been_active = - 
active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account); + active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account); if !err.is_file_missing() { datapoint_error!( "tower_error", @@ -1019,10 +1008,10 @@ fn post_process_restored_tower( } Tower::new_from_bankforks( - &bank_forks, + bank_forks, tower_path, - &validator_identity, - &vote_account, + validator_identity, + vote_account, ) }) } @@ -1037,12 +1026,13 @@ fn new_banks_from_ledger( exit: &Arc, enforce_ulimit_nofile: bool, start_progress: &Arc>, + no_poh_speed_test: bool, ) -> ( GenesisConfig, BankForks, Arc, Receiver, - [CompletedSlotsReceiver; 2], + CompletedSlotsReceiver, LeaderScheduleCache, Option<(Slot, Hash)>, TransactionHistoryServices, @@ -1070,10 +1060,14 @@ fn new_banks_from_ledger( } } + if !no_poh_speed_test { + check_poh_speed(&genesis_config, None); + } + let BlockstoreSignals { mut blockstore, ledger_signal_receiver, - completed_slots_receivers, + completed_slots_receiver, .. } = Blockstore::open_with_signal( ledger_path, @@ -1085,9 +1079,9 @@ fn new_banks_from_ledger( let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path); - let restored_tower = Tower::restore(tower_path, &validator_identity); + let restored_tower = Tower::restore(tower_path, validator_identity); if let Ok(tower) = &restored_tower { - reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| { + reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| { error!("Failed to reconcile blockstore with tower: {:?}", err); abort() }); @@ -1119,6 +1113,9 @@ fn new_banks_from_ledger( debug_keys: config.debug_keys.clone(), account_indexes: config.account_indexes.clone(), accounts_db_caching_enabled: config.accounts_db_caching_enabled, + shrink_ratio: config.accounts_shrink_ratio, + accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation, + accounts_db_skip_shrink: config.accounts_db_skip_shrink, ..blockstore_processor::ProcessOptions::default() }; @@ -1188,7 +1185,7 @@ fn new_banks_from_ledger( None, &snapshot_config.snapshot_package_output_path, snapshot_config.archive_format, - Some(&bank_forks.root_bank().get_thread_pool()), + Some(bank_forks.root_bank().get_thread_pool()), snapshot_config.maximum_snapshots_to_retain, ) .unwrap_or_else(|err| { @@ -1200,9 +1197,9 @@ fn new_banks_from_ledger( let tower = post_process_restored_tower( restored_tower, - &validator_identity, - &vote_account, - &config, + validator_identity, + vote_account, + config, tower_path, &bank_forks, ); @@ -1225,7 +1222,7 @@ fn new_banks_from_ledger( bank_forks, blockstore, ledger_signal_receiver, - completed_slots_receivers, + completed_slots_receiver, leader_schedule_cache, snapshot_hash, transaction_history_services, @@ -1407,7 +1404,7 @@ fn wait_for_supermajority( ); } - let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0); + let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0); if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT { break; @@ -1422,7 +1419,26 @@ fn wait_for_supermajority( Ok(true) } -fn report_target_features() { +fn is_rosetta_emulated() -> bool { + #[cfg(target_os = "macos")] + { + use std::str::FromStr; + std::process::Command::new("sysctl") + .args(&["-in", "sysctl.proc_translated"]) + .output() + .map_err(|_| ()) + .and_then(|output| String::from_utf8(output.stdout).map_err(|_| ())) + .and_then(|stdout| u8::from_str(stdout.trim()).map_err(|_| ())) + .map(|enabled| 
enabled == 1) + .unwrap_or(false) + } + #[cfg(not(target_os = "macos"))] + { + false + } +} + +pub fn report_target_features() { warn!( "CUDA is {}abled", if solana_perf::perf_libs::api().is_some() { @@ -1432,35 +1448,47 @@ fn report_target_features() { } ); - // We exclude Mac OS here to be compatible with computers that have Mac M1 chips. - // For these computers, one must install rust/cargo/brew etc. using Rosetta 2, - // which allows them to run software targeted for x86_64 on an aarch64. - // Hence the code below will run on these machines (target_arch="x86_64") - // if we don't exclude with target_os="macos". - // - // It's going to require more more work to get Solana building - // on Mac M1's without Rosetta, - // and when that happens we should remove this - // (the feature flag for code targeting that is target_arch="aarch64") - #[cfg(all( - any(target_arch = "x86", target_arch = "x86_64"), - not(target_os = "macos") - ))] - { - // Validator binaries built on a machine with AVX support will generate invalid opcodes - // when run on machines without AVX causing a non-obvious process abort. Instead detect - // the mismatch and error cleanly. - if is_x86_feature_detected!("avx") { - info!("AVX detected"); - } else { - error!( - "Your machine does not have AVX support, please rebuild from source on your machine" - ); - abort(); - } + if !is_rosetta_emulated() { + unsafe { check_avx() }; + unsafe { check_avx2() }; + } +} + +// Validator binaries built on a machine with AVX support will generate invalid opcodes +// when run on machines without AVX causing a non-obvious process abort. Instead detect +// the mismatch and error cleanly. +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[target_feature(enable = "avx")] +unsafe fn check_avx() { + if is_x86_feature_detected!("avx") { + info!("AVX detected"); + } else { + error!( + "Incompatible CPU detected: missing AVX support. Please build from source on the target" + ); + abort(); } } +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +unsafe fn check_avx() {} + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[target_feature(enable = "avx2")] +unsafe fn check_avx2() { + if is_x86_feature_detected!("avx2") { + info!("AVX2 detected"); + } else { + error!( + "Incompatible CPU detected: missing AVX2 support. 
Please build from source on the target" + ); + abort(); + } +} + +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +unsafe fn check_avx2() {} + // Get the activated stake percentage (based on the provided bank) that is visible in gossip fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: bool) -> u64 { let mut online_stake = 0; @@ -1603,6 +1631,7 @@ mod tests { &config, true, // should_check_duplicate_instance start_progress.clone(), + SocketAddrSpace::Unspecified, ); assert_eq!( *start_progress.read().unwrap(), @@ -1636,9 +1665,11 @@ mod tests { } drop(blockstore); + // this purges and compacts all slots greater than or equal to 5 backup_and_clear_blockstore(&blockstore_path, 5, 2); let blockstore = Blockstore::open(&blockstore_path).unwrap(); + // assert that slots less than 5 aren't affected assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty()); for i in 5..10 { assert!(blockstore @@ -1679,6 +1710,7 @@ mod tests { &config, true, // should_check_duplicate_instance Arc::new(RwLock::new(ValidatorStartProgress::default())), + SocketAddrSpace::Unspecified, ) }) .collect(); @@ -1704,6 +1736,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, ); let (genesis_config, _mint_keypair) = create_genesis_config(1); diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs index f401af38a27e05..772aac1775ec31 100644 --- a/core/src/verified_vote_packets.rs +++ b/core/src/verified_vote_packets.rs @@ -15,17 +15,25 @@ impl VerifiedVotePackets { &mut self, vote_packets_receiver: &VerifiedLabelVotePacketsReceiver, last_update_version: &mut u64, + would_be_leader: bool, ) -> Result<()> { let timer = Duration::from_millis(200); let vote_packets = vote_packets_receiver.recv_timeout(timer)?; *last_update_version += 1; - for (label, slot, packet) in vote_packets { - self.0.insert(label, (*last_update_version, slot, packet)); - } - while let Ok(vote_packets) = vote_packets_receiver.try_recv() { + if would_be_leader { for (label, slot, packet) in vote_packets { self.0.insert(label, (*last_update_version, slot, packet)); } + } else { + self.0.clear(); + self.0.shrink_to_fit(); + } + while let Ok(vote_packets) = vote_packets_receiver.try_recv() { + if would_be_leader { + for (label, slot, packet) in vote_packets { + self.0.insert(label, (*last_update_version, slot, packet)); + } + } } Ok(()) } @@ -137,7 +145,7 @@ mod tests { s.send(vec![(label1.clone(), 42, later_packets)]).unwrap(); let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); verified_vote_packets - .receive_and_process_vote_packets(&r, &mut update_version) + .receive_and_process_vote_packets(&r, &mut update_version, true) .unwrap(); // Test timestamps for same batch are the same @@ -171,7 +179,7 @@ mod tests { s.send(vec![(label2.clone(), 51, Packets::default())]) .unwrap(); verified_vote_packets - .receive_and_process_vote_packets(&r, &mut update_version) + .receive_and_process_vote_packets(&r, &mut update_version, true) .unwrap(); let update_version2 = verified_vote_packets.get_vote_packets(&label2).unwrap().0; assert!(update_version2 > update_version1); @@ -179,8 +187,8 @@ mod tests { // Test empty doesn't bump the version let before = update_version; assert_matches!( - verified_vote_packets.receive_and_process_vote_packets(&r, &mut update_version), - Err(Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)) + 
verified_vote_packets.receive_and_process_vote_packets(&r, &mut update_version, true), + Err(Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout)) ); assert_eq!(before, update_version); } diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs new file mode 100644 index 00000000000000..4dcf8bf59fdd1e --- /dev/null +++ b/core/src/voting_service.rs @@ -0,0 +1,79 @@ +use solana_gossip::cluster_info::ClusterInfo; +use solana_poh::poh_recorder::PohRecorder; +use solana_sdk::{clock::Slot, transaction::Transaction}; +use std::{ + sync::{mpsc::Receiver, Arc, Mutex}, + thread::{self, Builder, JoinHandle}, +}; + +pub enum VoteOp { + PushVote { + tx: Transaction, + tower_slots: Vec, + }, + RefreshVote { + tx: Transaction, + last_voted_slot: Slot, + }, +} + +impl VoteOp { + fn tx(&self) -> &Transaction { + match self { + VoteOp::PushVote { tx, tower_slots: _ } => tx, + VoteOp::RefreshVote { + tx, + last_voted_slot: _, + } => tx, + } + } +} + +pub struct VotingService { + thread_hdl: JoinHandle<()>, +} + +impl VotingService { + pub fn new( + vote_receiver: Receiver, + cluster_info: Arc, + poh_recorder: Arc>, + ) -> Self { + let thread_hdl = Builder::new() + .name("sol-vote-service".to_string()) + .spawn(move || { + for vote_op in vote_receiver.iter() { + Self::handle_vote(&cluster_info, &poh_recorder, vote_op); + } + }) + .unwrap(); + Self { thread_hdl } + } + + pub fn handle_vote( + cluster_info: &ClusterInfo, + poh_recorder: &Mutex, + vote_op: VoteOp, + ) { + let _ = cluster_info.send_vote( + vote_op.tx(), + crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder), + ); + + match vote_op { + VoteOp::PushVote { tx, tower_slots } => { + cluster_info.push_vote(&tower_slots, tx); + } + VoteOp::RefreshVote { + tx, + last_voted_slot, + } => { + cluster_info.refresh_vote(tx, last_voted_slot); + } + } + } + + pub fn join(self) -> thread::Result<()> { + self.thread_hdl.join() + } +} diff --git a/core/src/window_service.rs b/core/src/window_service.rs index eac0b4c55b55f7..c34238d500f5ee 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -134,7 +134,7 @@ fn verify_repair( .map(|repair_meta| { outstanding_requests.register_response( repair_meta.nonce, - &shred, + shred, solana_sdk::timing::timestamp(), ) }) @@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair( let mut outstanding_requests = outstanding_requests.write().unwrap(); shreds.retain(|shred| { let should_keep = ( - verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]), + verify_repair(&mut outstanding_requests, shred, &repair_infos[i]), i += 1, ) .0; @@ -188,9 +188,14 @@ where } prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, outstanding_requests); + let repairs: Vec<_> = repair_infos + .iter() + .map(|repair_info| repair_info.is_some()) + .collect(); let (completed_data_sets, inserted_indices) = blockstore.insert_shreds_handle_duplicate( shreds, + repairs, Some(leader_schedule_cache), false, &handle_duplicate, @@ -582,12 +587,12 @@ impl WindowService { H: Fn(), { match e { - Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true, - Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => { + Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => true, + Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => { handle_timeout(); false } - Error::CrossbeamSendError => true, + Error::CrossbeamSend => true, _ => { handle_error(); error!("thread {:?} error {:?}", thread::current().name(), e); @@ -621,6 +626,7 @@ mod test { signature::{Keypair, Signer}, 
timing::timestamp, }; + use solana_streamer::socket::SocketAddrSpace; use std::sync::Arc; fn local_entries_to_shred( @@ -630,7 +636,7 @@ mod test { keypair: &Arc, ) -> Vec { let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap(); - shredder.entries_to_shreds(&entries, true, 0).0 + shredder.entries_to_shreds(entries, true, 0).0 } #[test] @@ -771,7 +777,11 @@ mod test { assert!(!blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot)); let keypair = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); - let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair)); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(keypair), + SocketAddrSpace::Unspecified, + ); run_check_duplicate( &cluster_info, &blockstore, diff --git a/core/tests/client.rs b/core/tests/client.rs index 66d3a1ccc530fb..8fe1c04776f1a7 100644 --- a/core/tests/client.rs +++ b/core/tests/client.rs @@ -18,6 +18,7 @@ use solana_sdk::{ signature::{Keypair, Signer}, system_transaction, }; +use solana_streamer::socket::SocketAddrSpace; use std::{ net::{IpAddr, SocketAddr}, sync::{ @@ -34,7 +35,8 @@ fn test_rpc_client() { solana_logger::setup(); let alice = Keypair::new(); - let test_validator = TestValidator::with_no_fees(alice.pubkey(), None); + let test_validator = + TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); let bob_pubkey = solana_sdk::pubkey::new_rand(); diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 23396a10d9d16f..cfeda12228486a 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -188,7 +188,7 @@ impl Tower { .delayed_votes .iter() .enumerate() - .map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i)) + .map(|(i, v)| (*scores.get(v).unwrap_or(&0), v.time, i)) .collect(); // highest score, latest vote first best.sort_unstable(); @@ -542,7 +542,7 @@ fn test_with_partitions( let mut scores: HashMap = HashMap::new(); towers.iter().for_each(|n| { n.delayed_votes.iter().for_each(|v| { - *scores.entry(v.clone()).or_insert(0) += n.score(&v, &fork_tree); + *scores.entry(v.clone()).or_insert(0) += n.score(v, &fork_tree); }) }); for tower in towers.iter_mut() { diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs index 5e6258415ff187..69778edefa6152 100644 --- a/core/tests/ledger_cleanup.rs +++ b/core/tests/ledger_cleanup.rs @@ -39,6 +39,8 @@ mod tests { pub cleanup_blockstore: bool, pub emit_cpu_info: bool, pub assert_compaction: bool, + pub compaction_interval: Option, + pub no_compaction: bool, } #[derive(Clone, Copy, Debug)] @@ -154,6 +156,11 @@ mod tests { let emit_cpu_info = read_env("EMIT_CPU_INFO", true); // set default to `true` once compaction is merged let assert_compaction = read_env("ASSERT_COMPACTION", false); + let compaction_interval = match read_env("COMPACTION_INTERVAL", 0) { + maybe_zero if maybe_zero == 0 => None, + non_zero => Some(non_zero), + }; + let no_compaction = read_env("NO_COMPACTION", false); BenchmarkConfig { benchmark_slots, @@ -166,6 +173,8 @@ mod tests { cleanup_blockstore, emit_cpu_info, assert_compaction, + compaction_interval, + no_compaction, } } @@ -211,8 +220,13 @@ mod tests { fn test_ledger_cleanup_compaction() { solana_logger::setup(); let blockstore_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); + let mut blockstore = Blockstore::open(&blockstore_path).unwrap(); let config = get_benchmark_config(); + if config.no_compaction { + 
blockstore.set_no_compaction(true); + } + let blockstore = Arc::new(blockstore); + eprintln!("BENCHMARK CONFIG: {:?}", config); eprintln!("LEDGER_PATH: {:?}", &blockstore_path); @@ -223,6 +237,8 @@ mod tests { let stop_size_bytes = config.stop_size_bytes; let stop_size_iterations = config.stop_size_iterations; let pre_generate_data = config.pre_generate_data; + let compaction_interval = config.compaction_interval; + let batches = benchmark_slots / batch_size; let (sender, receiver) = channel(); @@ -232,7 +248,7 @@ mod tests { blockstore.clone(), max_ledger_shreds, &exit, - None, + compaction_interval, None, ); diff --git a/core/tests/rpc.rs b/core/tests/rpc.rs index ee8291f8d150a5..7a6771ed010db2 100644 --- a/core/tests/rpc.rs +++ b/core/tests/rpc.rs @@ -6,8 +6,10 @@ use reqwest::{self, header::CONTENT_TYPE}; use serde_json::{json, Value}; use solana_account_decoder::UiAccount; use solana_client::{ + client_error::{ClientErrorKind, Result as ClientResult}, rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcSignatureSubscribeConfig}, + rpc_request::RpcError, rpc_response::{Response, RpcSignatureResult, SlotUpdate}, tpu_client::{TpuClient, TpuClientConfig}, }; @@ -21,6 +23,7 @@ use solana_sdk::{ system_transaction, transaction::Transaction, }; +use solana_streamer::socket::SocketAddrSpace; use solana_transaction_status::TransactionStatus; use std::{ collections::HashSet, @@ -58,7 +61,8 @@ fn test_rpc_send_tx() { solana_logger::setup(); let alice = Keypair::new(); - let test_validator = TestValidator::with_no_fees(alice.pubkey(), None); + let test_validator = + TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); let rpc_url = test_validator.rpc_url(); let bob_pubkey = solana_sdk::pubkey::new_rand(); @@ -122,7 +126,8 @@ fn test_rpc_invalid_requests() { solana_logger::setup(); let alice = Keypair::new(); - let test_validator = TestValidator::with_no_fees(alice.pubkey(), None); + let test_validator = + TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); let rpc_url = test_validator.rpc_url(); let bob_pubkey = solana_sdk::pubkey::new_rand(); @@ -153,7 +158,8 @@ fn test_rpc_invalid_requests() { fn test_rpc_slot_updates() { solana_logger::setup(); - let test_validator = TestValidator::with_no_fees(Pubkey::new_unique(), None); + let test_validator = + TestValidator::with_no_fees(Pubkey::new_unique(), None, SocketAddrSpace::Unspecified); // Create the pub sub runtime let rt = Runtime::new().unwrap(); @@ -218,7 +224,8 @@ fn test_rpc_subscriptions() { solana_logger::setup(); let alice = Keypair::new(); - let test_validator = TestValidator::with_no_fees(alice.pubkey(), None); + let test_validator = + TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); transactions_socket.connect(test_validator.tpu()).unwrap(); @@ -385,7 +392,8 @@ fn test_rpc_subscriptions() { fn test_tpu_send_transaction() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let test_validator = TestValidator::with_no_fees(mint_pubkey, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, None, SocketAddrSpace::Unspecified); let rpc_client = Arc::new(RpcClient::new_with_commitment( test_validator.rpc_url(), CommitmentConfig::processed(), @@ -414,3 +422,34 @@ fn test_tpu_send_transaction() { } } } + +#[test] +fn deserialize_rpc_error() -> ClientResult<()> { + solana_logger::setup(); + + let alice = Keypair::new(); + let validator = 
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); + let rpc_client = RpcClient::new(validator.rpc_url()); + + let bob = Keypair::new(); + let lamports = 50; + let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?; + let mut tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash); + + // This will cause an error + tx.signatures.clear(); + + let err = rpc_client.send_transaction(&tx); + let err = err.unwrap_err(); + + match err.kind { + ClientErrorKind::RpcError(RpcError::RpcRequestError { .. }) => { + // This is what used to happen + panic!() + } + ClientErrorKind::RpcError(RpcError::RpcResponseError { .. }) => Ok(()), + _ => { + panic!() + } + } +} diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 05b00b261887ba..bc0a52bb6bd515 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -60,6 +60,7 @@ mod tests { signature::{Keypair, Signer}, system_transaction, }; + use solana_streamer::socket::SocketAddrSpace; use std::{ collections::HashSet, fs, @@ -106,6 +107,8 @@ mod tests { None, AccountSecondaryIndexes::default(), false, + accounts_db::AccountShrinkThreshold::default(), + false, ); bank0.freeze(); let mut bank_forks = BankForks::new(bank0); @@ -145,8 +148,9 @@ mod tests { let old_last_bank = old_bank_forks.get(old_last_slot).unwrap(); - let deserialized_bank = snapshot_utils::bank_from_archive( - &account_paths, + let check_hash_calculation = false; + let (deserialized_bank, _timing) = snapshot_utils::bank_from_archive( + account_paths, &[], &old_bank_forks .snapshot_config @@ -165,6 +169,9 @@ mod tests { AccountSecondaryIndexes::default(), false, None, + accounts_db::AccountShrinkThreshold::default(), + check_hash_calculation, + false, ) .unwrap(); @@ -211,7 +218,7 @@ mod tests { }; for slot in 0..last_slot { let mut bank = Bank::new_from_parent(&bank_forks[slot], &Pubkey::default(), slot + 1); - f(&mut bank, &mint_keypair); + f(&mut bank, mint_keypair); let bank = bank_forks.insert(bank); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to @@ -220,7 +227,7 @@ mod tests { // set_root should send a snapshot request bank_forks.set_root(bank.slot(), &request_sender, None); bank.update_accounts_hash(); - snapshot_request_handler.handle_snapshot_requests(false, false, false); + snapshot_request_handler.handle_snapshot_requests(false, false, false, 0); } } @@ -245,7 +252,7 @@ mod tests { .unwrap(); let snapshot_package = snapshot_utils::process_accounts_package_pre( snapshot_package, - Some(&last_bank.get_thread_pool()), + Some(last_bank.get_thread_pool()), ); snapshot_utils::archive_snapshot_package( &snapshot_package, @@ -272,12 +279,12 @@ mod tests { |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key1, 1, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); let key2 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key2, 0, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.freeze(); @@ -289,7 +296,7 @@ mod tests { fn goto_end_of_slot(bank: &mut Bank) { let mut tick_hash = bank.last_blockhash(); loop { - tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]); + tick_hash = 
hashv(&[tick_hash.as_ref(), &[42]]); bank.register_tick(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); @@ -344,7 +351,7 @@ mod tests { ); let slot = bank.slot(); let key1 = Keypair::new().pubkey(); - let tx = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash()); + let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.squash(); let accounts_hash = bank.update_accounts_hash(); @@ -363,9 +370,9 @@ mod tests { snapshot_utils::snapshot_bank( &bank, vec![], - &package_sender, - &snapshot_path, - &snapshot_package_output_path, + package_sender, + snapshot_path, + snapshot_package_output_path, snapshot_config.snapshot_version, &snapshot_config.archive_format, None, @@ -423,7 +430,7 @@ mod tests { // Purge all the outdated snapshots, including the ones needed to generate the package // currently sitting in the channel - snapshot_utils::purge_old_snapshots(&snapshot_path); + snapshot_utils::purge_old_snapshots(snapshot_path); assert!(snapshot_utils::get_snapshot_paths(&snapshots_dir) .into_iter() .map(|path| path.slot) @@ -437,7 +444,11 @@ mod tests { // channel hold hard links to these deleted snapshots. We verify this is the case below. let exit = Arc::new(AtomicBool::new(false)); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default())); + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::default(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); let pending_snapshot_package = PendingSnapshotPackage::default(); let snapshot_packager_service = SnapshotPackagerService::new( @@ -570,14 +581,14 @@ mod tests { (MAX_CACHE_ENTRIES * 2 + 1) as u64, |bank, mint_keypair| { let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key1, 1, bank.parent().unwrap().last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key2, 1, bank.parent().unwrap().last_blockhash(), diff --git a/crate-features/Cargo.toml b/crate-features/Cargo.toml index 98788197f7e7a2..e5893d460ef028 100644 --- a/crate-features/Cargo.toml +++ b/crate-features/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-crate-features" -version = "1.7.0" +version = "1.7.11" description = "Solana Crate Features" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 9baac67783b00f..7a04e68d635316 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -88,10 +88,6 @@ module.exports = { label: "Introduction", to: "introduction", }, - { - label: "Tour de SOL", - to: "tour-de-sol", - }, ], }, { diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/clusters.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/clusters.md index 05c7fcb8946201..a1a7e5ebfa7d90 100644 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/clusters.md +++ b/docs/i18n/zh/docusaurus-plugin-content-docs/current/clusters.md @@ -56,7 +56,6 @@ $ solana-validator \ ## Testnet(测试网) - Testnet是我们在实时群集上重点测试最新发布功能的地方,尤其侧重于网络性能,稳定性和验证程序行为。 -- 集群[Tour de SOL](tour-de-sol.md)计划在Testnet上运行,在该计划中,我们接受恶意行为和对网络的攻击,以帮助我们发现和消除错误或网络漏洞。 - Testnet代币**不是真实的** - Testnet可能会重置账本。 - Testnet包括用于空投的代币水龙头,用于应用程序测试 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol.md deleted file 
mode 100644 index 381e36e68c23a3..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 介绍 ---- - -## 欢迎! - -本指南包括了关于如何参加 Solana Tour de SOL 的信息。 遇到困难? 需要提出看法? 请发邮件到 ryan@solana.com - -### [了解更多关于 Tour de SOL 的信息](https://solana.com/tds/) - -如果您还没有注册,请先在 [https://solana.com/tds/](https://solana.com/tds/) 填写表格。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/README.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/README.md deleted file mode 100644 index 9882d83fb2c35d..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/README.md +++ /dev/null @@ -1 +0,0 @@ -# 参与方法 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/steps-to-create-a-validator.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/steps-to-create-a-validator.md deleted file mode 100644 index fa190ee2a4d065..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/steps-to-create-a-validator.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: 创建验证节点的步骤 ---- - -要创建Solana验证节点,请遵循针对[Testnet集群](../../clusters.md)的常规[验证节点工作流程](../../running-validator/validator-start.md)。 - -请注意,运行每个Epoch的进程都会自动质押到Testnet验证节点。 如果您的验证节点运行正常,则将在两天内质押生效(如果长时间离线则自动取消质押)。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-public-key-registration.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-public-key-registration.md deleted file mode 100644 index 3b94d7095a286e..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-public-key-registration.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: 创建验证节点公钥 ---- - -您需要先注册才能参加到网络中。 请查看 [注册信息](../registration/how-to-register.md)。 - -为了获得 SOL 奖励,您需要在keybase.io帐户下发布验证者的身份公共密钥。 - -## **生成密钥对** - -1. 如果还没有密钥对,请运行以下命令来为验证节点生成一个: - - ```bash - solana-keygen new -o ~/validator-keypair.json - ``` - -2. 现在可以运行以下命令查看身份公共密钥: - - ```bash - solana-keygen pubkey ~/validator-keypair.json - ``` - -> 注意:“validator-keypair.json”文件也是您的 \(ed25519\) 私钥。 - -验证节点身份密钥独特识别了您在网络中的验证节点。 **备份此信息至关重要。** - -如果您不备份此信息,那么如果您无法访问验证节点的话,将无法对其进行恢复。 如果发生这种情况,您将失去SOL TOO的奖励。 - -要备份您的验证节点识别密钥, **请备份您的"validator-keypair.json" 文件到一个安全位置。** - -## 将您的Solana公钥链接到Keybase帐户 - -您必须将Solana pubkey链接到Keybase.io帐户。 以下说明介绍了如何通过在服务器上安装Keybase来执行此操作。 - -1. 在您的机器上安装[Keybase](https://keybase.io/download)。 -2. 登录到服务器上的Keybase帐户。 如果您还没有Keybase帐户,请先创建一个。 以下是基本的[Keybase CLI命令列表](https://keybase.io/docs/command_line/basics)。 -3. 在公用文件夹中创建一个Solana目录:`mkdir /keybase/public//solana` -4. 在Keybase公共文件夹中按以下格式创建一个空文件,来发布验证者的身份公共密钥:`/keybase/public//solana/validator-`。 例如: - - ```bash - touch /keybase/public//solana/validator- - ``` - -5. 
要检查公钥是否已成功发布,请确保您在 `https://keybase.pub//solana/validator-` 看到它。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-technical-requirements.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-technical-requirements.md deleted file mode 100644 index 388edfa685556e..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/participation/validator-technical-requirements.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 运行验证节点的要求 ---- - -## 硬件 - -请参考 [建议的硬件配置](../../running-validator/validator-reqs.md)。 - -## 软件 - -- 我们在Ubuntu 04/18上进行开发和运行。 在Ubuntu 04/16上运行时,某些用户会遇到一些问题 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/README.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/README.md deleted file mode 100644 index 6dbeaf5bc3e081..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/README.md +++ /dev/null @@ -1 +0,0 @@ -# 注册 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/confidentiality.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/confidentiality.md deleted file mode 100644 index 86dec1328712b0..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/confidentiality.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: 保密规定 ---- - -请参看****第 8 章[** TOUR DE SOL 参与规则的 **](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) ** 保密规定。** - -Solana 无意在 Tour de SOL 共享任何机密信息。 我们将会通过口头、电子邮件等方式将信息分享出去。 除非明确地调出信息,否则不应将信息视为机密信息,我们欢迎您的分享。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/how-to-register.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/how-to-register.md deleted file mode 100644 index 20b8d8a902642c..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/how-to-register.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: 如何注册 ---- - -#### 1) 注册表 - -[请在此提交注册表](https://forms.gle/gQYLozj5u7yKU3HG6) - -#### 2) KYC/AML(通过 Coinlist) - -[请在这里注册 KYC/AML + 参与协议](https://tsm.coinlist.co/solana-staking) - -_如果您先前已经完成了 SLP 或 TdS 的 KYC/AML,那么同一个实体/个人就不需要这个步骤了。 我们不接受美国 实体或个人。_ - -#### 3) 加入我们的Discord - -所有 Tour de SOL 验证程序**都需要**加入,因为这是我们的主要通信渠道:https://discord.gg/N3mqAfa - -### 下一步 - -- 查看我们的文档来熟悉如何[运行一个验证节点](../../running-validator.md) - -- 完成注册后,您将收到一封电子邮件,说明您要完成了注册流程。 - -- 在 Discord 上相见! 
diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/rewards.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/rewards.md deleted file mode 100644 index 653dc850955d73..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/rewards.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: 奖励 ---- - -## 奖励计算 - -奖励将根据 [论坛的这个帖子](https://forums.solana.com/t/tour-de-sol-stage-1-preliminary-compensation-design/79) 和 [这个表格](https://docs.google.com/spreadsheets/d/11puBSw2THdO4wU-uyDEic-D03jg4ZAooVpcZU0w_4gI/edit#gid=218406032) 中描述的奖励规则来计算。 - -另外请查看 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 中的“2\(f\) Tour de Sol 详情”,来了解更多奖励详情。 - -## 领取奖励的要求 - -参与者必须已经签署 Tour de SOL 参与协议,并通过 CoinList 平台以个人身份通过KYC/AML,并在参加之前填写了W-8 BEN或W-9纳税表格\(取决于您的居住地\) 来参与到 Tour 中。 完成注册后,参与者可以参加到任何一个和所有的阶段。 最终注册日期将分阶段公开宣布。 - -最后,参与者必须签署Solana的标准[代币协议](https://drive.google.com/open?id=1O4cEUZzeSNoVcncbHcEegAqPgjT-7hcy)。 代币协议将在奖励发放日期之前由Solana提供。 - -另外请参阅 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 中的“2\(i\) & 2\(j\) Tour de Sol 详情”部分,了解与领取奖励有关的更多详情。 - -## 税务要求 - -参与者正在与Solana签订服务协议,并获得与服务相关的酌情奖励。 他们不被视为公司的全职员工,因此如果适用的话,Solana会收集W-9和W-8 BEN表格以支持纳税报告义务。 Solana建议参与者咨询税务会计师,以了解任何潜在的税务要求。 - -此外,如 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 第2i、2k和10c节所述: - -> 2i - 要获得任何SOL奖励,参与者必须签署公司的SOL奖励协议标准格式,其中包括作为SOL奖励发行的管理SOL所有权和使用的条款和条件,包括但不限于适用的锁定证券法、黑名单日期和纳税报告信息要求。 - -> 2k - 要获得任何SOL奖励,入围者必须签署公司的SOL奖励协议标准格式,其中包括作为SOL奖励发行的用于管理SOL所有权和使用的条款和条件,包括但不限于适用的锁定证券法、黑名单日期和纳税报告信息要求。 - -> 10c - 您有责任遵守适用于任何协议的交易的所有法律和法规,包括但不限于《商品交易法》以及美国 商品期货交易委员会\(“CFTC”\)颁布的法规,美国 证券交易委员会\(“SEC”\) 颁布的联邦证券法律和法规以及适用于您从公司收取的任何报酬的税法。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/terms-of-participation.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/terms-of-participation.md deleted file mode 100644 index 320bdea36cb32f..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/terms-of-participation.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 参与条款 ---- - -详情请查看官方 [TOUR DE SOL 参与条款](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing)。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/validator-registration-and-rewards-faq.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/validator-registration-and-rewards-faq.md deleted file mode 100644 index 5aa0df3cf382d6..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/registration/validator-registration-and-rewards-faq.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: 注册常见问题 ---- - -对于任何参与问题,[TOUR DE SOL 参与条款](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) 都应被视为权威资料。 - -## 注册是强制性的吗? - -是的。 注册是强制的。 注册正在进行中, 我们每月主办为期一个月的 Tour de SOL,新的参加者需要等到下一阶段开始时才能进入。 [注册信息在这里](how-to-register.md)。 - -## 谁有资格参加? - -详情请见 [TOUR DE SOL Participation terms](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) 的“参与资格 1;KYC 要求”。 - -## 我是否必须完成KYC/AML认证才能参与? 
- -是的。 完成KYC/AML是强制性的。 如果你在第一阶段前没有完成这个进程,你就无法参加到 Tour de SOL。 - -我们已经与 Coinlist 合作管理 Tour de SOL 的 KYC/AML。 您可以在这里找到 [参与教程](https://docs.google.com/presentation/d/1gz8e34piUzzwzCMKwVrKKbZiPXV64Uq2-Izt4-VcMR4/),[在这里完成认证](https://docs.google.com/presentation/d/1gz8e34piUzzwzCMKwVrKKbZiPXV64Uq2-Izt4-VcMR4/edit#slide=id.g5dff17f5e5_0_44)。 - -## 我作为 Tour de Sol 参与者的责任是什么? - -详情请查看 [TOR DE SOL 参与条款中](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“2c Tour de SOL详情”。 - -### 如何计算“Tour de Sol 活跃 Tour 事件时间的 50%”责任? - -为了有资格在给定阶段获得奖励,验证者必须在该阶段 >= 50%的位置中提交投票。 - -如果验证者无法为某个阶段提交 >= 50%的投票,但仍然认为他们应该在该阶段获得奖励,那么他们可以向Solana提出重新考虑的请求。 - -## Tour de Sol 测试代币与 Solana 主网代币之间是否有关系? - -没有。 详情请查看 [TOR DE SOL 参与条款中](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“2d Tour de SOL Details”。 - -## 验证节点会被取消 Tour de Sol 资格吗? - -会的。 如果某个验证节点从事违禁行为和/或未能提供上述第\#4点所述的最低限度服务,那么它将被取消资格。 - -另见 [ TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“4 违禁行为”,来详细地查看违禁行为。 - -### 更多关于违禁行为的问题: - -#### 如“ 4 禁止行为”一节所述,有在居住地管辖范围以外的其他管辖区提供 Tour 服务的例子吗? 是否意味着服务器必须放在我居住地的管辖范围内? - -不是的。 服务器可以位于与参与者的居住地不同的其他管辖区中。 签署[TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view)后,参与者已同意:如果居住在美国,他们就在美国提供服务;如果不在美国境内,他们就从美国境外提供服务。 - -## 奖励是怎么计算的? - -详情请查看 [奖励部分](rewards.md)。 - -## 我们怎么知道能否公开分享哪些信息? - -请查看 [保密协议](confidentiality.md)。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/submitting-bugs.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/submitting-bugs.md deleted file mode 100644 index 04df217326c074..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/submitting-bugs.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: 提交 Bug ---- - -请在[此Github代码库中的issue](https://github.com/solana-labs/solana/issues)提交所有的漏洞和反馈。 - -由于[Discord频道](useful-links.md)的信息流比较快,因此其中报告的问题很可能会在信息流中丢失。 在Github代码库中归档问题是确保记录并解决问题的唯一方法。 diff --git a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/useful-links.md b/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/useful-links.md deleted file mode 100644 index 6ef11c50f149ba..00000000000000 --- a/docs/i18n/zh/docusaurus-plugin-content-docs/current/tour-de-sol/useful-links.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 有用的链接 & 讨论 -description: 阅读本指南以后 ---- - -- [网络浏览器](http://explorer.solana.com/) -- [TdS 性能指示板](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds) -- 验证节点频道 - - [\#validator-support](https://discord.gg/rZsenD) 通用群组用于讨论 Tour de SOL 故障以外的验证节点相关疑问。 - - [\#tourdesol-validators](https://discord.gg/BdujK2) 群组供 Tour de SOL 参与者进行交流。 - - [\#tourdesol-annound](https://discord.gg/Q5TxEC),关于 Tour de SOL 关键信息的唯一官方发布频道。 -- [核心软件代码库](https://github.com/solana-labs/solana) -- [在此仓库中提交 bug 和反馈](https://github.com/solana-labs/solana/issues) - -> 找不到您想要的东西? 
请发送电子邮件到 ryan@solana.com 或在 Discord 联系 @rshea\#2622。 diff --git a/docs/i18n/zh/docusaurus-theme-classic/footer.json b/docs/i18n/zh/docusaurus-theme-classic/footer.json index fd2962f254ba4a..0dc706b81a56b8 100644 --- a/docs/i18n/zh/docusaurus-theme-classic/footer.json +++ b/docs/i18n/zh/docusaurus-theme-classic/footer.json @@ -15,10 +15,6 @@ "message": "介绍", "description": "The label of footer link with label=Introduction linking to introduction" }, - "link.item.label.Tour de SOL": { - "message": "Tour de SOL", - "description": "The label of footer link with label=Tour de SOL linking to tour-de-sol" - }, "link.item.label.Discord": { "message": "Discord", "description": "The label of footer link with label=Discord linking to https://discordapp.com/invite/pquxPsq" @@ -39,4 +35,4 @@ "message": "Copyright © 2021 Solana Foundation", "description": "The footer copyright" } -} \ No newline at end of file +} diff --git a/docs/sidebars.js b/docs/sidebars.js index f53d3cb07bea49..0106f9e9370b1b 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -101,35 +101,6 @@ module.exports = { "running-validator/validator-stake", "running-validator/validator-monitor", "running-validator/validator-info", - { - type: "category", - label: "Incenvitized Testnet", - items: [ - "tour-de-sol", - { - type: "category", - label: "Registration", - items: [ - "tour-de-sol/registration/how-to-register", - "tour-de-sol/registration/terms-of-participation", - "tour-de-sol/registration/rewards", - "tour-de-sol/registration/confidentiality", - "tour-de-sol/registration/validator-registration-and-rewards-faq", - ], - }, - { - type: "category", - label: "Participation", - items: [ - "tour-de-sol/participation/validator-technical-requirements", - "tour-de-sol/participation/validator-public-key-registration", - "tour-de-sol/participation/steps-to-create-a-validator", - ], - }, - "tour-de-sol/useful-links", - "tour-de-sol/submitting-bugs", - ], - }, "running-validator/validator-troubleshoot", ], Clusters: [ @@ -213,22 +184,23 @@ module.exports = { label: "Accepted", items: [ "proposals/accepted-design-proposals", + "proposals/bankless-leader", + "proposals/block-confirmation", + "proposals/cluster-test-framework", + "proposals/embedding-move", + "proposals/interchain-transaction-verification", "proposals/ledger-replication-to-implement", "proposals/optimistic-confirmation-and-slashing", - "proposals/vote-signing-to-implement", - "proposals/cluster-test-framework", - "proposals/validator-proposal", + "proposals/optimistic_confirmation", + "proposals/rip-curl", + "proposals/rust-clients", "proposals/simple-payment-and-state-verification", - "proposals/interchain-transaction-verification", - "proposals/snapshot-verification", - "proposals/bankless-leader", "proposals/slashing", + "proposals/snapshot-verification", "proposals/tick-verification", - "proposals/block-confirmation", - "proposals/rust-clients", - "proposals/optimistic_confirmation", - "proposals/embedding-move", - "proposals/rip-curl", + "proposals/transactions-v2", + "proposals/validator-proposal", + "proposals/vote-signing-to-implement", ], }, ], diff --git a/docs/src/cluster/bench-tps.md b/docs/src/cluster/bench-tps.md index 938b220a581fec..e5b805fcd8fe5a 100644 --- a/docs/src/cluster/bench-tps.md +++ b/docs/src/cluster/bench-tps.md @@ -123,7 +123,7 @@ This will dump all the threads stack traces into gdb.txt In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`. 
```bash -NDEBUG=1 ./multinode-demo/bench-tps.sh --entrypoint devnet.solana.com:8001 --faucet devnet.solana.com:9900 --duration 60 --tx_count 50 +NDEBUG=1 ./multinode-demo/bench-tps.sh --entrypoint entrypoint.devnet.solana.com:8001 --faucet api.devnet.solana.com:9900 --duration 60 --tx_count 50 ``` You can observe the effects of your client's transactions on our [metrics dashboard](https://metrics.solana.com:3000/d/monitor/cluster-telemetry?var-testnet=devnet) diff --git a/docs/src/cluster/leader-rotation.md b/docs/src/cluster/leader-rotation.md index aa71c76f2cf5bb..a52cbb7eafc465 100644 --- a/docs/src/cluster/leader-rotation.md +++ b/docs/src/cluster/leader-rotation.md @@ -19,7 +19,7 @@ Without a partition lasting longer than an epoch, the cluster will work as follo For example: -The epoch duration is 100 slots. The root fork is updated from fork computed at slot height 99 to a fork computed at slot height 102. Forks with slots at height 100, 101 were skipped because of failures. The new leader schedule is computed using fork at slot height 102. It is active from slot 200 until it is updated again. +Let's assume an epoch duration of 100 slots, which in reality is orders of magnitude higher. The root fork is updated from the fork computed at slot height 99 to a fork computed at slot height 102. Forks with slots at height 100, 101 were skipped because of failures. The new leader schedule is computed using the fork at slot height 102. It is active from slot 200 until it is updated again. No inconsistency can exist because every validator that is voting with the cluster has skipped 100 and 101 when its root passes 102. All validators, regardless of voting pattern, would be committing to a root that is either 102, or a descendant of 102. diff --git a/docs/src/clusters.md b/docs/src/clusters.md index 09e5f935ed89b3..ce78456f387d59 100644 --- a/docs/src/clusters.md +++ b/docs/src/clusters.md @@ -44,27 +44,32 @@ solana config set --url https://api.devnet.solana.com $ solana-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ - --trusted-validator dv1LfzJvDF7S1fBKpFgKoKXK5yoSosmkAdfbxBo1GqJ \ - --no-untrusted-rpc \ + --known-validator dv1ZAGvdsz5hHLwWXsVnM94hWf1pjbKVau1QVkaMJ92 \ + --known-validator dv2eQHeP4RFrJZ6UeiZWoc3XTtmtZCUKxxCApCDcRNV \ + --known-validator dv4ACNkpYPcE3aKmYDqZm9G5EB3J4MRoeE7WNDRBVJB \ + --known-validator dv3qDFk1DTF36Z62bNvrCXe9sKATA6xvVy6A798xxAS \ + --only-known-rpc \ --ledger ledger \ --rpc-port 8899 \ --dynamic-port-range 8000-8010 \ --entrypoint entrypoint.devnet.solana.com:8001 \ + --entrypoint entrypoint2.devnet.solana.com:8001 \ + --entrypoint entrypoint3.devnet.solana.com:8001 \ + --entrypoint entrypoint4.devnet.solana.com:8001 \ + --entrypoint entrypoint5.devnet.solana.com:8001 \ --expected-genesis-hash EtWTRABZaYq6iMfeYKouRu166VU2xqa1wcaWoxPkrZBG \ --wal-recovery-mode skip_any_corrupted_record \ --limit-ledger-size ``` -The `--trusted-validator`s is operated by Solana +The [`--known-validator`s](running-validator/validator-start.md#known-validators) +are operated by Solana Labs ## Testnet - Testnet is where we stress test recent release features on a live cluster, particularly focused on network performance, stability and validator behavior. - [Tour de SOL](tour-de-sol.md) initiative runs on Testnet, where we - encourage malicious behavior and attacks on the network to help us find and - squash bugs or network vulnerabilities. - Testnet tokens are **not real** - Testnet may be subject to ledger resets.
- Testnet includes a token faucet for airdrops for application testing @@ -91,11 +96,11 @@ solana config set --url https://api.testnet.solana.com $ solana-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ - --trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ - --trusted-validator 7XSY3MrYnK8vq693Rju17bbPkCN3Z7KvvfvJx4kdrsSY \ - --trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \ - --trusted-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \ - --no-untrusted-rpc \ + --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ + --known-validator 7XSY3MrYnK8vq693Rju17bbPkCN3Z7KvvfvJx4kdrsSY \ + --known-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \ + --known-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \ + --only-known-rpc \ --ledger ledger \ --rpc-port 8899 \ --dynamic-port-range 8000-8010 \ @@ -107,17 +112,16 @@ $ solana-validator \ --limit-ledger-size ``` -The identity of the `--trusted-validator`s are: +The identities of the +[`--known-validator`s](running-validator/validator-start.md#known-validators) are: -- `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - Solana Foundation (testnet.solana.com) -- `7XSY3MrYnK8vq693Rju17bbPkCN3Z7KvvfvJx4kdrsSY` - Solana Foundation (Break RPC node) +- `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - Solana Labs (testnet.solana.com) - `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One - `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake ## Mainnet Beta A permissionless, persistent cluster for early token holders and launch partners. -Currently, rewards and inflation are disabled. - Tokens that are issued on Mainnet Beta are **real** SOL - If you have paid money to purchase/be issued tokens, such as through our @@ -146,11 +150,11 @@ solana config set --url https://api.mainnet-beta.solana.com $ solana-validator \ --identity ~/validator-keypair.json \ --vote-account ~/vote-account-keypair.json \ - --trusted-validator 7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2 \ - --trusted-validator GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ \ - --trusted-validator DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ \ - --trusted-validator CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S \ - --no-untrusted-rpc \ + --known-validator 7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2 \ + --known-validator GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ \ + --known-validator DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ \ + --known-validator CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S \ + --only-known-rpc \ --ledger ledger \ --rpc-port 8899 \ --private-rpc \ @@ -165,4 +169,5 @@ $ solana-validator \ --limit-ledger-size ``` -All four `--trusted-validator`s are operated by Solana +All four [`--known-validator`s](running-validator/validator-start.md#known-validators) +are operated by Solana Labs diff --git a/docs/src/developing/backwards-compatibility.md b/docs/src/developing/backwards-compatibility.md index d16d245e90c201..9f26ad03f87116 100644 --- a/docs/src/developing/backwards-compatibility.md +++ b/docs/src/developing/backwards-compatibility.md @@ -31,7 +31,7 @@ updates of a particular `MINOR` version release. 
#### Release Channels - `edge` software that contains cutting-edge features with no backward compatibility policy -- `beta` software that runs on the Solana Tour de SOL testnet cluster +- `beta` software that runs on the Solana Testnet cluster - `stable` software that run on the Solana Mainnet Beta and Devnet clusters #### Major Releases (x.0.0) @@ -43,7 +43,7 @@ that were enabled in the previous `MAJOR` version. #### Minor Releases (1.x.0) New features and proposal implementations are added to _new_ `MINOR` version -releases (e.g. 1.4.0) and are first run on Solana's Tour de SOL testnet cluster. While running +releases (e.g. 1.4.0) and are first run on Solana's Testnet cluster. While running on the testnet, `MINOR` versions are considered to be in the `beta` release channel. After those changes have been patched as needed and proven to be reliable, the `MINOR` version will be upgraded to the `stable` release channel and deployed to the Mainnet Beta cluster. diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md index c1c4cd312504fa..a88d2bd40c9565 100644 --- a/docs/src/developing/clients/jsonrpc-api.md +++ b/docs/src/developing/clients/jsonrpc-api.md @@ -55,6 +55,7 @@ gives a convenient interface for the RPC methods. - [getSlotLeader](jsonrpc-api.md#getslotleader) - [getSlotLeaders](jsonrpc-api.md#getslotleaders) - [getStakeActivation](jsonrpc-api.md#getstakeactivation) +- [getSnapshotSlot](jsonrpc-api.md#getsnapshotslot) - [getSupply](jsonrpc-api.md#getsupply) - [getTokenAccountBalance](jsonrpc-api.md#gettokenaccountbalance) - [getTokenAccountsByDelegate](jsonrpc-api.md#gettokenaccountsbydelegate) @@ -81,6 +82,15 @@ gives a convenient interface for the RPC methods. - [slotSubscribe](jsonrpc-api.md#slotsubscribe) - [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe) +### Unstable Methods + +Unstable methods may see breaking changes in patch releases and may not be supported in perpetuity. + +- [slotsUpdatesSubscribe](jsonrpc-api.md#slotsupdatessubscribe---unstable) +- [slotsUpdatesUnsubscribe](jsonrpc-api.md#slotsupdatesunsubscribe) +- [voteSubscribe](jsonrpc-api.md#votesubscribe---unstable-disabled-by-default) +- [voteUnsubscribe](jsonrpc-api.md#voteunsubscribe) + ### Deprecated Methods - [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) @@ -195,11 +205,11 @@ health-check mechanism for use by load balancers or other network infrastructure. This request will always return a HTTP 200 OK response with a body of "ok", "behind" or "unknown" based on the following conditions: -1. If one or more `--trusted-validator` arguments are provided to `solana-validator`, "ok" is returned when the node has within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest - trusted validator, otherwise "behind". "unknown" is returned when no slot - information from trusted validators is not yet available. -2. "ok" is always returned if no trusted validators are provided. +1. If one or more `--known-validator` arguments are provided to `solana-validator`, "ok" is returned + when the node is within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest + known validator, otherwise "behind". "unknown" is returned when slot + information from known validators is not yet available. +2. "ok" is always returned if no known validators are provided. ## JSON RPC API Reference @@ -350,7 +360,7 @@ Result: ### getBlock **NEW: This method is only available in solana-core v1.7 or newer. 
## JSON RPC API Reference

@@ -350,7 +360,7 @@ Result:

### getBlock

**NEW: This method is only available in solana-core v1.7 or newer. Please use
-[getBlock](jsonrpc-api.md#getblock) for solana-core v1.6**
+[getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) for solana-core v1.6**

Returns identity and transaction information about a confirmed block in the ledger

@@ -393,6 +403,7 @@ The result field will be an object with the following fields:

  - `lamports: ` - number of reward lamports credited or debited by the account, as an i64
  - `postBalance: ` - account balance in lamports after the reward was applied
  - `rewardType: ` - type of reward: "fee", "rent", "voting", "staking"
+ - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards
- `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available
- `blockHeight: ` - the number of blocks beneath this block

@@ -1184,10 +1195,10 @@ Result:

Returns the current health of the node.

-If one or more `--trusted-validator` arguments are provided to
+If one or more `--known-validator` arguments are provided to
`solana-validator`, "ok" is returned when the node is within
-`HEALTH_CHECK_SLOT_DISTANCE` slots of the highest trusted validator, otherwise
-an error is returned. "ok" is always returned if no trusted validators are
+`HEALTH_CHECK_SLOT_DISTANCE` slots of the highest known validator, otherwise
+an error is returned. "ok" is always returned if no known validators are
provided.

#### Parameters:

@@ -1363,6 +1374,7 @@ The result field will be a JSON array with the following fields:

- `effectiveSlot: `, the slot in which the rewards are effective
- `amount: `, reward amount in lamports
- `postBalance: `, post balance of the account in lamports
+- `commission: ` - vote account commission when the reward was credited

#### Example

@@ -1374,7 +1386,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
    "id": 1,
    "method": "getInflationReward",
    "params": [
-      ["6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2"], 2
+      ["6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2"], {"epoch": 2}
    ]
  }
'

@@ -2235,7 +2247,7 @@ Result:

### getSlot

-Returns the current slot the node is processing
+Returns the slot that has reached the [given or default commitment level](jsonrpc-api.md#configuring-state-commitment)

#### Parameters:

@@ -2401,7 +2413,9 @@ Returns information about the current supply.

#### Parameters:

-- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
+- `` - (optional) Configuration object containing the following optional fields:
+  - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
+  - (optional) `excludeNonCirculatingAccountsList: ` - exclude the non-circulating accounts list from the response

#### Results:

@@ -2410,7 +2424,7 @@ The result will be an RpcResponse JSON object with `value` equal to a JSON objec

- `total: ` - Total supply in lamports
- `circulating: ` - Circulating supply in lamports
- `nonCirculating: ` - Non-circulating supply in lamports
-- `nonCirculatingAccounts: ` - an array of account addresses of non-circulating accounts, as strings
+- `nonCirculatingAccounts: ` - an array of account addresses of non-circulating accounts, as strings. If `excludeNonCirculatingAccountsList` is enabled, the returned array will be empty.
#### Example:

@@ -2463,6 +2477,9 @@ The result will be an RpcResponse JSON object with `value` equal to a JSON objec

- `uiAmount: ` - the balance, using mint-prescribed decimals **DEPRECATED**
- `uiAmountString: ` - the balance as a string, using mint-prescribed decimals

+For more details on returned data: The
+[Token Balances Structure](jsonrpc-api.md#token-balances-structure) response from [getBlock](jsonrpc-api.md#getblock) follows a similar structure.
+
#### Example:

Request:

@@ -2519,6 +2536,8 @@ The result will be an RpcResponse JSON object with `value` equal to an array of

- `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: `, the epoch at which this account will next owe rent, as u64

+When the data is requested with the `jsonParsed` encoding, a format similar to that of the [Token Balances Structure](jsonrpc-api.md#token-balances-structure) can be expected inside the structure, both for the `tokenAmount` and the `delegatedAmount`, with the latter being an optional object.
+
#### Example:

```bash
@@ -2553,7 +2572,6 @@ Result:

        "data": {
          "program": "spl-token",
          "parsed": {
-            "accountType": "account",
            "info": {
              "tokenAmount": {
                "amount": "1",
                "decimals": 1,
                "uiAmount": 0.1,
                "uiAmountString": "0.1"
              },
              "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
-              "delegatedAmount": 1,
-              "isInitialized": true,
+              "delegatedAmount": {
+                "amount": "1",
+                "decimals": 1,
+                "uiAmount": 0.1,
+                "uiAmountString": "0.1"
+              },
+              "state": "initialized",
              "isNative": false,
              "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
              "owner": "CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"
-            }
-          }
+            },
+            "type": "account"
+          },
+          "space": 165
        },
        "executable": false,
        "lamports": 1726080,

@@ -2609,6 +2634,8 @@ The result will be an RpcResponse JSON object with `value` equal to an array of

- `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: `, the epoch at which this account will next owe rent, as u64

+When the data is requested with the `jsonParsed` encoding, a format similar to that of the [Token Balances Structure](jsonrpc-api.md#token-balances-structure) can be expected inside the structure, both for the `tokenAmount` and the `delegatedAmount`, with the latter being an optional object.
+
#### Example:

```bash
@@ -2651,14 +2678,21 @@ Result:

              "uiAmount": 0.1,
              "uiAmountString": "0.1"
            },
-            "delegate": null,
-            "delegatedAmount": 1,
-            "isInitialized": true,
+            "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
+            "delegatedAmount": {
+              "amount": "1",
+              "decimals": 1,
+              "uiAmount": 0.1,
+              "uiAmountString": "0.1"
+            },
+            "state": "initialized",
            "isNative": false,
            "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
            "owner": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F"
-          }
-        }
+          },
+          "type": "account"
+        },
+        "space": 165
      },
      "executable": false,
      "lamports": 1726080,

@@ -2811,6 +2845,7 @@ Returns transaction details for a confirmed transaction

  - `lamports: ` - number of reward lamports credited or debited by the account, as an i64
  - `postBalance: ` - account balance in lamports after the reward was applied
  - `rewardType: ` - type of reward: currently only "rent", other types may be added in the future
+ - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards

#### Example:

@@ -3004,7 +3039,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:

```json
-{"jsonrpc":"2.0","result":{"solana-core": "1.7.0"},"id":1}
+{"jsonrpc":"2.0","result":{"solana-core": "1.7.11"},"id":1}
```

### getVoteAccounts

@@ -3016,6 +3051,8 @@ Returns the account info and associated stake for all the voting accounts in the

- `` - (optional) Configuration object containing the following field:
  - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
  - (optional) `votePubkey: ` - Only return results for this validator vote address (base-58 encoded)
+  - (optional) `keepUnstakedDelinquents: ` - Do not filter out delinquent validators with no stake
+  - (optional) `delinquentSlotDistance: ` - Specify the number of slots behind the tip that a validator must fall to be considered delinquent. **NOTE:** For the sake of consistency between ecosystem products, _it is **not** recommended that this argument be specified._

#### Results:

@@ -3364,6 +3401,8 @@ Result:

#### Notification Format:

+The notification format is the same as seen in the [getAccountInfo](jsonrpc-api.md#getaccountinfo) RPC HTTP method.
+
Base58 encoding:
```json
{
  "jsonrpc": "2.0",
@@ -3496,7 +3535,14 @@ Result:

#### Notification Format:

-Base58 encoding:
+The notification will be an RpcResponse JSON object with value equal to:
+
+- `signature: ` - The transaction signature, base58 encoded.
+- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24)
+- `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)
+
+Example:
+
```json
{
  "jsonrpc": "2.0",
@@ -3536,7 +3582,6 @@ Unsubscribe from transaction logging

Request:
```json
{"jsonrpc":"2.0", "id":1, "method":"logsUnsubscribe", "params":[0]}
-
```

Result:
@@ -3613,6 +3658,8 @@ Result:

#### Notification Format:

+The notification format is a single program account object as seen in the [getProgramAccounts](jsonrpc-api.md#getprogramaccounts) RPC HTTP method.
+
Base58 encoding:
```json
{
  "jsonrpc": "2.0",
@@ -3747,7 +3794,12 @@ Result:
```

#### Notification Format:
+
+The notification will be an RpcResponse JSON object with value containing an object with:
+- `err: ` - Error if transaction failed, null if transaction succeeded.
[TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24)
+
+Example:
+
```json
{
  "jsonrpc": "2.0",
  "method": "signatureNotification",
  "params": {
@@ -3817,7 +3869,14 @@ Result:

#### Notification Format:

-```bash
+The notification will be an object with the following fields:
+
+- `parent: ` - The parent slot
+- `root: ` - The current root slot
+- `slot: ` - The newly set slot value
+
+Example:
+```json
{
  "jsonrpc": "2.0",
  "method": "slotNotification",
  "params": {
@@ -3857,6 +3916,92 @@ Result:

{"jsonrpc": "2.0","result": true,"id": 1}
```

+### slotsUpdatesSubscribe - Unstable
+
+**This subscription is unstable; the format of this subscription may change in
+the future and it may not always be supported**
+
+Subscribe to receive a notification from the validator on a variety of updates
+on every slot
+
+#### Parameters:
+
+None
+
+#### Results:
+
+- `integer` - subscription id \(needed to unsubscribe\)
+
+#### Example:
+
+Request:
+```json
+{"jsonrpc":"2.0", "id":1, "method":"slotsUpdatesSubscribe"}
+```
+
+Result:
+```json
+{"jsonrpc": "2.0","result": 0,"id": 1}
+```
+
+#### Notification Format:
+
+The notification will be an object with the following fields:
+
+- `parent: ` - The parent slot
+- `slot: ` - The newly updated slot
+- `timestamp: ` - The Unix timestamp of the update
+- `type: ` - The update type, one of:
+  - "firstShredReceived"
+  - "completed"
+  - "createdBank"
+  - "frozen"
+  - "dead"
+  - "optimisticConfirmation"
+  - "root"
+
+Example:
+```json
+{
+  "jsonrpc": "2.0",
+  "method": "slotsUpdatesNotification",
+  "params": {
+    "result": {
+      "parent": 75,
+      "slot": 76,
+      "timestamp": 1625081266243,
+      "type": "optimisticConfirmation"
+    },
+    "subscription": 0
+  }
+}
+```
+
+### slotsUpdatesUnsubscribe
+
+Unsubscribe from slot-update notifications
+
+#### Parameters:
+
+- `` - subscription id to cancel
+
+#### Results:
+
+- `` - unsubscribe success message
+
+#### Example:
+
+Request:
+```json
+{"jsonrpc":"2.0", "id":1, "method":"slotsUpdatesUnsubscribe", "params":[0]}
+```
+
+Result:
+```json
+{"jsonrpc": "2.0","result": true,"id": 1}
+```
+
### rootSubscribe

Subscribe to receive notification anytime a new root is set by the validator.

@@ -3886,7 +4031,7 @@ Result:

The result is the latest root slot number.

-```bash
+```json
{
  "jsonrpc": "2.0",
  "method": "rootNotification",
@@ -3955,7 +4100,10 @@ Result:

#### Notification Format:

-The result is the latest vote, containing its hash, a list of voted slots, and an optional timestamp.
+The notification will be an object with the following fields:
+- `hash: ` - The vote hash
+- `slots: ` - The slots covered by the vote, as an array of u64 integers
+- `timestamp: ` - The timestamp of the vote

```json
{
@@ -4044,6 +4192,7 @@ The result field will be an object with the following fields:

  - `lamports: ` - number of reward lamports credited or debited by the account, as an i64
  - `postBalance: ` - account balance in lamports after the reward was applied
  - `rewardType: ` - type of reward: "fee", "rent", "voting", "staking"
+ - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards
- `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch).
null if not available

#### Example:

@@ -4187,9 +4336,9 @@ Result:
```

For more details on returned data:
-[Transaction Structure](jsonrpc-api.md#transactionstructure)
-[Inner Instructions Structure](jsonrpc-api.md#innerinstructionsstructure)
-[Token Balances Structure](jsonrpc-api.md#tokenbalancesstructure)
+[Transaction Structure](jsonrpc-api.md#transaction-structure)
+[Inner Instructions Structure](jsonrpc-api.md#inner-instructions-structure)
+[Token Balances Structure](jsonrpc-api.md#token-balances-structure)

### getConfirmedBlocks
diff --git a/docs/src/developing/on-chain-programs/overview.md b/docs/src/developing/on-chain-programs/overview.md
index b24f953d652a8c..e9cd7bd6150e23 100644
--- a/docs/src/developing/on-chain-programs/overview.md
+++ b/docs/src/developing/on-chain-programs/overview.md
@@ -94,7 +94,7 @@ attempts to use a float operation that is not supported, the runtime will
report an unresolved symbol error.

Float operations are performed via software libraries, specifically LLVM's float
-builtins. Due to be software emulated they consume more compute units than
+builtins. Because they are software emulated, they consume more compute units than
integer operations. In general, fixed point operations are recommended where
possible.

@@ -108,7 +108,7 @@ To run the test, sync the repo, and run:

Recent results show the float operations take more instructions compared to
integer equivalents. Fixed point implementations may vary but will also be
-less then the float equivalents:
+less than the float equivalents:

```
  u64   f32
diff --git a/docs/src/developing/programming-model/accounts.md b/docs/src/developing/programming-model/accounts.md
index 283252806e6d08..2c60c6373ac8d2 100644
--- a/docs/src/developing/programming-model/accounts.md
+++ b/docs/src/developing/programming-model/accounts.md
@@ -5,14 +5,14 @@ title: "Accounts"

## Storing State between Transactions

If the program needs to store state between transactions, it does so using
-_accounts_. Accounts are similar to files in operating systems such as Linux.
-Like a file, an account may hold arbitrary data and that data persists beyond
+_accounts_. Accounts are similar to files in operating systems such as Linux in
+that they may hold arbitrary data that persists beyond
the lifetime of a program. Also like a file, an account includes metadata that
tells the runtime who is allowed to access the data and how.

Unlike a file, the account includes metadata for the lifetime of the file. That
-lifetime is expressed in "tokens", which is a number of fractional native
-tokens, called _lamports_. Accounts are held in validator memory and pay
+lifetime is expressed by a number of fractional native
+tokens called _lamports_. Accounts are held in validator memory and pay
["rent"](#rent) to stay there. Each validator periodically scans all accounts
and collects rent. Any account that drops to zero lamports is purged. Accounts
can also be marked [rent-exempt](#rent-exemption) if they contain a sufficient
@@ -24,10 +24,10 @@ uses an _address_ to look up an account. The address is a 256-bit public key.

## Signers

Transactions may include digital [signatures](terminology.md#signature)
-corresponding to the accounts' public keys referenced by the transaction. When a
-corresponding digital signature is present it signifies that the holder of the
-account's private key signed and thus "authorized" the transaction and the
-account is then referred to as a _signer_. Whether an account is a signer or not
+corresponding to the accounts' public keys referenced by the transaction. Such
+signatures signify that the holder of the
+account's private key signed and thus "authorized" the transaction. In this case,
+the account is referred to as a _signer_. Whether an account is a signer or not
is communicated to the program as part of the account's metadata. Programs can
then use that information to make authority decisions.

@@ -41,21 +41,22 @@ modify a read-only account, the transaction is rejected by the runtime.

## Executable

-If an account is marked "executable" in its metadata then it is considered a
-program which can be executed by including the account's public key an
+If an account is marked "executable" in its metadata, then it is considered a
+program which can be executed by including the account's public key in an
instruction's [program id](transactions.md#program-id). Accounts are marked as
executable during a successful program deployment process by the loader that
-owns the account. For example, during BPF program deployment, once the loader
-has determined that the BPF bytecode in the account's data is valid, the loader
+owns the account. When a program is deployed to the execution engine (BPF deployment),
+the loader determines whether the bytecode in the account's data is valid.
+If so, the loader
permanently marks the program account as executable. Once executable, the
runtime enforces that the account's data (the program) is immutable.

## Creating

-To create an account a client generates a _keypair_ and registers its public key
-using the `SystemProgram::CreateAccount` instruction with preallocated a fixed
-storage size in bytes. The current maximum size of an account's data is 10
-megabytes.
+To create an account, a client generates a _keypair_ and registers its public key
+using the `SystemProgram::CreateAccount` instruction, preallocating a fixed
+storage size in bytes.
+The current maximum size of an account's data is 10 megabytes.

An account address can be any arbitrary 256 bit value, and there are mechanisms
for advanced users to create derived addresses
@@ -64,13 +65,15 @@ for advanced users to create derived addresses

Accounts that have never been created via the system program can also be passed
to programs. When an instruction references an account that hasn't been
-previously created the program will be passed an account that is owned by the
-system program, has zero lamports, and zero data. But, the account will reflect
-whether it is a signer of the transaction or not and therefore can be used as an
+previously created, the program will be passed an account with no data and zero lamports
+that is owned by the system program.
+
+Such newly created accounts reflect
+whether they sign the transaction and therefore can be used as an
authority. Authorities in this context convey to the program that the holder of
the private key associated with the account's public key signed the transaction.

The account's public key may be known to the program or recorded in another
-account and signify some kind of ownership or authority over an asset or
+account, signifying some kind of ownership or authority over an asset or
operation the program controls or performs.

## Ownership and Assignment to Programs

@@ -89,19 +92,22 @@ data and credit the account.

For security purposes, it is recommended that programs check the validity of
any account they read but do not modify.
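+
+A minimal sketch of such a check, as a hypothetical helper built on the
+`solana_program` crate (the error choice is illustrative, not prescribed by
+these docs):
+
+```rust
+use solana_program::{
+    account_info::AccountInfo, program_error::ProgramError, pubkey::Pubkey,
+};
+
+// Reject any account that is not owned by this program.
+fn check_owner(program_id: &Pubkey, account: &AccountInfo) -> Result<(), ProgramError> {
+    if account.owner != program_id {
+        return Err(ProgramError::IncorrectProgramId);
+    }
+    Ok(())
+}
+```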
-The security model enforces that an account's data can only be modified by the
-account's `Owner` program. Doing so allows the program to trust that the data
-passed to them via accounts they own will be in a known and valid state. The
-runtime enforces this by rejecting any transaction containing a program that
-attempts to write to an account it does not own. But, there are also cases
-where a program may merely read an account they think they own and assume the
-data has only been written by themselves and thus is valid. But anyone can
-issues instructions to a program, and the runtime does not know that those
-accounts are expected to be owned by the program. Therefore a malicious user
+This is because a malicious user
could create accounts with arbitrary data and then pass these accounts to the
-program in the place of a valid account. The arbitrary data could be crafted in
+program in place of valid accounts. The arbitrary data could be crafted in
a way that leads to unexpected or harmful program behavior.

+The security model enforces that an account's data can only be modified by the
+account's `Owner` program. This allows the program to trust that the data
+passed to it via accounts it owns is in a known and valid state. The
+runtime enforces this by rejecting any transaction containing a program that
+attempts to write to an account it does not own.
+
+If a program were to not check account validity, it might read an account
+it thinks it owns but doesn't. Anyone can
+issue instructions to a program, and the runtime does not know that those
+accounts are expected to be owned by the program.
+
To check an account's validity, the program should either check the account's
address against a known value or check that the account is indeed owned
correctly (usually owned by the program itself).

@@ -109,6 +115,7 @@ correctly (usually owned by the program itself).

One example is when programs use a sysvar account. Unless the program checks the
account's address or owner, it's impossible to be sure whether it's a real and
valid sysvar account merely by successful deserialization of the account's data.
+
Accordingly, the Solana SDK [checks the sysvar account's validity during
deserialization](https://github.com/solana-labs/solana/blob/a95675a7ce1651f7b59443eb146b356bc4b3f374/sdk/program/src/sysvar/mod.rs#L65).

An alternative and safer way to read a sysvar is via the sysvar's [`get()`
@@ -116,15 +123,14 @@ function](https://github.com/solana-labs/solana/blob/64bfc14a75671e4ec3fe969ded0
which doesn't require these checks.

If the program always modifies the account in question, the address/owner check
-isn't required because modifying an unowned (could be the malicious account with
-the wrong owner) will be rejected by the runtime, and the containing transaction
-will be thrown out.
+isn't required because modifying an unowned account will be rejected by the runtime,
+and the containing transaction will be thrown out.

## Rent

Keeping accounts alive on Solana incurs a storage cost called _rent_ because the
-cluster must actively maintain the data to process any future transactions on
-it. This is different from Bitcoin and Ethereum, where storing accounts doesn't
+blockchain cluster must actively maintain the data to process any future transactions.
+This is different from Bitcoin and Ethereum, where storing accounts doesn't
incur any costs.

The rent is debited from an account's balance by the runtime upon the first
@@ -190,8 +196,8 @@ if the transferred lamports are less than or equal to 2,439.

### Rent exemption

Alternatively, an account can be made entirely exempt from rent collection by
-depositing at least 2 years-worth of rent. This is checked every time an
-account's balance is reduced and rent is immediately debited once the balance
+depositing at least two years' worth of rent. This is checked every time an
+account's balance is reduced, and rent is immediately debited once the balance
goes below the minimum amount.
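+
+A program can query this minimum at runtime via the `Rent` sysvar. A minimal
+sketch using the `solana_program` crate:
+
+```rust
+use solana_program::{program_error::ProgramError, rent::Rent, sysvar::Sysvar};
+
+// Does `lamports` meet the rent-exempt minimum for an account of this size?
+fn is_rent_exempt(lamports: u64, data_len: usize) -> Result<bool, ProgramError> {
+    let rent = Rent::get()?; // reads the Rent sysvar without an account input
+    Ok(lamports >= rent.minimum_balance(data_len))
+}
+```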
Program executable accounts are required by the runtime to be rent-exempt to
diff --git a/docs/src/developing/programming-model/calling-between-programs.md b/docs/src/developing/programming-model/calling-between-programs.md
index 0dd75952829cbe..18a95cbd49fdc1 100644
--- a/docs/src/developing/programming-model/calling-between-programs.md
+++ b/docs/src/developing/programming-model/calling-between-programs.md
@@ -240,7 +240,7 @@ pub fn find_program_address(

Clients can use the `create_program_address` function to generate a destination
address. In this example, we assume that
-`create_program_address(&[&["escrow]], &escrow_program_id)` generates a valid
+`create_program_address(&[&["escrow"]], &escrow_program_id)` generates a valid
program address that is off the curve.

```rust,ignore
diff --git a/docs/src/developing/programming-model/transactions.md b/docs/src/developing/programming-model/transactions.md
index 72c85a5c278c04..efb60e879d7402 100644
--- a/docs/src/developing/programming-model/transactions.md
+++ b/docs/src/developing/programming-model/transactions.md
@@ -139,7 +139,7 @@ accounts are permanently marked as executable by the loader once they are
successfully deployed. The runtime will reject transactions that specify programs
that are not executable.

-Unlike on-chain programs, [Native Programs](developing/runtime-facilities/programs)
+Unlike on-chain programs, [Native Programs](developing/runtime-facilities/programs.md)
are handled differently in that they are built directly into the Solana runtime.

### Accounts
diff --git a/docs/src/developing/runtime-facilities/programs.md b/docs/src/developing/runtime-facilities/programs.md
index 74cda44ee06285..c66afe9841fb20 100644
--- a/docs/src/developing/runtime-facilities/programs.md
+++ b/docs/src/developing/runtime-facilities/programs.md
@@ -41,7 +41,7 @@ Create and manage accounts representing stake and rewards for delegations to
validators.

- Program id: `Stake11111111111111111111111111111111111111`
-- Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/VERSION_FOR_DOCS_RS/solana_stake_program/stake_instruction/enum.StakeInstruction.html)
+- Instructions: [StakeInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/stake/instruction/enum.StakeInstruction.html)

## Vote Program
diff --git a/docs/src/developing/runtime-facilities/sysvars.md b/docs/src/developing/runtime-facilities/sysvars.md
index 6eb4265fadaef3..3b5ed443ae33ab 100644
--- a/docs/src/developing/runtime-facilities/sysvars.md
+++ b/docs/src/developing/runtime-facilities/sysvars.md
@@ -17,6 +17,12 @@ The first is to query the sysvar at runtime via the sysvar's `get()` function:

let clock = Clock::get()
```

+The following sysvars support `get`:
+- Clock
+- EpochSchedule
+- Fees
+- Rent
+
The second is to pass the sysvar to the program as an account by including its
address as one of the accounts in the `Instruction` and then deserializing the
data during execution. Access to sysvar accounts is always _readonly_.

@@ -91,7 +97,9 @@ other instructions in the same transaction.
Read more information on

## RecentBlockhashes

The RecentBlockhashes sysvar contains the active recent blockhashes as well as
-their associated fee calculators. It is updated every slot.
+their associated fee calculators. It is updated every slot. Entries are ordered
+by descending block height, so the first entry holds the most recent block hash,
+and the last entry holds an old block hash.

- Address: `SysvarRecentB1ockHashes11111111111111111111`
- Layout:
diff --git a/docs/src/history.md b/docs/src/history.md
index 608afd301b7d12..b4ea570d904497 100644
--- a/docs/src/history.md
+++ b/docs/src/history.md
@@ -46,8 +46,8 @@ people were confused about whether they were the same project. The Loom team
decided it would rebrand. They chose the name Solana, a nod to a small beach
town North of San Diego called Solana Beach, where Anatoly, Greg and Stephen
lived and surfed for three years when they worked for Qualcomm. On March 28th,
-the team created the Solana Labs GitHub organization and renamed Greg's
-prototype Silk to Solana.
+the team created the Solana GitHub organization and renamed Greg's prototype
+Silk to Solana.

In June of 2018, the team scaled up the technology to run on cloud-based
networks and on July 19th, published a 50-node, permissioned, public testnet
diff --git a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md
index f19246a2730cf3..8abd88ad2c5247 100644
--- a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md
+++ b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md
@@ -8,7 +8,7 @@ Storage rent can be paid via one of two methods:

Method 1: Set it and forget it

-With this approach, accounts with two-years worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum-balance, the broader network benefits from reduced liquidity and the account holder can trust that their `Account::data` will be retained for continual access/usage.
+With this approach, accounts with two years' worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum balance, the broader network benefits from reduced liquidity and the account holder can rest assured that their `Account::data` will be retained for continual access/usage.

Method 2: Pay per byte
diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md
index 2ea3d3120cae6b..64ab8779260f52 100644
--- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md
+++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md
@@ -23,6 +23,6 @@ Running a Solana validation-client required relatively modest upfront hardware c

**Table 2** example high-end hardware setup for running a Solana client.

-Despite the low-barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for trusted validation services as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a true decentralized, permissionless network, these interested parties can become involved in the Solana network/economy via delegation of previously acquired tokens with a reliable validation node to earn a portion of the interest generated.
+Despite the low-barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for competent validation services as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a true decentralized, permissionless network, these interested parties can become involved in the Solana network/economy via delegation of previously acquired tokens with a reliable validation node to earn a portion of the interest generated.

Delegation of tokens to validation-clients provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature intends to create a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services.
diff --git a/docs/src/implemented-proposals/installer.md b/docs/src/implemented-proposals/installer.md
index 12fb2742dff82b..a32b59c183da7c 100644
--- a/docs/src/implemented-proposals/installer.md
+++ b/docs/src/implemented-proposals/installer.md
@@ -4,7 +4,7 @@ title: Cluster Software Installation and Updates

Currently users are required to build the solana cluster software themselves from the git repository and manually update it, which is error-prone and inconvenient.

-This document proposes an easy to use software install and updater that can be used to deploy pre-built binaries for supported platforms. Users may elect to use binaries supplied by Solana or any other party they trust. Deployment of updates is managed using an on-chain update manifest program.
+This document proposes an easy-to-use software installer and updater that can be used to deploy pre-built binaries for supported platforms. Users may elect to use binaries supplied by Solana or any other provider. Deployment of updates is managed using an on-chain update manifest program.

## Motivating Examples
diff --git a/docs/src/implemented-proposals/staking-rewards.md b/docs/src/implemented-proposals/staking-rewards.md
index f82d6594fe19dd..2d23555b1daf45 100644
--- a/docs/src/implemented-proposals/staking-rewards.md
+++ b/docs/src/implemented-proposals/staking-rewards.md
@@ -48,7 +48,7 @@ To become a Solana validator, one must deposit/lock-up some amount of SOL in a c

initial deposit.

-Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with a prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high-throughput should create incentive for validation clients to provide high-performant and reliable hardware. Combined with potential a minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge. To this end, Solana's testnet will lead into a "Tour de SOL" validation-client competition, focusing on throughput and uptime to rank and reward testnet validators.
+Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with a prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high-throughput should create incentive for validation clients to provide high-performant and reliable hardware. Combined with a potential minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge.

## Penalties
diff --git a/docs/src/inflation/terminology.md b/docs/src/inflation/terminology.md
index 5b60018e559a17..796547df0495f4 100644
--- a/docs/src/inflation/terminology.md
+++ b/docs/src/inflation/terminology.md
@@ -24,14 +24,14 @@ A deterministic description of token issuance over time. The Solana Foundation i

The inflation rate actually observed on the Solana network after accounting for other factors that might decrease the _Total Current Supply_. Note that it is not possible for tokens to be created outside of what is described by the _Inflation Schedule_.

-- While the _Inflation Schedule_ determines how the protocol issues SOL, this neglects the concurrent elimination of tokens in the ecosystem due to various factors. The primary token burning mechanism is the burning of a portion of each transaction fee. While $100\%$ of each transaction fee is currently being destroyed, it is planned on reducing this burn rate to $50\%$ of each transaction fee, with the remaining fee to be retained by the validator that processes the transaction.
+- While the _Inflation Schedule_ determines how the protocol issues SOL, this neglects the concurrent elimination of tokens in the ecosystem due to various factors. The primary token burning mechanism is the burning of a portion of each transaction fee. $50\%$ of each transaction fee is burned, with the remaining fee retained by the validator that processes the transaction.
- Additional factors such as loss of private keys and slashing events should also be considered in a holistic analysis of the _Effective Inflation Rate_. For example, it’s estimated that $10-20\%$ of all BTC have been lost and are unrecoverable and that networks may experience similar yearly losses at the rate of $1-2\%$.

### Staking Yield [%]

The rate of return (aka _interest_) earned on SOL staked on the network. It is often quoted as an annualized rate (e.g. "the network _staking yield_ is currently $10\%$ per year").

-- _Staking yield_ is of great interest to validators and token-holders holders who wish to delegate their tokens to avoid token dilution due to inflation (the extent of which is discussed below).
+- _Staking yield_ is of great interest to validators and token holders who wish to delegate their tokens to avoid token dilution due to inflation (the extent of which is discussed below).
- $100\%$ of inflationary issuances are to be distributed to staked token-holders in proportion to their staked SOL and to validators who charge a commission on the rewards earned by their delegated SOL.
- There may be future consideration for an additional split of inflation issuance with the introduction of _Archivers_ into the economy. _Archivers_ are network participants who provide a decentralized storage service and should also be incentivized with token distribution from inflation issuances for this service.
- Similarly, early designs specified a fixed percentage of inflationary issuance to be delivered to the Foundation treasury for operational expenses and future grants. However, inflation will be launching without any portion allocated to the Foundation.
- _Staking yield_ can be calculated from the _Inflation Schedule_ along with the fraction of the _Total Current Supply_ that is staked at any given time. The explicit relationship is given by:
diff --git a/docs/src/integrations/exchange.md b/docs/src/integrations/exchange.md
index b5eba2c33a7594..49f5f161ff0b19 100644
--- a/docs/src/integrations/exchange.md
+++ b/docs/src/integrations/exchange.md
@@ -13,8 +13,8 @@ operations with a bundled monitoring tool.

This setup enables you:

-- to have a trusted gateway to the Solana mainnet-beta cluster to get data and
-  submit withdrawal transactions
+- to have a self-administered gateway to the Solana mainnet-beta cluster to get
+  data and submit withdrawal transactions
- to have full control over how much historical block data is retained
- to maintain your service availability even if one node fails

@@ -30,14 +30,15 @@ To run an api node:

```bash
solana-validator \
  --ledger <LEDGER_PATH> \
+ --identity <VALIDATOR_IDENTITY_KEYPAIR> \
  --entrypoint <CLUSTER_ENTRYPOINT> \
  --expected-genesis-hash <EXPECTED_GENESIS_HASH> \
  --rpc-port 8899 \
  --no-voting \
  --enable-rpc-transaction-history \
  --limit-ledger-size \
-  --trusted-validator <VALIDATOR_ADDRESS> \
-  --no-untrusted-rpc
+  --known-validator <VALIDATOR_ADDRESS> \
+  --only-known-rpc
```

Customize `--ledger` to your desired ledger storage location, and `--rpc-port` to the port you want to expose.

@@ -55,7 +56,7 @@ default limit value used by `--limit-ledger-size`. More information about
selecting a custom limit value is [available
here](https://github.com/solana-labs/solana/blob/583cec922b6107e0f85c7e14cb5e642bc7dfb340/core/src/ledger_cleanup_service.rs#L15-L26).

-Specifying one or more `--trusted-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with trusted validators](../running-validator/validator-start.md#trusted-validators)
+Specifying one or more `--known-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with known validators](../running-validator/validator-start.md#known-validators)

Optional parameters to consider:

@@ -96,7 +97,7 @@ announcement. For security-related releases, more urgent action may be needed.

### Ledger Continuity

By default, each of your nodes will boot from a snapshot provided by one of your
-trusted validators. This snapshot reflects the current state of the chain, but
+known validators. This snapshot reflects the current state of the chain, but
does not contain the complete historical ledger. If one of your nodes exits and
boots from a new snapshot, there may be a gap in the ledger on that node.
In order to prevent this issue, add the `--no-snapshot-fetch` parameter to your

@@ -111,7 +112,7 @@ It is important to note that the amount of historical ledger available to your
nodes from the rest of the network is limited at any point in time. Once
operational if your validators experience significant downtime they may not be
able to catch up to the network and will need to download a new snapshot from a
-trusted validator. In doing so your validators will now have a gap in its
+known validator. In doing so, your validators will now have a gap in their
historical ledger data that cannot be filled.

### Minimizing Validator Port Exposure
diff --git a/docs/src/introduction.md b/docs/src/introduction.md
index b4b6d57ff3bcd7..7494a51f95d6cf 100644
--- a/docs/src/introduction.md
+++ b/docs/src/introduction.md
@@ -8,11 +8,11 @@ Solana is an open source project implementing a new, high-performance, permissio

## Why Solana?

-It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
+It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot rely upon one another. Once nodes can rely upon time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!

> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time.
Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078) -Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second. +Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't rely upon the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second. ## Documentation Overview diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js index 7bfa03c893c0ab..c170abc99c5b02 100644 --- a/docs/src/pages/index.js +++ b/docs/src/pages/index.js @@ -105,24 +105,6 @@ function Home() { {features && features.length > 0 && (
-            Announcing the Solana Season Hackathon
-            Jumpstart your next project on Solana & join the fastest growing
-            ecosystem in crypto
{features.map((props, idx) => (
diff --git a/docs/src/proposals/rip-curl.md b/docs/src/proposals/rip-curl.md
index 23b7247d2bde17..8e2ab9707a39b7 100644
--- a/docs/src/proposals/rip-curl.md
+++ b/docs/src/proposals/rip-curl.md
@@ -17,7 +17,7 @@ static content and less appealing for transaction processing. The
clients poll for transaction status instead of being notified, giving the
false impression of higher confirmation times. Furthermore, what clients can
poll for is limited, preventing them from making reasonable real-time decisions, such as
-recognizing a transaction is confirmed as soon as particular, trusted
+recognizing a transaction is confirmed as soon as particular, known
validators vote on it.

## Proposed Solution
diff --git a/docs/src/proposals/transactions-v2.md b/docs/src/proposals/transactions-v2.md
new file mode 100644
index 00000000000000..6e4dfa930a5d8e
--- /dev/null
+++ b/docs/src/proposals/transactions-v2.md
@@ -0,0 +1,302 @@
+# Transactions v2 - Address maps
+
+## Problem
+
+Messages transmitted to Solana validators must not exceed the IPv6 MTU size to
+ensure fast and reliable network transmission of cluster info over UDP.
+Solana's networking stack uses a conservative MTU size of 1280 bytes which,
+after accounting for headers, leaves 1232 bytes for packet data like serialized
+transactions.
+
+Developers building applications on Solana must design their on-chain program
+interfaces within the above transaction size limit constraint. One common
+work-around is to store state temporarily on-chain and consume that state in
+later transactions. This is the approach used by the BPF loader program for
+deploying Solana programs.
+
+However, this workaround doesn't work well when developers compose many on-chain
+programs in a single atomic transaction. With more composition comes more
+account inputs, each of which takes up 32 bytes. There is currently no available
+workaround for increasing the number of accounts used in a single transaction
+since each transaction must list all accounts that it needs to properly lock
+accounts for parallel execution. Therefore the current cap is about 35 accounts
+after accounting for signatures and other transaction metadata.
+
+## Proposed Solution
+
+Introduce a new on-chain program which stores account address maps and add a new
+transaction format which supports concise account references through the
+on-chain address maps.
+
+### Address Map Program
+
+Here we describe a program-based solution to the problem, whereby a protocol
+developer or end-user can create collections of related addresses on-chain for
+concise use in a transaction's account inputs. This approach is similar to page
+tables used in operating systems to succinctly map virtual addresses to physical
+memory.
+
+After addresses are stored on-chain in an address map account, they may be
+succinctly referenced in a transaction using a 1-byte u8 index rather than a
+full 32-byte address. This will require a new transaction format to make use of
+these succinct references as well as runtime handling for looking up and loading
+accounts from the on-chain mappings.
+
+#### State
+
+Address map accounts must be rent-exempt but may be closed with a one epoch
+deactivation period. Address maps must be activated before use.
+
+Since transactions use a u8 offset to look up mapped addresses, accounts can
+store up to 2^8 addresses each. Anyone may create an address map account of any
+size as long as it's big enough to store the necessary metadata. In addition to
+stored addresses, address map accounts must also track the latest count of
+stored addresses and an authority which must be a present signer for all
+appended map entries.
+
+Map additions require one slot to activate so each map should track how many
+addresses are still pending activation in their on-chain state:
+
+```rust
+struct AddressMap {
+    // authority must sign for each addition and to close the map account
+    authority: Pubkey,
+    // record a deactivation epoch to help validators know when to remove
+    // the map from their caches.
+    deactivation_epoch: Epoch,
+    // entries may not be modified once activated
+    activated: bool,
+    // list of entries, max capacity of u8::MAX
+    entries: Vec<Pubkey>,
+}
+```
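+
+As an illustration, resolving a transaction's 1-byte indexes against a loaded
+map could look like the following sketch (the helper is hypothetical;
+`AddressMap` is the state struct above):
+
+```rust
+use solana_program::pubkey::Pubkey;
+
+// Returns None if any index is out of bounds, which would fail sanitization.
+fn resolve_entries(map: &AddressMap, indexes: &[u8]) -> Option<Vec<Pubkey>> {
+    indexes
+        .iter()
+        .map(|&i| map.entries.get(i as usize).copied())
+        .collect()
+}
+```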
+
+#### Cleanup
+
+Once an address map gets stale and is no longer used, it can be reclaimed by the
+authority withdrawing lamports, but the remaining balance must be greater than
+two epochs of rent. This ensures that it takes at least one full epoch to
+deactivate a map.
+
+Maps may not be recreated because each new map must be created at a derived
+address using a monotonically increasing counter as a derivation seed.
+
+#### Cost
+
+Since address map accounts require caching and special handling in the runtime,
+they should incur higher costs for storage. Cost structure design will be added
+later.
+
+### Versioned Transactions
+
+In order to allow accounts to be referenced more succinctly, the structure of
+serialized transactions must be modified. The new transaction format should not
+affect transaction processing in the Solana VM beyond the increased capacity for
+accounts and program invocations. Invoked programs will be unaware of which
+transaction format was used.
+
+The new transaction format must be distinguished from the current transaction
+format. Current transactions can fit at most 19 signatures (64 bytes each) but
+the message header encodes `num_required_signatures` as a `u8`. Since the upper
+bit of the `u8` will never be set for a valid transaction, we can enable it to
+denote whether a transaction should be decoded with the versioned format or not.
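+
+A sketch of that check (names assumed, not from this proposal):
+
+```rust
+const VERSION_PREFIX_MASK: u8 = 0b1000_0000;
+
+fn is_versioned_message(first_byte: u8) -> bool {
+    // Legacy messages begin with `num_required_signatures`, which is always
+    // far below 128, so a set upper bit can only mean a versioned message.
+    first_byte & VERSION_PREFIX_MASK != 0
+}
+
+fn main() {
+    assert!(!is_versioned_message(3)); // legacy: 3 required signatures
+    assert!(is_versioned_message(0x80)); // versioned encoding follows
+}
+```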
+
+#### New Transaction Format
+
+```rust
+#[derive(Serialize, Deserialize)]
+pub struct Transaction {
+    #[serde(with = "short_vec")]
+    pub signatures: Vec<Signature>,
+    /// The message to sign.
+    pub message: Message,
+}
+
+// Uses custom serialization. If the first bit is set, a versioned message is
+// encoded starting from the next byte. If the first bit is not set, all bytes
+// are used to encode the original unversioned `Message` format.
+pub enum Message {
+    Unversioned(UnversionedMessage),
+    Versioned(VersionedMessage),
+}
+
+// use bincode varint encoding to use u8 instead of u32 for enum tags
+#[derive(Serialize, Deserialize)]
+pub enum VersionedMessage {
+    Current(Box<MessageV2>)
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct MessageV2 {
+    // unchanged
+    pub header: MessageHeader,
+
+    // unchanged
+    #[serde(with = "short_vec")]
+    pub account_keys: Vec<Pubkey>,
+
+    /// The last `address_maps.len()` number of readonly unsigned account_keys
+    /// should be loaded as address maps
+    #[serde(with = "short_vec")]
+    pub address_maps: Vec<AddressMap>,
+
+    // unchanged
+    pub recent_blockhash: Hash,
+
+    // unchanged. Account indices are still `u8` encoded so the max number of accounts
+    // in account_keys + address_maps is limited to 256.
+    #[serde(with = "short_vec")]
+    pub instructions: Vec<CompiledInstruction>,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct AddressMap {
+    /// The last num_readonly_entries of entries are read-only
+    pub num_readonly_entries: u8,
+
+    /// List of map entries to load
+    #[serde(with = "short_vec")]
+    pub entries: Vec<u8>,
+}
+```
+
+#### Size changes
+
+- 1 byte for `prefix` field
+- 1 byte for version enum discriminant
+- 1 byte for `address_maps` length
+- Each map requires 2 bytes for `entries` length and `num_readonly`
+- Each map entry is 1 byte (u8)
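+
+As a worked example (illustrative numbers, not from the proposal): a
+transaction that references 6 accounts through a single address map, 2 of them
+read-only, spends 1 + 1 + 1 + 2 + 6 = 11 bytes on these new fields, versus
+6 × 32 = 192 bytes to list the same six addresses in full.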
+
+#### Cost changes
+
+Using an address map in a transaction should incur an extra cost due to
+the extra work validators need to do to load and cache them.
+
+#### Metadata changes
+
+Each account accessed via an address map should be stored in the transaction
+metadata for quick reference. This will avoid the need for clients to make
+multiple RPC round trips to fetch all accounts referenced in a v2 transaction.
+It will also make it easier to use the ledger tool to analyze account access
+patterns.
+
+#### RPC changes
+
+Fetched transaction responses will likely require a new version field to
+indicate to clients which transaction structure to use for deserialization.
+Clients using pre-existing RPC methods will receive error responses when
+attempting to fetch a versioned transaction which will indicate that they
+must upgrade.
+
+The RPC API should also support an option for returning fully expanded
+transactions to abstract away the address map details from downstream clients.
+
+### Limitations
+
+- Max of 256 accounts may be specified in a transaction because u8 is used by compiled
+instructions to index into transaction message account keys.
+- Address maps can hold up to 256 addresses because references to map entries
+are encoded as `u8` in transactions.
+- Transaction signers may not be referenced with an address map; the full
+address of each signer must be serialized in the transaction. This ensures that
+the performance of transaction signature checks is not affected.
+- Hardware wallets will probably not be able to display details about accounts
+referenced through address maps due to inability to verify on-chain data.
+- Only single level address maps can be used. Recursive maps will not be supported.
+
+## Security Concerns
+
+### Resource consumption
+
+Enabling more account inputs in a transaction allows for more program
+invocations, write-locks, and data reads / writes. Before address maps are
+enabled, transaction-wide compute limits and increased costs for write locks and
+data reads are required.
+
+### Front running
+
+If the addresses listed within an address map account are modifiable, front
+running attacks could modify which mapped accounts are resolved for a later
+transaction. For this reason, we propose that any stored address is immutable
+and that address map accounts themselves may not be recreated.
+
+Additionally, a malicious actor could try to fork the chain immediately after a
+new address map account is added to a block. If successful, they could add a
+different unexpected map entry in the fork. In order to deter this attack,
+clients should wait for address maps to be finalized before using them in a
+transaction. Clients may also append integrity check instructions to the
+transaction which verify that the correct accounts are used.
+
+### Denial of service
+
+Address map accounts will be read very frequently and will therefore be a
+higher-profile target for denial of service attacks through write locks,
+similar to sysvar accounts.
+
+For this reason, special handling should be given to address map lookups.
+Address map lookups should not be affected by account read/write locks.
+
+### Duplicate accounts
+
+Transactions may not load an account more than once, whether directly through
+`account_keys` or indirectly through `address_maps`.
+
+## Other Proposals
+
+1) Account prefixes
+
+Needing to pre-register accounts in an on-chain address map is cumbersome
+because it adds an extra step for transaction processing. Instead, Solana
+transactions could use variable length address prefixes to specify accounts.
+These prefix shortcuts can save on data usage without needing to setup on-chain
+state.
+
+However, this model requires nodes to keep a mapping of prefixes to active account
+addresses. Attackers can create accounts with the same prefix as a popular account
+to disrupt transactions.
+
+2) Transaction builder program
+
+Solana can provide a new on-chain program which allows "Big" transactions to be
+constructed on-chain by normal transactions. Once the transaction is
+constructed, a final "Execute" transaction can trigger a node to process the big
+transaction as a normal transaction without needing to fit it into an MTU sized
+packet.
+
+The UX of this approach is tricky. A user could in theory sign a big transaction
+but it wouldn't be great if they had to use their wallet to sign multiple
+transactions to build that transaction that they already signed and approved. This
+could be a use-case for transaction relay services, though. A user could pay a
+relayer to construct the large pre-signed transaction on-chain for them.
+
+In order to prevent the large transaction from being reconstructed and replayed,
+its message hash will need to be added to the status cache when executed.
+
+3) Epoch account indexes
+
+Similarly to leader schedule calculation, validators could create a global index
+of the most accessed accounts in the previous epoch and make that index
+available to transactions in the following epoch.
+
+This approach has a downside of only updating the index at epoch boundaries
+which means there would be a few day delay before popular new accounts could be
+referenced. It also needs to be consistently generated by all validators by
+using some criteria like adding accounts in order by access count.
+
+4) Address lists
+
+Extend the transaction structure to support addresses that, when loaded, expand
+to a list of addresses. After expansion, all account inputs are concatenated to
+form a single list of account keys which can be indexed into by instructions.
+Address lists would likely need to be immutable to prevent attacks. They would
+also need to be limited in length to limit resource consumption.
+
+This proposal can be thought of as a special case of the proposed index account
+approach. Since the full account list would be expanded, there's no need to add
+additional offsets that use up the limited space in a serialized transaction.
+However, the expected size of an address list may need to be encoded into the
+transaction to aid the sanitization of account indexes. We would also need to
+encode how many addresses in the list should be loaded as readonly vs
+read-write. Lastly, special attention must be given to watch out for addresses
+that exist in multiple account lists.
diff --git a/docs/src/running-validator.md b/docs/src/running-validator.md
index fd6699ca58e15d..177cbdb19de0bb 100644
--- a/docs/src/running-validator.md
+++ b/docs/src/running-validator.md
@@ -2,6 +2,6 @@ title: Running a Validator
---

-This section describes how run a Solana validator node.
+This section describes how to run a Solana validator node.

-There are several clusters available to connect to, see [choosing a Cluster](cli/choose-a-cluster.md) for an overview of each.
+There are several clusters available to connect to; see [choosing a Cluster](cli/choose-a-cluster.md) for an overview of each.
diff --git a/docs/src/running-validator/restart-cluster.md b/docs/src/running-validator/restart-cluster.md
index 1772c148504172..caf48b4f8a6726 100644
--- a/docs/src/running-validator/restart-cluster.md
+++ b/docs/src/running-validator/restart-cluster.md
@@ -54,9 +54,9 @@ Post something like the following to #announcements (adjusting the text as appro
>   --hard-fork SLOT_X                                 # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART
>   --no-snapshot-fetch                                # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART
>   --entrypoint entrypoint.testnet.solana.com:8001
->   --trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on
+>   --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on
>   --expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY
->   --no-untrusted-rpc
+>   --only-known-rpc
>   --limit-ledger-size
>   ...                                                # <-- your other --identity/--vote-account/etc arguments
> ```
@@ -68,9 +68,9 @@ Post something like the following to #announcements (adjusting the text as appro
>   --wait-for-supermajority SLOT_X                    # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART
>   --expected-bank-hash NEW_BANK_HASH                 # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART
>   --entrypoint entrypoint.testnet.solana.com:8001
->   --trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on
+>   --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on
>   --expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY
->   --no-untrusted-rpc
+>   --only-known-rpc
>   --limit-ledger-size
>   ...                                                # <-- your other --identity/--vote-account/etc arguments
> ```
diff --git a/docs/src/running-validator/validator-reqs.md b/docs/src/running-validator/validator-reqs.md
index fc1559a06e1602..0ab295e6e14844 100644
--- a/docs/src/running-validator/validator-reqs.md
+++ b/docs/src/running-validator/validator-reqs.md
@@ -85,12 +85,29 @@ releases at [solanalabs/solana](https://hub.docker.com/r/solanalabs/solana).
- We build and run on Ubuntu 20.04.
- See [Installing Solana](../cli/install-solana-cli-tools.md) for the current Solana software release.

-Be sure to ensure that the machine used is not behind a residential NAT to avoid
-NAT traversal issues. A cloud-hosted machine works best. **Ensure that IP ports 8000 through 10000 are not blocked for Internet inbound and outbound traffic.**
-
Prebuilt binaries are available for Linux x86_64 on CPUs supporting AVX2 \(Ubuntu 20.04 recommended\).
MacOS or WSL users may build from source.

+## Networking
+Internet service should be at least 300Mbit/s symmetric, commercial. 1GBit/s is preferred.
+
+### Port Forwarding
+The following ports need to be open to the internet for both inbound and outbound traffic.
+
+It is not recommended to run a validator behind a NAT. Operators who choose to
+do so should be comfortable configuring their networking equipment and debugging
+any traversal issues on their own.
+
+#### Required
+- 8000-10000 TCP/UDP - P2P protocols (gossip, turbine, repair, etc). This can
+be limited to any contiguous range of 11 free ports with `--dynamic-port-range`.
+
+#### Optional
+For security purposes, it is not suggested that the following ports be open to
+the internet on staked, mainnet-beta validators.
+- 8899 TCP - JSONRPC over HTTP. Change with `--rpc-port RPC_PORT`
+- 8900 TCP - JSONRPC over Websockets. Derived; uses `RPC_PORT + 1`.
+
## GPU Requirements

CUDA is required to make use of the GPU on your system. The provided Solana
diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md
index 37049ca571abf4..b25df957388904 100644
--- a/docs/src/running-validator/validator-start.md
+++ b/docs/src/running-validator/validator-start.md
@@ -60,7 +60,7 @@ the latest recommended settings are applied. To run it:

```bash
-sudo solana-sys-tuner --user $(whoami) > sys-tuner.log 2>&1 &
+sudo $(command -v solana-sys-tuner) --user $(whoami) > sys-tuner.log 2>&1 &
```

#### Manual
@@ -89,7 +89,7 @@ sudo sysctl -p /etc/sysctl.d/20-solana-udp-buffers.conf
```bash
sudo bash -c "cat >/etc/sysctl.d/20-solana-mmaps.conf </etc/security/limits.d/90-solana-nofiles.conf <
-If you know and respect other validator operators, you can specify this on the command line with the `--trusted-validator <PUBKEY>`
-argument to `solana-validator`. You can specify multiple ones by repeating the argument `--trusted-validator <PUBKEY1> --trusted-validator <PUBKEY2>`.
-This has two effects, one is when the validator is booting with `--no-untrusted-rpc`, it will only ask that set of
-trusted nodes for downloading genesis and snapshot data. Another is that in combination with the `--halt-on-trusted-validator-hash-mismatch` option,
-it will monitor the merkle root hash of the entire accounts state of other trusted nodes on gossip and if the hashes produce any mismatch,
+If you know and respect other validator operators, you can specify this on the command line with the `--known-validator <PUBKEY>`
+argument to `solana-validator`. You can specify multiple ones by repeating the argument `--known-validator <PUBKEY1> --known-validator <PUBKEY2>`.
+This has two effects. One is that when the validator is booting with `--only-known-rpc`, it will only ask that set of
+known nodes for downloading genesis and snapshot data. Another is that, in combination with the `--halt-on-known-validator-hash-mismatch` option,
+it will monitor the merkle root hash of the entire accounts state of other known nodes on gossip, and if the hashes produce any mismatch,
the validator will halt the node to prevent the validator from voting or processing potentially incorrect state values. At the moment, the slot that
-the validator publishes the hash on is tied to the snapshot interval. For the feature to be effective, all validators in the trusted
+the validator publishes the hash on is tied to the snapshot interval. For the feature to be effective, all validators in the known
set should be set to the same snapshot interval value or multiples of the same.
It is highly recommended you use these options to prevent malicious snapshot state download or
@@ -349,7 +349,7 @@ Type=simple
Restart=always
RestartSec=1
User=sol
-LimitNOFILE=700000
+LimitNOFILE=1000000
LogRateLimitIntervalSec=0
Environment="PATH=/bin:/usr/bin:/home/sol/.local/share/solana/install/active_release/bin"
ExecStart=/home/sol/bin/validator.sh
@@ -358,8 +358,13 @@ ExecStart=/home/sol/bin/validator.sh
WantedBy=multi-user.target
```

-Now create `/home/sol/bin/validator.sh` to include the desired `solana-validator`
-command-line. Ensure that running `/home/sol/bin/validator.sh` manually starts
+Now create `/home/sol/bin/validator.sh` to include the desired
+`solana-validator` command-line.
Ensure that the 'exec' command is used to
+start the validator process (i.e. "exec solana-validator ..."). This is
+important because without it, logrotate will end up killing the validator
+every time the logs are rotated.
+
+Ensure that running `/home/sol/bin/validator.sh` manually starts
the validator as expected. Don't forget to mark it executable with `chmod +x /home/sol/bin/validator.sh`

Start the service with:
@@ -416,6 +421,12 @@ sudo cp logrotate.sol /etc/logrotate.d/sol
systemctl restart logrotate.service
```

+As mentioned earlier, be sure that if you use logrotate, any script you create
+which starts the solana validator process uses "exec" to do so (example: "exec
+solana-validator ..."); otherwise, when logrotate sends its signal to the
+validator, the enclosing script will die and take the validator process with
+it.
+
### Disable port checks to speed up restarts

Once your validator is operating normally, you can reduce the time it takes to
diff --git a/docs/src/running-validator/validator-troubleshoot.md b/docs/src/running-validator/validator-troubleshoot.md
index 3afb484098630c..28e1679b95b713 100644
--- a/docs/src/running-validator/validator-troubleshoot.md
+++ b/docs/src/running-validator/validator-troubleshoot.md
@@ -11,12 +11,7 @@ testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq).
- [Testnet Metrics Dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=60s&orgId=2)
- Validator chat channels
  - [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries.
-  - [\#tourdesol](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants ([What is Tour de SOL?](https://solana.com/tds/)).
-  - [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL
-  - [\#tourdesol-stage0](https://discord.gg/Xf8tES) Discussion for events within Tour de SOL Stage 0. Stage 0 includes all the dry-run
+  - [\#testnet-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Testnet
- [Core software repo](https://github.com/solana-labs/solana)
-- [Tour de SOL Docs](https://docs.solana.com/tour-de-sol)
-- [TdS repo](https://github.com/solana-labs/tour-de-sol)
-- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds)

Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord.
diff --git a/docs/src/running-validator/vote-accounts.md b/docs/src/running-validator/vote-accounts.md
index 2d559f485c03fd..f948abc95a7143 100644
--- a/docs/src/running-validator/vote-accounts.md
+++ b/docs/src/running-validator/vote-accounts.md
@@ -165,6 +165,24 @@ change the validator identity. The follow steps assume that
to modify the validator identity in your vote account
4. Restart your validator with the new identity keypair for the `--identity` argument

+**Additional steps are required if your validator has stake.** The leader
+schedule is computed two epochs in advance. Therefore, if your old validator
+identity was in the leader schedule, it will remain in the leader schedule for
+up to two epochs after the validator identity change. If extra steps are not
+taken, your validator will produce no blocks until your new validator identity is
+added to the leader schedule.
+
+After your validator is restarted with the new identity keypair, per step 4,
+start a second non-voting validator on a different machine with the old identity keypair
+without providing the `--vote-account` argument.
+
+This temporary validator should be run for two full epochs. During this time it will:
+* Produce blocks for the remaining slots that are assigned to your old validator identity
+* Receive the transaction fees and rent rewards for your old validator identity
+
+It is safe to stop this temporary validator when your old validator identity is
+no longer listed in the `solana leader-schedule` output.
+
### Vote Account Authorized Voter

The _vote authority_ keypair may only be changed at epoch boundaries and
diff --git a/docs/src/staking.md b/docs/src/staking.md
index 05352e60a7cb0d..7a4cc60a2f1c22 100644
--- a/docs/src/staking.md
+++ b/docs/src/staking.md
@@ -6,57 +6,51 @@ _Note before reading: All references to increases in values are in absolute
terms with regards to balance of SOL.
This document makes no suggestion as to the monetary value of SOL at any time._

-Staking your SOL tokens on Solana is the best way you can help secure the world's
-highest-performing blockchain network, and
-[earn rewards](implemented-proposals/staking-rewards.md) for doing so!
+By staking your SOL tokens, you help secure the network and
+[earn rewards](implemented-proposals/staking-rewards.md) while doing so.

-Solana is a Proof-of-Stake (PoS) network with delegations, which means that
-anyone who holds SOL tokens can choose to delegate some of their SOL to one or
-more validators, who process transactions and run the network.
+You can stake by delegating your tokens to validators who process transactions and run the network.

Delegating stake is a shared-risk shared-reward financial model that may
provide returns to holders of tokens delegated for a long period.
This is achieved by aligning the financial incentives of the token-holders
(delegators) and the validators to whom they delegate.

-The more stake a validator has delegated to them, the more often this validator
+The more stake delegated to a validator, the more often this validator
is chosen to write new transactions to the ledger. The more transactions
-the validator writes, the more rewards they and their delegators earn.
+the validator writes, the more rewards the validator and its delegators earn.
Validators who configure their systems to be able to process more transactions
-at a time not only earn proportionally more rewards for doing so, they also
-keep the network running as fast and as smoothly as possible.
+earn proportionally more rewards and
+keep the network running as fast and as smoothly as possible.

Validators incur costs by running and maintaining their systems, and this is
passed on to delegators in the form of a fee collected as a percentage of
-rewards earned. This fee is known as a _commission_. As validators earn more
+rewards earned. This fee is known as a _commission_. Since validators earn more
rewards the more stake is delegated to them, they may compete with one another
-to offer the lowest commission for their services, in order to attract more
-delegated stake.
+to offer the lowest commission for their services.

-There is a risk of loss of tokens when staking, through a process known as
+You risk losing tokens when staking through a process known as
Slashing involves the removal and destruction of a portion of a validator's
delegated stake in response to intentional malicious behavior,
such as creating invalid transactions or censoring certain types of transactions
or network participants.

-If a validator is slashed, all token holders who have delegated stake to that
-validator will lose a portion of their delegation. While this means an immediate
+When a validator is slashed, all token holders who have delegated stake to that
+validator lose a portion of their delegation. While this means an immediate
loss for the token holder, it also is a loss of future rewards for the validator
due to their reduced total delegation. More details on the slashing roadmap can
be found
[here](proposals/optimistic-confirmation-and-slashing.md#slashing-roadmap).

-It is the goal of the network rewards and slashing to align both validators'
-and token holders' financial incentives, which in turn help keeps the network
-secure, robust and performing at its best.
+Rewards and slashing align validator and token holder interests, which helps keep the network
+secure, robust and performant.
+

## How do I stake my SOL tokens?

-In order to stake tokens on Solana, you first will need to transfer some SOL
-into a wallet that supports staking, then follow the steps or instructions
-provided by the wallet to create a stake account and delegate your stake.
-Different wallets will vary slightly in their process for this but the general
-description is below.
+You can stake SOL by moving your tokens
+into a wallet that supports staking. The wallet provides steps to create a stake account
+and delegate your stake.

#### Supported Wallets

@@ -81,22 +75,14 @@ Staking operations are supported by the following wallet solutions:

#### Create a Stake Account

-A stake account is a different type of account from a wallet address
-that is used to simply send and receive SOL tokens to other addresses. If you
-have received SOL in a wallet address you control, you can use some of
-these tokens to create and fund a new stake account, which will have a different
-address than the wallet you used to create it.
-Depending on which wallet you are using the steps to create a stake account
-may vary slightly. Not all wallets support stake accounts, see
-[Supported Wallets](#supported-wallets).
+Follow the wallet's instructions for creating a staking account. This account
+will be of a different type than one used to simply send and receive tokens.

#### Select a Validator

-After a stake account is created, you will likely want to delegate the SOL
-to a validator node. Below are a few places where you can get information about
-the validators who are currently participating in running the network.
-The Solana Labs team and the Solana Foundation do not recommend any particular
-validator.
+Follow the wallet's instructions for selecting a validator. You can get
+information about potentially performant validators from the links below.
+The Solana Foundation does not recommend any particular validator.

The Mainnet Beta validators introduce themselves and their services on this
Solana Forum thread:

@@ -116,13 +102,11 @@ To view block production statistics, use the Solana command-line tools:
- `solana block-production`

The Solana team does not make recommendations on how to interpret this
-information. Potential delegators should do their own due diligence.
+information. Do your own due diligence.
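+
+For example, with the Solana CLI installed, these statistics can be pulled
+directly from a terminal:
+
+```bash
+# List the current validators, their identities, and their active stake
+solana validators
+
+# Show recent block production statistics per validator identity
+solana block-production
+```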
#### Delegate your Stake

-Once you have decided to which validator or validators you will delegate, use
-a supported wallet to delegate your stake account to the validator's vote
-account address.
+Follow the wallet's instructions for delegating your stake to your chosen validator.

## Stake Account Details
diff --git a/docs/src/storage_rent_economics.md b/docs/src/storage_rent_economics.md
index 986467da8c4531..8405c8906f3a29 100644
--- a/docs/src/storage_rent_economics.md
+++ b/docs/src/storage_rent_economics.md
@@ -8,7 +8,7 @@ Storage rent can be paid via one of two methods:

Method 1: Set it and forget it

-With this approach, accounts with two-years worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum-balance, the broader network benefits from reduced liquidity and the account holder can trust that their `Account::data` will be retained for continual access/usage.
+With this approach, accounts with two years' worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum-balance, the broader network benefits from reduced liquidity and the account holder can rest assured that their `Account::data` will be retained for continual access/usage.

Method 2: Pay per byte
diff --git a/docs/src/tour-de-sol.md b/docs/src/tour-de-sol.md
deleted file mode 100644
index 20700728ff3061..00000000000000
--- a/docs/src/tour-de-sol.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Introduction
----
-
-## Welcome!
-
-This guide contains information about how to participate in Solana's Tour de SOL. Questions? Comments? Keep on reading!
-
-### Learn more about Tour de SOL
-
-If you haven't registered yet, complete the form at [https://solana.com/validator-registration/](https://solana.com/validator-registration/) first.
diff --git a/docs/src/tour-de-sol/participation/README.md b/docs/src/tour-de-sol/participation/README.md
deleted file mode 100644
index 60e27ea9d15249..00000000000000
--- a/docs/src/tour-de-sol/participation/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# Participation
diff --git a/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md b/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md
deleted file mode 100644
index ecb56d1a32f58b..00000000000000
--- a/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Steps to create a validator
----
-
-To create a Solana validator, follow the normal [validator workflow](../../running-validator/validator-start.md)
-targeting the [Testnet cluster](../../clusters.md).
-
-Note that Testnet validators are automatically staked by a process that runs
-every Epoch. If your validator is running correctly then in a couple of days it
-will be staked (and automatically destaked if offline for a prolonged period of
-time).
diff --git a/docs/src/tour-de-sol/participation/validator-public-key-registration.md b/docs/src/tour-de-sol/participation/validator-public-key-registration.md
deleted file mode 100644
index 1826aa6fa2dfea..00000000000000
--- a/docs/src/tour-de-sol/participation/validator-public-key-registration.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Create a validator public key
----
-
-In order to participate you need to first register. See [Registration info](../registration/how-to-register.md).
-
-In order to obtain your allotment of SOL you need to publish your
-validator's identity public key under your keybase.io account.
-
-## **Generate Keypair**
-
-1.
If you haven't already, generate your validator's identity keypair by running:
-
-   ```bash
-   solana-keygen new -o ~/validator-keypair.json
-   ```
-
-2. The identity public key can now be viewed by running:
-
-   ```bash
-   solana-keygen pubkey ~/validator-keypair.json
-   ```
-
-> Note: The "validator-keypair.json” file is also your \(ed25519\) private key.
-
-Your validator identity keypair uniquely identifies your validator within the network. **It is crucial to back-up this information.**
-
-If you don’t back up this information, you WILL NOT BE ABLE TO RECOVER YOUR VALIDATOR, if you lose access to it. If this happens, YOU WILL LOSE YOUR ALLOCATION OF SOL TOO.
-
-To back-up your validator identify keypair, **back-up your "validator-keypair.json” file to a secure location.**
-
-## Link your Solana pubkey to a Keybase account
-
-You must link your Solana pubkey to a Keybase.io account. The following instructions describe how to do that by installing Keybase on your server.
-
-1. Install [Keybase](https://keybase.io/download) on your machine.
-2. Log in to your Keybase account on your server. Create a Keybase account first if you don’t already have one. Here’s a [list of basic Keybase CLI commands](https://keybase.io/docs/command_line/basics).
-3. Create a Solana directory in your public file folder: `mkdir /keybase/public/<KEYBASE_USERNAME>/solana`
-4. Publish your validator's identity public key by creating an empty file in your Keybase public file folder in the following format: `/keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>`. For example:
-
-   ```bash
-   touch /keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>
-   ```
-
-5. To check your public key was published, ensure you can successfully browse to `https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>`
diff --git a/docs/src/tour-de-sol/participation/validator-technical-requirements.md b/docs/src/tour-de-sol/participation/validator-technical-requirements.md
deleted file mode 100644
index 9e824b4899df6b..00000000000000
--- a/docs/src/tour-de-sol/participation/validator-technical-requirements.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Requirements to run a validator
----
-
-## Hardware
-
-See [suggested hardware configuration here](../../running-validator/validator-reqs.md).
-
-## Software
-
-- We build and run on Ubuntu 20.04
diff --git a/docs/src/tour-de-sol/registration/README.md b/docs/src/tour-de-sol/registration/README.md
deleted file mode 100644
index 9c3eb458fa44d7..00000000000000
--- a/docs/src/tour-de-sol/registration/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# Registration
diff --git a/docs/src/tour-de-sol/registration/confidentiality.md b/docs/src/tour-de-sol/registration/confidentiality.md
deleted file mode 100644
index 17b65a40077942..00000000000000
--- a/docs/src/tour-de-sol/registration/confidentiality.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: Confidentiality
----
-
-**Section 8 of the** [**TOUR DE SOL PARTICIPATION TERMS**](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) **references confidentiality.**
-
-Solana doesn’t intend to share any confidential information during the Tour de SOL. However, if we do, it will be called out as such within our communications verbally, by email, etc. Unless information is specifically called out as such, the information should not be considered confidential and we welcome you to share it.
diff --git a/docs/src/tour-de-sol/registration/how-to-register.md b/docs/src/tour-de-sol/registration/how-to-register.md deleted file mode 100644 index 2ba86281aebd66..00000000000000 --- a/docs/src/tour-de-sol/registration/how-to-register.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: How To Register ---- - -For registration, KYC, and the participation agreement, please visit the -validator registration page on the Solana.com website: - -https://solana.com/validator-registration diff --git a/docs/src/tour-de-sol/registration/rewards.md b/docs/src/tour-de-sol/registration/rewards.md deleted file mode 100644 index e61cf447175ecb..00000000000000 --- a/docs/src/tour-de-sol/registration/rewards.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Compensation ---- - -## Compensation Calculation - -Compensation will be calculated according to the compensation design described in [this forum post](https://forums.solana.com/t/tour-de-sol-updates-to-tour-de-sol-and-bug-bounty-compensation-structure/1132). - -Please also see section “2\(f\) Tour de Sol Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for additional compensation details. - -## Requirements to Receive Rewards - -Participants must have signed the Tour de SOL participation agreement, -passed KYC/AML , as well as filled out the W-8 BEN or W-9 tax form -\(depending on your residency\) prior to participating in the Tour. - -A participant may participate in any and all stages that begin after they -complete registration. The final registrations dates will be announced -publicly on a stage-by-stage basis. - -## Tax Implications - -Participants are entering into a service agreement with Solana with discretionary compensation associated with the services. They are not considered to be a full-time employee of the company and therefore Solana is collecting W-9 and W-8 BEN forms in order to support tax reporting obligations, if applicable. Solana recommends participants consult with a tax accountant to understand any potential tax implications. - -Also, as stated in Sections 2i, 2k and 10c of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view): - -> 2i - To receive any SOL Reward, a Finalist must sign the Company’s standard form of SOL Reward Agreement which will include the terms and conditions governing the ownership and use of the SOL that are issued as the SOL Reward, including but not limited to applicable lockups required by securities laws, blackout dates and tax reporting information. - -> 2k - To receive any SOL Reward, a Finalist must sign the Company’s standard form of SOL Reward Agreement which will include the terms and conditions governing the ownership and use of the SOL that are issued as the SOL Reward, including but not limited to applicable lockups required by securities laws, blackout dates and tax reporting information. - -> 10c - You are responsible for complying with all laws and regulations applicable to your transactions on any Protocol, including, but not limited to, the Commodity Exchange Act and the regulations promulgated thereunder by the U.S. Commodity Futures Trading Commission \(“CFTC”\), the federal securities laws and the regulations promulgated thereunder by the U.S. Securities and Exchange Commission \(“SEC”\), and the tax laws applicable to any remuneration received by you from Company. 
diff --git a/docs/src/tour-de-sol/registration/terms-of-participation.md b/docs/src/tour-de-sol/registration/terms-of-participation.md deleted file mode 100644 index e04053a752a6df..00000000000000 --- a/docs/src/tour-de-sol/registration/terms-of-participation.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Terms of Participation ---- - -Please see the official [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) for complete details. diff --git a/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md b/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md deleted file mode 100644 index 0e37e8294a3626..00000000000000 --- a/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Registration FAQ ---- - -The [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) should be considered the authoritative resource for any participation questions. - -## Is registration mandatory? - -Yes. Registration is mandatory. Registration is rolling, we host month-long Tour de SOL stages on a monthly basis and new participants will need to wait until the start of the next stage to be on-boarded. [Registration information lives here](how-to-register.md). - -## Who’s eligible to participate? - -Please see section “1 Eligibility; KYC Requirements” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) for details. - -## Do I have to complete the KYC/AML process to participate? - -Yes. Completing the KYC/AML process is mandatory. - -## What are my responsibilities as a Tour de Sol participant? - -Please see section “2c Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details. - -## How is the “90% of the active Tour event time” responsibility calculated? - -To be eligible for rewards in a given stage, a validator must submit votes in >= 90% of that stage’s slots. - -## Is there a relationship between the Tour de Sol testnet tokens and Solana mainnet tokens? - -No. Please see section ”2d Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details. - -## Can a validator be disqualified from the Tour de Sol? - -Yes. A validator be disqualified from the Tour de SOL if they engage in prohibited conduct and/or fails to provide the minimum level of services described in question \#4 above. - -Please also see Section “4 Prohibited Conduct” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for a more detailed description of prohibited conduct. - -### More questions related to Prohibited Conduct: - -#### As referenced in section “4 Prohibited Conduct”, what would be an example of providing Tour Services from a jurisdiction other than the jurisdiction of my residence? Does this mean my server has to reside in the jurisdiction of my residence? - -No. Servers can be in other jurisdictions that differ from a participant’s residency. 
By signing the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) the participant has represented that they are delivering their services from the US if they reside there or from outside the US if they are not residing within the US.
-
-## How are rewards calculated?
-
-Please see the [Rewards section](rewards.md) for details
-
-## How will we know what information we can and cannot share publicly?
-
-Please see [Confidentiality](confidentiality.md).
diff --git a/docs/src/tour-de-sol/submitting-bugs.md b/docs/src/tour-de-sol/submitting-bugs.md
deleted file mode 100644
index f8450d686588f2..00000000000000
--- a/docs/src/tour-de-sol/submitting-bugs.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: Submitting Bugs
----
-
-Please submit all bugs and feedback as [issues in this Github repo](https://github.com/solana-labs/solana/issues).
-
-Given the fast pace of communication in the [Discord channels](useful-links.md), it’s likely issues reported in them may be lost in the information flow. Filing the issues in the Github repo is the only way to ensure the issues get logged and addressed.
diff --git a/docs/src/tour-de-sol/useful-links.md b/docs/src/tour-de-sol/useful-links.md
deleted file mode 100644
index fd52798bf695c9..00000000000000
--- a/docs/src/tour-de-sol/useful-links.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Useful Links & Discussion
-description: Where to go after you've read this guide
----
-
-- [Network Explorer](http://explorer.solana.com/)
-- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds)
-- [Core software repo](https://github.com/solana-labs/solana)
-- [Submit bugs and feedback in this repo](https://github.com/solana-labs/solana/issues)
-
-### Validator Discussions
-
-The Solana Discord server is where all the action happens. Join the server by
-visiting https://solana.com/discord. Check out the "Testnet (Tour de SOL)" channel
-group.
diff --git a/docs/src/wallet-guide/apps.md b/docs/src/wallet-guide/apps.md
index 552287acdb93f6..54788d05460a92 100644
--- a/docs/src/wallet-guide/apps.md
+++ b/docs/src/wallet-guide/apps.md
@@ -46,3 +46,19 @@ viewed at any later time in the app by following these steps:
be used to send and receive SOL tokens.

_Note: Coin98 does not support stake accounts or staking operations_
+
+## Zelcore
+
+[Zelcore](https://zelcore.io) is a multi-currency wallet now supporting SOL and all Solana tokens (SPL). Each Zelcore account has 3 separate addresses for each asset.
+Store, transact, connect to dapps, and (soon) DEX trade in a single app and hold SOL tokens alongside BTC, ETH, and 270+ other cryptos.
+
+Zelcore is available for [Desktop](https://zelcore.io), [iOS](https://apps.apple.com/us/app/zelcore/id1436296839), and [Android](https://play.google.com/store/apps/details?id=com.zelcash.zelcore&hl=en_US&gl=US). One account for all your cryptos, all your devices.
+
+Zelcore also uses a custom Sollet-based wallet adapter solution so users can connect to all their dapps with a single browser tab to keep things tidy.
+
+__**DEVS**__: integrate the Zelcore wallet into your products by implementing the Sollet solution, using "link.zelcore.io" as the connection URL.
+
+### Zelcore Wallet Security
+Zelcore utilizes a username/password schema to derive private keys for all assets.
Security layers include blockchain-based 2FA PIN, mobile device biometrics, and passphrases to allow users to set up as much or as little security as they like. All hashing functions are done on-device, and no login/account info is stored or transmitted off your device. The private keys only exist on-device while logged in; upon logging out, no digital footprint of your private keys is left behind.
+
+**Treat your username, password, 2FA PIN, passphrases, etc. with utmost care, just like your private keys. Never give these to anyone!**
diff --git a/docs/src/wallet-guide/paper-wallet.md b/docs/src/wallet-guide/paper-wallet.md
index 83c1e6b3f0c910..4b3e93c90a929a 100644
--- a/docs/src/wallet-guide/paper-wallet.md
+++ b/docs/src/wallet-guide/paper-wallet.md
@@ -82,6 +82,7 @@ For full usage details run:
solana-keygen new --help
```

+
### Public Key Derivation

Public keys can be derived from a seed phrase and a passphrase if you choose to
@@ -107,11 +108,17 @@ solana-keygen pubkey prompt:// --skip-seed-phrase-validation
```

After entering your seed phrase with `solana-keygen pubkey prompt://` the console
-will display a string of base-58 character. This is the base _wallet address_
+will display a string of base-58 characters. This is the [derived](#hierarchical-derivation) Solana BIP44 _wallet address_
associated with your seed phrase.

> Copy the derived address to a USB stick for easy usage on networked computers

+If needed, you can access the legacy, raw keypair's pubkey by instead passing the `ASK` keyword:
+
+```bash
+solana-keygen pubkey ASK
+```
+
> A common next step is to [check the balance](#checking-account-balance) of the account associated with a public key

For full usage details run:
diff --git a/dos/Cargo.toml b/dos/Cargo.toml
index 2e98c29bc506bc..56c937f01a4030 100644
--- a/dos/Cargo.toml
+++ b/dos/Cargo.toml
@@ -2,7 +2,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-dos"
-version = "1.7.0"
+version = "1.7.11"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,16 +14,17 @@ clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-core = { path = "../core", version = "=1.7.11" }
+solana-gossip = { path = "../gossip", version = "=1.7.11" }
+solana-ledger = { path = "../ledger", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-streamer = { path = "../streamer", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
+solana-client = { path = "../client", version = "=1.7.11" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
diff --git a/dos/src/main.rs
b/dos/src/main.rs
index 891f9c9fa3fc6b..14ddbbd0080958 100644
--- a/dos/src/main.rs
+++ b/dos/src/main.rs
@@ -6,6 +6,7 @@ use solana_client::rpc_client::RpcClient;
use solana_core::serve_repair::RepairProtocol;
use solana_gossip::{contact_info::ContactInfo, gossip_service::discover};
use solana_sdk::pubkey::Pubkey;
+use solana_streamer::socket::SocketAddrSpace;
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::str::FromStr;
@@ -96,14 +97,14 @@ fn run_dos(
                let res = rpc_client
                    .as_ref()
                    .unwrap()
-                    .get_account(&Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap());
+                    .get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap());
                if res.is_err() {
                    error_count += 1;
                }
            }
            "get_program_accounts" => {
                let res = rpc_client.as_ref().unwrap().get_program_accounts(
-                    &Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap(),
+                    &Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(),
                );
                if res.is_err() {
                    error_count += 1;
@@ -197,6 +198,13 @@ fn main() {
                .long("skip-gossip")
                .help("Just use entrypoint address directly"),
        )
+        .arg(
+            Arg::with_name("allow_private_addr")
+                .long("allow-private-addr")
+                .takes_value(false)
+                .help("Allow contacting private ip addresses")
+                .hidden(true),
+        )
        .get_matches();

    let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], 8001));
@@ -216,6 +224,7 @@ fn main() {
    let mut nodes = vec![];
    if !skip_gossip {
        info!("Finding cluster entry: {:?}", entrypoint_addr);
+        let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
        let (gossip_nodes, _validators) = discover(
            None, // keypair
            Some(&entrypoint_addr),
@@ -225,6 +234,7 @@
            Some(&entrypoint_addr), // find_node_by_gossip_addr
            None,                   // my_gossip_addr
            0,                      // my_shred_version
+            socket_addr_space,
        )
        .unwrap_or_else(|err| {
            eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml
index c04d32a3d80830..808f808fd5d7e6 100644
--- a/download-utils/Cargo.toml
+++ b/download-utils/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
-version = "1.7.0"
+version = "1.7.11"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,13 +11,13 @@ edition = "2018"

[dependencies]
bzip2 = "0.3.3"
-console = "0.11.3"
+console = "0.14.1"
indicatif = "0.15.0"
log = "0.4.11"
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-tar = "0.4.28"
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+tar = "0.4.37"

[lib]
crate-type = ["lib"]
diff --git a/download-utils/src/lib.rs b/download-utils/src/lib.rs
index da31b0d3434562..3fda667deab027 100644
--- a/download-utils/src/lib.rs
+++ b/download-utils/src/lib.rs
@@ -258,6 +258,7 @@ pub fn download_snapshot<'a, 'b>(
        ArchiveFormat::TarZstd,
        ArchiveFormat::TarGzip,
        ArchiveFormat::TarBzip2,
+        ArchiveFormat::Tar, // `solana-test-validator` creates uncompressed snapshots
    ] {
        let desired_snapshot_package = snapshot_utils::get_snapshot_archive_path(
            snapshot_output_dir.to_path_buf(),
@@ -269,7 +270,7 @@
            return Ok(());
        }

-        if download_file(
+        match download_file(
            &format!(
                "http://{}/{}",
                rpc_addr,
@@ -282,11 +283,13 @@
            &desired_snapshot_package,
            use_progress_bar,
            progress_notify_callback,
-        )
-        .is_ok()
-        {
-            return Ok(());
+        ) {
+            Ok(()) => return Ok(()),
+            Err(err) => info!("{}", err),
        }
    }

-    Err("Snapshot couldn't be downloaded".to_string())
+    Err(format!(
+        "Failed to download a snapshot for slot {} from {}",
+        desired_snapshot_hash.0, rpc_addr
+    ))
}
diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml
index 1488ebd25261a0..44488f66fc5c75 100644
--- a/faucet/Cargo.toml
+++ b/faucet/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
-version = "1.7.0"
+version = "1.7.11"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,12 +16,12 @@ clap = "2.33"
log = "0.4.11"
serde = "1.0.122"
serde_derive = "1.0.103"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-cli-config = { path = "../cli-config", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-cli-config = { path = "../cli-config", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-metrics = { path = "../metrics", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs
index c2831ee627d926..b253a6543567ef 100644
--- a/faucet/src/faucet.rs
+++ b/faucet/src/faucet.rs
@@ -654,7 +654,7 @@ mod tests {
    #[test]
    fn test_process_faucet_request() {
        let to = solana_sdk::pubkey::new_rand();
-        let blockhash = Hash::new(&to.as_ref());
+        let blockhash = Hash::new(to.as_ref());
        let lamports = 50;
        let req = FaucetRequest::GetAirdrop {
            lamports,
@@ -679,6 +679,6 @@
        assert_eq!(expected_vec_with_length, response_vec);

        let bad_bytes = "bad bytes".as_bytes();
-        assert!(faucet.process_faucet_request(&bad_bytes, ip).is_err());
+        assert!(faucet.process_faucet_request(bad_bytes, ip).is_err());
    }
}
diff --git a/faucet/tests/local-faucet.rs b/faucet/tests/local-faucet.rs
index 841255274365ff..8629c68ac25dfe 100644
--- a/faucet/tests/local-faucet.rs
+++ b/faucet/tests/local-faucet.rs
@@ -12,7 +12,7 @@ fn test_local_faucet() {
    let keypair = Keypair::new();
    let to = solana_sdk::pubkey::new_rand();
    let lamports = 50;
-    let blockhash = Hash::new(&to.as_ref());
+    let blockhash = Hash::new(to.as_ref());
    let create_instruction = system_instruction::transfer(&keypair.pubkey(), &to, lamports);
    let message = Message::new(&[create_instruction], Some(&keypair.pubkey()));
    let expected_tx = Transaction::new(&[&keypair], message, blockhash);
diff --git a/fetch-spl.sh b/fetch-spl.sh
index 3f419fcd843bae..cf0f2b41232964 100755
--- a/fetch-spl.sh
+++ b/fetch-spl.sh
@@ -38,10 +38,10 @@ fetch_program() {
}

-fetch_program token 3.1.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111
+fetch_program token 3.2.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111
fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111
fetch_program memo 3.0.0 MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr
BPFLoader2111111111111111111111111111111111
-fetch_program associated-token-account 1.0.1 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111
+fetch_program associated-token-account 1.0.3 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111
fetch_program feature-proposal 1.0.0 Feat1YXHhH6t1juaWF74WLcfv4XoNocjXA6sPWHNgAse BPFLoader2111111111111111111111111111111111

echo "${genesis_args[@]}" > spl-genesis-args.sh
diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml
index 5d425e3de93708..efba2cbd2b51af 100644
--- a/frozen-abi/Cargo.toml
+++ b/frozen-abi/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi"
-version = "1.7.0"
+version = "1.7.11"
description = "Solana Frozen ABI"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,11 +16,11 @@ log = "0.4.11"
serde = "1.0.122"
serde_derive = "1.0.103"
sha2 = "0.9.2"
-solana-frozen-abi-macro = { path = "macro", version = "=1.7.0" }
+solana-frozen-abi-macro = { path = "macro", version = "=1.7.11" }
thiserror = "1.0"

[target.'cfg(not(target_arch = "bpf"))'.dependencies]
-solana-logger = { path = "../logger", version = "=1.7.0" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"]}
memmap2 = "0.1.0"
diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml
index eaf31a2c375927..837369e3231be1 100644
--- a/frozen-abi/macro/Cargo.toml
+++ b/frozen-abi/macro/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi-macro"
-version = "1.7.0"
+version = "1.7.11"
description = "Solana Frozen ABI Macro"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs
index bd285a826c9acc..ee0a4fdf2b448a 100644
--- a/frozen-abi/macro/src/lib.rs
+++ b/frozen-abi/macro/src/lib.rs
@@ -224,7 +224,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream {
        if filter_serde_attrs(&variant.attrs) {
            continue;
        };
-        let sample_variant = quote_sample_variant(&type_name, &ty_generics, &variant);
+        let sample_variant = quote_sample_variant(type_name, &ty_generics, variant);
        variant_count = if let Some(variant_count) = variant_count.checked_add(1) {
            variant_count
        } else {
@@ -319,7 +319,7 @@ fn test_mod_name(type_name: &Ident) -> Ident {
#[cfg(RUSTC_WITH_SPECIALIZATION)]
fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream {
    let type_name = &input.ident;
-    let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
+    let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
    let result = quote! {
        #input
        #test
@@ -330,7 +330,7 @@
fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStream {
    let type_name = &input.ident;
-    let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
+    let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
    let result = quote!
{
        #input
        #test
@@ -387,7 +387,7 @@ fn quote_sample_variant(
#[cfg(RUSTC_WITH_SPECIALIZATION)]
fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream {
    let type_name = &input.ident;
-    let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
+    let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
    let result = quote! {
        #input
        #test
diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs
index ebb74e31ca2ada..b9bb57b3641914 100644
--- a/frozen-abi/src/abi_example.rs
+++ b/frozen-abi/src/abi_example.rs
@@ -468,7 +468,7 @@ impl AbiEnumVisitor for &T {
    default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult {
        info!("AbiEnumVisitor for (&default): {}", type_name::<T>());
        // Don't call self.visit_for_abi(...) to avoid the infinite recursion!
-        T::visit_for_abi(&self, digester)
+        T::visit_for_abi(self, digester)
    }
}
diff --git a/genesis-utils/Cargo.toml b/genesis-utils/Cargo.toml
index e555908cff4c63..5be6cb9410f062 100644
--- a/genesis-utils/Cargo.toml
+++ b/genesis-utils/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-utils"
-version = "1.7.0"
+version = "1.7.11"
description = "Solana Genesis Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils"
edition = "2018"

[dependencies]
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-download-utils = { path = "../download-utils", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-download-utils = { path = "../download-utils", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }

[lib]
crate-type = ["lib"]
diff --git a/genesis-utils/src/lib.rs b/genesis-utils/src/lib.rs
index 1efb26aedd3bf7..513da409ad20cd 100644
--- a/genesis-utils/src/lib.rs
+++ b/genesis-utils/src/lib.rs
@@ -28,7 +28,7 @@ fn load_local_genesis(
    ledger_path: &std::path::Path,
    expected_genesis_hash: Option<Hash>,
) -> Result<GenesisConfig, String> {
-    let existing_genesis = GenesisConfig::load(&ledger_path)
+    let existing_genesis = GenesisConfig::load(ledger_path)
        .map_err(|err| format!("Failed to load genesis config: {}", err))?;
    check_genesis_hash(&existing_genesis, expected_genesis_hash)?;
@@ -54,12 +54,12 @@ pub fn download_then_check_genesis_hash(
    {
        unpack_genesis_archive(
            &tmp_genesis_package,
-            &ledger_path,
+            ledger_path,
            max_genesis_archive_unpacked_size,
        )
        .map_err(|err| format!("Failed to unpack downloaded genesis config: {}", err))?;

-        let downloaded_genesis = GenesisConfig::load(&ledger_path)
+        let downloaded_genesis = GenesisConfig::load(ledger_path)
            .map_err(|err| format!("Failed to load downloaded genesis config: {}", err))?;

        check_genesis_hash(&downloaded_genesis, expected_genesis_hash)?;
diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml
index 851f1b50846efe..95f1009c024d6b 100644
--- a/genesis/Cargo.toml
+++ b/genesis/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.11"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,17 +16,16 @@ chrono = "0.4"
serde = "1.0.122"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
-solana-budget-program = { path = "../programs/budget", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils",
version = "=1.7.0" } -solana-cli-config = { path = "../cli-config", version = "=1.7.0" } -solana-exchange-program = { path = "../programs/exchange", version = "=1.7.0" } -solana-ledger = { path = "../ledger", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-cli-config = { path = "../cli-config", version = "=1.7.11" } +solana-exchange-program = { path = "../programs/exchange", version = "=1.7.11" } +solana-ledger = { path = "../ledger", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-stake-program = { path = "../programs/stake", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } tempfile = "3.1.0" [[bin]] diff --git a/genesis/src/genesis_accounts.rs b/genesis/src/genesis_accounts.rs index 61abf74d27d7e1..7bf3504402c8e0 100644 --- a/genesis/src/genesis_accounts.rs +++ b/genesis/src/genesis_accounts.rs @@ -231,20 +231,20 @@ pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lampo issued_lamports += add_stakes( genesis_config, - &CREATOR_STAKER_INFOS, + CREATOR_STAKER_INFOS, &UNLOCKS_HALF_AT_9_MONTHS, ) + add_stakes( genesis_config, - &SERVICE_STAKER_INFOS, + SERVICE_STAKER_INFOS, &UNLOCKS_ALL_AT_9_MONTHS, ) + add_stakes( genesis_config, - &FOUNDATION_STAKER_INFOS, + FOUNDATION_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, - ) + add_stakes(genesis_config, &GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + ) + add_stakes(genesis_config, GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + add_stakes( genesis_config, - &COMMUNITY_STAKER_INFOS, + COMMUNITY_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, ); diff --git a/genesis/src/main.rs b/genesis/src/main.rs index d86d38668e76e4..2fb7498337d1ad 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -1,8 +1,6 @@ //! A command-line executable for generating the chain's genesis config. 
#![allow(clippy::integer_arithmetic)]

-#[macro_use]
-extern crate solana_budget_program;
#[macro_use]
extern crate solana_exchange_program;
@@ -28,9 +26,10 @@ use solana_sdk::{
    pubkey::Pubkey,
    rent::Rent,
    signature::{Keypair, Signer},
+    stake::state::StakeState,
    system_program, timing,
};
-use solana_stake_program::stake_state::{self, StakeState};
+use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteState};
use std::{
    collections::HashMap,
@@ -492,7 +491,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
    );

    let native_instruction_processors = if cluster_type == ClusterType::Development {
-        vec![solana_budget_program!(), solana_exchange_program!()]
+        vec![solana_exchange_program!()]
    } else {
        vec![]
    };
@@ -535,9 +534,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
    );

    let vote_account = vote_state::create_account_with_authorized(
-        &identity_pubkey,
-        &identity_pubkey,
-        &identity_pubkey,
+        identity_pubkey,
+        identity_pubkey,
+        identity_pubkey,
        commission,
        VoteState::get_rent_exempt_reserve(&rent).max(1),
    );
@@ -547,8 +546,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
        stake_state::create_account(
            bootstrap_stake_authorized_pubkey
                .as_ref()
-                .unwrap_or(&identity_pubkey),
-            &vote_pubkey,
+                .unwrap_or(identity_pubkey),
+            vote_pubkey,
            &vote_account,
            &rent,
            bootstrap_validator_stake_lamports,
@@ -783,7 +782,7 @@ mod tests {
        let pubkey = &pubkey_str.parse().unwrap();
        assert_eq!(
            b64_account.balance,
-            genesis_config.accounts[&pubkey].lamports,
+            genesis_config.accounts[pubkey].lamports,
        );
    }
diff --git a/genesis/src/stakes.rs b/genesis/src/stakes.rs
index db9104b7ebf8ef..ca2b8da4d9791b 100644
--- a/genesis/src/stakes.rs
+++ b/genesis/src/stakes.rs
@@ -1,15 +1,22 @@
//! stakes generator
-use crate::{
-    address_generator::AddressGenerator,
-    unlocks::{UnlockInfo, Unlocks},
-};
-use solana_sdk::{
-    account::Account, clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, system_program,
-    timing::years_as_slots,
-};
-use solana_stake_program::{
-    self,
-    stake_state::{create_lockup_stake_account, Authorized, Lockup, StakeState},
+use {
+    crate::{
+        address_generator::AddressGenerator,
+        unlocks::{UnlockInfo, Unlocks},
+    },
+    solana_sdk::{
+        account::Account,
+        clock::Slot,
+        genesis_config::GenesisConfig,
+        pubkey::Pubkey,
+        stake::{
+            self,
+            state::{Authorized, Lockup, StakeState},
+        },
+        system_program,
+        timing::years_as_slots,
+    },
+    solana_stake_program::stake_state::create_lockup_stake_account,
};

#[derive(Debug)]
@@ -98,8 +105,7 @@ pub fn create_and_add_stakes(
        genesis_config.ticks_per_slot,
    );

-    let mut address_generator =
-        AddressGenerator::new(&authorized.staker, &solana_stake_program::id());
+    let mut address_generator = AddressGenerator::new(&authorized.staker, &stake::program::id());

    let stake_rent_reserve = StakeState::get_rent_exempt_reserve(&genesis_config.rent);
diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml
index 459c5220c53c4e..d85e2fb9d9da4b 100644
--- a/gossip/Cargo.toml
+++ b/gossip/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.11"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,6 +18,7 @@ indexmap = { version = "1.5", features = ["rayon"] }
itertools = "0.9.0"
log = "0.4.11"
lru = "0.6.1"
+matches = "0.1.8"
num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
@@ -25,22 +26,22 @@ rayon = "1.5.0"
serde = "1.0.122"
serde_bytes = "0.11"
serde_derive = "1.0.103"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" } -solana-client = { path = "../client", version = "=1.7.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.0" } -solana-ledger = { path = "../ledger", version = "=1.7.0" } -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } -solana-net-utils = { path = "../net-utils", version = "=1.7.0" } -solana-perf = { path = "../perf", version = "=1.7.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-streamer = { path = "../streamer", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.7.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" } +solana-client = { path = "../client", version = "=1.7.11" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.11" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.11" } +solana-ledger = { path = "../ledger", version = "=1.7.11" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } +solana-net-utils = { path = "../net-utils", version = "=1.7.11" } +solana-perf = { path = "../perf", version = "=1.7.11" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-streamer = { path = "../streamer", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } +solana-vote-program = { path = "../programs/vote", version = "=1.7.11" } thiserror = "1.0" [dev-dependencies] diff --git a/gossip/benches/weighted_shuffle.rs b/gossip/benches/weighted_shuffle.rs new file mode 100644 index 00000000000000..37097f3e626921 --- /dev/null +++ b/gossip/benches/weighted_shuffle.rs @@ -0,0 +1,39 @@ +#![feature(test)] + +extern crate test; + +use { + rand::{Rng, SeedableRng}, + rand_chacha::ChaChaRng, + solana_gossip::weighted_shuffle::{weighted_shuffle, WeightedShuffle}, + std::iter::repeat_with, + test::Bencher, +}; + +fn make_weights(rng: &mut R) -> Vec { + repeat_with(|| rng.gen_range(1, 100)).take(1000).collect() +} + +#[bench] +fn bench_weighted_shuffle_old(bencher: &mut Bencher) { + let mut seed = [0u8; 32]; + let mut rng = rand::thread_rng(); + let weights = make_weights(&mut rng); + bencher.iter(|| { + rng.fill(&mut seed[..]); + weighted_shuffle(&weights, seed); + }); +} + +#[bench] +fn bench_weighted_shuffle_new(bencher: &mut Bencher) { + let mut seed = [0u8; 32]; + let mut rng = rand::thread_rng(); + let weights = make_weights(&mut rng); + bencher.iter(|| { + rng.fill(&mut seed[..]); + WeightedShuffle::new(&mut ChaChaRng::from_seed(seed), &weights) + .unwrap() + .collect::>() + }); +} diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index d1889e2f8ec941..856144606bf998 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -14,9 +14,11 @@ //! 
Bank needs to provide an interface for us to query the stake weight use { crate::{ - cluster_info_metrics::{submit_gossip_stats, Counter, GossipStats, ScopedTimer}, + cluster_info_metrics::{ + submit_gossip_stats, Counter, GossipStats, ScopedTimer, TimedGuard, + }, contact_info::ContactInfo, - crds::Cursor, + crds::{Crds, Cursor}, crds_gossip::CrdsGossip, crds_gossip_error::CrdsGossipError, crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}, @@ -29,7 +31,7 @@ use { gossip_error::GossipError, ping_pong::{self, PingCache, Pong}, socketaddr, socketaddr_any, - weighted_shuffle::weighted_shuffle, + weighted_shuffle::WeightedShuffle, }, bincode::{serialize, serialized_size}, itertools::Itertools, @@ -62,6 +64,7 @@ use { solana_streamer::{ packet, sendmmsg::multicast, + socket::SocketAddrSpace, streamer::{PacketReceiver, PacketSender}, }, solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY, @@ -73,11 +76,12 @@ use { io::BufReader, iter::repeat, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket}, - ops::{Deref, DerefMut, Div}, + ops::{Deref, Div}, path::{Path, PathBuf}, result::Result, sync::{ atomic::{AtomicBool, Ordering}, + mpsc::{Receiver, RecvTimeoutError, Sender}, {Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}, }, thread::{sleep, Builder, JoinHandle}, @@ -136,78 +140,6 @@ pub enum ClusterInfoError { BadGossipAddress, } -struct GossipWriteLock<'a> { - gossip: RwLockWriteGuard<'a, CrdsGossip>, - timer: Measure, - counter: &'a Counter, -} - -impl<'a> GossipWriteLock<'a> { - fn new( - gossip: RwLockWriteGuard<'a, CrdsGossip>, - label: &'static str, - counter: &'a Counter, - ) -> Self { - Self { - gossip, - timer: Measure::start(label), - counter, - } - } -} - -impl<'a> Deref for GossipWriteLock<'a> { - type Target = RwLockWriteGuard<'a, CrdsGossip>; - fn deref(&self) -> &Self::Target { - &self.gossip - } -} - -impl<'a> DerefMut for GossipWriteLock<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.gossip - } -} - -impl<'a> Drop for GossipWriteLock<'a> { - fn drop(&mut self) { - self.counter.add_measure(&mut self.timer); - } -} - -struct GossipReadLock<'a> { - gossip: RwLockReadGuard<'a, CrdsGossip>, - timer: Measure, - counter: &'a Counter, -} - -impl<'a> GossipReadLock<'a> { - fn new( - gossip: RwLockReadGuard<'a, CrdsGossip>, - label: &'static str, - counter: &'a Counter, - ) -> Self { - Self { - gossip, - timer: Measure::start(label), - counter, - } - } -} - -impl<'a> Deref for GossipReadLock<'a> { - type Target = RwLockReadGuard<'a, CrdsGossip>; - fn deref(&self) -> &Self::Target { - &self.gossip - } -} - -impl<'a> Drop for GossipReadLock<'a> { - fn drop(&mut self) { - self.counter.add_measure(&mut self.timer); - } -} - pub struct ClusterInfo { /// The network pub gossip: RwLock, @@ -226,16 +158,11 @@ pub struct ClusterInfo { contact_save_interval: u64, // milliseconds, 0 = disabled instance: NodeInstance, contact_info_path: PathBuf, -} - -impl Default for ClusterInfo { - fn default() -> Self { - Self::new_with_invalid_keypair(ContactInfo::default()) - } + socket_addr_space: SocketAddrSpace, } #[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)] -struct PruneData { +pub(crate) struct PruneData { /// Pubkey of the node that sent this prune data pubkey: Pubkey, /// Pubkeys of nodes that should be pruned @@ -264,7 +191,7 @@ impl PruneData { destination: Pubkey::new_unique(), wallclock, }; - prune_data.sign(&self_keypair); + prune_data.sign(self_keypair); prune_data } } @@ -329,7 +256,7 @@ pub(crate) type Ping = 
ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; #[frozen_abi(digest = "GANv3KVkTYF84kmg1bAuWEZd9MaiYzPquuu13hup3379")] #[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)] #[allow(clippy::large_enum_variant)] -enum Protocol { +pub(crate) enum Protocol { /// Gossip protocol messages PullRequest(CrdsFilter, CrdsValue), PullResponse(Pubkey, Vec), @@ -460,12 +387,11 @@ fn retain_staked(values: &mut Vec, stakes: &HashMap) { } impl ClusterInfo { - /// Without a valid keypair gossip will not function. Only useful for tests. - pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self { - Self::new(contact_info, Arc::new(Keypair::new())) - } - - pub fn new(contact_info: ContactInfo, keypair: Arc) -> Self { + pub fn new( + contact_info: ContactInfo, + keypair: Arc, + socket_addr_space: SocketAddrSpace, + ) -> Self { let id = contact_info.id; let me = Self { gossip: RwLock::new(CrdsGossip::default()), @@ -485,12 +411,8 @@ impl ClusterInfo { instance: NodeInstance::new(&mut thread_rng(), id, timestamp()), contact_info_path: PathBuf::default(), contact_save_interval: 0, // disabled + socket_addr_space, }; - { - let mut gossip = me.gossip.write().unwrap(); - gossip.set_self(&id); - gossip.set_shred_version(me.my_shred_version()); - } me.insert_self(); me.push_self(&HashMap::new(), None); me @@ -498,8 +420,7 @@ impl ClusterInfo { // Should only be used by tests and simulations pub fn clone_with_id(&self, new_id: &Pubkey) -> Self { - let mut gossip = self.gossip.read().unwrap().mock_clone(); - gossip.id = *new_id; + let gossip = self.gossip.read().unwrap().mock_clone(); let mut my_contact_info = self.my_contact_info.read().unwrap().clone(); my_contact_info.id = *new_id; ClusterInfo { @@ -522,6 +443,7 @@ impl ClusterInfo { instance: NodeInstance::new(&mut thread_rng(), *new_id, timestamp()), contact_info_path: PathBuf::default(), contact_save_interval: 0, // disabled + ..*self } } @@ -529,6 +451,10 @@ impl ClusterInfo { self.contact_debug_interval = new; } + pub fn socket_addr_space(&self) -> &SocketAddrSpace { + &self.socket_addr_space + } + fn push_self( &self, stakes: &HashMap, @@ -547,10 +473,18 @@ impl ClusterInfo { .lock() .unwrap() .extend(entries); - self.gossip - .write() - .unwrap() - .refresh_push_active_set(stakes, gossip_validators); + let ContactInfo { + id: self_pubkey, + shred_version, + .. 
+ } = *self.my_contact_info.read().unwrap(); + self.gossip.write().unwrap().refresh_push_active_set( + &self_pubkey, + shred_version, + stakes, + gossip_validators, + &self.socket_addr_space, + ); } // TODO kill insert_info, only used by tests @@ -713,14 +647,15 @@ impl ClusterInfo { self.my_contact_info.read().unwrap().shred_version } - pub fn lookup_epoch_slots(&self, ix: EpochSlotsIndex) -> EpochSlots { - let label = CrdsValueLabel::EpochSlots(ix, self.id()); + fn lookup_epoch_slots(&self, ix: EpochSlotsIndex) -> EpochSlots { + let self_pubkey = self.id(); + let label = CrdsValueLabel::EpochSlots(ix, self_pubkey); let gossip = self.gossip.read().unwrap(); let entry = gossip.crds.get(&label); entry .and_then(|v| v.value.epoch_slots()) .cloned() - .unwrap_or_else(|| EpochSlots::new(self.id(), timestamp())) + .unwrap_or_else(|| EpochSlots::new(self_pubkey, timestamp())) } pub fn rpc_info_trace(&self) -> String { @@ -731,7 +666,7 @@ impl ClusterInfo { .all_peers() .into_iter() .filter_map(|(node, last_updated)| { - if !ContactInfo::is_valid_address(&node.rpc) { + if !ContactInfo::is_valid_address(&node.rpc, &self.socket_addr_space) { return None; } @@ -742,8 +677,8 @@ impl ClusterInfo { return None; } - fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String { - if ContactInfo::is_valid_address(addr) { + let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String { + if ContactInfo::is_valid_address(addr, &self.socket_addr_space) { if &addr.ip() == default_ip { addr.port().to_string() } else { @@ -752,7 +687,7 @@ impl ClusterInfo { } else { "none".to_string() } - } + }; let rpc_addr = node.rpc.ip(); Some(format!( @@ -796,7 +731,7 @@ impl ClusterInfo { .all_peers() .into_iter() .filter_map(|(node, last_updated)| { - let is_spy_node = Self::is_spy_node(&node); + let is_spy_node = Self::is_spy_node(&node, &self.socket_addr_space); if is_spy_node { total_spy_nodes = total_spy_nodes.saturating_add(1); } @@ -809,8 +744,8 @@ impl ClusterInfo { if is_spy_node { shred_spy_nodes = shred_spy_nodes.saturating_add(1); } - fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String { - if ContactInfo::is_valid_address(addr) { + let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String { + if ContactInfo::is_valid_address(addr, &self.socket_addr_space) { if &addr.ip() == default_ip { addr.port().to_string() } else { @@ -819,11 +754,11 @@ impl ClusterInfo { } else { "none".to_string() } - } + }; let ip_addr = node.gossip.ip(); Some(format!( "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n", - if ContactInfo::is_valid_address(&node.gossip) { + if ContactInfo::is_valid_address(&node.gossip, &self.socket_addr_space) { ip_addr.to_string() } else { "none".to_string() @@ -874,20 +809,20 @@ impl ClusterInfo { ) } - pub fn push_lowest_slot(&self, id: Pubkey, min: Slot) { - let now = timestamp(); - let last = self - .gossip - .read() - .unwrap() - .crds - .get(&CrdsValueLabel::LowestSlot(self.id())) - .and_then(|x| x.value.lowest_slot()) - .map(|x| x.lowest) - .unwrap_or(0); + pub fn push_lowest_slot(&self, min: Slot) { + let self_pubkey = self.id(); + let last = { + let gossip = self.gossip.read().unwrap(); + gossip + .crds + .get_lowest_slot(self_pubkey) + .map(|x| x.lowest) + .unwrap_or_default() + }; if min > last { + let now = timestamp(); let entry = CrdsValue::new_signed( - CrdsData::LowestSlot(0, LowestSlot::new(id, min, now)), + CrdsData::LowestSlot(0, LowestSlot::new(self_pubkey, min, now)), &self.keypair, ); 
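A note on the push-path changes in this area: push_epoch_slots (just below) now batches signed entries and inserts them into the crds table under a single write lock, and its new TODO flags that the index bookkeeping is a read-modify-write, so two concurrent callers can pick the same epoch_slot_index and overwrite each other's entries. A toy illustration of that hazard, with hypothetical types rather than the gossip code:

```rust
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
    thread,
};

fn main() {
    // Key -> value table standing in for the crds table keyed by the
    // EpochSlots index.
    let table = Arc::new(Mutex::new(HashMap::<u8, &'static str>::new()));
    let next_index = Arc::new(Mutex::new(0u8));
    let mut handles = Vec::new();
    for name in ["thread-a", "thread-b"] {
        let (table, next_index) = (Arc::clone(&table), Arc::clone(&next_index));
        handles.push(thread::spawn(move || {
            // Read the current index...
            let ix = *next_index.lock().unwrap();
            // ...without holding the lock across the write, so both threads
            // can observe the same ix and the second insert clobbers the
            // first: exactly the overwrite the TODO describes.
            table.lock().unwrap().insert(ix, name);
            *next_index.lock().unwrap() = ix + 1;
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
    // May print one entry instead of two, depending on the interleaving.
    println!("{:?}", table.lock().unwrap());
}
```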
self.local_message_pending_push_queue @@ -897,6 +832,8 @@ impl ClusterInfo { } } + // TODO: If two threads call into this function then epoch_slot_index has a + // race condition and the threads will overwrite each other in the crds table. pub fn push_epoch_slots(&self, mut update: &[Slot]) { let current_slots: Vec<_> = { let gossip = @@ -933,42 +870,49 @@ impl ClusterInfo { Some((_wallclock, _slot, index)) => *index, None => 0, }; + let self_pubkey = self.id(); + let mut entries = Vec::default(); while !update.is_empty() { let ix = (epoch_slot_index % crds_value::MAX_EPOCH_SLOTS) as u8; let now = timestamp(); let mut slots = if !reset { self.lookup_epoch_slots(ix) } else { - EpochSlots::new(self.id(), now) + EpochSlots::new(self_pubkey, now) }; let n = slots.fill(update, now); update = &update[n..]; if n > 0 { - let entry = CrdsValue::new_signed(CrdsData::EpochSlots(ix, slots), &self.keypair); - self.local_message_pending_push_queue - .lock() - .unwrap() - .push(entry); + let epoch_slots = CrdsData::EpochSlots(ix, slots); + let entry = CrdsValue::new_signed(epoch_slots, &self.keypair); + entries.push(entry); } epoch_slot_index += 1; reset = true; } + let mut gossip = self.gossip.write().unwrap(); + let now = timestamp(); + for entry in entries { + if let Err(err) = gossip.crds.insert(entry, now) { + error!("push_epoch_slots failed: {:?}", err); + } + } } fn time_gossip_read_lock<'a>( &'a self, label: &'static str, counter: &'a Counter, - ) -> GossipReadLock<'a> { - GossipReadLock::new(self.gossip.read().unwrap(), label, counter) + ) -> TimedGuard<'a, RwLockReadGuard> { + TimedGuard::new(self.gossip.read().unwrap(), label, counter) } fn time_gossip_write_lock<'a>( &'a self, label: &'static str, counter: &'a Counter, - ) -> GossipWriteLock<'a> { - GossipWriteLock::new(self.gossip.write().unwrap(), label, counter) + ) -> TimedGuard<'a, RwLockWriteGuard> { + TimedGuard::new(self.gossip.write().unwrap(), label, counter) } pub fn push_message(&self, message: CrdsValue) { @@ -1011,10 +955,10 @@ impl ClusterInfo { let vote = Vote::new(self_pubkey, vote, now); let vote = CrdsData::Vote(vote_index, vote); let vote = CrdsValue::new_signed(vote, &self.keypair); - self.gossip - .write() - .unwrap() - .process_push_message(&self_pubkey, vec![vote], now); + let mut gossip = self.gossip.write().unwrap(); + if let Err(err) = gossip.crds.insert(vote, now) { + error!("push_vote failed: {:?}", err); + } } pub fn push_vote(&self, tower: &[Slot], vote: Transaction) { @@ -1165,10 +1109,16 @@ impl ClusterInfo { .map(map) } + /// Returns epoch-slots inserted since the given cursor. + /// Excludes entries from nodes with unknown or different shred version.
pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec { + let self_shred_version = Some(self.my_shred_version()); let gossip = self.gossip.read().unwrap(); let entries = gossip.crds.get_epoch_slots(cursor); entries + .filter(|entry| { + gossip.crds.get_shred_version(&entry.value.pubkey()) == self_shred_version + }) .map(|entry| match &entry.value.data { CrdsData::EpochSlots(_, slots) => slots.clone(), _ => panic!("this should not happen!"), @@ -1177,38 +1127,28 @@ impl ClusterInfo { } pub fn get_node_version(&self, pubkey: &Pubkey) -> Option { - let version = self - .gossip - .read() - .unwrap() - .crds - .get(&CrdsValueLabel::Version(*pubkey)) - .map(|x| x.value.version()) - .flatten() - .map(|version| version.version.clone()); - - if version.is_none() { - self.gossip - .read() - .unwrap() - .crds - .get(&CrdsValueLabel::LegacyVersion(*pubkey)) - .map(|x| x.value.legacy_version()) - .flatten() - .map(|version| version.version.clone().into()) - } else { - version + let gossip = self.gossip.read().unwrap(); + let version = gossip.crds.get(&CrdsValueLabel::Version(*pubkey)); + if let Some(version) = version.and_then(|v| v.value.version()) { + return Some(version.version.clone()); } + let version = gossip.crds.get(&CrdsValueLabel::LegacyVersion(*pubkey))?; + let version = version.value.legacy_version()?; + Some(version.version.clone().into()) } /// all validators that have a valid rpc port regardless of `shred_version`. pub fn all_rpc_peers(&self) -> Vec { + let self_pubkey = self.id(); self.gossip .read() .unwrap() .crds .get_nodes_contact_info() - .filter(|x| x.id != self.id() && ContactInfo::is_valid_address(&x.rpc)) + .filter(|x| { + x.id != self_pubkey + && ContactInfo::is_valid_address(&x.rpc, &self.socket_addr_space) + }) .cloned() .collect() } @@ -1232,17 +1172,23 @@ impl ClusterInfo { .crds .get_nodes_contact_info() // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version) - .filter(|x| x.id != me && ContactInfo::is_valid_address(&x.gossip)) + .filter(|x| { + x.id != me && ContactInfo::is_valid_address(&x.gossip, &self.socket_addr_space) + }) .cloned() .collect() } /// all validators that have a valid tvu port regardless of `shred_version`. 
pub fn all_tvu_peers(&self) -> Vec { + let self_pubkey = self.id(); self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers) .crds .get_nodes_contact_info() - .filter(|x| ContactInfo::is_valid_address(&x.tvu) && x.id != self.id()) + .filter(|x| { + ContactInfo::is_valid_address(&x.tvu, &self.socket_addr_space) + && x.id != self_pubkey + }) .cloned() .collect() } @@ -1257,7 +1203,7 @@ impl ClusterInfo { .filter(|node| { node.id != self_pubkey && node.shred_version == self_shred_version - && ContactInfo::is_valid_address(&node.tvu) + && ContactInfo::is_valid_tvu_address(&node.tvu) }) .cloned() .collect() @@ -1275,7 +1221,7 @@ impl ClusterInfo { nodes .into_iter() .filter(|node| { - ContactInfo::is_valid_address(&node.serve_repair) + ContactInfo::is_valid_address(&node.serve_repair, &self.socket_addr_space) && match gossip.crds.get_lowest_slot(node.id) { None => true, // fallback to legacy behavior Some(lowest_slot) => lowest_slot.lowest <= slot, @@ -1287,94 +1233,24 @@ impl ClusterInfo { nodes } - fn is_spy_node(contact_info: &ContactInfo) -> bool { - !ContactInfo::is_valid_address(&contact_info.tpu) - || !ContactInfo::is_valid_address(&contact_info.gossip) - || !ContactInfo::is_valid_address(&contact_info.tvu) - } - - fn sorted_stakes_with_index( - peers: &[ContactInfo], - stakes: Option<&HashMap>, - ) -> Vec<(u64, usize)> { - let stakes_and_index: Vec<_> = peers - .iter() - .enumerate() - .map(|(i, c)| { - // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is - // assumed to be missing entry. So let's make sure stake weights are atleast 1 - let stake = 1.max( - stakes - .as_ref() - .map_or(1, |stakes| *stakes.get(&c.id).unwrap_or(&1)), - ); - (stake, i) - }) - .sorted_by(|(l_stake, l_info), (r_stake, r_info)| { - if r_stake == l_stake { - peers[*r_info].id.cmp(&peers[*l_info].id) - } else { - r_stake.cmp(&l_stake) - } - }) - .collect(); - - stakes_and_index - } - - fn stake_weighted_shuffle( - stakes_and_index: &[(u64, usize)], - seed: [u8; 32], - ) -> Vec<(u64, usize)> { - let stake_weights: Vec<_> = stakes_and_index.iter().map(|(w, _)| *w).collect(); - - let shuffle = weighted_shuffle(&stake_weights, seed); - - shuffle.iter().map(|x| stakes_and_index[*x]).collect() - } - - // Return sorted_retransmit_peers(including self) and their stakes - pub fn sorted_retransmit_peers_and_stakes( - &self, - stakes: Option<&HashMap>, - ) -> (Vec, Vec<(u64, usize)>) { - let mut peers = self.tvu_peers(); - // insert "self" into this list for the layer and neighborhood computation - peers.push(self.my_contact_info()); - let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes); - (peers, stakes_and_index) - } - - /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list - pub fn shuffle_peers_and_index( - id: &Pubkey, - peers: &[ContactInfo], - stakes_and_index: &[(u64, usize)], - seed: [u8; 32], - ) -> (usize, Vec<(u64, usize)>) { - let shuffled_stakes_and_index = ClusterInfo::stake_weighted_shuffle(stakes_and_index, seed); - let self_index = shuffled_stakes_and_index - .iter() - .enumerate() - .find_map(|(i, (_stake, index))| { - if peers[*index].id == *id { - Some(i) - } else { - None - } - }) - .unwrap(); - (self_index, shuffled_stakes_and_index) + fn is_spy_node(contact_info: &ContactInfo, socket_addr_space: &SocketAddrSpace) -> bool { + !ContactInfo::is_valid_address(&contact_info.tpu, socket_addr_space) + || !ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space) + || 
!ContactInfo::is_valid_address(&contact_info.tvu, socket_addr_space) } /// compute broadcast table pub fn tpu_peers(&self) -> Vec { + let self_pubkey = self.id(); self.gossip .read() .unwrap() .crds .get_nodes_contact_info() - .filter(|x| x.id != self.id() && ContactInfo::is_valid_address(&x.tpu)) + .filter(|x| { + x.id != self_pubkey + && ContactInfo::is_valid_address(&x.tpu, &self.socket_addr_space) + }) .cloned() .collect() } @@ -1387,33 +1263,44 @@ impl ClusterInfo { packet: &Packet, s: &UdpSocket, forwarded: bool, - ) -> Result<(), GossipError> { + socket_addr_space: &SocketAddrSpace, + ) { trace!("retransmit orders {}", peers.len()); let dests: Vec<_> = if forwarded { peers .iter() .map(|peer| &peer.tvu_forwards) - .filter(|addr| ContactInfo::is_valid_address(addr)) + .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) .collect() } else { - peers.iter().map(|peer| &peer.tvu).collect() + peers + .iter() + .map(|peer| &peer.tvu) + .filter(|addr| socket_addr_space.check(addr)) + .collect() }; - let mut sent = 0; - while sent < dests.len() { - match multicast(s, &packet.data[..packet.meta.size], &dests[sent..]) { - Ok(n) => sent += n, - Err(e) => { - inc_new_counter_error!( - "cluster_info-retransmit-send_to_error", - dests.len() - sent, - 1 - ); - error!("retransmit result {:?}", e); - return Err(GossipError::Io(e)); + let mut dests = &dests[..]; + let data = &packet.data[..packet.meta.size]; + while !dests.is_empty() { + match multicast(s, data, dests) { + Ok(n) => dests = &dests[n..], + Err(err) => { + inc_new_counter_error!("cluster_info-retransmit-send_to_error", dests.len(), 1); + error!("retransmit multicast: {:?}", err); + break; } } } - Ok(()) + let mut errs = 0; + for dest in dests { + if let Err(err) = s.send_to(data, dest) { + error!("retransmit send: {}, {:?}", dest, err); + errs += 1; + } + } + if errs != 0 { + inc_new_counter_error!("cluster_info-retransmit-error", errs, 1); + } } fn insert_self(&self) { @@ -1532,12 +1419,14 @@ impl ClusterInfo { match gossip.new_pull_request( thread_pool, self.keypair.deref(), + self.my_shred_version(), now, gossip_validators, stakes, MAX_BLOOM_SIZE, &self.ping_cache, &mut pings, + &self.socket_addr_space, ) { Err(_) => Vec::default(), Ok((peer, filters)) => vec![(peer, filters)], @@ -1576,7 +1465,10 @@ impl ClusterInfo { pub fn flush_push_queue(&self) { let pending_push_messages = self.drain_push_queue(); let mut gossip = self.gossip.write().unwrap(); - gossip.process_push_message(&self.id, pending_push_messages, timestamp()); + let now = timestamp(); + for entry in pending_push_messages { + let _ = gossip.crds.insert(entry, now); + } } fn new_push_requests( &self, @@ -1626,7 +1518,7 @@ impl ClusterInfo { generate_pull_requests: bool, require_stake_for_gossip: bool, ) -> Vec<(SocketAddr, Protocol)> { - self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes); + self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes); // This will flush local pending push messages before generating // pull-request bloom filters, preventing pull responses to return the // same values back to the node itself. 
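On the retransmit_to rewrite above: instead of treating the first multicast error as fatal for the whole batch, the loop now advances past whatever multicast managed to send, then retries the leftovers one socket send at a time and only counts the failures. A self-contained sketch of that send policy using std sockets; the multicast helper here is a stand-in for solana_streamer::sendmmsg::multicast, which similarly reports how many destinations succeeded:

```rust
use std::net::{SocketAddr, UdpSocket};

// Stand-in for sendmmsg::multicast: sends `data` to as many destinations as
// it can and reports how many succeeded before the first failure.
fn multicast(sock: &UdpSocket, data: &[u8], dests: &[&SocketAddr]) -> std::io::Result<usize> {
    for (i, dest) in dests.iter().enumerate() {
        if let Err(err) = sock.send_to(data, dest) {
            return if i == 0 { Err(err) } else { Ok(i) };
        }
    }
    Ok(dests.len())
}

fn retransmit(sock: &UdpSocket, data: &[u8], mut dests: &[&SocketAddr]) {
    // Phase 1: batch sends, advancing past whatever was delivered, until the
    // first hard error.
    while !dests.is_empty() {
        match multicast(sock, data, dests) {
            Ok(n) => dests = &dests[n..],
            Err(err) => {
                eprintln!("retransmit multicast: {:?}", err);
                break;
            }
        }
    }
    // Phase 2: whatever is left is retried individually; failures are counted
    // rather than propagated, so one bad address cannot stop the retransmit.
    let errs = dests
        .iter()
        .filter(|dest| sock.send_to(data, dest).is_err())
        .count();
    if errs != 0 {
        eprintln!("retransmit: {} failed sends", errs);
    }
}

fn main() -> std::io::Result<()> {
    let sock = UdpSocket::bind("127.0.0.1:0")?;
    let dest: SocketAddr = "127.0.0.1:9".parse().unwrap();
    retransmit(&sock, b"hello", &[&dest]);
    Ok(())
}
```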
Note that packets will arrive @@ -1637,7 +1529,7 @@ impl ClusterInfo { .add_relaxed(out.len() as u64); if generate_pull_requests { let (pings, pull_requests) = - self.new_pull_requests(&thread_pool, gossip_validators, stakes); + self.new_pull_requests(thread_pool, gossip_validators, stakes); self.stats .packets_sent_pull_requests_count .add_relaxed(pull_requests.len() as u64); @@ -1706,10 +1598,6 @@ impl ClusterInfo { entrypoint.shred_version, entrypoint.id ); self.my_contact_info.write().unwrap().shred_version = entrypoint.shred_version; - self.gossip - .write() - .unwrap() - .set_shred_version(entrypoint.shred_version); } } self.my_shred_version() != 0 @@ -1724,14 +1612,15 @@ impl ClusterInfo { bank_forks: Option<&RwLock>, stakes: &HashMap, ) { + let self_pubkey = self.id(); let epoch_duration = get_epoch_duration(bank_forks); let timeouts = { let gossip = self.gossip.read().unwrap(); - gossip.make_timeouts(stakes, epoch_duration) + gossip.make_timeouts(self_pubkey, stakes, epoch_duration) }; let num_purged = self .time_gossip_write_lock("purge", &self.stats.purge) - .purge(thread_pool, timestamp(), &timeouts); + .purge(&self_pubkey, thread_pool, timestamp(), &timeouts); inc_new_counter_info!("cluster_info-purge-count", num_purged); } @@ -1749,11 +1638,14 @@ impl ClusterInfo { .map(|k| k.id) .chain(std::iter::once(self.id)) .collect(); + self.stats.trim_crds_table.add_relaxed(1); let mut gossip = self.gossip.write().unwrap(); match gossip.crds.trim(cap, &keep, stakes, timestamp()) { Err(err) => { self.stats.trim_crds_table_failed.add_relaxed(1); - error!("crds table trim failed: {:?}", err); + // TODO: Stakes are coming from the root-bank. Debug why/when + // they are empty/zero. + debug!("crds table trim failed: {:?}", err); } Ok(num_purged) => { self.stats @@ -1769,9 +1661,8 @@ impl ClusterInfo { bank_forks: Option>>, sender: PacketSender, gossip_validators: Option>, - exit: &Arc, + exit: Arc, ) -> JoinHandle<()> { - let exit = exit.clone(); let thread_pool = ThreadPoolBuilder::new() .num_threads(std::cmp::min(get_thread_count(), 8)) .thread_name(|i| format!("ClusterInfo::gossip-{}", i)) @@ -1874,11 +1765,13 @@ impl ClusterInfo { ); let mut prune_message_timeout = 0; let mut bad_prune_destination = 0; + let self_pubkey = self.id(); { let gossip = self.time_gossip_read_lock("process_prune", &self.stats.process_prune); let now = timestamp(); for (from, data) in messages { match gossip.process_prune_msg( + &self_pubkey, &from, &data.destination, &data.prunes, @@ -1918,7 +1811,6 @@ impl ClusterInfo { return; } let self_pubkey = self.id(); - let self_shred_version = self.my_shred_version(); let requests: Vec<_> = thread_pool.install(|| { requests .into_par_iter() @@ -1930,17 +1822,7 @@ impl ClusterInfo { inc_new_counter_debug!("cluster_info-window-request-loopback", 1); false } - Some(caller) => { - if self_shred_version != 0 - && caller.shred_version != 0 - && caller.shred_version != self_shred_version - { - self.stats.skip_pull_shred_version.add_relaxed(1); - false - } else { - true - } - } + Some(_) => true, }) .map(|(from_addr, filter, caller)| PullData { from_addr, @@ -1953,8 +1835,13 @@ impl ClusterInfo { self.stats .pull_requests_count .add_relaxed(requests.len() as u64); - let response = - self.handle_pull_requests(recycler, requests, stakes, require_stake_for_gossip); + let response = self.handle_pull_requests( + thread_pool, + recycler, + requests, + stakes, + require_stake_for_gossip, + ); if !response.is_empty() { self.stats .packets_sent_pull_responses_count @@ -2013,7 +1900,7
@@ impl ClusterInfo { // incoming pull-requests, pings are also sent to request.from_addr (as // opposed to caller.gossip address). move |request| { - ContactInfo::is_valid_address(&request.from_addr) && { + ContactInfo::is_valid_address(&request.from_addr, &self.socket_addr_space) && { let node = (request.caller.pubkey(), request.from_addr); *cache.entry(node).or_insert_with(|| hard_check(node)) } @@ -2024,6 +1911,7 @@ impl ClusterInfo { // and tries to send back to them the values it detects are missing. fn handle_pull_requests( &self, + thread_pool: &ThreadPool, recycler: &PacketsRecycler, requests: Vec, stakes: &HashMap, @@ -2036,7 +1924,8 @@ impl ClusterInfo { .process_pull_requests(callers.cloned(), timestamp()); let output_size_limit = self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE; - let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests"); + let mut packets = + Packets::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests"); let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = { let mut rng = rand::thread_rng(); let check_pull_request = @@ -2055,7 +1944,7 @@ impl ClusterInfo { "generate_pull_responses", &self.stats.generate_pull_responses, ) - .generate_pull_responses(&caller_and_filters, output_size_limit, now); + .generate_pull_responses(thread_pool, &caller_and_filters, output_size_limit, now); if require_stake_for_gossip { for resp in &mut pull_responses { retain_staked(resp, stakes); @@ -2086,11 +1975,8 @@ impl ClusterInfo { if responses.is_empty() { return packets; } - let shuffle = { - let mut seed = [0; 32]; - rand::thread_rng().fill(&mut seed[..]); - weighted_shuffle(&scores, seed).into_iter() - }; + let mut rng = rand::thread_rng(); + let shuffle = WeightedShuffle::new(&mut rng, &scores).unwrap(); let mut total_bytes = 0; let mut sent = 0; for (addr, response) in shuffle.map(|i| &responses[i]) { @@ -2178,9 +2064,10 @@ impl ClusterInfo { .reduce(HashMap::new, merge) }); if !responses.is_empty() { + let self_pubkey = self.id(); let timeouts = { let gossip = self.gossip.read().unwrap(); - gossip.make_timeouts(&stakes, epoch_duration) + gossip.make_timeouts(self_pubkey, stakes, epoch_duration) }; for (from, data) in responses { self.handle_pull_response(&from, data, &timeouts); @@ -2192,22 +2079,11 @@ impl ClusterInfo { fn handle_pull_response( &self, from: &Pubkey, - mut crds_values: Vec, + crds_values: Vec, timeouts: &HashMap, ) -> (usize, usize, usize) { let len = crds_values.len(); trace!("PullResponse me: {} from: {} len={}", self.id, from, len); - let shred_version = self - .lookup_contact_info(from, |ci| ci.shred_version) - .unwrap_or(0); - Self::filter_by_shred_version( - from, - &mut crds_values, - shred_version, - self.my_shred_version(), - ); - let filtered_len = crds_values.len(); - let mut pull_stats = ProcessPullStats::default(); let (filtered_pulls, filtered_pulls_expired_timeout, failed_inserts) = self .time_gossip_read_lock("filter_pull_resp", &self.stats.filter_pull_response) @@ -2227,14 +2103,8 @@ impl ClusterInfo { &mut pull_stats, ); } - - self.stats - .skip_pull_response_shred_version - .add_relaxed((len - filtered_len) as u64); self.stats.process_pull_response_count.add_relaxed(1); - self.stats - .process_pull_response_len - .add_relaxed(filtered_len as u64); + self.stats.process_pull_response_len.add_relaxed(len as u64); self.stats .process_pull_response_timeout .add_relaxed(pull_stats.timeout_count as u64); @@ -2255,23 +2125,6 @@ impl ClusterInfo { ) } - fn 
filter_by_shred_version( - from: &Pubkey, - crds_values: &mut Vec, - shred_version: u16, - my_shred_version: u16, - ) { - // Always run filter on spies - if my_shred_version != 0 && shred_version != my_shred_version { - // Allow someone to update their own ContactInfo so they - // can change shred versions if needed. - crds_values.retain(|crds_value| match &crds_value.data { - CrdsData::ContactInfo(contact_info) => contact_info.id == *from, - _ => false, - }); - } - } - fn handle_batch_ping_messages( &self, pings: I, @@ -2308,7 +2161,7 @@ impl ClusterInfo { None } else { let packets = - Packets::new_with_recycler_data(recycler, "handle_ping_messages", packets); + Packets::new_unpinned_with_recycler_data(recycler, "handle_ping_messages", packets); Some(packets) } } @@ -2344,44 +2197,10 @@ impl ClusterInfo { self.stats .push_message_count .add_relaxed(messages.len() as u64); - // Obtain shred versions of the origins. - let shred_versions: Vec<_> = { - let gossip = self.gossip.read().unwrap(); - messages - .iter() - .map(|(from, _)| match gossip.crds.get_contact_info(*from) { - None => 0, - Some(info) => info.shred_version, - }) - .collect() - }; - // Filter out data if the origin has different shred version. - let self_shred_version = self.my_shred_version(); let num_crds_values: u64 = messages.iter().map(|(_, data)| data.len() as u64).sum(); - let messages: Vec<_> = messages - .into_iter() - .zip(shred_versions) - .filter_map(|((from, mut crds_values), shred_version)| { - Self::filter_by_shred_version( - &from, - &mut crds_values, - shred_version, - self_shred_version, - ); - if crds_values.is_empty() { - None - } else { - Some((from, crds_values)) - } - }) - .collect(); - let num_filtered_crds_values = messages.iter().map(|(_, data)| data.len() as u64).sum(); self.stats .push_message_value_count - .add_relaxed(num_filtered_crds_values); - self.stats - .skip_push_message_shred_version - .add_relaxed(num_crds_values - num_filtered_crds_values); + .add_relaxed(num_crds_values); // Origins' pubkeys of upserted crds values. let origins: HashSet<_> = { let mut gossip = @@ -2395,9 +2214,10 @@ impl ClusterInfo { .collect() }; // Generate prune messages. 
+ let self_pubkey = self.id(); let prunes = self .time_gossip_write_lock("prune_received_cache", &self.stats.prune_received_cache) - .prune_received_cache(origins, stakes); + .prune_received_cache(&self_pubkey, origins, stakes); let prunes: Vec<(Pubkey /*from*/, Vec /*origins*/)> = prunes .into_iter() .flat_map(|(from, prunes)| { @@ -2447,7 +2267,7 @@ impl ClusterInfo { let new_push_requests = self.new_push_requests(stakes, require_stake_for_gossip); inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len()); for (address, request) in new_push_requests { - if ContactInfo::is_valid_address(&address) { + if ContactInfo::is_valid_address(&address, &self.socket_addr_space) { match Packet::from_data(Some(&address), &request) { Ok(packet) => packets.packets.push(packet), Err(err) => error!("failed to write push-request packet: {:?}", err), @@ -2494,7 +2314,7 @@ impl ClusterInfo { fn process_packets( &self, - packets: VecDeque, + packets: VecDeque<(/*from:*/ SocketAddr, Protocol)>, thread_pool: &ThreadPool, recycler: &PacketsRecycler, response_sender: &PacketSender, @@ -2504,24 +2324,28 @@ impl ClusterInfo { should_check_duplicate_instance: bool, ) -> Result<(), GossipError> { let _st = ScopedTimer::from(&self.stats.process_gossip_packets_time); - self.stats - .packets_received_count - .add_relaxed(packets.len() as u64); - let packets: Vec<_> = thread_pool.install(|| { + // Filter out values if the shred-versions are different. + let self_shred_version = self.my_shred_version(); + let packets = if self_shred_version == 0 { packets - .into_par_iter() - .filter_map(|packet| { - let protocol: Protocol = - limited_deserialize(&packet.data[..packet.meta.size]).ok()?; - protocol.sanitize().ok()?; - let protocol = protocol.par_verify()?; - Some((packet.meta.addr(), protocol)) - }) - .collect() - }); - self.stats - .packets_received_verified_count - .add_relaxed(packets.len() as u64); + } else { + let gossip = self.gossip.read().unwrap(); + thread_pool.install(|| { + packets + .into_par_iter() + .with_min_len(1024) + .filter_map(|(from, msg)| { + let msg = filter_on_shred_version( + msg, + self_shred_version, + &gossip.crds, + &self.stats, + )?; + Some((from, msg)) + }) + .collect() + }) + }; // Check if there is a duplicate instance of // this node with more recent timestamp. let check_duplicate_instance = |values: &[CrdsValue]| { @@ -2606,12 +2430,54 @@ impl ClusterInfo { Ok(()) } + // Consumes packets received from the socket, deserializing, sanitizing and + // verifying them and then sending them down the channel for the actual + // handling of requests/messages. 
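run_socket_consume, defined next, introduces a dedicated consume stage, and it shares run_listen's backlog policy: drain the channel greedily but cap the queue at MAX_GOSSIP_TRAFFIC, dropping the oldest packets and counting the drops. The core of that policy as a runnable sketch; the cap is shrunk here for illustration:

```rust
use std::collections::VecDeque;

// Illustrative cap; the real MAX_GOSSIP_TRAFFIC is much larger.
const MAX_GOSSIP_TRAFFIC: usize = 4;

fn cap_backlog<T>(packets: &mut VecDeque<T>, dropped_count: &mut u64) {
    let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC);
    if excess_count > 0 {
        // Oldest packets sit at the front; newer gossip supersedes them.
        packets.drain(0..excess_count);
        *dropped_count += excess_count as u64;
    }
}

fn main() {
    let mut packets: VecDeque<u32> = (0..10).collect();
    let mut dropped_count = 0;
    cap_backlog(&mut packets, &mut dropped_count);
    assert_eq!(dropped_count, 6);
    assert_eq!(packets, VecDeque::from(vec![6, 7, 8, 9]));
    println!("kept {:?} ({} dropped)", packets, dropped_count);
}
```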
+ fn run_socket_consume( + &self, + receiver: &PacketReceiver, + sender: &Sender>, + thread_pool: &ThreadPool, + ) -> Result<(), GossipError> { + const RECV_TIMEOUT: Duration = Duration::from_secs(1); + let packets: Vec<_> = receiver.recv_timeout(RECV_TIMEOUT)?.packets.into(); + let mut packets = VecDeque::from(packets); + for payload in receiver.try_iter() { + packets.extend(payload.packets.iter().cloned()); + let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC); + if excess_count > 0 { + packets.drain(0..excess_count); + self.stats + .gossip_packets_dropped_count + .add_relaxed(excess_count as u64); + } + } + self.stats + .packets_received_count + .add_relaxed(packets.len() as u64); + let verify_packet = |packet: Packet| { + let data = &packet.data[..packet.meta.size]; + let protocol: Protocol = limited_deserialize(data).ok()?; + protocol.sanitize().ok()?; + let protocol = protocol.par_verify()?; + Some((packet.meta.addr(), protocol)) + }; + let packets: Vec<_> = { + let _st = ScopedTimer::from(&self.stats.verify_gossip_packets_time); + thread_pool.install(|| packets.into_par_iter().filter_map(verify_packet).collect()) + }; + self.stats + .packets_received_verified_count + .add_relaxed(packets.len() as u64); + Ok(sender.send(packets)?) + } + /// Process messages from the network fn run_listen( &self, recycler: &PacketsRecycler, bank_forks: Option<&RwLock>, - requests_receiver: &PacketReceiver, + receiver: &Receiver>, response_sender: &PacketSender, thread_pool: &ThreadPool, last_print: &mut Instant, @@ -2619,10 +2485,9 @@ impl ClusterInfo { ) -> Result<(), GossipError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); const SUBMIT_GOSSIP_STATS_INTERVAL: Duration = Duration::from_secs(2); - let packets: Vec<_> = requests_receiver.recv_timeout(RECV_TIMEOUT)?.packets.into(); - let mut packets = VecDeque::from(packets); - while let Ok(packet) = requests_receiver.try_recv() { - packets.extend(packet.packets.iter().cloned()); + let mut packets = VecDeque::from(receiver.recv_timeout(RECV_TIMEOUT)?); + for payload in receiver.try_iter() { + packets.extend(payload); let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC); if excess_count > 0 { packets.drain(0..excess_count); @@ -2659,25 +2524,52 @@ impl ClusterInfo { Ok(()) } - pub fn listen( + pub(crate) fn start_socket_consume_thread( + self: Arc, + receiver: PacketReceiver, + sender: Sender>, + exit: Arc, + ) -> JoinHandle<()> { + let thread_pool = ThreadPoolBuilder::new() + .num_threads(get_thread_count().min(8)) + .thread_name(|i| format!("gossip-consume-{}", i)) + .build() + .unwrap(); + let run_consume = move || { + while !exit.load(Ordering::Relaxed) { + match self.run_socket_consume(&receiver, &sender, &thread_pool) { + Err(GossipError::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break, + Err(GossipError::RecvTimeoutError(RecvTimeoutError::Timeout)) => (), + // A send operation can only fail if the receiving end of a + // channel is disconnected. 
+ Err(GossipError::SendError) => break, + Err(err) => error!("gossip consume: {}", err), + Ok(()) => (), + } + } + }; + let thread_name = String::from("gossip-consume"); + Builder::new().name(thread_name).spawn(run_consume).unwrap() + } + + pub(crate) fn listen( self: Arc, bank_forks: Option>>, - requests_receiver: PacketReceiver, + requests_receiver: Receiver>, response_sender: PacketSender, should_check_duplicate_instance: bool, - exit: &Arc, + exit: Arc, ) -> JoinHandle<()> { - let exit = exit.clone(); + let mut last_print = Instant::now(); let recycler = PacketsRecycler::default(); + let thread_pool = ThreadPoolBuilder::new() + .num_threads(get_thread_count().min(8)) + .thread_name(|i| format!("sol-gossip-work-{}", i)) + .build() + .unwrap(); Builder::new() .name("solana-listen".to_string()) .spawn(move || { - let thread_pool = ThreadPoolBuilder::new() - .num_threads(std::cmp::min(get_thread_count(), 8)) - .thread_name(|i| format!("sol-gossip-work-{}", i)) - .build() - .unwrap(); - let mut last_print = Instant::now(); while !exit.load(Ordering::Relaxed) { if let Err(err) = self.run_listen( &recycler, @@ -2689,7 +2581,8 @@ impl ClusterInfo { should_check_duplicate_instance, ) { match err { - GossipError::RecvTimeoutError(_) => { + GossipError::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, + GossipError::RecvTimeoutError(RecvTimeoutError::Timeout) => { let table_size = self.gossip.read().unwrap().crds.len(); debug!( "{}: run_listen timeout, table size: {}", @@ -2703,7 +2596,7 @@ impl ClusterInfo { self.id() ); exit.store(true, Ordering::Relaxed); - // TODO: Pass through ValidatorExit here so + // TODO: Pass through Exit here so // that this will exit cleanly. std::process::exit(1); } @@ -3024,22 +2917,79 @@ pub fn push_messages_to_peer( messages: Vec, self_id: Pubkey, peer_gossip: SocketAddr, + socket_addr_space: &SocketAddrSpace, ) -> Result<(), GossipError> { let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages) .map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload))) .collect(); let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs); let sock = UdpSocket::bind("0.0.0.0:0").unwrap(); - packet::send_to(&packets, &sock)?; + packet::send_to(&packets, &sock, socket_addr_space)?; Ok(()) } -pub fn stake_weight_peers( - peers: &mut Vec, - stakes: Option<&HashMap>, -) -> Vec<(u64, usize)> { - peers.dedup(); - ClusterInfo::sorted_stakes_with_index(peers, stakes) +// Filters out values from nodes with different shred-version. +fn filter_on_shred_version( + mut msg: Protocol, + self_shred_version: u16, + crds: &Crds, + stats: &GossipStats, +) -> Option { + let filter_values = |from: &Pubkey, values: &mut Vec, skipped_counter: &Counter| { + let num_values = values.len(); + if crds.get_shred_version(from) == Some(self_shred_version) { + // Retain values with the same shred-version, or those which are + // contact-info so that shred-versions can be updated. + values.retain(|value| match &value.data { + CrdsData::ContactInfo(_) => true, + _ => crds.get_shred_version(&value.pubkey()) == Some(self_shred_version), + }) + } else { + // Only allow a node to update its own contact info in case its + // shred-version changes.
+ values.retain(|value| match &value.data { + CrdsData::ContactInfo(node) => node.id == *from, + _ => false, + }) + } + let num_skipped = num_values - values.len(); + if num_skipped != 0 { + skipped_counter.add_relaxed(num_skipped as u64); + } + }; + match &mut msg { + Protocol::PullRequest(_, caller) => match &caller.data { + // Allow spy nodes with shred-version == 0 to pull from other nodes. + CrdsData::ContactInfo(node) + if node.shred_version == 0 || node.shred_version == self_shred_version => + { + Some(msg) + } + _ => { + stats.skip_pull_shred_version.add_relaxed(1); + None + } + }, + Protocol::PullResponse(from, values) => { + filter_values(from, values, &stats.skip_pull_response_shred_version); + if values.is_empty() { + None + } else { + Some(msg) + } + } + Protocol::PushMessage(from, values) => { + filter_values(from, values, &stats.skip_push_message_shred_version); + if values.is_empty() { + None + } else { + Some(msg) + } + } + Protocol::PruneMessage(_, _) | Protocol::PingMessage(_) | Protocol::PongMessage(_) => { + Some(msg) + } + } } #[cfg(test)] @@ -3068,20 +3018,30 @@ mod tests { fn test_gossip_node() { //check that a gossip nodes always show up as spies let (node, _, _) = ClusterInfo::spy_node(&solana_sdk::pubkey::new_rand(), 0); - assert!(ClusterInfo::is_spy_node(&node)); + assert!(ClusterInfo::is_spy_node( + &node, + &SocketAddrSpace::Unspecified + )); let (node, _, _) = ClusterInfo::gossip_node( &solana_sdk::pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap(), 0, ); - assert!(ClusterInfo::is_spy_node(&node)); + assert!(ClusterInfo::is_spy_node( + &node, + &SocketAddrSpace::Unspecified + )); } #[test] fn test_handle_pull() { solana_logger::setup(); let node = Node::new_localhost(); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info)); + let cluster_info = Arc::new(ClusterInfo::new( + node.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let data = test_crds_values(entrypoint_pubkey); @@ -3138,6 +3098,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&this_node.pubkey(), timestamp()), this_node.clone(), + SocketAddrSpace::Unspecified, ); let remote_nodes: Vec<(Keypair, SocketAddr)> = repeat_with(|| new_rand_remote_node(&mut rng)) @@ -3192,6 +3153,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&this_node.pubkey(), timestamp()), this_node.clone(), + SocketAddrSpace::Unspecified, ); let remote_nodes: Vec<(Keypair, SocketAddr)> = repeat_with(|| new_rand_remote_node(&mut rng)) @@ -3237,59 +3199,6 @@ vec![entrypoint_crdsvalue] } - #[test] - fn test_filter_shred_version() { - let from = solana_sdk::pubkey::new_rand(); - let my_shred_version = 1; - let other_shred_version = 1; - - // Allow same shred_version - let mut values = test_crds_values(from); - ClusterInfo::filter_by_shred_version( - &from, - &mut values, - other_shred_version, - my_shred_version, - ); - assert_eq!(values.len(), 1); - - // Allow shred_version=0. - let other_shred_version = 0; - ClusterInfo::filter_by_shred_version( - &from, - &mut values, - other_shred_version, - my_shred_version, - ); - assert_eq!(values.len(), 1); - - // Change to sender's ContactInfo version, allow that.
- let other_shred_version = 2; - ClusterInfo::filter_by_shred_version( - &from, - &mut values, - other_shred_version, - my_shred_version, - ); - assert_eq!(values.len(), 1); - - let snapshot_hash_data = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash { - from: solana_sdk::pubkey::new_rand(), - hashes: vec![], - wallclock: 0, - })); - values.push(snapshot_hash_data); - // Change to sender's ContactInfo version, allow that. - let other_shred_version = 2; - ClusterInfo::filter_by_shred_version( - &from, - &mut values, - other_shred_version, - my_shred_version, - ); - assert_eq!(values.len(), 1); - } - #[test] fn test_max_snapshot_hashes_with_push_messages() { let mut rng = rand::thread_rng(); @@ -3405,23 +3314,32 @@ mod tests { //check that gossip doesn't try to push to invalid addresses let node = Node::new_localhost(); let (spy, _, _) = ClusterInfo::spy_node(&solana_sdk::pubkey::new_rand(), 0); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info)); + let cluster_info = Arc::new(ClusterInfo::new( + node.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); cluster_info.insert_info(spy); - cluster_info - .gossip - .write() - .unwrap() - .refresh_push_active_set(&HashMap::new(), None); + { + let mut gossip = cluster_info.gossip.write().unwrap(); + gossip.refresh_push_active_set( + &cluster_info.id(), + cluster_info.my_shred_version(), + &HashMap::new(), // stakes + None, // gossip validators + &SocketAddrSpace::Unspecified, + ); + } let reqs = cluster_info.generate_new_gossip_requests( &thread_pool, - None, // gossip_validators - &HashMap::new(), - true, // generate_pull_requests - false, // require_stake_for_gossip + None, // gossip_validators + &HashMap::new(), // stakes + true, // generate_pull_requests + false, // require_stake_for_gossip ); //assert none of the addrs are invalid. 
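The test churn in this region is one mechanical migration repeated: `ClusterInfo::new_with_invalid_keypair(..)` is gone, so every construction now spells out the keypair and the address space. A sketch of the new call shape, with paths as used in these tests:

```rust
use {
    solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
    solana_sdk::signature::Keypair,
    solana_streamer::socket::SocketAddrSpace,
    std::sync::Arc,
};

fn make_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
    // Before: ClusterInfo::new_with_invalid_keypair(contact_info)
    // After: the keypair is explicit, and tests opt out of address
    // filtering with SocketAddrSpace::Unspecified.
    ClusterInfo::new(
        contact_info,
        Arc::new(Keypair::new()),
        SocketAddrSpace::Unspecified,
    )
}
```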
reqs.iter().all(|(addr, _)| { - let res = ContactInfo::is_valid_address(addr); + let res = ContactInfo::is_valid_address(addr, &SocketAddrSpace::Unspecified); assert!(res); res }); @@ -3430,14 +3348,19 @@ mod tests { #[test] fn test_cluster_info_new() { let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); - let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone()); + let cluster_info = ClusterInfo::new( + d.clone(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); assert_eq!(d.id, cluster_info.id()); } #[test] fn insert_info_test() { let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); - let cluster_info = ClusterInfo::new_with_invalid_keypair(d); + let cluster_info = + ClusterInfo::new(d, Arc::new(Keypair::new()), SocketAddrSpace::Unspecified); let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let label = CrdsValueLabel::ContactInfo(d.id); cluster_info.insert_info(d); @@ -3516,18 +3439,27 @@ mod tests { let peer_keypair = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0); - let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair)); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(keypair), + SocketAddrSpace::Unspecified, + ); cluster_info .ping_cache .lock() .unwrap() .mock_pong(peer.id, peer.gossip, Instant::now()); cluster_info.insert_info(peer); - cluster_info - .gossip - .write() - .unwrap() - .refresh_push_active_set(&HashMap::new(), None); + { + let mut gossip = cluster_info.gossip.write().unwrap(); + gossip.refresh_push_active_set( + &cluster_info.id(), + cluster_info.my_shred_version(), + &HashMap::new(), // stakes + None, // gossip validators + &SocketAddrSpace::Unspecified, + ); + } //check that all types of gossip messages are signed correctly let push_messages = cluster_info .gossip @@ -3548,12 +3480,14 @@ mod tests { .new_pull_request( &thread_pool, cluster_info.keypair.deref(), + cluster_info.my_shred_version(), timestamp(), None, &HashMap::new(), MAX_BLOOM_SIZE, &cluster_info.ping_cache, &mut pings, + &cluster_info.socket_addr_space, ) .ok() .unwrap(); @@ -3563,7 +3497,11 @@ mod tests { fn test_refresh_vote() { let keys = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); // Construct and push a vote for some other slot let unrefresh_slot = 5; @@ -3579,7 +3517,6 @@ mod tests { None, // payer ); cluster_info.push_vote(&unrefresh_tower, unrefresh_tx.clone()); - cluster_info.flush_push_queue(); let mut cursor = Cursor::default(); let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![unrefresh_tx.clone()]); @@ -3601,7 +3538,6 @@ mod tests { // Trying to refresh vote when it doesn't yet exist in gossip // shouldn't add the vote cluster_info.refresh_vote(refresh_tx.clone(), refresh_slot); - cluster_info.flush_push_queue(); let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![]); let (_, votes) = cluster_info.get_votes(&mut Cursor::default()); @@ -3610,7 +3546,6 @@ mod tests { // Push the new vote for `refresh_slot` cluster_info.push_vote(&refresh_tower, refresh_tx.clone()); - cluster_info.flush_push_queue(); // Should be two votes in gossip let (_, votes) = 
cluster_info.get_votes(&mut Cursor::default()); @@ -3636,8 +3571,6 @@ mod tests { ); cluster_info.refresh_vote(latest_refresh_tx.clone(), refresh_slot); } - cluster_info.flush_push_queue(); - // The diff since `max_ts` should only be the latest refreshed vote let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes.len(), 1); @@ -3655,7 +3588,11 @@ mod tests { let mut rng = rand::thread_rng(); let keys = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); // make sure empty crds is handled correctly let mut cursor = Cursor::default(); @@ -3678,7 +3615,6 @@ mod tests { ); let tower = vec![7]; // Last slot in the vote. cluster_info.push_vote(&tower, tx.clone()); - cluster_info.flush_push_queue(); let (labels, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![tx]); @@ -3727,7 +3663,11 @@ mod tests { let mut rng = rand::thread_rng(); let keys = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let mut tower = Vec::new(); for k in 0..MAX_LOCKOUT_HISTORY { let slot = k as Slot; @@ -3773,11 +3713,14 @@ mod tests { fn test_push_epoch_slots() { let keys = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let cluster_info = ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); assert!(slots.is_empty()); cluster_info.push_epoch_slots(&[0]); - cluster_info.flush_push_queue(); let mut cursor = Cursor::default(); let slots = cluster_info.get_epoch_slots(&mut cursor); @@ -3785,6 +3728,43 @@ mod tests { let slots = cluster_info.get_epoch_slots(&mut cursor); assert!(slots.is_empty()); + + // Test with different shred versions. + let mut rng = rand::thread_rng(); + let node_pubkey = Pubkey::new_unique(); + let mut node = ContactInfo::new_rand(&mut rng, Some(node_pubkey)); + node.shred_version = 42; + let epoch_slots = EpochSlots::new_rand(&mut rng, Some(node_pubkey)); + let entries = vec![ + CrdsValue::new_unsigned(CrdsData::ContactInfo(node)), + CrdsValue::new_unsigned(CrdsData::EpochSlots(0, epoch_slots)), + ]; + { + let mut gossip = cluster_info.gossip.write().unwrap(); + for entry in entries { + assert!(gossip.crds.insert(entry, /*now=*/ 0).is_ok()); + } + } + // Should exclude other node's epoch-slot because of different + // shred-version. + let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); + assert_eq!(slots.len(), 1); + assert_eq!(slots[0].from, cluster_info.id); + // Match shred versions. + { + let mut node = cluster_info.my_contact_info.write().unwrap(); + node.shred_version = 42; + } + cluster_info.push_self( + &HashMap::default(), // stakes + None, // gossip validators + ); + cluster_info.flush_push_queue(); + // Should now include both epoch slots. 
+ let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); + assert_eq!(slots.len(), 2); + assert_eq!(slots[0].from, cluster_info.id); + assert_eq!(slots[1].from, node_pubkey); } #[test] @@ -3794,6 +3774,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, ); let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); @@ -3818,6 +3799,7 @@ mod tests { let timeouts = { let gossip = cluster_info.gossip.read().unwrap(); gossip.make_timeouts( + cluster_info.id(), &HashMap::default(), // stakes, Duration::from_millis(gossip.pull.crds_timeout), ) @@ -3946,7 +3928,11 @@ mod tests { #[test] fn test_tvu_peers_and_stakes() { let d = ContactInfo::new_localhost(&Pubkey::new(&[0; 32]), timestamp()); - let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone()); + let cluster_info = ClusterInfo::new( + d.clone(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + ); let mut stakes = HashMap::new(); // no stake @@ -3978,15 +3964,6 @@ mod tests { assert_ne!(contact_info.shred_version, d.shred_version); cluster_info.insert_info(contact_info); stakes.insert(id4, 10); - - let mut peers = cluster_info.tvu_peers(); - let peers_and_stakes = stake_weight_peers(&mut peers, Some(&stakes)); - assert_eq!(peers.len(), 2); - assert_eq!(peers[0].id, id); - assert_eq!(peers[1].id, id2); - assert_eq!(peers_and_stakes.len(), 2); - assert_eq!(peers_and_stakes[0].0, 10); - assert_eq!(peers_and_stakes[1].0, 1); } #[test] @@ -3996,6 +3973,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, ); let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); @@ -4051,6 +4029,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, ); for i in 0..10 { // make these invalid for the upcoming repair request @@ -4126,6 +4105,7 @@ mod tests { let cluster_info = ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, ); let mut range: Vec = vec![]; //random should be hard to compress @@ -4134,9 +4114,7 @@ mod tests { range.push(last + rand::thread_rng().gen_range(1, 32)); } cluster_info.push_epoch_slots(&range[..16000]); - cluster_info.flush_push_queue(); cluster_info.push_epoch_slots(&range[16000..]); - cluster_info.flush_push_queue(); let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); let slots: Vec<_> = slots.iter().flat_map(|x| x.to_slots(0)).collect(); assert_eq!(slots, range); @@ -4175,6 +4153,7 @@ mod tests { let cluster_info = Arc::new(ClusterInfo::new( ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), node_keypair, + SocketAddrSpace::Unspecified, )); assert_eq!(cluster_info.my_shred_version(), 0); @@ -4258,6 +4237,7 @@ mod tests { contact_info }, node_keypair, + SocketAddrSpace::Unspecified, )); assert_eq!(cluster_info.my_shred_version(), 2); @@ -4429,7 +4409,11 @@ mod tests { #[ignore] // TODO: debug why this is flaky on buildkite! 
fn test_pull_request_time_pruning() { let node = Node::new_localhost(); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info)); + let cluster_info = Arc::new(ClusterInfo::new( + node.info, + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); cluster_info.set_entrypoint(entrypoint); diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs index baac2f54384dda..b3c228ed6f8592 100644 --- a/gossip/src/cluster_info_metrics.rs +++ b/gossip/src/cluster_info_metrics.rs @@ -4,6 +4,7 @@ use { solana_sdk::pubkey::Pubkey, std::{ collections::HashMap, + ops::{Deref, DerefMut}, sync::{ atomic::{AtomicU64, Ordering}, RwLock, @@ -28,6 +29,12 @@ impl Counter { } } +pub(crate) struct TimedGuard<'a, T> { + guard: T, + timer: Measure, + counter: &'a Counter, +} + pub(crate) struct ScopedTimer<'a> { clock: Instant, metric: &'a AtomicU64, @@ -52,6 +59,35 @@ impl Drop for ScopedTimer<'_> { } } +impl<'a, T> TimedGuard<'a, T> { + pub(crate) fn new(guard: T, label: &'static str, counter: &'a Counter) -> Self { + Self { + guard, + timer: Measure::start(label), + counter, + } + } +} + +impl<'a, T> Deref for TimedGuard<'a, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.guard + } +} + +impl<'a, T> DerefMut for TimedGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.guard + } +} + +impl<'a, T> Drop for TimedGuard<'a, T> { + fn drop(&mut self) { + self.counter.add_measure(&mut self.timer); + } +} + #[derive(Default)] pub(crate) struct GossipStats { pub(crate) all_tvu_peers: Counter, @@ -115,9 +151,11 @@ pub(crate) struct GossipStats { pub(crate) skip_pull_response_shred_version: Counter, pub(crate) skip_pull_shred_version: Counter, pub(crate) skip_push_message_shred_version: Counter, + pub(crate) trim_crds_table: Counter, pub(crate) trim_crds_table_failed: Counter, pub(crate) trim_crds_table_purged_values_count: Counter, pub(crate) tvu_peers: Counter, + pub(crate) verify_gossip_packets_time: Counter, } pub(crate) fn submit_gossip_stats( @@ -171,6 +209,11 @@ pub(crate) fn submit_gossip_stats( stats.process_gossip_packets_time.clear(), i64 ), + ( + "verify_gossip_packets_time", + stats.verify_gossip_packets_time.clear(), + i64 + ), ( "handle_batch_ping_messages_time", stats.handle_batch_ping_messages_time.clear(), @@ -392,6 +435,7 @@ pub(crate) fn submit_gossip_stats( stats.require_stake_for_gossip_unknown_stakes.clear(), i64 ), + ("trim_crds_table", stats.trim_crds_table.clear(), i64), ( "trim_crds_table_failed", stats.trim_crds_table_failed.clear(), diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 6471c30dac00d6..30ec785ef4cc71 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -7,6 +7,7 @@ use { signature::{Keypair, Signer}, timing::timestamp, }, + solana_streamer::socket::SocketAddrSpace, std::net::{IpAddr, SocketAddr}, }; @@ -105,7 +106,7 @@ impl ContactInfo { } /// New random ContactInfo for tests and simulations. 
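// Illustration (not part of the patch): the TimedGuard added above is an RAII
// wrapper that times how long a lock guard is held and feeds the elapsed time
// into a counter on drop. A self-contained sketch of the same pattern, using a
// plain AtomicU64 in place of the crate's Counter/Measure types:
use std::{
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicU64, Ordering},
    time::Instant,
};

struct TimedGuardSketch<'a, G> {
    guard: G,
    start: Instant,
    total_ns: &'a AtomicU64,
}

impl<'a, G> TimedGuardSketch<'a, G> {
    fn new(guard: G, total_ns: &'a AtomicU64) -> Self {
        Self { guard, start: Instant::now(), total_ns }
    }
}

impl<G> Deref for TimedGuardSketch<'_, G> {
    type Target = G;
    fn deref(&self) -> &G { &self.guard }
}

impl<G> DerefMut for TimedGuardSketch<'_, G> {
    fn deref_mut(&mut self) -> &mut G { &mut self.guard }
}

impl<G> Drop for TimedGuardSketch<'_, G> {
    // Lock hold time shows up in metrics for free wherever the guard is used.
    fn drop(&mut self) {
        self.total_ns
            .fetch_add(self.start.elapsed().as_nanos() as u64, Ordering::Relaxed);
    }
}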
- pub(crate) fn new_rand(rng: &mut R, pubkey: Option) -> Self { + pub fn new_rand(rng: &mut R, pubkey: Option) -> Self { let delay = 10 * 60 * 1000; // 10 minutes let now = timestamp() - delay + rng.gen_range(0, 2 * delay); let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); @@ -143,14 +144,14 @@ impl ContactInfo { } let tpu = *bind_addr; - let gossip = next_port(&bind_addr, 1); - let tvu = next_port(&bind_addr, 2); - let tpu_forwards = next_port(&bind_addr, 3); - let tvu_forwards = next_port(&bind_addr, 4); - let repair = next_port(&bind_addr, 5); + let gossip = next_port(bind_addr, 1); + let tvu = next_port(bind_addr, 2); + let tpu_forwards = next_port(bind_addr, 3); + let tvu_forwards = next_port(bind_addr, 4); + let repair = next_port(bind_addr, 5); let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT); let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT); - let serve_repair = next_port(&bind_addr, 6); + let serve_repair = next_port(bind_addr, 6); Self { id: *pubkey, gossip, @@ -193,16 +194,29 @@ impl ContactInfo { /// port must not be 0 /// ip must be specified and not multicast /// loopback ip is only allowed in tests - pub fn is_valid_address(addr: &SocketAddr) -> bool { + // Keeping this for now not to break tvu-peers and turbine shuffle order of + // nodes when arranging nodes on retransmit tree. Private IP addresses in + // turbine are filtered out just before sending packets. + pub(crate) fn is_valid_tvu_address(addr: &SocketAddr) -> bool { (addr.port() != 0) && Self::is_valid_ip(addr.ip()) } + // TODO: Replace this entirely with streamer SocketAddrSpace. + pub fn is_valid_address(addr: &SocketAddr, socket_addr_space: &SocketAddrSpace) -> bool { + Self::is_valid_tvu_address(addr) && socket_addr_space.check(addr) + } + pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) { (self.rpc, self.tpu) } - pub fn valid_client_facing_addr(&self) -> Option<(SocketAddr, SocketAddr)> { - if ContactInfo::is_valid_address(&self.rpc) && ContactInfo::is_valid_address(&self.tpu) { + pub fn valid_client_facing_addr( + &self, + socket_addr_space: &SocketAddrSpace, + ) -> Option<(SocketAddr, SocketAddr)> { + if ContactInfo::is_valid_address(&self.rpc, socket_addr_space) + && ContactInfo::is_valid_address(&self.tpu, socket_addr_space) + { Some((self.rpc, self.tpu)) } else { None @@ -217,13 +231,25 @@ mod tests { #[test] fn test_is_valid_address() { let bad_address_port = socketaddr!("127.0.0.1:0"); - assert!(!ContactInfo::is_valid_address(&bad_address_port)); + assert!(!ContactInfo::is_valid_address( + &bad_address_port, + &SocketAddrSpace::Unspecified + )); let bad_address_unspecified = socketaddr!(0, 1234); - assert!(!ContactInfo::is_valid_address(&bad_address_unspecified)); + assert!(!ContactInfo::is_valid_address( + &bad_address_unspecified, + &SocketAddrSpace::Unspecified + )); let bad_address_multicast = socketaddr!([224, 254, 0, 0], 1234); - assert!(!ContactInfo::is_valid_address(&bad_address_multicast)); + assert!(!ContactInfo::is_valid_address( + &bad_address_multicast, + &SocketAddrSpace::Unspecified + )); let loopback = socketaddr!("127.0.0.1:1234"); - assert!(ContactInfo::is_valid_address(&loopback)); + assert!(ContactInfo::is_valid_address( + &loopback, + &SocketAddrSpace::Unspecified + )); // assert!(!ContactInfo::is_valid_ip_internal(loopback.ip(), false)); } @@ -306,11 +332,19 @@ mod tests { #[test] fn test_valid_client_facing() { let mut ci = ContactInfo::default(); - assert_eq!(ci.valid_client_facing_addr(), None); 
+ assert_eq!( + ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified), + None + ); ci.tpu = socketaddr!("127.0.0.1:123"); - assert_eq!(ci.valid_client_facing_addr(), None); + assert_eq!( + ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified), + None + ); ci.rpc = socketaddr!("127.0.0.1:234"); - assert!(ci.valid_client_facing_addr().is_some()); + assert!(ci + .valid_client_facing_addr(&SocketAddrSpace::Unspecified) + .is_some()); } #[test] diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index bd197647aa3bc6..fcba2230c681e7 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -35,6 +35,7 @@ use { map::{rayon::ParValues, Entry, IndexMap}, set::IndexSet, }, + matches::debug_assert_matches, rayon::{prelude::*, ThreadPool}, solana_sdk::{ hash::{hash, Hash}, @@ -66,6 +67,8 @@ pub struct Crds { entries: BTreeMap, // Hash of recently purged values. purged: VecDeque<(Hash, u64 /*timestamp*/)>, + // Mapping from nodes' pubkeys to their respective shred-version. + shred_versions: HashMap, } #[derive(PartialEq, Debug)] @@ -125,6 +128,7 @@ impl Default for Crds { records: HashMap::default(), entries: BTreeMap::default(), purged: VecDeque::default(), + shred_versions: HashMap::default(), } } } @@ -173,9 +177,10 @@ impl Crds { Entry::Vacant(entry) => { let entry_index = entry.index(); self.shards.insert(entry_index, &value); - match value.value.data { - CrdsData::ContactInfo(_) => { + match &value.value.data { + CrdsData::ContactInfo(node) => { self.nodes.insert(entry_index); + self.shred_versions.insert(pubkey, node.shred_version); } CrdsData::Vote(_, _) => { self.votes.insert(value.ordinal, entry_index); @@ -195,7 +200,13 @@ impl Crds { let entry_index = entry.index(); self.shards.remove(entry_index, entry.get()); self.shards.insert(entry_index, &value); - match value.value.data { + match &value.value.data { + CrdsData::ContactInfo(node) => { + self.shred_versions.insert(pubkey, node.shred_version); + // self.nodes does not need to be updated since the + // entry at this index was and stays contact-info. + debug_assert_matches!(entry.get().value.data, CrdsData::ContactInfo(_)); + } CrdsData::Vote(_, _) => { self.votes.remove(&entry.get().ordinal); self.votes.insert(value.ordinal, entry_index); @@ -239,6 +250,10 @@ impl Crds { self.table.get(&label)?.value.contact_info() } + pub(crate) fn get_shred_version(&self, pubkey: &Pubkey) -> Option { + self.shred_versions.get(pubkey).copied() + } + pub fn get_lowest_slot(&self, pubkey: Pubkey) -> Option<&LowestSlot> { let lable = CrdsValueLabel::LowestSlot(pubkey); self.table.get(&lable)?.value.lowest_slot() @@ -391,22 +406,20 @@ impl Crds { // returns crds labels of old values to be evicted. let evict = |pubkey, index: &IndexSet| { let timeout = timeouts.get(pubkey).copied().unwrap_or(default_timeout); - let local_timestamp = { - let origin = CrdsValueLabel::ContactInfo(*pubkey); - match self.table.get(&origin) { - Some(origin) => origin.local_timestamp, - None => 0, + // If the origin's contact-info hasn't expired yet then preserve + // all associated values. + let origin = CrdsValueLabel::ContactInfo(*pubkey); + if let Some(origin) = self.table.get(&origin) { + if now < origin.local_timestamp.saturating_add(timeout) { + return vec![]; } - }; + } + // Otherwise check each value's timestamp individually. 
index .into_iter() .filter_map(|ix| { let (label, value) = self.table.get_index(*ix).unwrap(); - let expiry_timestamp = value - .local_timestamp - .max(local_timestamp) - .saturating_add(timeout); - if expiry_timestamp <= now { + if value.local_timestamp.saturating_add(timeout) <= now { Some(label.clone()) } else { None @@ -451,6 +464,7 @@ impl Crds { records_entry.get_mut().swap_remove(&index); if records_entry.get().is_empty() { records_entry.remove(); + self.shred_versions.remove(&pubkey); } // If index == self.table.len(), then the removed entry was the last // entry in the table, in which case no other keys were modified. @@ -519,7 +533,7 @@ impl Crds { stakes: &HashMap, now: u64, ) -> Result { - if stakes.is_empty() { + if stakes.values().all(|&stake| stake == 0) { return Err(CrdsError::UnknownStakes); } let mut keys: Vec<_> = self @@ -546,17 +560,20 @@ impl Crds { } #[cfg(test)] -mod test { +mod tests { use { super::*, crate::{ contact_info::ContactInfo, - crds_value::{new_rand_timestamp, NodeInstance}, + crds_value::{new_rand_timestamp, NodeInstance, SnapshotHash}, }, rand::{thread_rng, Rng, SeedableRng}, rand_chacha::ChaChaRng, rayon::ThreadPoolBuilder, - solana_sdk::signature::{Keypair, Signer}, + solana_sdk::{ + signature::{Keypair, Signer}, + timing::timestamp, + }, std::{collections::HashSet, iter::repeat_with}, }; @@ -1019,6 +1036,53 @@ mod test { assert!(crds.records.is_empty()); } + #[test] + fn test_get_shred_version() { + let mut rng = rand::thread_rng(); + let pubkey = Pubkey::new_unique(); + let mut crds = Crds::default(); + assert_eq!(crds.get_shred_version(&pubkey), None); + // Initial insertion of a node with shred version: + let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey)); + let wallclock = node.wallclock; + node.shred_version = 42; + let node = CrdsData::ContactInfo(node); + let node = CrdsValue::new_unsigned(node); + assert_eq!(crds.insert(node, timestamp()), Ok(())); + assert_eq!(crds.get_shred_version(&pubkey), Some(42)); + // An outdated value should not update shred-version: + let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey)); + node.wallclock = wallclock - 1; // outdated. + node.shred_version = 8; + let node = CrdsData::ContactInfo(node); + let node = CrdsValue::new_unsigned(node); + assert_eq!(crds.insert(node, timestamp()), Err(CrdsError::InsertFailed)); + assert_eq!(crds.get_shred_version(&pubkey), Some(42)); + // Update shred version: + let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey)); + node.wallclock = wallclock + 1; // so that it overrides the prev one. + node.shred_version = 8; + let node = CrdsData::ContactInfo(node); + let node = CrdsValue::new_unsigned(node); + assert_eq!(crds.insert(node, timestamp()), Ok(())); + assert_eq!(crds.get_shred_version(&pubkey), Some(8)); + // Add other crds values with the same pubkey. + let val = SnapshotHash::new_rand(&mut rng, Some(pubkey)); + let val = CrdsData::SnapshotHashes(val); + let val = CrdsValue::new_unsigned(val); + assert_eq!(crds.insert(val, timestamp()), Ok(())); + assert_eq!(crds.get_shred_version(&pubkey), Some(8)); + // Remove contact-info. Shred version should stay there since there + // are still values associated with the pubkey. + crds.remove(&CrdsValueLabel::ContactInfo(pubkey), timestamp()); + assert_eq!(crds.get_contact_info(pubkey), None); + assert_eq!(crds.get_shred_version(&pubkey), Some(8)); + // Remove the remaining entry with the same pubkey. 
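// Illustration (not part of the patch): the new eviction rule above is
// two-tier. While the origin's ContactInfo is still fresh, every value from
// that origin survives; once it expires, each value is judged on its own
// timestamp. A toy model, all times in milliseconds:
fn should_evict(
    value_local_timestamp: u64,
    origin_contact_info_timestamp: Option<u64>, // None if origin's ContactInfo is gone
    timeout: u64,
    now: u64,
) -> bool {
    // Tier 1: a live contact-info keeps all of the origin's values alive.
    if let Some(ts) = origin_contact_info_timestamp {
        if now < ts.saturating_add(timeout) {
            return false;
        }
    }
    // Tier 2: otherwise each value expires on its own local timestamp.
    value_local_timestamp.saturating_add(timeout) <= now
}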
+ crds.remove(&CrdsValueLabel::SnapshotHashes(pubkey), timestamp()); + assert_eq!(crds.get_records(&pubkey).count(), 0); + assert_eq!(crds.get_shred_version(&pubkey), None); + } + #[test] #[allow(clippy::needless_collect)] fn test_drop() { diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 18c7991f3bbeff..f45125e6445dd7 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -24,6 +24,7 @@ use { signature::{Keypair, Signer}, timing::timestamp, }, + solana_streamer::socket::SocketAddrSpace, std::{ collections::{HashMap, HashSet}, net::SocketAddr, @@ -32,45 +33,25 @@ use { }, }; +#[derive(Default)] pub struct CrdsGossip { pub crds: Crds, - pub id: Pubkey, - pub shred_version: u16, pub push: CrdsGossipPush, pub pull: CrdsGossipPull, } -impl Default for CrdsGossip { - fn default() -> Self { - CrdsGossip { - crds: Crds::default(), - id: Pubkey::default(), - shred_version: 0, - push: CrdsGossipPush::default(), - pull: CrdsGossipPull::default(), - } - } -} - impl CrdsGossip { - pub fn set_self(&mut self, id: &Pubkey) { - self.id = *id; - } - pub fn set_shred_version(&mut self, shred_version: u16) { - self.shred_version = shred_version; - } - /// process a push message to the network - /// Returns origins' pubkeys of upserted values. + /// Returns unique origins' pubkeys of upserted values. pub fn process_push_message( &mut self, from: &Pubkey, values: Vec, now: u64, - ) -> Vec { + ) -> HashSet { values .into_iter() - .flat_map(|val| { + .filter_map(|val| { let origin = val.pubkey(); self.push .process_push_message(&mut self.crds, from, val, now) @@ -83,18 +64,18 @@ impl CrdsGossip { /// remove redundant paths in the network pub fn prune_received_cache( &mut self, + self_pubkey: &Pubkey, origins: I, // Unique pubkeys of crds values' owners. 
stakes: &HashMap<Pubkey, u64>, ) -> HashMap<Pubkey, Vec<Pubkey>> where I: IntoIterator<Item = Pubkey>, { - let self_pubkey = self.id; origins .into_iter() .flat_map(|origin| { self.push - .prune_received_cache(&self_pubkey, &origin, stakes) + .prune_received_cache(self_pubkey, &origin, stakes) .into_iter() .zip(std::iter::repeat(origin)) }) .collect() } @@ -106,8 +87,9 @@ impl CrdsGossip { pending_push_messages: Vec<CrdsValue>, now: u64, ) -> HashMap<Pubkey, Vec<CrdsValue>> { - let self_pubkey = self.id; - self.process_push_message(&self_pubkey, pending_push_messages, now); + for entry in pending_push_messages { + let _ = self.crds.insert(entry, now); + } self.push.new_push_messages(&self.crds, now) } @@ -161,21 +143,24 @@ } else { offset }; - let entries = chunks - .enumerate() - .map(|(k, chunk)| { - let index = (offset + k as DuplicateShredIndex) % MAX_DUPLICATE_SHREDS; - let data = CrdsData::DuplicateShred(index, chunk); - CrdsValue::new_signed(data, keypair) - }) - .collect(); - self.process_push_message(&pubkey, entries, timestamp()); + let entries = chunks.enumerate().map(|(k, chunk)| { + let index = (offset + k as DuplicateShredIndex) % MAX_DUPLICATE_SHREDS; + let data = CrdsData::DuplicateShred(index, chunk); + CrdsValue::new_signed(data, keypair) + }); + let now = timestamp(); + for entry in entries { + if let Err(err) = self.crds.insert(entry, now) { + error!("push_duplicate_shred failed: {:?}", err); + } + } Ok(()) } /// add the `from` to the peer's filter of nodes pub fn process_prune_msg( &self, + self_pubkey: &Pubkey, peer: &Pubkey, destination: &Pubkey, origin: &[Pubkey], @@ -186,8 +171,8 @@ if expired { return Err(CrdsGossipError::PruneMessageTimeout); } - if self.id == *destination { - self.push.process_prune_msg(&self.id, peer, origin); + if self_pubkey == destination { + self.push.process_prune_msg(self_pubkey, peer, origin); Ok(()) } else { Err(CrdsGossipError::BadPruneDestination) @@ -198,43 +183,51 @@ /// * ratio - number of actives to rotate pub fn refresh_push_active_set( &mut self, + self_pubkey: &Pubkey, + self_shred_version: u16, stakes: &HashMap<Pubkey, u64>, gossip_validators: Option<&HashSet<Pubkey>>, + socket_addr_space: &SocketAddrSpace, ) { self.push.refresh_push_active_set( &self.crds, stakes, gossip_validators, - &self.id, - self.shred_version, + self_pubkey, + self_shred_version, self.crds.num_nodes(), CRDS_GOSSIP_NUM_ACTIVE, + socket_addr_space, ) } /// generate a random request + #[allow(clippy::too_many_arguments)] pub fn new_pull_request( &self, thread_pool: &ThreadPool, self_keypair: &Keypair, + self_shred_version: u16, now: u64, gossip_validators: Option<&HashSet<Pubkey>>, stakes: &HashMap<Pubkey, u64>, bloom_size: usize, ping_cache: &Mutex<PingCache>, pings: &mut Vec<(SocketAddr, Ping)>, + socket_addr_space: &SocketAddrSpace, ) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> { self.pull.new_pull_request( thread_pool, &self.crds, self_keypair, - self.shred_version, + self_shred_version, now, gossip_validators, stakes, bloom_size, ping_cache, pings, + socket_addr_space, ) } @@ -250,18 +243,23 @@ where I: IntoIterator<Item = CrdsValue>, { - self.pull - .process_pull_requests(&mut self.crds, callers, now); + CrdsGossipPull::process_pull_requests(&mut self.crds, callers, now); } pub fn generate_pull_responses( &self, + thread_pool: &ThreadPool, filters: &[(CrdsValue, CrdsFilter)], output_size_limit: usize, // Limit number of crds values returned.
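// Illustration (not part of the patch): process_push_message above now
// returns a HashSet of origins rather than a Vec, so each upserted origin is
// reported once no matter how many of its values landed. The pattern, with
// toy u64 ids in place of pubkeys:
fn unique_upserted_origins(
    results: Vec<(u64 /*origin*/, bool /*upserted*/)>,
) -> std::collections::HashSet<u64> {
    results
        .into_iter()
        .filter_map(|(origin, upserted)| upserted.then(|| origin))
        .collect()
}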
now: u64, ) -> Vec> { - self.pull - .generate_pull_responses(&self.crds, filters, output_size_limit, now) + CrdsGossipPull::generate_pull_responses( + thread_pool, + &self.crds, + filters, + output_size_limit, + now, + ) } pub fn filter_pull_responses( @@ -302,14 +300,16 @@ impl CrdsGossip { pub fn make_timeouts( &self, + self_pubkey: Pubkey, stakes: &HashMap, epoch_duration: Duration, ) -> HashMap { - self.pull.make_timeouts(self.id, stakes, epoch_duration) + self.pull.make_timeouts(self_pubkey, stakes, epoch_duration) } pub fn purge( &mut self, + self_pubkey: &Pubkey, thread_pool: &ThreadPool, now: u64, timeouts: &HashMap, @@ -321,11 +321,9 @@ impl CrdsGossip { } if now > self.pull.crds_timeout { //sanity check - assert_eq!(timeouts[&self.id], std::u64::MAX); + assert_eq!(timeouts[self_pubkey], std::u64::MAX); assert!(timeouts.contains_key(&Pubkey::default())); - rv = self - .pull - .purge_active(thread_pool, &mut self.crds, now, &timeouts); + rv = CrdsGossipPull::purge_active(thread_pool, &mut self.crds, now, timeouts); } self.crds .trim_purged(now.saturating_sub(5 * self.pull.crds_timeout)); @@ -339,7 +337,6 @@ impl CrdsGossip { crds: self.crds.clone(), push: self.push.mock_clone(), pull: self.pull.mock_clone(), - ..*self } } } @@ -371,11 +368,8 @@ mod test { #[test] fn test_prune_errors() { - let mut crds_gossip = CrdsGossip { - id: Pubkey::new(&[0; 32]), - ..CrdsGossip::default() - }; - let id = crds_gossip.id; + let mut crds_gossip = CrdsGossip::default(); + let id = Pubkey::new(&[0; 32]); let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0); let prune_pubkey = Pubkey::new(&[2; 32]); crds_gossip @@ -385,10 +379,17 @@ mod test { 0, ) .unwrap(); - crds_gossip.refresh_push_active_set(&HashMap::new(), None); + crds_gossip.refresh_push_active_set( + &id, + 0, // shred version + &HashMap::new(), // stakes + None, // gossip validators + &SocketAddrSpace::Unspecified, + ); let now = timestamp(); //incorrect dest let mut res = crds_gossip.process_prune_msg( + &id, &ci.id, &Pubkey::new(hash(&[1; 32]).as_ref()), &[prune_pubkey], @@ -397,11 +398,25 @@ mod test { ); assert_eq!(res.err(), Some(CrdsGossipError::BadPruneDestination)); //correct dest - res = crds_gossip.process_prune_msg(&ci.id, &id, &[prune_pubkey], now, now); + res = crds_gossip.process_prune_msg( + &id, // self_pubkey + &ci.id, // peer + &id, // destination + &[prune_pubkey], // origins + now, + now, + ); res.unwrap(); //test timeout let timeout = now + crds_gossip.push.prune_timeout * 2; - res = crds_gossip.process_prune_msg(&ci.id, &id, &[prune_pubkey], now, timeout); + res = crds_gossip.process_prune_msg( + &id, // self_pubkey + &ci.id, // peer + &id, // destination + &[prune_pubkey], // origins + now, + timeout, + ); assert_eq!(res.err(), Some(CrdsGossipError::PruneMessageTimeout)); } } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index d01e8887d2224e..3b84fe6c61a991 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -13,18 +13,15 @@ use { crate::{ cluster_info::{Ping, CRDS_UNIQUE_PUBKEY_CAPACITY}, contact_info::ContactInfo, - crds::Crds, + crds::{Crds, VersionedCrdsValue}, crds_gossip::{get_stake, get_weight}, crds_gossip_error::CrdsGossipError, crds_value::CrdsValue, ping_pong::PingCache, + weighted_shuffle::WeightedShuffle, }, - itertools::Itertools, lru::LruCache, - rand::{ - distributions::{Distribution, WeightedIndex}, - Rng, - }, + rand::Rng, rayon::{prelude::*, ThreadPool}, solana_runtime::bloom::{AtomicBloom, Bloom}, solana_sdk::{ @@ -32,12 
+29,16 @@ use { pubkey::Pubkey, signature::{Keypair, Signer}, }, + solana_streamer::socket::SocketAddrSpace, std::{ collections::{HashMap, HashSet, VecDeque}, convert::TryInto, iter::repeat_with, net::SocketAddr, - sync::Mutex, + sync::{ + atomic::{AtomicI64, AtomicUsize, Ordering}, + Mutex, + }, time::{Duration, Instant}, }, }; @@ -77,7 +78,8 @@ impl solana_sdk::sanitize::Sanitize for CrdsFilter { } impl CrdsFilter { - pub fn new_rand(num_items: usize, max_bytes: usize) -> Self { + #[cfg(test)] + pub(crate) fn new_rand(num_items: usize, max_bytes: usize) -> Self { let max_bits = (max_bytes * 8) as f64; let max_items = Self::max_items(max_bits, FALSE_RATE, KEYS); let mask_bits = Self::mask_bits(num_items as f64, max_items as f64); @@ -210,7 +212,7 @@ impl Default for CrdsGossipPull { impl CrdsGossipPull { /// generate a random request #[allow(clippy::too_many_arguments)] - pub fn new_pull_request( + pub(crate) fn new_pull_request( &self, thread_pool: &ThreadPool, crds: &Crds, @@ -222,6 +224,7 @@ impl CrdsGossipPull { bloom_size: usize, ping_cache: &Mutex, pings: &mut Vec<(SocketAddr, Ping)>, + socket_addr_space: &SocketAddrSpace, ) -> Result<(ContactInfo, Vec), CrdsGossipError> { let (weights, peers): (Vec<_>, Vec<_>) = self .pull_options( @@ -231,19 +234,17 @@ impl CrdsGossipPull { now, gossip_validators, stakes, + socket_addr_space, ) .into_iter() .unzip(); if peers.is_empty() { return Err(CrdsGossipError::NoPeers); } - let mut peers = { - let mut rng = rand::thread_rng(); - let num_samples = peers.len() * 2; - let index = WeightedIndex::new(weights).unwrap(); - let sample_peer = move || peers[index.sample(&mut rng)]; - repeat_with(sample_peer).take(num_samples) - }; + let mut rng = rand::thread_rng(); + let mut peers = WeightedShuffle::new(&mut rng, &weights) + .unwrap() + .map(|i| peers[i]); let peer = { let mut rng = rand::thread_rng(); let mut ping_cache = ping_cache.lock().unwrap(); @@ -275,7 +276,8 @@ impl CrdsGossipPull { now: u64, gossip_validators: Option<&HashSet>, stakes: &HashMap, - ) -> Vec<(f32, &'a ContactInfo)> { + socket_addr_space: &SocketAddrSpace, + ) -> Vec<(u64, &'a ContactInfo)> { let mut rng = rand::thread_rng(); let active_cutoff = now.saturating_sub(PULL_ACTIVE_TIMEOUT_MS); crds.get_nodes() @@ -294,7 +296,7 @@ impl CrdsGossipPull { }) .filter(|v| { v.id != *self_id - && ContactInfo::is_valid_address(&v.gossip) + && ContactInfo::is_valid_address(&v.gossip, socket_addr_space) && (self_shred_version == 0 || self_shred_version == v.shred_version) && gossip_validators .map_or(true, |gossip_validators| gossip_validators.contains(&v.id)) @@ -309,7 +311,9 @@ impl CrdsGossipPull { let since = (now.saturating_sub(req_time).min(3600 * 1000) / 1024) as u32; let stake = get_stake(&item.id, stakes); let weight = get_weight(max_weight, since, stake); - (weight, item) + // Weights are bounded by max_weight defined above. + // So this type-cast should be safe. + ((weight * 100.0) as u64, item) }) .collect() } @@ -318,12 +322,12 @@ impl CrdsGossipPull { /// This is used for weighted random selection during `new_pull_request` /// It's important to use the local nodes request creation time as the weight /// instead of the response received time otherwise failed nodes will increase their weight. 
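// Illustration (not part of the patch): one way to read the comment above is
// that the weight ages from *our* request creation time, so a peer that never
// answers keeps accumulating staleness from our own clock and a failed node
// cannot inflate its weight. Constants and the formula here are illustrative
// only; the crate's get_weight differs in detail.
fn pull_weight_sketch(now_ms: u64, last_request_created_ms: u64, stake: u64) -> u64 {
    // Cap staleness at one hour and quantize to ~1s buckets, as the diff does.
    let since = now_ms.saturating_sub(last_request_created_ms).min(3_600_000) / 1024;
    // Staleness and stake both push the weight up.
    (since + 1).saturating_mul(stake.max(1))
}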
- pub fn mark_pull_request_creation_time(&mut self, from: Pubkey, now: u64) { + pub(crate) fn mark_pull_request_creation_time(&mut self, from: Pubkey, now: u64) { self.pull_request_time.put(from, now); } /// process a pull request - pub fn process_pull_requests(&mut self, crds: &mut Crds, callers: I, now: u64) + pub(crate) fn process_pull_requests(crds: &mut Crds, callers: I, now: u64) where I: IntoIterator, { @@ -335,14 +339,14 @@ impl CrdsGossipPull { } /// Create gossip responses to pull requests - pub fn generate_pull_responses( - &self, + pub(crate) fn generate_pull_responses( + thread_pool: &ThreadPool, crds: &Crds, requests: &[(CrdsValue, CrdsFilter)], output_size_limit: usize, // Limit number of crds values returned. now: u64, ) -> Vec> { - self.filter_crds_values(crds, requests, output_size_limit, now) + Self::filter_crds_values(thread_pool, crds, requests, output_size_limit, now) } // Checks if responses should be inserted and @@ -351,7 +355,7 @@ impl CrdsGossipPull { // .0 => responses that update the owner timestamp // .1 => responses that do not update the owner timestamp // .2 => hash value of outdated values which will fail to insert. - pub fn filter_pull_responses( + pub(crate) fn filter_pull_responses( &self, crds: &Crds, timeouts: &HashMap, @@ -397,7 +401,7 @@ impl CrdsGossipPull { } /// process a vec of pull responses - pub fn process_pull_responses( + pub(crate) fn process_pull_responses( &mut self, crds: &mut Crds, from: &Pubkey, @@ -429,7 +433,7 @@ impl CrdsGossipPull { .extend(failed_inserts.into_iter().zip(std::iter::repeat(now))); } - pub fn purge_failed_inserts(&mut self, now: u64) { + pub(crate) fn purge_failed_inserts(&mut self, now: u64) { if FAILED_INSERTS_RETENTION_MS < now { let cutoff = now - FAILED_INSERTS_RETENTION_MS; let outdated = self @@ -475,10 +479,10 @@ impl CrdsGossipPull { /// filter values that fail the bloom filter up to max_bytes fn filter_crds_values( - &self, + thread_pool: &ThreadPool, crds: &Crds, filters: &[(CrdsValue, CrdsFilter)], - mut output_size_limit: usize, // Limit number of crds values returned. + output_size_limit: usize, // Limit number of crds values returned. 
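// Illustration (not part of the patch): with filter_crds_values going
// parallel below, output_size_limit becomes a shared AtomicI64 budget. Each
// worker reads the remaining budget, takes at most that many items, then
// subtracts what it actually produced. A minimal sketch of that pattern:
use std::sync::atomic::{AtomicI64, Ordering};

fn take_with_budget<T: Clone>(items: &[T], budget: &AtomicI64) -> Vec<T> {
    let remaining = budget.load(Ordering::Relaxed);
    if remaining <= 0 {
        return Vec::new();
    }
    let out: Vec<T> = items.iter().take(remaining as usize).cloned().collect();
    budget.fetch_sub(out.len() as i64, Ordering::Relaxed);
    out
}
// Design note: the load/take/sub sequence is not atomic as a whole, so the
// limit can be overshot slightly when workers race; the budget is a soft cap,
// which is fine for bounding response sizes.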
now: u64, ) -> Vec> { let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS; @@ -486,46 +490,56 @@ impl CrdsGossipPull { //skip filters from callers that are too old let caller_wallclock_window = now.saturating_sub(msg_timeout)..now.saturating_add(msg_timeout); - let mut dropped_requests = 0; - let mut total_skipped = 0; - let ret: Vec<_> = filters - .iter() - .map(|(caller, filter)| { - if output_size_limit == 0 { - return None; - } - let caller_wallclock = caller.wallclock(); - if !caller_wallclock_window.contains(&caller_wallclock) { - dropped_requests += 1; - return Some(vec![]); + let dropped_requests = AtomicUsize::default(); + let total_skipped = AtomicUsize::default(); + let output_size_limit = output_size_limit.try_into().unwrap_or(i64::MAX); + let output_size_limit = AtomicI64::new(output_size_limit); + let apply_filter = |caller: &CrdsValue, filter: &CrdsFilter| { + if output_size_limit.load(Ordering::Relaxed) <= 0 { + return Vec::default(); + } + let caller_wallclock = caller.wallclock(); + if !caller_wallclock_window.contains(&caller_wallclock) { + dropped_requests.fetch_add(1, Ordering::Relaxed); + return Vec::default(); + } + let caller_pubkey = caller.pubkey(); + let caller_wallclock = caller_wallclock.checked_add(jitter).unwrap_or(0); + let pred = |entry: &&VersionedCrdsValue| { + debug_assert!(filter.test_mask(&entry.value_hash)); + // Skip values that are too new. + if entry.value.wallclock() > caller_wallclock { + total_skipped.fetch_add(1, Ordering::Relaxed); + false + } else { + !filter.filter_contains(&entry.value_hash) + && (entry.value.pubkey() != caller_pubkey + || entry.value.should_force_push(&caller_pubkey)) } - let caller_wallclock = caller_wallclock.checked_add(jitter).unwrap_or(0); - let out: Vec<_> = crds - .filter_bitmask(filter.mask, filter.mask_bits) - .filter_map(|item| { - debug_assert!(filter.test_mask(&item.value_hash)); - //skip values that are too new - if item.value.wallclock() > caller_wallclock { - total_skipped += 1; - None - } else if filter.filter_contains(&item.value_hash) { - None - } else { - Some(item.value.clone()) - } - }) - .take(output_size_limit) - .collect(); - output_size_limit -= out.len(); - Some(out) - }) - .while_some() - .collect(); + }; + let out: Vec<_> = crds + .filter_bitmask(filter.mask, filter.mask_bits) + .filter(pred) + .map(|entry| entry.value.clone()) + .take(output_size_limit.load(Ordering::Relaxed).max(0) as usize) + .collect(); + output_size_limit.fetch_sub(out.len() as i64, Ordering::Relaxed); + out + }; + let ret: Vec<_> = thread_pool.install(|| { + filters + .par_iter() + .map(|(caller, filter)| apply_filter(caller, filter)) + .collect() + }); inc_new_counter_info!( "gossip_filter_crds_values-dropped_requests", - dropped_requests + filters.len() - ret.len() + dropped_requests.into_inner() + ); + inc_new_counter_info!( + "gossip_filter_crds_values-dropped_values", + total_skipped.into_inner() ); - inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped); ret } @@ -553,8 +567,7 @@ impl CrdsGossipPull { } /// Purge values from the crds that are older then `active_timeout` - pub fn purge_active( - &mut self, + pub(crate) fn purge_active( thread_pool: &ThreadPool, crds: &mut Crds, now: u64, @@ -569,7 +582,7 @@ impl CrdsGossipPull { /// For legacy tests #[cfg(test)] - pub fn process_pull_response( + fn process_pull_response( &mut self, crds: &mut Crds, from: &Pubkey, @@ -689,7 +702,15 @@ pub(crate) mod tests { stakes.insert(id, i * 100); } let now = 1024; - let mut options = 
node.pull_options(&crds, &me.label().pubkey(), 0, now, None, &stakes); + let mut options = node.pull_options( + &crds, + &me.label().pubkey(), + 0, + now, + None, + &stakes, + &SocketAddrSpace::Unspecified, + ); assert!(!options.is_empty()); options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap()); // check that the highest stake holder is also the heaviest weighted. @@ -739,7 +760,15 @@ pub(crate) mod tests { // shred version 123 should ignore nodes with versions 0 and 456 let options = node - .pull_options(&crds, &me.label().pubkey(), 123, 0, None, &stakes) + .pull_options( + &crds, + &me.label().pubkey(), + 123, + 0, + None, + &stakes, + &SocketAddrSpace::Unspecified, + ) .iter() .map(|(_, c)| c.id) .collect::>(); @@ -749,7 +778,15 @@ pub(crate) mod tests { // spy nodes will see all let options = node - .pull_options(&crds, &spy.label().pubkey(), 0, 0, None, &stakes) + .pull_options( + &crds, + &spy.label().pubkey(), + 0, + 0, + None, + &stakes, + &SocketAddrSpace::Unspecified, + ) .iter() .map(|(_, c)| c.id) .collect::>(); @@ -789,6 +826,7 @@ pub(crate) mod tests { 0, Some(&gossip_validators), &stakes, + &SocketAddrSpace::Unspecified, ); assert!(options.is_empty()); @@ -801,6 +839,7 @@ pub(crate) mod tests { 0, Some(&gossip_validators), &stakes, + &SocketAddrSpace::Unspecified, ); assert!(options.is_empty()); @@ -813,6 +852,7 @@ pub(crate) mod tests { 0, Some(&gossip_validators), &stakes, + &SocketAddrSpace::Unspecified, ); assert_eq!(options.len(), 1); assert_eq!(options[0].1.id, node_123.pubkey()); @@ -913,7 +953,7 @@ pub(crate) mod tests { &node_keypair.pubkey(), 0, ))); - let node = CrdsGossipPull::default(); + let mut node = CrdsGossipPull::default(); let mut pings = Vec::new(); let ping_cache = Mutex::new(PingCache::new( Duration::from_secs(20 * 60), // ttl @@ -931,6 +971,7 @@ pub(crate) mod tests { PACKET_DATA_SIZE, &ping_cache, &mut pings, + &SocketAddrSpace::Unspecified, ), Err(CrdsGossipError::NoPeers) ); @@ -948,28 +989,53 @@ pub(crate) mod tests { PACKET_DATA_SIZE, &ping_cache, &mut pings, + &SocketAddrSpace::Unspecified, ), Err(CrdsGossipError::NoPeers) ); - let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + let now = 1625029781069; + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now); ping_cache .lock() .unwrap() .mock_pong(new.id, new.gossip, Instant::now()); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); - crds.insert(new.clone(), 0).unwrap(); + crds.insert(new.clone(), now).unwrap(); let req = node.new_pull_request( &thread_pool, &crds, &node_keypair, 0, + now, + None, + &HashMap::new(), + PACKET_DATA_SIZE, + &ping_cache, + &mut pings, + &SocketAddrSpace::Unspecified, + ); + let (peer, _) = req.unwrap(); + assert_eq!(peer, *new.contact_info().unwrap()); + + node.mark_pull_request_creation_time(new.contact_info().unwrap().id, now); + let offline = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now); + let offline = CrdsValue::new_unsigned(CrdsData::ContactInfo(offline)); + crds.insert(offline, now).unwrap(); + let req = node.new_pull_request( + &thread_pool, + &crds, + &node_keypair, 0, + now, None, &HashMap::new(), PACKET_DATA_SIZE, &ping_cache, &mut pings, + &SocketAddrSpace::Unspecified, ); + // Even though the offline node should have higher weight, we shouldn't request from it + // until we receive a ping. 
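// Illustration (not part of the patch): the test comment above captures the
// selection rule, which is to walk peers in weighted-shuffle order and settle
// on the first one with a verified pong; `has_pong` and `queue_ping` stand in
// for the PingCache here.
fn select_pull_peer<P: Copy>(
    shuffled: impl IntoIterator<Item = P>,
    mut has_pong: impl FnMut(P) -> bool,
    mut queue_ping: impl FnMut(P),
) -> Option<P> {
    for peer in shuffled {
        if has_pong(peer) {
            return Some(peer);
        }
        queue_ping(peer); // skipped for now; may become eligible next round
    }
    None
}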
let (peer, _) = req.unwrap(); assert_eq!(peer, *new.contact_info().unwrap()); } @@ -1021,6 +1087,7 @@ pub(crate) mod tests { PACKET_DATA_SIZE, // bloom_size &ping_cache, &mut pings, + &SocketAddrSpace::Unspecified, ) .unwrap(); peer @@ -1098,17 +1165,18 @@ pub(crate) mod tests { PACKET_DATA_SIZE, &Mutex::new(ping_cache), &mut pings, + &SocketAddrSpace::Unspecified, ); let mut dest_crds = Crds::default(); - let dest = CrdsGossipPull::default(); let (_, filters) = req.unwrap(); let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); - let rsp = dest.generate_pull_responses( + let rsp = CrdsGossipPull::generate_pull_responses( + &thread_pool, &dest_crds, &filters, - /*output_size_limit=*/ usize::MAX, - 0, + usize::MAX, // output_size_limit + 0, // now ); assert_eq!(rsp[0].len(), 0); @@ -1122,11 +1190,12 @@ pub(crate) mod tests { .unwrap(); //should skip new value since caller is to old - let rsp = dest.generate_pull_responses( + let rsp = CrdsGossipPull::generate_pull_responses( + &thread_pool, &dest_crds, &filters, - /*output_size_limit=*/ usize::MAX, - CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, + usize::MAX, // output_size_limit + CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, // now ); assert_eq!(rsp[0].len(), 0); assert_eq!(filters.len(), MIN_NUM_BLOOM_FILTERS); @@ -1140,10 +1209,11 @@ pub(crate) mod tests { .map(|(_, filter)| (caller.clone(), filter.clone())) .collect::>() }); - let rsp = dest.generate_pull_responses( + let rsp = CrdsGossipPull::generate_pull_responses( + &thread_pool, &dest_crds, &filters, - /*output_size_limit=*/ usize::MAX, + usize::MAX, // output_size_limit CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, ); assert_eq!(rsp.len(), 2 * MIN_NUM_BLOOM_FILTERS); @@ -1186,19 +1256,20 @@ pub(crate) mod tests { PACKET_DATA_SIZE, &Mutex::new(ping_cache), &mut pings, + &SocketAddrSpace::Unspecified, ); let mut dest_crds = Crds::default(); - let mut dest = CrdsGossipPull::default(); let (_, filters) = req.unwrap(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); - let rsp = dest.generate_pull_responses( + let rsp = CrdsGossipPull::generate_pull_responses( + &thread_pool, &dest_crds, &filters, - /*output_size_limit=*/ usize::MAX, - 0, + usize::MAX, // output_size_limit + 0, // now ); - dest.process_pull_requests( + CrdsGossipPull::process_pull_requests( &mut dest_crds, filters.into_iter().map(|(caller, _)| caller), 1, @@ -1229,7 +1300,6 @@ pub(crate) mod tests { let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); node_crds.insert(new, 0).unwrap(); - let mut dest = CrdsGossipPull::default(); let mut dest_crds = Crds::default(); let new_id = solana_sdk::pubkey::new_rand(); let new = ContactInfo::new_localhost(&new_id, 1); @@ -1261,16 +1331,18 @@ pub(crate) mod tests { PACKET_DATA_SIZE, &ping_cache, &mut pings, + &SocketAddrSpace::Unspecified, ); let (_, filters) = req.unwrap(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); - let rsp = dest.generate_pull_responses( + let rsp = CrdsGossipPull::generate_pull_responses( + &thread_pool, &dest_crds, &filters, - /*output_size_limit=*/ usize::MAX, - 0, + usize::MAX, // output_size_limit + 0, // now ); - dest.process_pull_requests( + CrdsGossipPull::process_pull_requests( &mut dest_crds, filters.into_iter().map(|(caller, _)| caller), 0, @@ -1313,7 +1385,7 @@ pub(crate) mod tests { ))); let node_label = entry.label(); let node_pubkey = node_label.pubkey(); - let mut node = CrdsGossipPull::default(); + let node = CrdsGossipPull::default(); node_crds.insert(entry, 
0).unwrap(); let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &solana_sdk::pubkey::new_rand(), @@ -1329,7 +1401,7 @@ pub(crate) mod tests { ); // purge let timeouts = node.make_timeouts(node_pubkey, &HashMap::new(), Duration::default()); - node.purge_active(&thread_pool, &mut node_crds, node.crds_timeout, &timeouts); + CrdsGossipPull::purge_active(&thread_pool, &mut node_crds, node.crds_timeout, &timeouts); //verify self is still valid after purge assert_eq!( diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 8daa532d9bf869..4639dca4795476 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -16,7 +16,7 @@ use { crds_gossip::{get_stake, get_weight}, crds_gossip_error::CrdsGossipError, crds_value::CrdsValue, - weighted_shuffle::weighted_shuffle, + weighted_shuffle::WeightedShuffle, }, bincode::serialized_size, indexmap::map::IndexMap, @@ -24,6 +24,7 @@ use { rand::{seq::SliceRandom, Rng}, solana_runtime::bloom::{AtomicBloom, Bloom}, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp}, + solana_streamer::socket::SocketAddrSpace, std::{ cmp, collections::{HashMap, HashSet}, @@ -98,7 +99,7 @@ impl CrdsGossipPush { ((CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT * min_path_stake as f64).round() as u64).max(1) } - pub fn prune_received_cache( + pub(crate) fn prune_received_cache( &mut self, self_pubkey: &Pubkey, origin: &Pubkey, @@ -119,6 +120,7 @@ impl CrdsGossipPush { if peer_stake_total < prune_stake_threshold { return Vec::new(); } + let mut rng = rand::thread_rng(); let shuffled_staked_peers = { let peers: Vec<_> = peers .iter() @@ -126,11 +128,9 @@ impl CrdsGossipPush { .filter_map(|(peer, _)| Some((*peer, *stakes.get(peer)?))) .filter(|(_, stake)| *stake > 0) .collect(); - let mut seed = [0; 32]; - rand::thread_rng().fill(&mut seed[..]); let weights: Vec<_> = peers.iter().map(|(_, stake)| *stake).collect(); - weighted_shuffle(&weights, seed) - .into_iter() + WeightedShuffle::new(&mut rng, &weights) + .unwrap() .map(move |i| peers[i]) }; let mut keep = HashSet::new(); @@ -165,7 +165,7 @@ impl CrdsGossipPush { } /// process a push message to the network - pub fn process_push_message( + pub(crate) fn process_push_message( &mut self, crds: &mut Crds, from: &Pubkey, @@ -255,7 +255,7 @@ impl CrdsGossipPush { /// refresh the push active set /// * ratio - active_set.len()/ratio is the number of actives to rotate - pub fn refresh_push_active_set( + pub(crate) fn refresh_push_active_set( &mut self, crds: &Crds, stakes: &HashMap, @@ -264,6 +264,7 @@ impl CrdsGossipPush { self_shred_version: u16, network_size: usize, ratio: usize, + socket_addr_space: &SocketAddrSpace, ) { const BLOOM_FALSE_RATE: f64 = 0.1; const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; @@ -277,10 +278,11 @@ impl CrdsGossipPush { let (weights, peers): (Vec<_>, Vec<_>) = self .push_options( crds, - &self_id, + self_id, self_shred_version, stakes, gossip_validators, + socket_addr_space, ) .into_iter() .unzip(); @@ -288,11 +290,7 @@ impl CrdsGossipPush { return; } let num_bloom_items = MIN_NUM_BLOOM_ITEMS.max(network_size); - let shuffle = { - let mut seed = [0; 32]; - rng.fill(&mut seed[..]); - weighted_shuffle(&weights, seed).into_iter() - }; + let shuffle = WeightedShuffle::new(&mut rng, &weights).unwrap(); for peer in shuffle.map(|i| peers[i].id) { if new_items.len() >= need { break; @@ -326,7 +324,8 @@ impl CrdsGossipPush { self_shred_version: u16, stakes: &HashMap, gossip_validators: Option<&HashSet>, - ) -> Vec<(f32, &'a 
ContactInfo)> { + socket_addr_space: &SocketAddrSpace, + ) -> Vec<(u64, &'a ContactInfo)> { let now = timestamp(); let mut rng = rand::thread_rng(); let max_weight = u16::MAX as f32 - 1.0; @@ -347,7 +346,7 @@ impl CrdsGossipPush { }) .filter(|info| { info.id != *self_id - && ContactInfo::is_valid_address(&info.gossip) + && ContactInfo::is_valid_address(&info.gossip, socket_addr_space) && self_shred_version == info.shred_version && gossip_validators.map_or(true, |gossip_validators| { gossip_validators.contains(&info.id) @@ -362,13 +361,15 @@ impl CrdsGossipPush { let since = (now.saturating_sub(last_pushed_to).min(3600 * 1000) / 1024) as u32; let stake = get_stake(&info.id, stakes); let weight = get_weight(max_weight, since, stake); - (weight, info) + // Weights are bounded by max_weight defined above. + // So this type-cast should be safe. + ((weight * 100.0) as u64, info) }) .collect() } /// purge received push message cache - pub fn purge_old_received_cache(&mut self, min_time: u64) { + pub(crate) fn purge_old_received_cache(&mut self, min_time: u64) { self.received_cache.retain(|_, v| { v.retain(|_, (_, t)| *t > min_time); !v.is_empty() @@ -556,7 +557,16 @@ mod test { ))); assert_eq!(crds.insert(value1.clone(), now), Ok(())); - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); assert!(push.active_set.get(&value1.label().pubkey()).is_some()); let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( @@ -566,7 +576,16 @@ mod test { assert!(push.active_set.get(&value2.label().pubkey()).is_none()); assert_eq!(crds.insert(value2.clone(), now), Ok(())); for _ in 0..30 { - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); if push.active_set.get(&value2.label().pubkey()).is_some() { break; } @@ -579,7 +598,16 @@ mod test { )); assert_eq!(crds.insert(value2.clone(), now), Ok(())); } - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); assert_eq!(push.active_set.len(), push.num_active); } #[test] @@ -599,7 +627,14 @@ mod test { stakes.insert(id, i * 100); push.last_pushed_to.put(id, time); } - let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None); + let mut options = push.push_options( + &crds, + &Pubkey::default(), + 0, + &stakes, + None, + &SocketAddrSpace::Unspecified, + ); assert!(!options.is_empty()); options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap()); // check that the highest stake holder is also the heaviest weighted. 
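// Illustration (not part of the patch): push_options and pull_options now
// return u64 weights instead of f32. The bounded float weight is scaled by
// 100 and truncated so it can feed the integer-only WeightedShuffle; ordering
// survives the quantization as long as weights differ by at least 0.01.
fn to_integer_weight(weight: f32) -> u64 {
    // Safe cast: weights are bounded by max_weight (~u16::MAX) upstream.
    (weight * 100.0) as u64
}
// e.g. assert!(to_integer_weight(1.25) < to_integer_weight(7.5));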
@@ -650,7 +685,14 @@ mod test { // shred version 123 should ignore nodes with versions 0 and 456 let options = node - .push_options(&crds, &me.label().pubkey(), 123, &stakes, None) + .push_options( + &crds, + &me.label().pubkey(), + 123, + &stakes, + None, + &SocketAddrSpace::Unspecified, + ) .iter() .map(|(_, c)| c.id) .collect::>(); @@ -660,7 +702,14 @@ mod test { // spy nodes should not push to people on different shred versions let options = node - .push_options(&crds, &spy.label().pubkey(), 0, &stakes, None) + .push_options( + &crds, + &spy.label().pubkey(), + 0, + &stakes, + None, + &SocketAddrSpace::Unspecified, + ) .iter() .map(|(_, c)| c.id) .collect::>(); @@ -697,6 +746,7 @@ mod test { 0, &stakes, Some(&gossip_validators), + &SocketAddrSpace::Unspecified, ); assert!(options.is_empty()); @@ -709,6 +759,7 @@ mod test { 0, &stakes, Some(&gossip_validators), + &SocketAddrSpace::Unspecified, ); assert!(options.is_empty()); @@ -720,6 +771,7 @@ mod test { 0, &stakes, Some(&gossip_validators), + &SocketAddrSpace::Unspecified, ); assert_eq!(options.len(), 1); @@ -736,7 +788,16 @@ mod test { 0, ))); assert_eq!(crds.insert(peer.clone(), now), Ok(())); - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &solana_sdk::pubkey::new_rand(), @@ -771,7 +832,16 @@ mod test { push.process_push_message(&mut crds, &Pubkey::default(), peers[2].clone(), now), Ok(()) ); - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); // push 3's contact info to 1 and 2 and 3 let expected: HashMap<_, _> = vec![ @@ -793,7 +863,16 @@ mod test { 0, ))); assert_eq!(crds.insert(peer.clone(), 0), Ok(())); - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &solana_sdk::pubkey::new_rand(), @@ -820,7 +899,16 @@ mod test { 0, ))); assert_eq!(crds.insert(peer, 0), Ok(())); - push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); + push.refresh_push_active_set( + &crds, + &HashMap::new(), + None, + &Pubkey::default(), + 0, + 1, + 1, + &SocketAddrSpace::Unspecified, + ); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); ci.wallclock = 1; diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 02c1b623b6a456..1c951b9ff31408 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -71,7 +71,7 @@ impl Signable for CrdsValue { fn verify(&self) -> bool { self.get_signature() - .verify(&self.pubkey().as_ref(), self.signable_data().borrow()) + .verify(self.pubkey().as_ref(), self.signable_data().borrow()) } } @@ -83,7 +83,7 @@ impl Signable for CrdsValue { pub enum CrdsData { ContactInfo(ContactInfo), Vote(VoteIndex, Vote), - LowestSlot(u8, LowestSlot), + LowestSlot(/*DEPRECATED:*/ u8, LowestSlot), SnapshotHashes(SnapshotHash), AccountsHashes(SnapshotHash), EpochSlots(EpochSlotsIndex, EpochSlots), @@ -853,9 +853,9 @@ mod test { 
wrong_keypair: &Keypair, ) { assert!(!value.verify()); - value.sign(&correct_keypair); + value.sign(correct_keypair); assert!(value.verify()); - value.sign(&wrong_keypair); + value.sign(wrong_keypair); assert!(!value.verify()); serialize_deserialize_value(value, correct_keypair); } } diff --git a/gossip/src/deprecated.rs b/gossip/src/deprecated.rs index 57a7a8315cb237..120c69ffc1f53c 100644 --- a/gossip/src/deprecated.rs +++ b/gossip/src/deprecated.rs @@ -1,4 +1,11 @@ -use solana_sdk::clock::Slot; +use { + crate::{ + cluster_info::ClusterInfo, contact_info::ContactInfo, weighted_shuffle::weighted_shuffle, + }, + itertools::Itertools, + solana_sdk::{clock::Slot, pubkey::Pubkey}, + std::collections::HashMap, +}; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample, AbiEnumVisitor)] enum CompressionType { @@ -19,3 +26,74 @@ pub(crate) struct EpochIncompleteSlots { compression: CompressionType, compressed_list: Vec<u8>, } + +// Legacy methods copied for testing backward compatibility. + +pub fn sorted_retransmit_peers_and_stakes( + cluster_info: &ClusterInfo, + stakes: Option<&HashMap<Pubkey, u64>>, +) -> (Vec<ContactInfo>, Vec<(u64, usize)>) { + let mut peers = cluster_info.tvu_peers(); + // insert "self" into this list for the layer and neighborhood computation + peers.push(cluster_info.my_contact_info()); + let stakes_and_index = sorted_stakes_with_index(&peers, stakes); + (peers, stakes_and_index) +} + +pub fn sorted_stakes_with_index( + peers: &[ContactInfo], + stakes: Option<&HashMap<Pubkey, u64>>, +) -> Vec<(u64, usize)> { + let stakes_and_index: Vec<_> = peers + .iter() + .enumerate() + .map(|(i, c)| { + // For the stake weighted shuffle a valid weight is at least 1. Weight 0 is + // assumed to be a missing entry. So let's make sure stake weights are at least 1 + let stake = 1.max( + stakes + .as_ref() + .map_or(1, |stakes| *stakes.get(&c.id).unwrap_or(&1)), + ); + (stake, i) + }) + .sorted_by(|(l_stake, l_info), (r_stake, r_info)| { + if r_stake == l_stake { + peers[*r_info].id.cmp(&peers[*l_info].id) + } else { + r_stake.cmp(l_stake) + } + }) + .collect(); + + stakes_and_index +} + +pub fn shuffle_peers_and_index( + id: &Pubkey, + peers: &[ContactInfo], + stakes_and_index: &[(u64, usize)], + seed: [u8; 32], +) -> (usize, Vec<(u64, usize)>) { + let shuffled_stakes_and_index = stake_weighted_shuffle(stakes_and_index, seed); + let self_index = shuffled_stakes_and_index + .iter() + .enumerate() + .find_map(|(i, (_stake, index))| { + if peers[*index].id == *id { + Some(i) + } else { + None + } + }) + .unwrap(); + (self_index, shuffled_stakes_and_index) +} + +fn stake_weighted_shuffle(stakes_and_index: &[(u64, usize)], seed: [u8; 32]) -> Vec<(u64, usize)> { + let stake_weights: Vec<_> = stakes_and_index.iter().map(|(w, _)| *w).collect(); + + let shuffle = weighted_shuffle(&stake_weights, seed); + + shuffle.iter().map(|x| stakes_and_index[*x]).collect() +} diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 4dbc6a50c72a7d..9924d0f8a49a16 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -13,6 +13,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signer}, }, + solana_streamer::socket::SocketAddrSpace, solana_streamer::streamer, std::{ collections::HashSet, @@ -47,32 +48,55 @@ impl GossipService { &cluster_info.id(), gossip_socket.local_addr().unwrap() ); + let socket_addr_space = *cluster_info.socket_addr_space(); let t_receiver = streamer::receiver( gossip_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "gossip_receiver", 1, + false, + ); + let 
(consume_sender, listen_receiver) = channel();
+        // https://github.com/rust-lang/rust/issues/39364#issuecomment-634545136
+        let _consume_sender = consume_sender.clone();
+        let t_socket_consume = cluster_info.clone().start_socket_consume_thread(
+            request_receiver,
+            consume_sender,
+            exit.clone(),
         );
         let (response_sender, response_receiver) = channel();
-        let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
-        let t_listen = ClusterInfo::listen(
-            cluster_info.clone(),
+        let t_listen = cluster_info.clone().listen(
             bank_forks.clone(),
-            request_receiver,
+            listen_receiver,
             response_sender.clone(),
             should_check_duplicate_instance,
-            exit,
+            exit.clone(),
         );
-        let t_gossip = ClusterInfo::gossip(
-            cluster_info.clone(),
+        let t_gossip = cluster_info.clone().gossip(
             bank_forks,
             response_sender,
             gossip_validators,
-            exit,
+            exit.clone(),
         );
-        let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
+        // To work around:
+        // https://github.com/rust-lang/rust/issues/54267
+        // responder thread should start after response_sender.clone(). see:
+        // https://github.com/rust-lang/rust/issues/39364#issuecomment-381446873
+        let t_responder = streamer::responder(
+            "gossip",
+            gossip_socket,
+            response_receiver,
+            socket_addr_space,
+        );
+        let thread_hdls = vec![
+            t_receiver,
+            t_responder,
+            t_socket_consume,
+            t_listen,
+            t_gossip,
+        ];
         Self { thread_hdls }
     }
@@ -88,6 +112,7 @@ impl GossipService {
 pub fn discover_cluster(
     entrypoint: &SocketAddr,
     num_nodes: usize,
+    socket_addr_space: SocketAddrSpace,
 ) -> std::io::Result<Vec<ContactInfo>> {
     const DISCOVER_CLUSTER_TIMEOUT: Duration = Duration::from_secs(120);
     let (_all_peers, validators) = discover(
@@ -99,6 +124,7 @@ pub fn discover_cluster(
         None, // find_node_by_gossip_addr
         None, // my_gossip_addr
         0,    // my_shred_version
+        socket_addr_space,
     )?;
     Ok(validators)
 }
@@ -112,6 +138,7 @@ pub fn discover(
     find_node_by_gossip_addr: Option<&SocketAddr>,
     my_gossip_addr: Option<&SocketAddr>,
     my_shred_version: u16,
+    socket_addr_space: SocketAddrSpace,
 ) -> std::io::Result<(
     Vec<ContactInfo>, // all gossip peers
     Vec<ContactInfo>, // tvu peers (validators)
@@ -126,6 +153,7 @@ pub fn discover(
         my_gossip_addr,
         my_shred_version,
         true, // should_check_duplicate_instance,
+        socket_addr_space,
     );
 
     let id = spy_ref.id();
@@ -134,9 +162,8 @@ pub fn discover(
     if let Some(my_gossip_addr) = my_gossip_addr {
         info!("Gossip Address: {:?}", my_gossip_addr);
     }
-
-    let _ip_echo_server = ip_echo.map(solana_net_utils::ip_echo_server);
-
+    let _ip_echo_server = ip_echo
+        .map(|tcp_listener| solana_net_utils::ip_echo_server(tcp_listener, Some(my_shred_version)));
     let (met_criteria, elapsed, all_peers, tvu_peers) = spy(
         spy_ref.clone(),
         num_nodes,
@@ -173,28 +200,31 @@ pub fn discover(
 }
 
 /// Creates a ThinClient per valid node
-pub fn get_clients(nodes: &[ContactInfo]) -> Vec<ThinClient> {
+pub fn get_clients(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> Vec<ThinClient> {
     nodes
         .iter()
-        .filter_map(ContactInfo::valid_client_facing_addr)
+        .filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
         .map(|addrs| create_client(addrs, VALIDATOR_PORT_RANGE))
         .collect()
 }
 
 /// Creates a ThinClient by selecting a valid node at random
-pub fn get_client(nodes: &[ContactInfo]) -> ThinClient {
+pub fn get_client(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> ThinClient {
     let nodes: Vec<_> = nodes
         .iter()
-        .filter_map(ContactInfo::valid_client_facing_addr)
+        .filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
         .collect();
     let select = thread_rng().gen_range(0, nodes.len());
     create_client(nodes[select], VALIDATOR_PORT_RANGE)
 }
 
-pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
+pub fn get_multi_client(
+    nodes: &[ContactInfo],
+    socket_addr_space: &SocketAddrSpace,
+) -> (ThinClient, usize) {
     let addrs: Vec<_> = nodes
         .iter()
-        .filter_map(ContactInfo::valid_client_facing_addr)
+        .filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
         .collect();
     let rpc_addrs: Vec<_> = addrs.iter().map(|addr| addr.0).collect();
     let tpu_addrs: Vec<_> = addrs.iter().map(|addr| addr.1).collect();
@@ -285,13 +315,14 @@ fn make_gossip_node(
     gossip_addr: Option<&SocketAddr>,
     shred_version: u16,
     should_check_duplicate_instance: bool,
+    socket_addr_space: SocketAddrSpace,
 ) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
     let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
         ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
     } else {
         ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
     };
-    let cluster_info = ClusterInfo::new(node, keypair);
+    let cluster_info = ClusterInfo::new(node, keypair, socket_addr_space);
     if let Some(entrypoint) = entrypoint {
         cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
     }
@@ -302,7 +333,7 @@ fn make_gossip_node(
         gossip_socket,
         None,
         should_check_duplicate_instance,
-        &exit,
+        exit,
     );
     (gossip_service, ip_echo, cluster_info)
 }
@@ -321,7 +352,11 @@ mod tests {
     fn test_exit() {
         let exit = Arc::new(AtomicBool::new(false));
         let tn = Node::new_localhost();
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
+        let cluster_info = ClusterInfo::new(
+            tn.info.clone(),
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         let c = Arc::new(cluster_info);
         let d = GossipService::new(
             &c,
@@ -344,7 +379,11 @@ mod tests {
         let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
         let peer0_info = ContactInfo::new_localhost(&peer0, 0);
         let peer1_info = ContactInfo::new_localhost(&peer1, 0);
-        let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
+        let cluster_info = ClusterInfo::new(
+            contact_info,
+            Arc::new(keypair),
+            SocketAddrSpace::Unspecified,
+        );
         cluster_info.insert_info(peer0_info.clone());
         cluster_info.insert_info(peer1_info);
diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs
index 7562674a0cd861..e5b2d7ccce8e4e 100644
--- a/gossip/src/lib.rs
+++ b/gossip/src/lib.rs
@@ -13,7 +13,7 @@ pub mod crds_gossip_push;
 pub mod crds_shards;
 pub mod crds_value;
 pub mod data_budget;
-mod deprecated;
+pub mod deprecated;
 pub mod duplicate_shred;
 pub mod epoch_slots;
 pub mod gossip_error;
diff --git a/gossip/src/main.rs b/gossip/src/main.rs
index 20f667ef24dbe2..3499e1eb6d996b 100644
--- a/gossip/src/main.rs
+++ b/gossip/src/main.rs
@@ -11,6 +11,7 @@ use {
     },
     solana_gossip::{contact_info::ContactInfo, gossip_service::discover},
     solana_sdk::pubkey::Pubkey,
+    solana_streamer::socket::SocketAddrSpace,
     std::{
         error,
         net::{IpAddr, Ipv4Addr, SocketAddr},
@@ -32,6 +33,13 @@ fn parse_matches() -> ArgMatches<'static> {
         .about(crate_description!())
         .version(solana_version::version!())
         .setting(AppSettings::SubcommandRequiredElseHelp)
+        .arg(
+            Arg::with_name("allow_private_addr")
+                .long("allow-private-addr")
+                .takes_value(false)
+                .help("Allow contacting private ip addresses")
+                .hidden(true),
+        )
         .subcommand(
             SubCommand::with_name("rpc-url")
                 .about("Get an RPC URL for the cluster")
@@ -224,8 +232,9 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
     let pubkey = matches
         .value_of("node_pubkey")
         .map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
+    let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
     let shred_version = value_t_or_exit!(matches, "shred_version", u16);
-    let identity_keypair = keypair_of(&matches, "identity").map(Arc::new);
+    let identity_keypair = keypair_of(matches, "identity").map(Arc::new);
 
     let entrypoint_addr = parse_entrypoint(matches);
 
@@ -251,6 +260,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
         None,               // find_node_by_gossip_addr
         Some(&gossip_addr), // my_gossip_addr
         shred_version,
+        socket_addr_space,
     )?;
 
     process_spy_results(timeout, validators, num_nodes, num_nodes_exactly, pubkey);
@@ -270,9 +280,10 @@ fn parse_entrypoint(matches: &ArgMatches) -> Option<SocketAddr> {
 fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
     let any = matches.is_present("any");
     let all = matches.is_present("all");
-    let entrypoint_addr = parse_entrypoint(&matches);
+    let entrypoint_addr = parse_entrypoint(matches);
     let timeout = value_t_or_exit!(matches, "timeout", u64);
     let shred_version = value_t_or_exit!(matches, "shred_version", u16);
+    let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
     let (_all_peers, validators) = discover(
         None, // keypair
         entrypoint_addr.as_ref(),
@@ -282,13 +293,14 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
         entrypoint_addr.as_ref(), // find_node_by_gossip_addr
         None,                     // my_gossip_addr
         shred_version,
+        socket_addr_space,
     )?;
 
     let rpc_addrs: Vec<_> = validators
         .iter()
         .filter_map(|contact_info| {
             if (any || all || Some(contact_info.gossip) == entrypoint_addr)
-                && ContactInfo::is_valid_address(&contact_info.rpc)
+                && ContactInfo::is_valid_address(&contact_info.rpc, &socket_addr_space)
             {
                 return Some(contact_info.rpc);
             }
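A note on the `--allow-private-addr` plumbing above: the flag is folded into a single `SocketAddrSpace` value that every address-validation call site now receives. The sketch below shows the shape this implies; the enum layout and the `check` helper are assumptions inferred from how the diff uses the type, not a copy of the real `solana_streamer::socket` module.

use std::net::IpAddr;

// Sketch only: assumed shape of solana_streamer::socket::SocketAddrSpace.
#[derive(Clone, Copy)]
pub enum SocketAddrSpace {
    Unspecified, // private (e.g. RFC 1918) addresses are acceptable
    Global,      // only globally routable addresses are acceptable
}

impl SocketAddrSpace {
    pub fn new(allow_private_addr: bool) -> Self {
        if allow_private_addr {
            SocketAddrSpace::Unspecified
        } else {
            SocketAddrSpace::Global
        }
    }

    // Hypothetical validity check of the kind ContactInfo::is_valid_address
    // would consult before trusting a gossiped address.
    pub fn check(&self, addr: &IpAddr) -> bool {
        match self {
            SocketAddrSpace::Unspecified => true,
            // is_loopback is a stand-in; a real check would also reject
            // private and otherwise non-routable ranges.
            SocketAddrSpace::Global => !addr.is_loopback(),
        }
    }
}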
diff --git a/gossip/src/weighted_shuffle.rs b/gossip/src/weighted_shuffle.rs
index 62e2d18eb692ff..63ba440f30168f 100644
--- a/gossip/src/weighted_shuffle.rs
+++ b/gossip/src/weighted_shuffle.rs
@@ -2,12 +2,138 @@
 use {
     itertools::Itertools,
-    num_traits::{FromPrimitive, ToPrimitive},
-    rand::{Rng, SeedableRng},
+    num_traits::{CheckedAdd, FromPrimitive, ToPrimitive},
+    rand::{
+        distributions::uniform::{SampleUniform, UniformSampler},
+        Rng, SeedableRng,
+    },
     rand_chacha::ChaChaRng,
-    std::{iter, ops::Div},
+    std::{
+        iter,
+        ops::{AddAssign, Div, Sub, SubAssign},
+    },
 };
 
+#[derive(Debug)]
+pub enum WeightedShuffleError<T> {
+    NegativeWeight(T),
+    SumOverflow,
+}
+
+/// Implements an iterator where indices are shuffled according to their
+/// weights:
+///   - Returned indices are unique in the range [0, weights.len()).
+///   - Higher weighted indices tend to appear earlier proportional to their
+///     weight.
+///   - Zero weighted indices are excluded. Therefore the iterator may have
+///     count less than weights.len().
+pub struct WeightedShuffle<'a, R, T> {
+    arr: Vec<T>,    // Underlying array implementing binary indexed tree.
+    sum: T,         // Current sum of weights, excluding already selected indices.
+    rng: &'a mut R, // Random number generator.
+}
+
+// The implementation uses binary indexed tree:
+// https://en.wikipedia.org/wiki/Fenwick_tree
+// to maintain cumulative sum of weights excluding already selected indices
+// over self.arr.
+impl<'a, R: Rng, T> WeightedShuffle<'a, R, T>
+where
+    T: Copy + Default + PartialOrd + AddAssign + CheckedAdd,
+{
+    /// Returns error if:
+    ///   - any of the weights are negative.
+    ///   - sum of weights overflows.
+    pub fn new(rng: &'a mut R, weights: &[T]) -> Result<Self, WeightedShuffleError<T>> {
+        let size = weights.len() + 1;
+        let zero = <T as Default>::default();
+        let mut arr = vec![zero; size];
+        let mut sum = zero;
+        for (mut k, &weight) in (1usize..).zip(weights) {
+            #[allow(clippy::neg_cmp_op_on_partial_ord)]
+            // weight < zero does not work for NaNs.
+            if !(weight >= zero) {
+                return Err(WeightedShuffleError::NegativeWeight(weight));
+            }
+            sum = sum
+                .checked_add(&weight)
+                .ok_or(WeightedShuffleError::SumOverflow)?;
+            while k < size {
+                arr[k] += weight;
+                k += k & k.wrapping_neg();
+            }
+        }
+        Ok(Self { arr, sum, rng })
+    }
+}
+
+impl<'a, R, T> WeightedShuffle<'a, R, T>
+where
+    T: Copy + Default + PartialOrd + AddAssign + SubAssign + Sub<Output = T>,
+{
+    // Returns cumulative sum of current weights up to index k (inclusive).
+    fn cumsum(&self, mut k: usize) -> T {
+        let mut out = <T as Default>::default();
+        while k != 0 {
+            out += self.arr[k];
+            k ^= k & k.wrapping_neg();
+        }
+        out
+    }
+
+    // Removes given weight at index k.
+    fn remove(&mut self, mut k: usize, weight: T) {
+        self.sum -= weight;
+        let size = self.arr.len();
+        while k < size {
+            self.arr[k] -= weight;
+            k += k & k.wrapping_neg();
+        }
+    }
+
+    // Returns smallest index such that self.cumsum(k) > val,
+    // along with its respective weight.
+    fn search(&self, val: T) -> (/*index:*/ usize, /*weight:*/ T) {
+        let zero = <T as Default>::default();
+        debug_assert!(val >= zero);
+        debug_assert!(val < self.sum);
+        let mut lo = (/*index:*/ 0, /*cumsum:*/ zero);
+        let mut hi = (self.arr.len() - 1, self.sum);
+        while lo.0 + 1 < hi.0 {
+            let k = lo.0 + (hi.0 - lo.0) / 2;
+            let sum = self.cumsum(k);
+            if sum <= val {
+                lo = (k, sum);
+            } else {
+                hi = (k, sum);
+            }
+        }
+        debug_assert!(lo.1 <= val);
+        debug_assert!(hi.1 > val);
+        (hi.0, hi.1 - lo.1)
+    }
+}
+
+impl<'a, R: Rng, T> Iterator for WeightedShuffle<'a, R, T>
+where
+    T: Copy + Default + PartialOrd + AddAssign + SampleUniform + SubAssign + Sub<Output = T>,
+{
+    type Item = usize;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let zero = <T as Default>::default();
+        #[allow(clippy::neg_cmp_op_on_partial_ord)]
+        // self.sum <= zero does not work for NaNs.
+        if !(self.sum > zero) {
+            return None;
+        }
+        let sample =
+            <T as SampleUniform>::Sampler::sample_single(zero, self.sum, &mut self.rng);
+        let (index, weight) = WeightedShuffle::search(self, sample);
+        self.remove(index, weight);
+        Some(index - 1)
+    }
+}
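The index arithmetic above (`k += k & k.wrapping_neg()` on update, `k ^= k & k.wrapping_neg()` on prefix sum) is the classic Fenwick-tree walk over the lowest set bit of a 1-based index. A self-contained illustration of the same two traversals follows; the helper names are ours, not part of the patch.

// Fenwick ("binary indexed") tree demo over 1-based indices.
// lowbit(k) isolates the lowest set bit of k, e.g. lowbit(12) == 4.
fn lowbit(k: usize) -> usize {
    k & k.wrapping_neg()
}

// Prefix sum of weights[1..=k]: clear the lowest set bit each step,
// touching O(log n) nodes (mirrors `cumsum` above; `k -= lowbit(k)`
// and `k ^= lowbit(k)` are equivalent here).
fn prefix_sum(arr: &[u64], mut k: usize) -> u64 {
    let mut out = 0;
    while k != 0 {
        out += arr[k];
        k -= lowbit(k);
    }
    out
}

// Point update: add the lowest set bit each step (mirrors the update
// loops in `new` and `remove` above).
fn add(arr: &mut [u64], mut k: usize, delta: u64) {
    while k < arr.len() {
        arr[k] += delta;
        k += lowbit(k);
    }
}

fn main() {
    let mut arr = vec![0u64; 9]; // 1-based tree over 8 weights
    for (i, w) in [5u64, 3, 0, 7, 1, 0, 2, 4].iter().enumerate() {
        add(&mut arr, i + 1, *w);
    }
    assert_eq!(prefix_sum(&arr, 4), 15); // 5 + 3 + 0 + 7
}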
+
 /// Returns a list of indexes shuffled based on the input weights
 /// Note - The sum of all weights must not exceed `u64::MAX`
 pub fn weighted_shuffle<T>(weights: &[T], seed: [u8; 32]) -> Vec<usize>
@@ -67,6 +193,31 @@ pub fn weighted_best(weights_and_indexes: &[(u64, usize)], seed: [u8; 32]) -> us
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::{convert::TryInto, iter::repeat_with};
+
+    fn weighted_shuffle_slow<R>(rng: &mut R, mut weights: Vec<u64>) -> Vec<usize>
+    where
+        R: Rng,
+    {
+        let mut shuffle = Vec::with_capacity(weights.len());
+        loop {
+            let high: u64 = weights.iter().sum();
+            if high == 0 {
+                break shuffle;
+            }
+            let sample = rng.gen_range(0, high);
+            let index = weights
+                .iter()
+                .scan(0, |acc, &w| {
+                    *acc += w;
+                    Some(*acc)
+                })
+                .position(|acc| sample < acc)
+                .unwrap();
+            shuffle.push(index);
+            weights[index] = 0;
+        }
+    }
 
     #[test]
     fn test_weighted_shuffle_iterator() {
@@ -133,4 +284,56 @@ mod tests {
         let best_index = weighted_best(&weights_and_indexes, [0x5b; 32]);
         assert_eq!(best_index, 2);
     }
+
+    // Asserts that each index is selected proportional to its weight.
+    #[test]
+    fn test_weighted_shuffle_sanity() {
+        let seed: Vec<_> = (1..).step_by(3).take(32).collect();
+        let seed: [u8; 32] = seed.try_into().unwrap();
+        let mut rng = ChaChaRng::from_seed(seed);
+        let weights = [1, 1000, 10, 100];
+        let mut counts = [0; 4];
+        for _ in 0..100000 {
+            let mut shuffle = WeightedShuffle::new(&mut rng, &weights).unwrap();
+            counts[shuffle.next().unwrap()] += 1;
+            let _ = shuffle.count(); // consume the rest.
+        }
+        assert_eq!(counts, [101, 90113, 891, 8895]);
+    }
+
+    #[test]
+    fn test_weighted_shuffle_hard_coded() {
+        let weights = [
+            78, 70, 38, 27, 21, 0, 82, 42, 21, 77, 77, 17, 4, 50, 96, 83, 33, 16, 72,
+        ];
+        let seed = [48u8; 32];
+        let mut rng = ChaChaRng::from_seed(seed);
+        let shuffle: Vec<_> = WeightedShuffle::new(&mut rng, &weights).unwrap().collect();
+        assert_eq!(
+            shuffle,
+            [2, 11, 16, 0, 13, 14, 15, 10, 1, 9, 7, 6, 12, 18, 4, 17, 3, 8]
+        );
+        let seed = [37u8; 32];
+        let mut rng = ChaChaRng::from_seed(seed);
+        let shuffle: Vec<_> = WeightedShuffle::new(&mut rng, &weights).unwrap().collect();
+        assert_eq!(
+            shuffle,
+            [17, 3, 14, 13, 6, 10, 15, 16, 9, 2, 4, 1, 0, 7, 8, 18, 11, 12]
+        );
+    }
+
+    #[test]
+    fn test_weighted_shuffle_match_slow() {
+        let mut rng = rand::thread_rng();
+        let weights: Vec<u64> = repeat_with(|| rng.gen_range(0, 1000)).take(997).collect();
+        for _ in 0..10 {
+            let mut seed = [0u8; 32];
+            rng.fill(&mut seed[..]);
+            let mut rng = ChaChaRng::from_seed(seed);
+            let shuffle: Vec<_> = WeightedShuffle::new(&mut rng, &weights).unwrap().collect();
+            let mut rng = ChaChaRng::from_seed(seed);
+            let shuffle_slow = weighted_shuffle_slow(&mut rng, weights.clone());
+            assert_eq!(shuffle, shuffle_slow,);
+        }
+    }
 }
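Taken together, the new type behaves like a lazy, weight-biased shuffle. Below is a minimal usage sketch; it assumes the module is reachable as `solana_gossip::weighted_shuffle` (consistent with the `lib.rs` module list above) and that the `rand`/`rand_chacha` versions match the ones this crate already depends on.

use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_gossip::weighted_shuffle::WeightedShuffle;

fn main() {
    // Index 1 has zero weight, so it never appears in the output;
    // index 3 carries most of the weight and tends to come first.
    let weights: [u64; 4] = [10, 0, 1, 100];
    let mut rng = ChaChaRng::from_seed([42u8; 32]);
    let shuffle: Vec<usize> = WeightedShuffle::new(&mut rng, &weights)
        .unwrap()
        .collect();
    assert!(!shuffle.contains(&1));
    assert_eq!(shuffle.len(), 3);
    println!("{:?}", shuffle);
}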
diff --git a/gossip/tests/cluster_info.rs b/gossip/tests/cluster_info.rs
index 921a842c321791..231c2399cd86e5 100644
--- a/gossip/tests/cluster_info.rs
+++ b/gossip/tests/cluster_info.rs
@@ -5,8 +5,10 @@ use {
     solana_gossip::{
         cluster_info::{compute_retransmit_peers, ClusterInfo},
         contact_info::ContactInfo,
+        deprecated::{shuffle_peers_and_index, sorted_retransmit_peers_and_stakes},
     },
-    solana_sdk::pubkey::Pubkey,
+    solana_sdk::{pubkey::Pubkey, signer::keypair::Keypair},
+    solana_streamer::socket::SocketAddrSpace,
     std::{
         collections::{HashMap, HashSet},
         sync::{
@@ -78,7 +80,11 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
     // describe the leader
     let leader_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-    let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());
+    let cluster_info = ClusterInfo::new(
+        leader_info.clone(),
+        Arc::new(Keypair::new()),
+        SocketAddrSpace::Unspecified,
+    );
 
     // setup staked nodes
     let mut staked_nodes = HashMap::new();
@@ -118,14 +124,13 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
         .map(|i| {
             let mut seed = [0; 32];
             seed[0..4].copy_from_slice(&i.to_le_bytes());
+            // TODO: Ideally these should use the new methods in
+            // solana_core::cluster_nodes, however that would add build
+            // dependency on solana_core which is not desired.
             let (peers, stakes_and_index) =
-                cluster_info.sorted_retransmit_peers_and_stakes(Some(&staked_nodes));
-            let (_, shuffled_stakes_and_indexes) = ClusterInfo::shuffle_peers_and_index(
-                &cluster_info.id(),
-                &peers,
-                &stakes_and_index,
-                seed,
-            );
+                sorted_retransmit_peers_and_stakes(&cluster_info, Some(&staked_nodes));
+            let (_, shuffled_stakes_and_indexes) =
+                shuffle_peers_and_index(&cluster_info.id(), &peers, &stakes_and_index, seed);
             shuffled_stakes_and_indexes
                 .into_iter()
                 .map(|(_, i)| peers[i].clone())
diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs
index da4c30191ba7ed..564b6a7e28ac6d 100644
--- a/gossip/tests/crds_gossip.rs
+++ b/gossip/tests/crds_gossip.rs
@@ -21,6 +21,7 @@ use {
         signature::{Keypair, Signer},
         timing::timestamp,
     },
+    solana_streamer::socket::SocketAddrSpace,
     std::{
         collections::{HashMap, HashSet},
         ops::Deref,
@@ -116,11 +117,9 @@ fn star_network_create(num: usize) -> Network {
             let node_keypair = Arc::new(Keypair::new());
             let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0);
             let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone()));
-            let id = new.label().pubkey();
             let mut node = CrdsGossip::default();
             node.crds.insert(new.clone(), timestamp()).unwrap();
             node.crds.insert(entry.clone(), timestamp()).unwrap();
-            node.set_self(&id);
             let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node)));
             (new.label().pubkey(), node)
         })
@@ -128,7 +127,6 @@ fn star_network_create(num: usize) -> Network {
     let mut node = CrdsGossip::default();
     let id = entry.label().pubkey();
     node.crds.insert(entry, timestamp()).unwrap();
-    node.set_self(&id);
     let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node)));
     network.insert(id, node);
     Network::new(network)
@@ -141,18 +139,14 @@ fn rstar_network_create(num: usize) -> Network {
     let mut origin = CrdsGossip::default();
     let id = entry.label().pubkey();
     origin.crds.insert(entry, timestamp()).unwrap();
-    origin.set_self(&id);
     let mut network: HashMap<_, _> = (1..num)
         .map(|_| {
             let node_keypair = Arc::new(Keypair::new());
             let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0);
             let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone()));
-            let id = new.label().pubkey();
             let mut node = CrdsGossip::default();
             node.crds.insert(new.clone(), timestamp()).unwrap();
             origin.crds.insert(new.clone(), timestamp()).unwrap();
-            node.set_self(&id);
-
             let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node)));
             (new.label().pubkey(), node)
         })
@@ -168,10 +162,8 @@ fn ring_network_create(num: usize) -> Network {
             let node_keypair = Arc::new(Keypair::new());
             let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0);
             let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone()));
-            let id = new.label().pubkey();
             let mut node = CrdsGossip::default();
             node.crds.insert(new.clone(), timestamp()).unwrap();
-            node.set_self(&id);
             let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node)));
             (new.label().pubkey(), node)
         })
@@ -180,7 +172,7 @@ fn ring_network_create(num: usize) -> Network {
     for k in 0..keys.len() {
         let start_info = {
             let start = &network[&keys[k]];
-            let start_id = start.lock().unwrap().id;
+            let start_id = keys[k];
             let label = CrdsValueLabel::ContactInfo(start_id);
             let gossip = start.gossip.lock().unwrap();
             gossip.crds.get(&label).unwrap().value.clone()
@@ -202,10 +194,8 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
             let node_keypair = Arc::new(Keypair::new());
             let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0);
             let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone()));
-            let id = new.label().pubkey();
             let mut node = CrdsGossip::default();
             node.crds.insert(new.clone(), timestamp()).unwrap();
-            node.set_self(&id);
             let node = Node::staked(
                 node_keypair,
                 contact_info,
@@ -221,15 +211,14 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
         .iter()
         .map(|k| {
             let start = &network[k].lock().unwrap();
-            let start_id = start.id;
-            let start_label = CrdsValueLabel::ContactInfo(start_id);
+            let start_label = CrdsValueLabel::ContactInfo(*k);
             start.crds.get(&start_label).unwrap().value.clone()
         })
         .collect();
-    for end in network.values_mut() {
+    for (end_pubkey, end) in network.iter_mut() {
         for k in 0..keys.len() {
             let mut end = end.lock().unwrap();
-            if keys[k] != end.id {
+            if keys[k] != *end_pubkey {
                 let start_info = start_entries[k].clone();
                 end.crds.insert(start_info, timestamp()).unwrap();
             }
@@ -240,7 +229,7 @@ fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network)
     let num = network.len();
-    let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, num * 2, 0.9);
+    let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, num * 2, 0.9);
     trace!(
         "network_simulator_pull_{}: converged: {} total_bytes: {}",
         num,
         converged,
         bytes_tx
     );
@@ -253,14 +242,19 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
     let num = network.len();
     // run for a small amount of time
-    let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, 10, 1.0);
+    let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, 10, 1.0);
     trace!("network_simulator_push_{}: converged: {}", num, converged);
     // make sure there is someone in the active set
     let network_values: Vec<Node> = network.values().cloned().collect();
     network_values.par_iter().for_each(|node| {
-        node.lock()
-            .unwrap()
-            .refresh_push_active_set(&HashMap::new(), None);
+        let node_pubkey = node.keypair.pubkey();
+        node.lock().unwrap().refresh_push_active_set(
+            &node_pubkey,
+            0,               // shred version
+            &HashMap::new(), // stakes
+            None,            // gossip validators
+            &SocketAddrSpace::Unspecified,
+        );
     });
     let mut total_bytes = bytes_tx;
     let mut ts = timestamp();
@@ -271,8 +265,9 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
         ts += 1000;
         // push a message to the network
         network_values.par_iter().for_each(|locked_node| {
+            let node_pubkey = locked_node.keypair.pubkey();
             let node = &mut locked_node.lock().unwrap();
-            let label = CrdsValueLabel::ContactInfo(node.id);
+            let label = CrdsValueLabel::ContactInfo(node_pubkey);
             let entry = node.crds.get(&label).unwrap();
             let mut m = entry.value.contact_info().cloned().unwrap();
             m.wallclock = now;
@@ -292,7 +287,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
             bytes_tx
         );
         // pull for a bit
-        let (converged, bytes_tx) = network_run_pull(&thread_pool, network, start, end, 1.0);
+        let (converged, bytes_tx) = network_run_pull(thread_pool, network, start, end, 1.0);
         total_bytes += bytes_tx;
         trace!(
             "network_simulator_push_{}: converged: {} bytes: {} total_bytes: {}",
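The churn in these tests traces back to one API change: `CrdsGossip` no longer caches its own pubkey (the old `set_self` and `.id` are gone), so every method that used to read `self.id` now receives the identity as an argument. In outline, the pattern looks like the sketch below; the types are stand-ins for illustration, not the real `CrdsGossip` signatures.

use std::collections::HashMap;

// Stand-ins for illustration only.
type Pubkey = [u8; 32];
#[derive(Default)]
struct CrdsGossip;

impl CrdsGossip {
    // Before: the struct cached its identity, set once via `set_self`.
    // After: callers thread the identity through each call, keeping
    // CrdsGossip itself identity-agnostic.
    fn refresh_push_active_set(
        &mut self,
        _self_pubkey: &Pubkey,
        _shred_version: u16,
        _stakes: &HashMap<Pubkey, u64>,
    ) {
        // ...
    }
}

fn main() {
    let node_pubkey: Pubkey = [7; 32];
    let mut gossip = CrdsGossip::default();
    gossip.refresh_push_active_set(&node_pubkey, 0, &HashMap::new());
}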
@@ -327,13 +322,15 @@ fn network_run_push(
     let requests: Vec<_> = network_values
         .par_iter()
         .map(|node| {
+            let node_pubkey = node.keypair.pubkey();
             let mut node_lock = node.lock().unwrap();
             let timeouts = node_lock.make_timeouts(
+                node_pubkey,
                 &HashMap::default(), // stakes
                 Duration::from_millis(node_lock.pull.crds_timeout),
             );
-            node_lock.purge(thread_pool, now, &timeouts);
-            (node_lock.id, node_lock.new_push_messages(vec![], now))
+            node_lock.purge(&node_pubkey, thread_pool, now, &timeouts);
+            (node_pubkey, node_lock.new_push_messages(vec![], now))
         })
         .collect();
     let transfered: Vec<_> = requests
@@ -356,7 +353,14 @@ fn network_run_push(
                 .collect();
             let prunes_map = network
                 .get(&to)
-                .map(|node| node.lock().unwrap().prune_received_cache(origins, &stakes))
+                .map(|node| {
+                    let node_pubkey = node.keypair.pubkey();
+                    node.lock().unwrap().prune_received_cache(
+                        &node_pubkey,
+                        origins,
+                        &stakes,
+                    )
+                })
                 .unwrap();
 
             for (from, prune_set) in prunes_map {
@@ -371,11 +375,19 @@ fn network_run_push(
                 network
                     .get(&from)
                     .map(|node| {
+                        let node_pubkey = node.keypair.pubkey();
                         let node = node.lock().unwrap();
-                        let destination = node.id;
+                        let destination = node_pubkey;
                         let now = timestamp();
-                        node.process_prune_msg(&to, &destination, &prune_keys, now, now)
-                            .unwrap()
+                        node.process_prune_msg(
+                            &node_pubkey,
+                            &to,
+                            &destination,
+                            &prune_keys,
+                            now,
+                            now,
+                        )
+                        .unwrap()
                     })
                     .unwrap();
             }
@@ -399,9 +411,14 @@ fn network_run_push(
         }
         if now % CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS == 0 && now > 0 {
             network_values.par_iter().for_each(|node| {
-                node.lock()
-                    .unwrap()
-                    .refresh_push_active_set(&HashMap::new(), None);
+                let node_pubkey = node.keypair.pubkey();
+                node.lock().unwrap().refresh_push_active_set(
+                    &node_pubkey,
+                    0,               // shred version
+                    &HashMap::new(), // stakes
+                    None,            // gossip validators
+                    &SocketAddrSpace::Unspecified,
+                );
             });
         }
         total = network_values
@@ -466,25 +483,28 @@ fn network_run_pull(
                 .lock()
                 .unwrap()
                 .new_pull_request(
-                    &thread_pool,
+                    thread_pool,
                     from.keypair.deref(),
+                    0, // shred version.
                    now,
                    None,
                    &HashMap::new(),
                    cluster_info::MAX_BLOOM_SIZE,
                    from.ping_cache.deref(),
                    &mut pings,
+                    &SocketAddrSpace::Unspecified,
                )
                .ok()?;
+            let from_pubkey = from.keypair.pubkey();
             let gossip = from.gossip.lock().unwrap();
-            let label = CrdsValueLabel::ContactInfo(gossip.id);
+            let label = CrdsValueLabel::ContactInfo(from_pubkey);
             let self_info = gossip.crds.get(&label).unwrap().value.clone();
             Some((peer.id, filters, self_info))
         })
        .collect()
    };
    let transfered: Vec<_> = requests
-        .into_par_iter()
+        .into_iter()
        .map(|(to, filters, caller_info)| {
            let mut bytes: usize = 0;
            let mut msgs: usize = 0;
@@ -507,8 +527,9 @@ fn network_run_pull(
                .lock()
                .unwrap()
                .generate_pull_responses(
+                    thread_pool,
                    &filters,
-                    /*output_size_limit=*/ usize::MAX,
+                    usize::MAX, // output_size_limit
                    now,
                )
                .into_iter()
@@ -676,11 +697,8 @@ fn test_star_network_large_push() {
 }
 #[test]
 fn test_prune_errors() {
-    let mut crds_gossip = CrdsGossip {
-        id: Pubkey::new(&[0; 32]),
-        ..CrdsGossip::default()
-    };
-    let id = crds_gossip.id;
+    let mut crds_gossip = CrdsGossip::default();
+    let id = Pubkey::new(&[0; 32]);
     let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
     let prune_pubkey = Pubkey::new(&[2; 32]);
     crds_gossip
@@ -690,22 +708,43 @@ fn test_prune_errors() {
            0,
        )
        .unwrap();
-    crds_gossip.refresh_push_active_set(&HashMap::new(), None);
+    crds_gossip.refresh_push_active_set(
+        &id,
+        0,               // shred version
+        &HashMap::new(), // stakes
+        None,            // gossip validators
+        &SocketAddrSpace::Unspecified,
+    );
    let now = timestamp();
    //incorrect dest
    let mut res = crds_gossip.process_prune_msg(
-        &ci.id,
-        &Pubkey::new(hash(&[1; 32]).as_ref()),
-        &[prune_pubkey],
+        &id,                                   // self_pubkey
+        &ci.id,                                // peer
+        &Pubkey::new(hash(&[1; 32]).as_ref()), // destination
+        &[prune_pubkey],                       // origins
        now,
        now,
    );
    assert_eq!(res.err(), Some(CrdsGossipError::BadPruneDestination));
    //correct dest
-    res = crds_gossip.process_prune_msg(&ci.id, &id, &[prune_pubkey], now, now);
+    res = crds_gossip.process_prune_msg(
+        &id,             // self_pubkey
+        &ci.id,          // peer
+        &id,             // destination
+        &[prune_pubkey], // origins
+        now,
+        now,
+    );
    res.unwrap();
    //test timeout
    let timeout = now + crds_gossip.push.prune_timeout * 2;
-    res = crds_gossip.process_prune_msg(&ci.id, &id, &[prune_pubkey], now, timeout);
+    res = crds_gossip.process_prune_msg(
+        &id,             // self_pubkey
+        &ci.id,          // peer
+        &id,             // destination
+        &[prune_pubkey], // origins
+        now,
+        timeout,
+    );
    assert_eq!(res.err(), Some(CrdsGossipError::PruneMessageTimeout));
 }
diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs
index 970a9c58b968c4..b565a1e04cca4b 100644
--- a/gossip/tests/gossip.rs
+++ b/gossip/tests/gossip.rs
@@ -18,6 +18,7 @@ use {
        timing::timestamp,
        transaction::Transaction,
    },
+    solana_streamer::socket::SocketAddrSpace,
    solana_vote_program::{vote_instruction, vote_state::Vote},
    std::{
        net::UdpSocket,
@@ -33,7 +34,11 @@ use {
 fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
    let keypair = Arc::new(Keypair::new());
    let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
-    let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
+    let cluster_info = Arc::new(ClusterInfo::new(
+        test_node.info.clone(),
+        keypair,
+        SocketAddrSpace::Unspecified,
+    ));
    let gossip_service = GossipService::new(
        &cluster_info,
        None,
@@ -56,7 +61,11 @@ fn test_node_with_bank(
    bank_forks: Arc<RwLock<BankForks>>,
 ) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
    let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
-    let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), node_keypair));
+    let cluster_info = Arc::new(ClusterInfo::new(
+        test_node.info.clone(),
+        node_keypair,
+        SocketAddrSpace::Unspecified,
+    ));
    let gossip_service = GossipService::new(
        &cluster_info,
        Some(bank_forks),
@@ -209,7 +218,13 @@ pub fn cluster_info_retransmit() {
    p.meta.size = 10;
    let peers = c1.tvu_peers();
    let retransmit_peers: Vec<_> = peers.iter().collect();
-    ClusterInfo::retransmit_to(&retransmit_peers, &p, &tn1, false).unwrap();
+    ClusterInfo::retransmit_to(
+        &retransmit_peers,
+        &p,
+        &tn1,
+        false,
+        &SocketAddrSpace::Unspecified,
+    );
    let res: Vec<_> = [tn1, tn2, tn3]
        .into_par_iter()
        .map(|s| {
diff --git a/install/Cargo.toml b/install/Cargo.toml
index 101b854f35dfa8..9976677eab52df 100644
--- a/install/Cargo.toml
+++ b/install/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-install"
 description = "The solana cluster software installer"
-version = "1.7.0"
+version = "1.7.11"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -15,7 +15,7 @@ bincode = "1.3.1"
 bzip2 = "0.3.3"
 chrono = { version = "0.4.11", features = ["serde"] }
 clap = { version = "2.33.1" }
-console = "0.11.3"
+console = "0.14.1"
 ctrlc = { version = "3.1.5", features = ["termination"] }
 dirs-next = "2.0.0"
 indicatif = "0.15.0"
@@ -25,14 +25,14 @@ reqwest = { version = "0.11.2", default-features = false, features = ["blocking"
 serde = { version = "1.0.122", features = ["derive"] }
 serde_json = "1.0.62"
 serde_yaml = "0.8.13"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-config-program = { path = "../programs/config", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-client = { path = "../client", version = "=1.7.11" }
+solana-config-program = { path = "../programs/config", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
 semver = "0.9.0"
-tar = "0.4.28"
+tar = "0.4.37"
 tempfile = "3.1.0"
 url = "2.1.1"
diff --git a/install/src/command.rs b/install/src/command.rs
index efe7d7bc0e4843..b8e92843519daa 100644
--- a/install/src/command.rs
+++ b/install/src/command.rs
@@ -548,7 +548,7 @@ pub fn init(
    init_or_update(config_file, true, false)?;
 
    let path_modified = if !no_modify_path {
-        add_to_path(&config.active_release_bin_dir().to_str().unwrap())
+        add_to_path(config.active_release_bin_dir().to_str().unwrap())
    } else {
        false
    };
@@ -613,10 +613,10 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
        return Ok(());
    }
 
-    println_name_value("Configuration:", &config_file);
+    println_name_value("Configuration:", config_file);
    println_name_value(
        "Active release directory:",
-        &config.active_release_dir().to_str().unwrap_or("?"),
+        config.active_release_dir().to_str().unwrap_or("?"),
    );
 
    fn print_release_version(config: &Config) {
@@ -633,14 +633,14 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
    if let Some(explicit_release) = &config.explicit_release {
        match explicit_release {
            ExplicitRelease::Semver(release_semver) => {
-                println_name_value(&format!("{}Release version:", BULLET), &release_semver);
+                println_name_value(&format!("{}Release version:", BULLET), release_semver);
                println_name_value(
                    &format!("{}Release URL:", BULLET),
                    &github_release_download_url(release_semver),
                );
            }
            ExplicitRelease::Channel(release_channel) => {
-                println_name_value(&format!("{}Release channel:", BULLET), &release_channel);
+                println_name_value(&format!("{}Release channel:", BULLET), release_channel);
                println_name_value(
                    &format!("{}Release URL:", BULLET),
                    &release_channel_download_url(release_channel),
@@ -659,7 +659,7 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
        Some(ref update_manifest) => {
            println_name_value("Installed version:", "");
            print_release_version(&config);
-            print_update_manifest(&update_manifest);
+            print_update_manifest(update_manifest);
        }
        None => {
            println_name_value("Installed version:", "None");
diff --git a/install/src/lib.rs b/install/src/lib.rs
index 188cdfcd061b5f..f79bdfcf1954dc 100644
--- a/install/src/lib.rs
+++ b/install/src/lib.rs
@@ -18,7 +18,7 @@ mod stop_process;
 mod update_manifest;
 
 pub fn is_semver(semver: &str) -> Result<(), String> {
-    match semver::Version::parse(&semver) {
+    match semver::Version::parse(semver) {
        Ok(_) => Ok(()),
        Err(err) => Err(format!("{:?}", err)),
    }
@@ -60,10 +60,10 @@ pub fn explicit_release_of(
 fn handle_init(matches: &ArgMatches<'_>, config_file: &str) -> Result<(), String> {
    let json_rpc_url = matches.value_of("json_rpc_url").unwrap();
-    let update_manifest_pubkey = pubkey_of(&matches, "update_manifest_pubkey");
+    let update_manifest_pubkey = pubkey_of(matches, "update_manifest_pubkey");
    let data_dir = matches.value_of("data_dir").unwrap();
    let no_modify_path = matches.is_present("no_modify_path");
-    let explicit_release = explicit_release_of(&matches, "explicit_release");
+    let explicit_release = explicit_release_of(matches, "explicit_release");
 
    if update_manifest_pubkey.is_none() && explicit_release.is_none() {
        Err(format!(
@@ -98,7 +98,7 @@ pub fn main() -> Result<(), String> {
                    .global(true)
                    .help("Configuration file to use");
                match *defaults::CONFIG_FILE {
-                    Some(ref config_file) => arg.default_value(&config_file),
+                    Some(ref config_file) => arg.default_value(config_file),
                    None => arg.required(true),
                }
            })
@@ -115,7 +115,7 @@ pub fn main() -> Result<(), String> {
                    .required(true)
                    .help("Directory to store install data");
                match *defaults::DATA_DIR {
-                    Some(ref data_dir) => arg.default_value(&data_dir),
+                    Some(ref data_dir) => arg.default_value(data_dir),
                    None => arg,
                }
            })
@@ -181,7 +181,7 @@ pub fn main() -> Result<(), String> {
                    .required(true)
                    .help("Keypair file of the account that funds the deployment");
                match *defaults::USER_KEYPAIR {
-                    Some(ref config_file) => arg.default_value(&config_file),
+                    Some(ref config_file) => arg.default_value(config_file),
                    None => arg,
                }
            })
@@ -242,7 +242,7 @@ pub fn main() -> Result<(), String> {
    let config_file = matches.value_of("config_file").unwrap();
 
    match matches.subcommand() {
-        ("init", Some(matches)) => handle_init(&matches, &config_file),
+        ("init", Some(matches)) => handle_init(matches, config_file),
        ("info", Some(matches)) => {
            let local_info_only = matches.is_present("local_info_only");
            let eval = matches.is_present("eval");
@@ -290,7 +290,7 @@ pub fn main_init() -> Result<(), String> {
                    .takes_value(true)
                    .help("Configuration file to use");
                match *defaults::CONFIG_FILE {
-                    Some(ref config_file) => arg.default_value(&config_file),
+                    Some(ref config_file) => arg.default_value(config_file),
                    None => arg.required(true),
                }
            })
@@ -303,7 +303,7 @@ pub fn main_init() -> Result<(), String> {
                    .required(true)
                    .help("Directory to store install data");
                match *defaults::DATA_DIR {
-                    Some(ref data_dir) => arg.default_value(&data_dir),
+                    Some(ref data_dir) => arg.default_value(data_dir),
                    None => arg,
                }
            })
@@ -342,5 +342,5 @@ pub fn main_init() -> Result<(), String> {
        .get_matches();
 
    let config_file = matches.value_of("config_file").unwrap();
-    handle_init(&matches, &config_file)
+    handle_init(&matches, config_file)
 }
diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml
index 00604f11575b63..09012d98c1a51c 100644
--- a/keygen/Cargo.toml
+++ b/keygen/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-keygen"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana key generation utility"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ bs58 = "0.3.1"
 clap = "2.33"
 dirs-next = "2.0.0"
 num_cpus = "1.13.0"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-cli-config = { path = "../cli-config", version = "=1.7.0" }
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-cli-config = { path = "../cli-config", version = "=1.7.11" }
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
 tiny-bip39 = "0.7.0"
 
 [[bin]]
diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs
index a30e5c47cafa44..c7c962a58aa850 100644
--- a/keygen/src/keygen.rs
+++ b/keygen/src/keygen.rs
@@ -106,9 +106,6 @@ fn no_outfile_arg<'a, 'b>() -> Arg<'a, 'b> {
    Arg::with_name(NO_OUTFILE_ARG.name)
        .long(NO_OUTFILE_ARG.long)
        .conflicts_with_all(&["outfile", "silent"])
-        // Require a seed phrase to avoid generating a keypair
-        // but having no way to get the private key
-        .requires("use_mnemonic")
        .help(NO_OUTFILE_ARG.help)
 }
@@ -121,7 +118,6 @@ impl KeyGenerationCommonArgs for App<'_, '_> {
        self.arg(word_count_arg())
            .arg(language_arg())
            .arg(no_passphrase_arg())
-            .arg(no_outfile_arg())
    }
 }
@@ -157,9 +153,9 @@ fn output_keypair(
 ) -> Result<(), Box<dyn error::Error>> {
    if outfile == STDOUT_OUTFILE_TOKEN {
        let mut stdout = std::io::stdout();
-        write_keypair(&keypair, &mut stdout)?;
+        write_keypair(keypair, &mut stdout)?;
    } else {
-        write_keypair_file(&keypair, outfile)?;
+        write_keypair_file(keypair, outfile)?;
        println!("Wrote {} keypair to {}", source, outfile);
    }
    Ok(())
@@ -346,7 +342,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                .global(true)
                .help("Configuration file to use");
            if let Some(ref config_file) = *CONFIG_FILE {
-                arg.default_value(&config_file)
+                arg.default_value(config_file)
            } else {
                arg
            }
@@ -395,6 +391,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                        .help("Do not display seed phrase. Useful when piping output to other programs that prompt for user input, like gpg"),
                )
                .key_generation_common_args()
+                .arg(no_outfile_arg())
        )
        .subcommand(
            SubCommand::with_name("grind")
@@ -450,6 +447,12 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                        .help("Generate using a mnemonic key phrase. Expect a significant slowdown in this mode"),
                )
                .key_generation_common_args()
+                .arg(
+                    no_outfile_arg()
+                        // Require a seed phrase to avoid generating a keypair
+                        // but having no way to get the private key
+                        .requires("use_mnemonic")
+                )
        )
        .subcommand(
            SubCommand::with_name("pubkey")
@@ -536,7 +539,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
 
            if matches.is_present("outfile") {
                let outfile = matches.value_of("outfile").unwrap();
-                check_for_overwrite(&outfile, &matches);
+                check_for_overwrite(outfile, matches);
                write_pubkey_file(outfile, pubkey)?;
            } else {
                println!("{}", pubkey);
@@ -555,7 +558,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
 
            match outfile {
                Some(STDOUT_OUTFILE_TOKEN) => (),
-                Some(outfile) => check_for_overwrite(&outfile, &matches),
+                Some(outfile) => check_for_overwrite(outfile, matches),
                None => (),
            }
 
@@ -574,7 +577,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
            let keypair = keypair_from_seed(seed.as_bytes())?;
 
            if let Some(outfile) = outfile {
-                output_keypair(&keypair, &outfile, "new")
+                output_keypair(&keypair, outfile, "new")
                    .map_err(|err| format!("Unable to write {}: {}", outfile, err))?;
            }
 
@@ -597,7 +600,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
            };
 
            if outfile != STDOUT_OUTFILE_TOKEN {
-                check_for_overwrite(&outfile, &matches);
+                check_for_overwrite(outfile, matches);
            }
 
            let keypair_name = "recover";
@@ -607,7 +610,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
                let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
                keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)?
            };
-            output_keypair(&keypair, &outfile, "recovered")?;
+            output_keypair(&keypair, outfile, "recovered")?;
        }
        ("grind", Some(matches)) => {
            let ignore_case = matches.is_present("ignore_case");
diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml
index 47a7b5e7440ba8..9692bbe9ec30aa 100644
--- a/ledger-tool/Cargo.toml
+++ b/ledger-tool/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-ledger-tool"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.11"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -23,23 +23,23 @@ regex = "1"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0.56"
 serde_yaml = "0.8.13"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-cli-output = { path = "../cli-output", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-cli-output = { path = "../cli-output", version = "=1.7.11" }
+solana-ledger = { path = "../ledger", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-measure = { path = "../measure", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-stake-program = { path = "../programs/stake", version = "=1.7.11" }
+solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.11" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.11" }
 tempfile = "3.1.0"
 tokio = { version = "1", features = ["full"] }
 
 [dev-dependencies]
-assert_cmd = "1.0"
+assert_cmd = "2.0"
 
 [target."cfg(unix)".dependencies]
 signal-hook = "0.1.15"
diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs
index 38c387404319e7..b856ef13114ab3 100644
--- a/ledger-tool/src/bigtable.rs
+++ b/ledger-tool/src/bigtable.rs
@@ -385,18 +385,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
    let runtime = tokio::runtime::Runtime::new().unwrap();
 
    let verbose = matches.is_present("verbose");
-    let output_format = matches
-        .value_of("output_format")
-        .map(|value| match value {
-            "json" => OutputFormat::Json,
-            "json-compact" => OutputFormat::JsonCompact,
-            _ => unreachable!(),
-        })
-        .unwrap_or(if verbose {
-            OutputFormat::DisplayVerbose
-        } else {
-            OutputFormat::Display
-        });
+    let output_format = OutputFormat::from_matches(matches, "output_format", verbose);
 
    let future = match matches.subcommand() {
        ("upload", Some(arg_matches)) => {
@@ -405,7 +394,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
            let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata");
            let force_reupload = arg_matches.is_present("force_reupload");
            let blockstore =
-                crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
+                crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None);
 
            runtime.block_on(upload(
                blockstore,
diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 7f69d0fb7e7087..9aa14f8a5cd99b 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -33,8 +33,6 @@ use solana_runtime::{
 use solana_sdk::{
    account::{AccountSharedData, ReadableAccount, WritableAccount},
    clock::{Epoch, Slot},
-    feature::{self, Feature},
-    feature_set,
    genesis_config::{ClusterType, GenesisConfig},
    hash::Hash,
    inflation::Inflation,
@@ -42,9 +40,10 @@ use solana_sdk::{
    pubkey::Pubkey,
    rent::Rent,
    shred_version::compute_shred_version,
+    stake::{self, state::StakeState},
    system_program,
 };
-use solana_stake_program::stake_state::{self, PointValue, StakeState};
+use solana_stake_program::stake_state::{self, PointValue};
 use solana_vote_program::{
    self,
    vote_state::{self, VoteState},
@@ -76,14 +75,14 @@ fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutpu
    if !rewards.is_empty() {
        println!("  Rewards:");
        println!(
-            "    {:<44}  {:^15}  {:<15}  {:<20}",
-            "Address", "Type", "Amount", "New Balance"
+            "    {:<44}  {:^15}  {:<15}  {:<20}  {:>10}",
+            "Address", "Type", "Amount", "New Balance", "Commission",
        );
 
        for reward in rewards {
            let sign = if reward.lamports < 0 { "-" } else { "" };
            println!(
-                "    {:<44}  {:^15}  {:<15}  {}",
+                "    {:<44}  {:^15}  {:<15}  {}  {}",
                reward.pubkey,
                if let Some(reward_type) = reward.reward_type {
                    format!("{}", reward_type)
@@ -95,7 +94,11 @@ fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutpu
                    sign,
                    lamports_to_sol(reward.lamports.abs() as u64)
                ),
-                format!("◎{:<18.9}", lamports_to_sol(reward.post_balance))
+                format!("◎{:<18.9}", lamports_to_sol(reward.post_balance)),
+                reward
+                    .commission
+                    .map(|commission| format!("{:>9}%", commission))
+                    .unwrap_or_else(|| "    -".to_string())
            );
        }
    }
@@ -133,7 +136,7 @@ fn output_entry(
            .map(|transaction_status| transaction_status.into());
 
        solana_cli_output::display::println_transaction(
-            &transaction,
+            transaction,
            &transaction_status,
            "      ",
            None,
@@ -452,7 +455,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
    let mut lowest_total_stake = 0;
    for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes {
        all_votes.entry(*node_pubkey).and_modify(|validator_votes| {
-            validator_votes.remove(&last_vote_slot);
+            validator_votes.remove(last_vote_slot);
        });
 
        dot.push(format!(
@@ -472,7 +475,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
        dot.push(format!(
            r#"  "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#,
            node_pubkey,
-            if styled_slots.contains(&last_vote_slot) {
+            if styled_slots.contains(last_vote_slot) {
                last_vote_slot.to_string()
            } else {
                if *last_vote_slot < lowest_last_vote_slot {
@@ -519,7 +522,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
                    r#"  "{} vote {}" -> "{}" [style=dotted,label="vote"];"#,
                    node_pubkey,
                    vote_slot,
-                    if styled_slots.contains(&vote_slot) {
+                    if styled_slots.contains(vote_slot) {
                        vote_slot.to_string()
                    } else {
                        "...".to_string()
@@ -711,8 +714,8 @@ fn load_bank_forks(
    };
 
    bank_forks_utils::load(
-        &genesis_config,
-        &blockstore,
+        genesis_config,
+        blockstore,
        account_paths,
        None,
        snapshot_config.as_ref(),
@@ -729,7 +732,8 @@ fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> Genes
 }
 
 fn assert_capitalization(bank: &Bank) {
-    assert!(bank.calculate_and_verify_capitalization());
+    let debug_verify = true;
+    assert!(bank.calculate_and_verify_capitalization(debug_verify));
 }
 
 #[allow(clippy::cognitive_complexity)]
@@ -831,7 +835,7 @@ fn main() {
        .long("maximum-snapshots-to-retain")
        .value_name("NUMBER")
        .takes_value(true)
-        .default_value(&default_max_snapshot_to_retain)
+        .default_value(default_max_snapshot_to_retain)
        .help("Maximum number of snapshots to hold on to during snapshot purge");
 
    let rent = Rent::default();
@@ -1272,14 +1276,6 @@ fn main() {
                    .possible_values(&["pico", "full", "none"])
                    .help("Overwrite inflation when warping"),
            )
-            .arg(
-                Arg::with_name("enable_stake_program_v2")
-                    .required(false)
-                    .long("enable-stake-program-v2")
-                    .takes_value(false)
-                    .help("Enable stake program v2 (several inflation-related staking \
-                        bugs are feature-gated behind this)"),
-            )
            .arg(
                Arg::with_name("recalculate_capitalization")
                    .required(false)
@@ -1857,14 +1853,14 @@ fn main() {
            let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts");
            let new_hard_forks = hardforks_of(arg_matches, "hard_forks");
-            let faucet_pubkey = pubkey_of(&arg_matches, "faucet_pubkey");
+            let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey");
            let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0);
 
            let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8);
            let hashes_per_tick = arg_matches.value_of("hashes_per_tick");
 
            let bootstrap_stake_authorized_pubkey =
-                pubkey_of(&arg_matches, "bootstrap_stake_authorized_pubkey");
+                pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey");
            let bootstrap_validator_lamports =
                value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64);
            let bootstrap_validator_stake_lamports =
@@ -1878,9 +1874,9 @@ fn main() {
                );
                exit(1);
            }
-            let bootstrap_validator_pubkeys = pubkeys_of(&arg_matches, "bootstrap_validator");
+            let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator");
            let accounts_to_remove =
-                pubkeys_of(&arg_matches, "accounts_to_remove").unwrap_or_default();
+                pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default();
            let snapshot_version = arg_matches
                .value_of("snapshot_version")
@@ -1971,7 +1967,8 @@ fn main() {
 
            if remove_stake_accounts {
                for (address, mut account) in bank
-                    .get_program_accounts(&solana_stake_program::id())
+                    .get_program_accounts(&stake::program::id())
+                    .unwrap()
                    .into_iter()
                {
                    account.set_lamports(0);
@@ -2005,6 +2002,7 @@ fn main() {
                // Delete existing vote accounts
                for (address, mut account) in bank
                    .get_program_accounts(&solana_vote_program::id())
+                    .unwrap()
                    .into_iter()
                {
                    account.set_lamports(0);
@@ -2033,9 +2031,9 @@ fn main() {
                );
 
                let vote_account = vote_state::create_account_with_authorized(
-                    &identity_pubkey,
-                    &identity_pubkey,
-                    &identity_pubkey,
+                    identity_pubkey,
+                    identity_pubkey,
+                    identity_pubkey,
                    100,
                    VoteState::get_rent_exempt_reserve(&rent).max(1),
                );
@@ -2045,8 +2043,8 @@ fn main() {
                    &stake_state::create_account(
                        bootstrap_stake_authorized_pubkey
                            .as_ref()
-                            .unwrap_or(&identity_pubkey),
-                        &vote_pubkey,
+                            .unwrap_or(identity_pubkey),
+                        vote_pubkey,
                        &vote_account,
                        &rent,
                        bootstrap_validator_stake_lamports,
@@ -2166,6 +2164,7 @@ fn main() {
            let accounts: BTreeMap<_, _> = bank
                .get_all_accounts_with_modified_slots()
+                .unwrap()
                .into_iter()
                .filter(|(pubkey, _account, _slot)| {
                    include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey)
@@ -2272,95 +2271,6 @@ fn main() {
                .lazy_rent_collection
                .store(true, std::sync::atomic::Ordering::Relaxed);
 
-            let feature_account_balance = std::cmp::max(
-                genesis_config.rent.minimum_balance(Feature::size_of()),
-                1,
-            );
-            if arg_matches.is_present("enable_stake_program_v2") {
-                let mut force_enabled_count = 0;
-                if base_bank
-                    .get_account(&feature_set::stake_program_v2::id())
-                    .is_none()
-                {
-                    base_bank.store_account(
-                        &feature_set::stake_program_v2::id(),
-                        &feature::create_account(
-                            &Feature { activated_at: None },
-                            feature_account_balance,
-                        ),
-                    );
-                    force_enabled_count += 1;
-                }
-                if base_bank
-                    .get_account(&feature_set::rewrite_stake::id())
-                    .is_none()
-                {
-                    base_bank.store_account(
-                        &feature_set::rewrite_stake::id(),
-                        &feature::create_account(
-                            &Feature { activated_at: None },
-                            feature_account_balance,
-                        ),
-                    );
-                    force_enabled_count += 1;
-                }
-
-                if force_enabled_count == 0 {
-                    warn!("Already stake_program_v2 is activated (or scheduled)");
-                }
-
-                let mut store_failed_count = 0;
-                if force_enabled_count >= 1 {
-                    if base_bank
-                        .get_account(&feature_set::secp256k1_program_enabled::id())
-                        .is_some()
-                    {
-                        // steal some lamports from the pretty old feature not to affect
-                        // capitalizaion, which doesn't affect inflation behavior!
-                        base_bank.store_account(
-                            &feature_set::secp256k1_program_enabled::id(),
-                            &AccountSharedData::default(),
-                        );
-                        force_enabled_count -= 1;
-                    } else {
-                        store_failed_count += 1;
-                    }
-                }
-
-                if force_enabled_count >= 1 {
-                    if base_bank
-                        .get_account(&feature_set::instructions_sysvar_enabled::id())
-                        .is_some()
-                    {
-                        // steal some lamports from the pretty old feature not to affect
-                        // capitalizaion, which doesn't affect inflation behavior!
-                        base_bank.store_account(
-                            &feature_set::instructions_sysvar_enabled::id(),
-                            &AccountSharedData::default(),
-                        );
-                        force_enabled_count -= 1;
-                    } else {
-                        store_failed_count += 1;
-                    }
-                }
-                assert_eq!(force_enabled_count, store_failed_count);
-                if store_failed_count >= 1 {
-                    // we have no choice; maybe locally created blank cluster with
-                    // not-Development cluster type.
-                    let old_cap = base_bank.set_capitalization();
-                    let new_cap = base_bank.capitalization();
-                    warn!(
-                        "Skewing capitalization a bit to enable stake_program_v2 as \
-                        requested: increasing {} from {} to {}",
-                        feature_account_balance, old_cap, new_cap,
-                    );
-                    assert_eq!(
-                        old_cap + feature_account_balance * store_failed_count,
-                        new_cap
-                    );
-                }
-            }
-
            #[derive(Default, Debug)]
            struct PointDetail {
                epoch: Epoch,
@@ -2471,7 +2381,7 @@ fn main() {
                }
            };
            let warped_bank = Bank::new_from_parent_with_tracer(
-                &base_bank,
+                base_bank,
                base_bank.collector_id(),
                next_epoch,
                tracer,
@@ -2488,7 +2398,7 @@ fn main() {
            println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
            println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
-            assert_capitalization(&base_bank);
+            assert_capitalization(base_bank);
            assert_capitalization(&warped_bank);
            let interest_per_epoch = ((warped_bank.capitalization() as f64)
                / (base_bank.capitalization() as f64)
@@ -2516,7 +2426,7 @@ fn main() {
                        pubkey,
                        account,
                        base_bank
-                            .get_account(&pubkey)
+                            .get_account(pubkey)
                            .map(|a| a.lamports())
                            .unwrap_or_default(),
                    )
@@ -2715,7 +2625,7 @@ fn main() {
                );
            }
 
-            assert_capitalization(&bank);
+            assert_capitalization(bank);
            println!("Inflation: {:?}", bank.inflation());
            println!("RentCollector: {:?}", bank.rent_collector());
            println!("Capitalization: {}", Sol(bank.capitalization()));
@@ -2912,10 +2822,12 @@ fn main() {
                eprintln!("{} slots to be rooted", roots_to_fix.len());
                for chunk in roots_to_fix.chunks(100) {
                    eprintln!("{:?}", chunk);
-                    blockstore.set_roots(&roots_to_fix).unwrap_or_else(|err| {
-                        eprintln!("Unable to set roots {:?}: {}", roots_to_fix, err);
-                        exit(1);
-                    });
+                    blockstore
+                        .set_roots(roots_to_fix.iter())
+                        .unwrap_or_else(|err| {
+                            eprintln!("Unable to set roots {:?}: {}", roots_to_fix, err);
+                            exit(1);
+                        });
                }
            } else {
                println!(
diff --git a/ledger-tool/tests/basic.rs b/ledger-tool/tests/basic.rs
index c9ccf9ae690cd1..4cda481e6ed358 100644
--- a/ledger-tool/tests/basic.rs
+++ b/ledger-tool/tests/basic.rs
@@ -39,11 +39,11 @@ fn nominal() {
    let ledger_path = ledger_path.to_str().unwrap();
 
    // Basic validation
-    let output = run_ledger_tool(&["-l", &ledger_path, "verify"]);
+    let output = run_ledger_tool(&["-l", ledger_path, "verify"]);
    assert!(output.status.success());
 
    // Print everything
-    let output = run_ledger_tool(&["-l", &ledger_path, "print", "-vvv"]);
+    let output = run_ledger_tool(&["-l", ledger_path, "print", "-vvv"]);
    assert!(output.status.success());
    assert_eq!(count_newlines(&output.stdout), ticks + meta_lines);
 }
diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml
index 157377b5ca07d9..ef34661928e2ac 100644
--- a/ledger/Cargo.toml
+++ b/ledger/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-ledger"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana ledger"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ edition = "2018"
 bincode = "1.3.1"
 byteorder = "1.3.4"
 chrono = { version = "0.4.11", features = ["serde"] }
-chrono-humanize = "0.1.1"
+chrono-humanize = "0.2.1"
 crossbeam-channel = "0.4"
 dlopen_derive = "0.1.4"
 dlopen = "0.1.8"
@@ -26,48 +26,53 @@ lazy_static = "1.4.0"
 libc = "0.2.81"
 log = { version = "0.4.11" }
 num_cpus = "1.13.0"
-prost = "0.7.0"
+prost = "0.8.0"
 rand = "0.7.0"
 rand_chacha = "0.2.2"
 rayon = "1.5.0"
-reed-solomon-erasure = { version = "4.0.2", features = ["simd-accel"] }
 serde = "1.0.122"
-serde_bytes = "0.11.4"
+serde_bytes = "0.11.5"
 sha2 = "0.9.2"
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.0" }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.0" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-solana-perf = { path = "../perf", version = "=1.7.0" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.0" }
-solana-storage-proto = { path = "../storage-proto", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.11" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.11" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.11" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-measure = { path = "../measure", version = "=1.7.11" }
+solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.11" }
+solana-metrics = { path = "../metrics", version = "=1.7.11" }
+solana-perf = { path = "../perf", version = "=1.7.11" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.11" }
+solana-storage-proto = { path = "../storage-proto", version = "=1.7.11" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.11" }
 tempfile = "3.1.0"
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tokio-stream = "0.1"
 trees = "0.2.1"
 
+# Disable reed-solomon-erasure/simd-accel feature on aarch64 only since it
+# requires clang to support -march=native.
+[target.'cfg(any(target_arch = "aarch64", target_arch = "aarch64_apple_darwin"))'.dependencies]
+reed-solomon-erasure = { version = "4.0.2" }
+
+[target.'cfg(not(any(target_arch = "aarch64", target_arch = "aarch64_apple_darwin")))'.dependencies]
+reed-solomon-erasure = { version = "4.0.2", features = ["simd-accel"] }
+
 [dependencies.rocksdb]
 # Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts
 # when also using the bzip2 crate
-version = "0.15.0"
+version = "0.16.0"
 default-features = false
 features = ["lz4"]
 
 [dev-dependencies]
 assert_matches = "1.3.0"
 matches = "0.1.6"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-budget-program = { path = "../programs/budget", version = "=1.7.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.11" }
 
 [build-dependencies]
 rustc_version = "0.2"
diff --git a/ledger/benches/protobuf.rs b/ledger/benches/protobuf.rs
index 6b511b2925d216..e0244efa078684 100644
--- a/ledger/benches/protobuf.rs
+++ b/ledger/benches/protobuf.rs
@@ -21,6 +21,7 @@ fn create_rewards() -> Rewards {
            lamports: 42 + i,
            post_balance: std::u64::MAX,
            reward_type: Some(RewardType::Fee),
+            commission: None,
        })
        .collect()
 }
diff --git a/ledger/src/ancestor_iterator.rs b/ledger/src/ancestor_iterator.rs
index 6c8099ce98f053..333974cc05a5da 100644
--- a/ledger/src/ancestor_iterator.rs
+++ b/ledger/src/ancestor_iterator.rs
@@ -60,7 +60,7 @@ mod tests {
    fn test_ancestor_iterator() {
        let blockstore_path = get_tmp_ledger_path!();
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
-        blockstore.set_roots(&[0]).unwrap();
+        blockstore.set_roots(std::iter::once(&0)).unwrap();
        let ticks_per_slot = 5;
        /*
diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs
index 0125fa0614dc11..d654f499b7b1e3 100644
--- a/ledger/src/bank_forks_utils.rs
+++ b/ledger/src/bank_forks_utils.rs
@@ -9,7 +9,7 @@ use crate::{
 };
 use log::*;
 use solana_runtime::{
-    bank_forks::{BankForks, SnapshotConfig},
+    bank_forks::{ArchiveFormat, BankForks, SnapshotConfig},
    snapshot_utils,
 };
 use solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash};
@@ -21,14 +21,18 @@ pub type LoadResult = result::Result<
 >;
 
 fn to_loadresult(
-    brp: BlockstoreProcessorResult,
-    snapshot_hash: Option<(Slot, Hash)>,
+    bpr: BlockstoreProcessorResult,
+    snapshot_slot_and_hash: Option<(Slot, Hash)>,
 ) -> LoadResult {
-    brp.map(|(bank_forks, leader_schedule_cache)| {
-        (bank_forks, leader_schedule_cache, snapshot_hash)
+    bpr.map(|(bank_forks, leader_schedule_cache)| {
+        (bank_forks, leader_schedule_cache, snapshot_slot_and_hash)
    })
 }
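The refactor that follows splits the monolithic `load` into `load_from_genesis` and `load_from_snapshot`. For orientation, this is the control flow it implements, as a minimal self-contained sketch; every name here is a stand-in for illustration, not the real API.

// Simplified control flow of bank_forks_utils::load after this refactor.
struct SnapshotConfig;
struct Archive;
struct Bank;

fn get_highest_snapshot_archive(_c: &SnapshotConfig) -> Option<Archive> {
    None // pretend no archive is present
}
fn load_from_snapshot(_c: &SnapshotConfig, _a: Archive) -> Bank {
    Bank
}
fn load_from_genesis() -> Bank {
    Bank
}

fn load(snapshot_config: Option<&SnapshotConfig>) -> Bank {
    if let Some(config) = snapshot_config {
        if let Some(archive) = get_highest_snapshot_archive(config) {
            // A usable snapshot archive exists: boot from it and replay
            // only the ledger past the snapshot slot.
            return load_from_snapshot(config, archive);
        }
        // Snapshots enabled but none found: fall through to genesis.
    }
    // No snapshot config, or no archive: replay the full ledger.
    load_from_genesis()
}

fn main() {
    let _bank = load(Some(&SnapshotConfig));
}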
+/// Load the banks and accounts
+///
+/// If a snapshot config is given, and a snapshot is found, it will be loaded. Otherwise, load
+/// from genesis.
 pub fn load(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
@@ -48,76 +52,53 @@ pub fn load(
        fs::create_dir_all(&snapshot_config.snapshot_path)
            .expect("Couldn't create snapshot directory");
 
-        match snapshot_utils::get_highest_snapshot_archive_path(
-            &snapshot_config.snapshot_package_output_path,
-        ) {
-            Some((archive_filename, (archive_slot, archive_snapshot_hash, compression))) => {
-                info!("Loading snapshot package: {:?}", archive_filename);
-                // Fail hard here if snapshot fails to load, don't silently continue
-
-                if account_paths.is_empty() {
-                    error!("Account paths not present when booting from snapshot");
-                    process::exit(1);
-                }
-
-                let deserialized_bank = snapshot_utils::bank_from_archive(
-                    &account_paths,
-                    &process_options.frozen_accounts,
-                    &snapshot_config.snapshot_path,
-                    &archive_filename,
-                    compression,
-                    genesis_config,
-                    process_options.debug_keys.clone(),
-                    Some(&crate::builtins::get(process_options.bpf_jit)),
-                    process_options.account_indexes.clone(),
-                    process_options.accounts_db_caching_enabled,
-                    process_options.limit_load_slot_count_from_snapshot,
-                )
-                .expect("Load from snapshot failed");
-                if let Some(shrink_paths) = shrink_paths {
-                    deserialized_bank.set_shrink_paths(shrink_paths);
-                }
-
-                let deserialized_snapshot_hash = (
-                    deserialized_bank.slot(),
-                    deserialized_bank.get_accounts_hash(),
-                );
-
-                if process_options.accounts_db_test_hash_calculation {
-                    deserialized_bank.update_accounts_hash_with_index_option(false, true);
-                }
-
-                if deserialized_snapshot_hash != (archive_slot, archive_snapshot_hash) {
-                    error!(
-                        "Snapshot has mismatch:\narchive: {:?}\ndeserialized: {:?}",
-                        archive_snapshot_hash, deserialized_snapshot_hash
-                    );
-                    process::exit(1);
-                }
-
-                return to_loadresult(
-                    blockstore_processor::process_blockstore_from_root(
-                        blockstore,
-                        deserialized_bank,
-                        &process_options,
-                        &VerifyRecyclers::default(),
-                        transaction_status_sender,
-                        cache_block_meta_sender,
-                    ),
-                    Some(deserialized_snapshot_hash),
-                );
-            }
-            None => info!("No snapshot package available"),
+        if let Some((archive_filename, (archive_slot, archive_hash, archive_format))) =
+            snapshot_utils::get_highest_snapshot_archive_path(
+                &snapshot_config.snapshot_package_output_path,
+            )
+        {
+            return load_from_snapshot(
+                genesis_config,
+                blockstore,
+                account_paths,
+                shrink_paths,
+                snapshot_config,
+                process_options,
+                transaction_status_sender,
+                cache_block_meta_sender,
+                archive_filename,
+                archive_slot,
+                archive_hash,
+                archive_format,
+            );
+        } else {
+            info!("No snapshot package available; will load from genesis");
        }
    } else {
-        info!("Snapshots disabled");
+        info!("Snapshots disabled; will load from genesis");
    }
 
+    load_from_genesis(
+        genesis_config,
+        blockstore,
+        account_paths,
+        process_options,
+        cache_block_meta_sender,
+    )
+}
+
+fn load_from_genesis(
+    genesis_config: &GenesisConfig,
+    blockstore: &Blockstore,
+    account_paths: Vec<PathBuf>,
+    process_options: ProcessOptions,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
+) -> LoadResult {
    info!("Processing ledger from genesis");
    to_loadresult(
        blockstore_processor::process_blockstore(
-            &genesis_config,
-            &blockstore,
+            genesis_config,
+            blockstore,
            account_paths,
            process_options,
            cache_block_meta_sender,
@@ -125,3 +106,74 @@ pub fn load(
        None,
    )
 }
+
+#[allow(clippy::too_many_arguments)]
+fn load_from_snapshot(
+    genesis_config: &GenesisConfig,
+    blockstore: &Blockstore,
+    account_paths: Vec<PathBuf>,
+    shrink_paths: Option<Vec<PathBuf>>,
+    snapshot_config: &SnapshotConfig,
+    process_options: ProcessOptions,
+    transaction_status_sender: Option<&TransactionStatusSender>,
+    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
+    archive_filename: PathBuf,
+    archive_slot: Slot,
+    archive_hash: Hash,
+    archive_format: ArchiveFormat,
+) -> LoadResult {
+    info!("Loading snapshot package: {:?}", archive_filename);
+
+    // Fail hard here if snapshot fails to load, don't silently continue
+    if account_paths.is_empty() {
+        error!("Account paths not present when booting from snapshot");
+        process::exit(1);
+    }
+
+    let (deserialized_bank, timings) = snapshot_utils::bank_from_archive(
+        &account_paths,
+        &process_options.frozen_accounts,
+        &snapshot_config.snapshot_path,
+        &archive_filename,
+        archive_format,
+        genesis_config,
+        process_options.debug_keys.clone(),
+        Some(&crate::builtins::get(process_options.bpf_jit)),
+        process_options.account_indexes.clone(),
+        process_options.accounts_db_caching_enabled,
+        process_options.limit_load_slot_count_from_snapshot,
+        process_options.shrink_ratio,
+        process_options.accounts_db_test_hash_calculation,
+        process_options.accounts_db_skip_shrink,
+    )
+    .expect("Load from snapshot failed");
+    if let Some(shrink_paths) = shrink_paths {
+        deserialized_bank.set_shrink_paths(shrink_paths);
+    }
+
+    let deserialized_bank_slot_and_hash = (
+        deserialized_bank.slot(),
+        deserialized_bank.get_accounts_hash(),
+    );
+
+    if deserialized_bank_slot_and_hash != (archive_slot, archive_hash) {
+        error!(
+            "Snapshot has mismatch:\narchive: {:?}\ndeserialized: {:?}",
+            archive_hash, deserialized_bank_slot_and_hash
+        );
+        process::exit(1);
+    }
+
+    to_loadresult(
+        blockstore_processor::process_blockstore_from_root(
+            blockstore,
+            deserialized_bank,
+            &process_options,
+            &VerifyRecyclers::default(),
+            transaction_status_sender,
+            cache_block_meta_sender,
+            timings,
+        ),
+        Some(deserialized_bank_slot_and_hash),
+    )
+}
diff --git a/core/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs
similarity index 87%
rename from core/src/bigtable_upload_service.rs
rename to ledger/src/bigtable_upload_service.rs
index ee0e6abc5a86e5..ba109bbaa74351 100644
--- a/core/src/bigtable_upload_service.rs
+++ b/ledger/src/bigtable_upload_service.rs
@@ -1,11 +1,13 @@
-use solana_ledger::blockstore::Blockstore;
-use solana_runtime::commitment::BlockCommitmentCache;
-use std::{
-    sync::atomic::{AtomicBool, Ordering},
-    sync::{Arc, RwLock},
-    thread::{self, Builder, JoinHandle},
+use {
+    crate::{bigtable_upload, blockstore::Blockstore},
+    solana_runtime::commitment::BlockCommitmentCache,
+    std::{
+        sync::atomic::{AtomicBool, Ordering},
+        sync::{Arc, RwLock},
+        thread::{self, Builder, JoinHandle},
+    },
+    tokio::runtime::Runtime,
 };
-use tokio::runtime::Runtime;
 
 // Delay uploading the largest confirmed root for this many slots. This is done in an attempt to
 // ensure that the `CacheBlockMetaService` has had enough time to add the block time for the root
@@ -68,7 +70,7 @@ impl BigTableUploadService {
                continue;
            }
 
-            let result = runtime.block_on(solana_ledger::bigtable_upload::upload_confirmed_blocks(
+            let result = runtime.block_on(bigtable_upload::upload_confirmed_blocks(
                blockstore.clone(),
                bigtable_ledger_storage.clone(),
                start_slot,
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 420aad696719c3..e41f6fe9a20969 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -45,7 +45,7 @@ use std::{
    borrow::Cow,
    cell::RefCell,
    cmp,
-    collections::{HashMap, HashSet},
+    collections::{BTreeMap, HashMap, HashSet},
    convert::TryInto,
    fs,
    io::{Error as IoError, ErrorKind},
@@ -54,8 +54,9 @@ use std::{
    sync::{
        atomic::{AtomicBool, Ordering},
        mpsc::{sync_channel, Receiver, SyncSender, TrySendError},
-        Arc, Mutex, RwLock,
+        Arc, Mutex, RwLock, RwLockWriteGuard,
    },
+    time::Instant,
 };
 use thiserror::Error;
 use trees::{Tree, TreeWalk};
@@ -85,13 +86,15 @@ pub const MAX_TURBINE_DELAY_IN_TICKS: u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_P
 // (32K shreds per slot * 4 TX per shred * 2.5 slots per sec)
 pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;
 
-pub type CompletedSlotsReceiver = Receiver<Vec<Slot>>;
+pub type CompletedSlotsSender = SyncSender<Vec<Slot>>;
+pub type CompletedSlotsReceiver = Receiver<Vec<Slot>>;
 type CompletedRanges = Vec<(u32, u32)>;
 
 #[derive(Clone, Copy)]
 pub enum PurgeType {
    Exact,
    PrimaryIndex,
+    CompactionFilter,
 }
 
 #[derive(Error, Debug)]
@@ -117,10 +120,14 @@ pub struct CompletedDataSetInfo {
 pub struct BlockstoreSignals {
    pub blockstore: Blockstore,
    pub ledger_signal_receiver: Receiver<bool>,
-    pub completed_slots_receivers: [CompletedSlotsReceiver; 2],
+    pub completed_slots_receiver: CompletedSlotsReceiver,
 }
 
 // ledger window
+//
+// NOTE: allowing dead_code only because stubbing bank_hash_cf and program_cost_cf
+// to 1.7 for rocksdb backward compatibility
+#[allow(dead_code)]
 pub struct Blockstore {
    ledger_path: PathBuf,
    db: Arc<Database>,
@@ -140,12 +147,35 @@ pub struct Blockstore {
    blocktime_cf: LedgerColumn<cf::Blocktime>,
    perf_samples_cf: LedgerColumn<cf::PerfSamples>,
    block_height_cf: LedgerColumn<cf::BlockHeight>,
+    program_costs_cf: LedgerColumn<cf::ProgramCosts>,
+    bank_hash_cf: LedgerColumn<cf::BankHash>,
    last_root: Arc<RwLock<Slot>>,
    insert_shreds_lock: Arc<Mutex<()>>,
    pub new_shreds_signals: Vec<SyncSender<bool>>,
-    pub completed_slots_senders: Vec<SyncSender<Vec<Slot>>>,
-    pub lowest_cleanup_slot: Arc<RwLock<u64>>,
+    pub completed_slots_senders: Vec<CompletedSlotsSender>,
+    pub lowest_cleanup_slot: Arc<RwLock<Slot>>,
    no_compaction: bool,
+    slots_stats: Arc<Mutex<SlotsStats>>,
+}
+
+struct SlotsStats {
+    last_cleanup_ts: Instant,
+    stats: BTreeMap<Slot, SlotStats>,
+}
+
+impl Default for SlotsStats {
+    fn default() -> Self {
+        SlotsStats {
+            last_cleanup_ts: Instant::now(),
+            stats: BTreeMap::new(),
+        }
+    }
+}
+
+#[derive(Default)]
+struct SlotStats {
+    num_repaired: usize,
+    num_recovered: usize,
 }
 
 pub struct IndexMetaWorkingSetEntry {
@@ -163,6 +193,13 @@ pub struct SlotMetaWorkingSetEntry {
    did_insert_occur: bool,
 }
 
+#[derive(PartialEq, Debug, Clone)]
+enum ShredSource {
+    Turbine,
+    Repaired,
+    Recovered,
+}
+
 #[derive(Default)]
 pub struct BlockstoreInsertionMetrics {
    pub num_shreds: usize,
@@ -311,6 +348,8 @@ impl Blockstore {
        let blocktime_cf = db.column();
        let perf_samples_cf = db.column();
        let block_height_cf = db.column();
+        let program_costs_cf = db.column();
+        let bank_hash_cf = db.column();
 
        let db = Arc::new(db);
@@ -359,12 +398,15 @@ impl Blockstore {
            blocktime_cf,
            perf_samples_cf,
            block_height_cf,
+            program_costs_cf,
+            bank_hash_cf,
            new_shreds_signals: vec![],
            completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())), last_root, lowest_cleanup_slot: Arc::new(RwLock::new(0)), no_compaction: false, + slots_stats: Arc::new(Mutex::new(SlotsStats::default())), }; if initialize_transaction_status_index { blockstore.initialize_transaction_status_index()?; @@ -384,18 +426,16 @@ impl Blockstore { enforce_ulimit_nofile, )?; let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1); - let (completed_slots_sender1, completed_slots_receiver1) = - sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL); - let (completed_slots_sender2, completed_slots_receiver2) = + let (completed_slots_sender, completed_slots_receiver) = sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL); blockstore.new_shreds_signals = vec![ledger_signal_sender]; - blockstore.completed_slots_senders = vec![completed_slots_sender1, completed_slots_sender2]; + blockstore.completed_slots_senders = vec![completed_slots_sender]; Ok(BlockstoreSignals { blockstore, ledger_signal_receiver, - completed_slots_receivers: [completed_slots_receiver1, completed_slots_receiver2], + completed_slots_receiver, }) } @@ -705,7 +745,7 @@ impl Blockstore { for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; - match erasure_meta.status(&index) { + match erasure_meta.status(index) { ErasureMetaStatus::CanRecover => { Self::recover_shreds( index, @@ -758,6 +798,7 @@ impl Blockstore { pub fn insert_shreds_handle_duplicate( &self, shreds: Vec, + is_repaired: Vec, leader_schedule: Option<&Arc>, is_trusted: bool, handle_duplicate: &F, @@ -766,6 +807,7 @@ impl Blockstore { where F: Fn(Shred), { + assert_eq!(shreds.len(), is_repaired.len()); let mut total_start = Measure::start("Total elapsed"); let mut start = Measure::start("Blockstore lock"); let _lock = self.insert_shreds_lock.lock().unwrap(); @@ -787,46 +829,56 @@ impl Blockstore { let mut index_meta_time = 0; let mut newly_completed_data_sets: Vec = vec![]; let mut inserted_indices = Vec::new(); - shreds.into_iter().enumerate().for_each(|(i, shred)| { - if shred.is_data() { - let shred_slot = shred.slot(); - if let Ok(completed_data_sets) = self.check_insert_data_shred( - shred, - &mut erasure_metas, - &mut index_working_set, - &mut slot_meta_working_set, - &mut write_batch, - &mut just_inserted_data_shreds, - &mut index_meta_time, - is_trusted, - handle_duplicate, - leader_schedule, - false, - ) { - newly_completed_data_sets.extend(completed_data_sets.into_iter().map( - |(start_index, end_index)| CompletedDataSetInfo { - slot: shred_slot, - start_index, - end_index, - }, - )); - inserted_indices.push(i); - num_inserted += 1; + shreds + .into_iter() + .zip(is_repaired.into_iter()) + .enumerate() + .for_each(|(i, (shred, is_repaired))| { + if shred.is_data() { + let shred_slot = shred.slot(); + let shred_source = if is_repaired { + ShredSource::Repaired + } else { + ShredSource::Turbine + }; + if let Ok(completed_data_sets) = self.check_insert_data_shred( + shred, + &mut erasure_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_inserted_data_shreds, + &mut index_meta_time, + is_trusted, + handle_duplicate, + leader_schedule, + shred_source, + ) { + newly_completed_data_sets.extend(completed_data_sets.into_iter().map( + |(start_index, end_index)| CompletedDataSetInfo { + slot: shred_slot, + start_index, + end_index, + }, + )); + inserted_indices.push(i); + num_inserted += 1; + } + } else if shred.is_code() { + 
self.check_cache_coding_shred( + shred, + &mut erasure_metas, + &mut index_working_set, + &mut just_inserted_coding_shreds, + &mut index_meta_time, + handle_duplicate, + is_trusted, + is_repaired, + ); + } else { + panic!("There should be no other case"); } - } else if shred.is_code() { - self.check_cache_coding_shred( - shred, - &mut erasure_metas, - &mut index_working_set, - &mut just_inserted_coding_shreds, - &mut index_meta_time, - handle_duplicate, - is_trusted, - ); - } else { - panic!("There should be no other case"); - } - }); + }); start.stop(); let insert_shreds_elapsed = start.as_us(); @@ -838,7 +890,7 @@ impl Blockstore { let mut num_recovered_exists = 0; if let Some(leader_schedule_cache) = leader_schedule { let recovered_data = Self::try_shred_recovery( - &db, + db, &erasure_metas, &mut index_working_set, &mut just_inserted_data_shreds, @@ -861,7 +913,7 @@ impl Blockstore { is_trusted, &handle_duplicate, leader_schedule, - true, + ShredSource::Recovered, ) { Err(InsertDataShredError::Exists) => { num_recovered_exists += 1; @@ -993,8 +1045,10 @@ impl Blockstore { leader_schedule: Option<&Arc>, is_trusted: bool, ) -> Result<(Vec, Vec)> { + let shreds_len = shreds.len(); self.insert_shreds_handle_duplicate( shreds, + vec![false; shreds_len], leader_schedule, is_trusted, &|_| {}, @@ -1038,6 +1092,7 @@ impl Blockstore { index_meta_time: &mut u64, handle_duplicate: &F, is_trusted: bool, + is_repaired: bool, ) -> bool where F: Fn(Shred), @@ -1105,6 +1160,12 @@ impl Blockstore { return false; } + if is_repaired { + let mut slots_stats = self.slots_stats.lock().unwrap(); + let mut e = slots_stats.stats.entry(slot).or_default(); + e.num_repaired += 1; + } + // Should be safe to modify index_meta here. Two cases // 1) Recovery happens: Then all inserted erasure metas are removed // from just_received_coding_shreds, and nothing will be committed by @@ -1135,14 +1196,14 @@ impl Blockstore { let maybe_shred = self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(&potential_shred, shred) { conflicting_shred = Some(potential_shred.payload); } break; } else if let Some(potential_shred) = just_received_coding_shreds.get(&(slot, coding_index)) { - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(potential_shred, shred) { conflicting_shred = Some(potential_shred.payload.clone()); } break; @@ -1165,7 +1226,7 @@ impl Blockstore { is_trusted: bool, handle_duplicate: &F, leader_schedule: Option<&Arc>, - is_recovered: bool, + shred_source: ShredSource, ) -> std::result::Result, InsertDataShredError> where F: Fn(Shred), @@ -1183,7 +1244,7 @@ impl Blockstore { let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); if !is_trusted { - if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) { + if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) { handle_duplicate(shred); return Err(InsertDataShredError::Exists); } @@ -1208,15 +1269,20 @@ impl Blockstore { just_inserted_data_shreds, &self.last_root, leader_schedule, - is_recovered, + shred_source.clone(), ) { return Err(InsertDataShredError::InvalidShred); } } let set_index = u64::from(shred.common_header.fec_set_index); - let newly_completed_data_sets = - self.insert_data_shred(slot_meta, index_meta.data_mut(), &shred, write_batch)?; + let newly_completed_data_sets = self.insert_data_shred( 
+ slot_meta, + index_meta.data_mut(), + &shred, + write_batch, + shred_source, + )?; just_inserted_data_shreds.insert((slot, shred_index), shred); index_meta_working_set_entry.did_insert_occur = true; slot_meta_entry.did_insert_occur = true; @@ -1295,7 +1361,7 @@ impl Blockstore { just_inserted_data_shreds: &HashMap<(u64, u64), Shred>, last_root: &RwLock, leader_schedule: Option<&Arc>, - is_recovered: bool, + shred_source: ShredSource, ) -> bool { use crate::shred::SHRED_PAYLOAD_SIZE; let shred_index = u64::from(shred.index()); @@ -1371,8 +1437,8 @@ impl Blockstore { ( "error", format!( - "Leader {:?}, slot {}: received index {} >= slot.last_index {}, is_recovered: {}", - leader_pubkey, slot, shred_index, last_index, is_recovered + "Leader {:?}, slot {}: received index {} >= slot.last_index {}, shred_source: {:?}", + leader_pubkey, slot, shred_index, last_index, shred_source ), String ) @@ -1407,8 +1473,8 @@ impl Blockstore { ( "error", format!( - "Leader {:?}, slot {}: received shred_index {} < slot.received {}, is_recovered: {}", - leader_pubkey, slot, shred_index, slot_meta.received, is_recovered + "Leader {:?}, slot {}: received shred_index {} < slot.received {}, shred_source: {:?}", + leader_pubkey, slot, shred_index, slot_meta.received, shred_source ), String ) @@ -1426,6 +1492,7 @@ impl Blockstore { data_index: &mut ShredIndex, shred: &Shred, write_batch: &mut WriteBatch, + shred_source: ShredSource, ) -> Result> { let slot = shred.slot(); let index = u64::from(shred.index()); @@ -1474,9 +1541,32 @@ impl Blockstore { index as u32, new_consumed, shred.reference_tick(), - &data_index, + data_index, ); + if shred_source == ShredSource::Repaired || shred_source == ShredSource::Recovered { + let mut slots_stats = self.slots_stats.lock().unwrap(); + let mut e = slots_stats.stats.entry(slot_meta.slot).or_default(); + if shred_source == ShredSource::Repaired { + e.num_repaired += 1; + } + if shred_source == ShredSource::Recovered { + e.num_recovered += 1; + } + } if slot_meta.is_full() { + let (num_repaired, num_recovered) = { + let mut slots_stats = self.slots_stats.lock().unwrap(); + if let Some(e) = slots_stats.stats.remove(&slot_meta.slot) { + if slots_stats.last_cleanup_ts.elapsed().as_secs() > 30 { + let root = self.last_root(); + slots_stats.stats = slots_stats.stats.split_off(&root); + slots_stats.last_cleanup_ts = Instant::now(); + } + (e.num_repaired, e.num_recovered) + } else { + (0, 0) + } + }; datapoint_info!( "shred_insert_is_full", ( @@ -1486,6 +1576,8 @@ impl Blockstore { ), ("slot", slot_meta.slot, i64), ("last_index", slot_meta.last_index, i64), + ("num_repaired", num_repaired, i64), + ("num_recovered", num_recovered, i64), ); } trace!("inserted shred into slot {:?} and index {:?}", slot, index); @@ -1694,7 +1786,7 @@ impl Blockstore { } break; } - let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key")); + let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key")); let current_index = { if current_slot > slot { @@ -1707,7 +1799,7 @@ impl Blockstore { let upper_index = cmp::min(current_index, end_index); // the tick that will be used to figure out the timeout for this hole let reference_tick = u64::from(Shred::reference_tick_from_data( - &db_iterator.value().expect("couldn't read value"), + db_iterator.value().expect("couldn't read value"), )); if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS { @@ -1956,18 +2048,24 @@ impl Blockstore { batch.put::(0, &index0)?; Ok(None) } else { - let result = 
if index0.frozen && to_slot > index0.max_slot { - debug!("Pruning transaction index 0 at slot {}", index0.max_slot); + let purge_target_primary_index = if index0.frozen && to_slot > index0.max_slot { + info!( + "Pruning expired primary index 0 up to slot {} (max requested: {})", + index0.max_slot, to_slot + ); Some(0) } else if index1.frozen && to_slot > index1.max_slot { - debug!("Pruning transaction index 1 at slot {}", index1.max_slot); + info!( + "Pruning expired primary index 1 up to slot {} (max requested: {})", + index1.max_slot, to_slot + ); Some(1) } else { None }; - if result.is_some() { - *w_active_transaction_status_index = if index0.frozen { 0 } else { 1 }; + if let Some(purge_target_primary_index) = purge_target_primary_index { + *w_active_transaction_status_index = purge_target_primary_index; if index0.frozen { index0.max_slot = 0 }; @@ -1980,16 +2078,17 @@ impl Blockstore { batch.put::(1, &index1)?; } - Ok(result) + Ok(purge_target_primary_index) } } - fn get_primary_index( + fn get_primary_index_to_write( &self, slot: Slot, - w_active_transaction_status_index: &mut u64, + // take WriteGuard to require critical section semantics at call site + w_active_transaction_status_index: &RwLockWriteGuard, ) -> Result { - let i = *w_active_transaction_status_index; + let i = **w_active_transaction_status_index; let mut index_meta = self.transaction_status_index_cf.get(i)?.unwrap(); if slot > index_meta.max_slot { assert!(!index_meta.frozen); @@ -2028,9 +2127,10 @@ impl Blockstore { let status = status.into(); // This write lock prevents interleaving issues with the transaction_status_index_cf by gating // writes to that column - let mut w_active_transaction_status_index = + let w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); - let primary_index = self.get_primary_index(slot, &mut w_active_transaction_status_index)?; + let primary_index = + self.get_primary_index_to_write(slot, &w_active_transaction_status_index)?; self.transaction_status_cf .put_protobuf((primary_index, signature, slot), &status)?; for address in writable_keys { @@ -2048,6 +2148,21 @@ impl Blockstore { Ok(()) } + fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard, Slot) { + // Ensures consistent result by using lowest_cleanup_slot as the lower bound + // for reading columns that do not employ strong read consistency with slot-based + // delete_range + let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); + let lowest_available_slot = (*lowest_cleanup_slot) + .checked_add(1) + .expect("overflow from trusted value"); + + // Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact + // needed slots here at any given moment. 
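`ensure_lowest_cleanup_slot()` returns the read guard together with the derived lower bound, so a caller holds the lock across its whole read and the cleanup service cannot advance the cutoff mid-query. A std-only sketch of the same guard-plus-value pattern, with a hypothetical `Store` standing in for `Blockstore`:

```rust
use std::sync::{RwLock, RwLockReadGuard};

// Hypothetical stand-in for the Blockstore; only the relevant field is modeled.
struct Store {
    lowest_cleanup_slot: RwLock<u64>,
}

impl Store {
    // Return the read guard *and* the first slot guaranteed readable. While
    // the caller keeps the guard alive, a cleaner that needs the write lock
    // is blocked from purging slots out from under the reader.
    fn ensure_lowest_cleanup_slot(&self) -> (RwLockReadGuard<'_, u64>, u64) {
        let guard = self.lowest_cleanup_slot.read().unwrap();
        let lowest_available_slot = guard.checked_add(1).expect("overflow from trusted value");
        (guard, lowest_available_slot)
    }
}

fn main() {
    let store = Store { lowest_cleanup_slot: RwLock::new(41) };
    let (_guard, lowest_available_slot) = store.ensure_lowest_cleanup_slot();
    assert_eq!(lowest_available_slot, 42);
    // ... iterate columns starting at lowest_available_slot while _guard lives ...
}
```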
+ // Blockstore callers, like rpc, can process concurrent read queries + (lowest_cleanup_slot, lowest_available_slot) + } + // Returns a transaction status, as well as a loop counter for unit testing fn get_transaction_status_with_counter( &self, @@ -2055,9 +2170,15 @@ impl Blockstore { confirmed_unrooted_slots: &[Slot], ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> { let mut counter = 0; + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); + for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.transaction_status_cf.iter(IteratorMode::From( - (transaction_status_cf_primary_index, signature, 0), + ( + transaction_status_cf_primary_index, + signature, + lowest_available_slot, + ), IteratorDirection::Forward, ))?; for ((i, sig, slot), _data) in index_iterator { @@ -2076,6 +2197,8 @@ impl Blockstore { return Ok((status, counter)); } } + drop(lock); + Ok((None, counter)) } @@ -2199,13 +2322,15 @@ impl Blockstore { start_slot: Slot, end_slot: Slot, ) -> Result> { + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); + let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, - start_slot, + start_slot.max(lowest_available_slot), Signature::default(), ), IteratorDirection::Forward, @@ -2220,6 +2345,7 @@ impl Blockstore { } } } + drop(lock); signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } @@ -2232,13 +2358,14 @@ impl Blockstore { pubkey: Pubkey, slot: Slot, ) -> Result> { + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, - slot, + slot.max(lowest_available_slot), Signature::default(), ), IteratorDirection::Forward, @@ -2253,6 +2380,7 @@ impl Blockstore { signatures.push((slot, signature)); } } + drop(lock); signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } @@ -2409,7 +2537,7 @@ impl Blockstore { address_signatures.extend( signatures .into_iter() - .filter(|(_, signature)| !excluded_signatures.contains(&signature)), + .filter(|(_, signature)| !excluded_signatures.contains(signature)), ) } else { address_signatures.append(&mut signatures); @@ -2492,7 +2620,7 @@ impl Blockstore { next_primary_index_iter_timer.stop(); let mut address_signatures: Vec<(Slot, Signature)> = address_signatures .into_iter() - .filter(|(_, signature)| !until_excluded_signatures.contains(&signature)) + .filter(|(_, signature)| !until_excluded_signatures.contains(signature)) .collect(); address_signatures.truncate(limit); @@ -2823,9 +2951,11 @@ impl Blockstore { } } - pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> { + pub fn set_roots<'a>(&self, rooted_slots: impl Iterator) -> Result<()> { let mut write_batch = self.db.batch()?; + let mut max_new_rooted_slot = 0; for slot in rooted_slots { + max_new_rooted_slot = std::cmp::max(max_new_rooted_slot, *slot); write_batch.put::(*slot, &true)?; } @@ -2835,7 +2965,7 @@ impl Blockstore { if *last_root == std::u64::MAX { *last_root = 0; } - *last_root = cmp::max(*rooted_slots.iter().max().unwrap(), *last_root); + *last_root = cmp::max(max_new_rooted_slot, *last_root); Ok(()) } @@ -2970,7 +3100,7 @@ 
impl Blockstore { } pub fn scan_and_fix_roots(&self, exit: &Arc) -> Result<()> { - let ancestor_iterator = AncestorIterator::new(self.last_root(), &self) + let ancestor_iterator = AncestorIterator::new(self.last_root(), self) .take_while(|&slot| slot >= self.lowest_cleanup_slot()); let mut find_missing_roots = Measure::start("find_missing_roots"); @@ -2990,7 +3120,7 @@ impl Blockstore { return Ok(()); } trace!("{:?}", chunk); - self.set_roots(&roots_to_fix)?; + self.set_roots(chunk.iter())?; } } else { debug!( @@ -3255,8 +3385,8 @@ fn commit_slot_meta_working_set( } // Check if the working copy of the metadata has changed if Some(meta) != meta_backup.as_ref() { - should_signal = should_signal || slot_has_updates(meta, &meta_backup); - write_batch.put::(*slot, &meta)?; + should_signal = should_signal || slot_has_updates(meta, meta_backup); + write_batch.put::(*slot, meta)?; } } @@ -3407,7 +3537,7 @@ fn handle_chaining_for_slot( traverse_children_mut( db, slot, - &meta, + meta, working_set, new_chained_slots, slot_function, @@ -3497,7 +3627,7 @@ pub fn create_new_ledger( access_type: AccessType, ) -> Result { Blockstore::destroy(ledger_path)?; - genesis_config.write(&ledger_path)?; + genesis_config.write(ledger_path)?; // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger. let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?; @@ -3512,7 +3642,7 @@ pub fn create_new_ledger( assert!(shreds.last().unwrap().last_in_slot()); blockstore.insert_shreds(shreds, None, false)?; - blockstore.set_roots(&[0])?; + blockstore.set_roots(std::iter::once(&0))?; // Explicitly close the blockstore before we create the archived genesis file drop(blockstore); @@ -4531,7 +4661,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, - completed_slots_receivers: [recvr, _], + completed_slots_receiver: recvr, .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); @@ -4557,7 +4687,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, - completed_slots_receivers: [recvr, _], + completed_slots_receiver: recvr, .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); @@ -4601,7 +4731,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, - completed_slots_receivers: [recvr, _], + completed_slots_receiver: recvr, .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); @@ -5382,7 +5512,7 @@ pub mod tests { &HashMap::new(), &last_root, None, - false + ShredSource::Turbine )); // Ensure that an empty shred (one with no data) would get inserted. 
Such shreds @@ -5405,7 +5535,7 @@ pub mod tests { &HashMap::new(), &last_root, None, - false + ShredSource::Repaired, )); empty_shred.data_header.size = 0; assert!(!blockstore.should_insert_data_shred( @@ -5414,7 +5544,7 @@ pub mod tests { &HashMap::new(), &last_root, None, - false + ShredSource::Recovered, )); // Trying to insert another "is_last" shred with index < the received index should fail @@ -5438,7 +5568,7 @@ pub mod tests { &HashMap::new(), &last_root, None, - false + ShredSource::Repaired, )); assert!(blockstore.has_duplicate_shreds_in_slot(0)); @@ -5459,7 +5589,7 @@ pub mod tests { &HashMap::new(), &last_root, None, - false + ShredSource::Repaired, )); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); @@ -5527,6 +5657,7 @@ pub mod tests { panic!("no dupes"); }, false, + false, )); // insert again fails on dupe @@ -5542,6 +5673,7 @@ pub mod tests { counter.fetch_add(1, Ordering::Relaxed); }, false, + false, )); assert_eq!(counter.load(Ordering::Relaxed), 1); } @@ -5761,7 +5893,7 @@ pub mod tests { let chained_slots = vec![0, 2, 4, 7, 12, 15]; assert_eq!(blockstore.last_root(), 0); - blockstore.set_roots(&chained_slots).unwrap(); + blockstore.set_roots(chained_slots.iter()).unwrap(); assert_eq!(blockstore.last_root(), 15); @@ -5778,7 +5910,7 @@ pub mod tests { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let roots = vec![2, 4, 7, 12, 15]; - blockstore.set_roots(&roots).unwrap(); + blockstore.set_roots(roots.iter()).unwrap(); for i in 0..20 { if i < 2 || roots.contains(&i) || i > 15 { @@ -5947,7 +6079,7 @@ pub mod tests { let last_root = 100; { let blockstore = Blockstore::open(&blockstore_path).unwrap(); - blockstore.set_roots(&[last_root]).unwrap(); + blockstore.set_roots(std::iter::once(&last_root)).unwrap(); // Insert will fail, slot < root blockstore @@ -5976,7 +6108,9 @@ pub mod tests { ledger.insert_shreds(shreds, None, false).unwrap(); ledger.insert_shreds(more_shreds, None, false).unwrap(); ledger.insert_shreds(unrooted_shreds, None, false).unwrap(); - ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap(); + ledger + .set_roots(vec![slot - 1, slot, slot + 1].iter()) + .unwrap(); let parent_meta = SlotMeta { parent_slot: std::u64::MAX, @@ -6531,7 +6665,7 @@ pub mod tests { let meta3 = SlotMeta::new(3, 2); blockstore.meta_cf.put(3, &meta3).unwrap(); - blockstore.set_roots(&[0, 2]).unwrap(); + blockstore.set_roots(vec![0, 2].iter()).unwrap(); // Initialize index 0, including: // signature2 in non-root and root, @@ -6676,6 +6810,176 @@ pub mod tests { Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } + fn do_test_lowest_cleanup_slot_and_special_cfs( + simulate_compaction: bool, + simulate_ledger_cleanup_service: bool, + ) { + solana_logger::setup(); + + let blockstore_path = get_tmp_ledger_path!(); + { + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + // TransactionStatus column opens initialized with one entry at index 2 + let transaction_status_cf = blockstore.db.column::(); + + let pre_balances_vec = vec![1, 2, 3]; + let post_balances_vec = vec![3, 2, 1]; + let status = TransactionStatusMeta { + status: solana_sdk::transaction::Result::<()>::Ok(()), + fee: 42u64, + pre_balances: pre_balances_vec, + post_balances: post_balances_vec, + inner_instructions: Some(vec![]), + log_messages: Some(vec![]), + pre_token_balances: Some(vec![]), + post_token_balances: Some(vec![]), + rewards: Some(vec![]), + } + .into(); + + 
let signature1 = Signature::new(&[2u8; 64]); + let signature2 = Signature::new(&[3u8; 64]); + + // Insert rooted slots 0..=3 with no fork + let meta0 = SlotMeta::new(0, 0); + blockstore.meta_cf.put(0, &meta0).unwrap(); + let meta1 = SlotMeta::new(1, 0); + blockstore.meta_cf.put(1, &meta1).unwrap(); + let meta2 = SlotMeta::new(2, 1); + blockstore.meta_cf.put(2, &meta2).unwrap(); + let meta3 = SlotMeta::new(3, 2); + blockstore.meta_cf.put(3, &meta3).unwrap(); + + blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap(); + + let lowest_cleanup_slot = 1; + let lowest_available_slot = lowest_cleanup_slot + 1; + + transaction_status_cf + .put_protobuf((0, signature1, lowest_cleanup_slot), &status) + .unwrap(); + + transaction_status_cf + .put_protobuf((0, signature2, lowest_available_slot), &status) + .unwrap(); + + let address0 = solana_sdk::pubkey::new_rand(); + let address1 = solana_sdk::pubkey::new_rand(); + blockstore + .write_transaction_status( + lowest_cleanup_slot, + signature1, + vec![&address0], + vec![], + TransactionStatusMeta::default(), + ) + .unwrap(); + blockstore + .write_transaction_status( + lowest_available_slot, + signature2, + vec![&address1], + vec![], + TransactionStatusMeta::default(), + ) + .unwrap(); + + let check_for_missing = || { + ( + blockstore + .get_transaction_status_with_counter(signature1, &[]) + .unwrap() + .0 + .is_none(), + blockstore + .find_address_signatures_for_slot(address0, lowest_cleanup_slot) + .unwrap() + .is_empty(), + blockstore + .find_address_signatures(address0, lowest_cleanup_slot, lowest_cleanup_slot) + .unwrap() + .is_empty(), + ) + }; + + let assert_existing_always = || { + let are_existing_always = ( + blockstore + .get_transaction_status_with_counter(signature2, &[]) + .unwrap() + .0 + .is_some(), + !blockstore + .find_address_signatures_for_slot(address1, lowest_available_slot) + .unwrap() + .is_empty(), + !blockstore + .find_address_signatures( + address1, + lowest_available_slot, + lowest_available_slot, + ) + .unwrap() + .is_empty(), + ); + assert_eq!(are_existing_always, (true, true, true)); + }; + + let are_missing = check_for_missing(); + // should never be missing before the conditional compaction & simulation... + assert_eq!(are_missing, (false, false, false)); + assert_existing_always(); + + if simulate_compaction { + blockstore.set_max_expired_slot(lowest_cleanup_slot); + // force compaction filters to run across whole key range. + blockstore + .compact_storage(Slot::min_value(), Slot::max_value()) + .unwrap(); + } + + if simulate_ledger_cleanup_service { + *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; + } + + let are_missing = check_for_missing(); + if simulate_compaction || simulate_ledger_cleanup_service { + // ... when either simulation (or both) is effective, we should observe to be missing + // consistently + assert_eq!(are_missing, (true, true, true)); + } else { + // ... otherwise, we should observe to be existing... 
+ assert_eq!(are_missing, (false, false, false)); + } + assert_existing_always(); + } + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_with_compact_with_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(true, true); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_with_compact_without_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(true, false); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_without_compact_with_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(false, true); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_without_compact_without_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(false, false); + } + #[test] fn test_get_rooted_transaction() { let slot = 2; @@ -6684,7 +6988,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap(); - blockstore.set_roots(&[slot - 1, slot]).unwrap(); + blockstore.set_roots(vec![slot - 1, slot].iter()).unwrap(); let expected_transactions: Vec = entries .iter() @@ -6872,7 +7176,7 @@ pub mod tests { fn test_empty_transaction_status() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); - blockstore.set_roots(&[0]).unwrap(); + blockstore.set_roots(std::iter::once(&0)).unwrap(); assert_eq!( blockstore .get_rooted_transaction(Signature::default()) @@ -6918,7 +7222,7 @@ pub mod tests { ) .unwrap(); } - blockstore.set_roots(&[slot0, slot1]).unwrap(); + blockstore.set_roots(vec![slot0, slot1].iter()).unwrap(); let all0 = blockstore .get_confirmed_signatures_for_address(address0, 0, 50) @@ -7011,7 +7315,7 @@ pub mod tests { ) .unwrap(); } - blockstore.set_roots(&[21, 22, 23, 24]).unwrap(); + blockstore.set_roots(vec![21, 22, 23, 24].iter()).unwrap(); let mut past_slot = 0; for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() { assert!(slot >= past_slot); @@ -7083,7 +7387,7 @@ pub mod tests { ) .unwrap(); } - blockstore.set_roots(&[slot1]).unwrap(); + blockstore.set_roots(std::iter::once(&slot1)).unwrap(); let slot1_signatures = blockstore .find_address_signatures_for_slot(address0, 1) @@ -7189,7 +7493,9 @@ pub mod tests { } // Leave one slot unrooted to test only returns confirmed signatures - blockstore.set_roots(&[1, 2, 4, 5, 6, 7, 8]).unwrap(); + blockstore + .set_roots(vec![1, 2, 4, 5, 6, 7, 8].iter()) + .unwrap(); let highest_confirmed_root = 8; // Fetch all rooted signatures for address 0 at once... 
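The call sites above reflect `set_roots()` now accepting any iterator of borrowed slots instead of a `&[u64]`, tracking the maximum root while iterating rather than in a second pass. A small sketch of the new shape; `Roots` is a stand-in, not the real `Blockstore`:

```rust
type Slot = u64;

// Stand-in for the Blockstore's root bookkeeping; only last_root is modeled.
struct Roots {
    last_root: Slot,
}

impl Roots {
    // Any iterator of borrowed slots works, so callers can pass
    // std::iter::once(&0), a slice iter, or a chunk iter without allocating.
    fn set_roots<'a>(&mut self, rooted_slots: impl Iterator<Item = &'a Slot>) {
        let mut max_new_rooted_slot = 0;
        for slot in rooted_slots {
            max_new_rooted_slot = std::cmp::max(max_new_rooted_slot, *slot);
            // (the real method also stages each root in a write batch here)
        }
        self.last_root = std::cmp::max(max_new_rooted_slot, self.last_root);
    }
}

fn main() {
    let mut roots = Roots { last_root: 0 };
    roots.set_roots(std::iter::once(&5));
    roots.set_roots([7, 12, 15].iter());
    assert_eq!(roots.last_root, 15);
}
```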
@@ -8035,6 +8341,7 @@ pub mod tests { lamports: 42 + i, post_balance: std::u64::MAX, reward_type: Some(RewardType::Fee), + commission: None, }) .collect(); let protobuf_rewards: generated::Rewards = rewards.into(); @@ -8101,6 +8408,7 @@ pub mod tests { lamports: -42, post_balance: 42, reward_type: Some(RewardType::Rent), + commission: None, }]), }; let deprecated_status: StoredTransactionStatusMeta = status.clone().into(); diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 4fe38a3f4f824a..e847b246742667 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -32,6 +32,19 @@ impl Blockstore { } } + /// Usually this is paired with .purge_slots() but we can't internally call this in + /// that function unconditionally. That's because set_max_expired_slot() + /// expects to purge older slots by the successive chronological order, while .purge_slots() + /// can also be used to purge *future* slots for --hard-fork thing, preserving older + /// slots. It'd be quite dangerous to purge older slots in that case. + /// So, current legal user of this function is LedgerCleanupService. + pub fn set_max_expired_slot(&self, to_slot: Slot) { + // convert here from inclusive purged range end to inclusive alive range start to align + // with Slot::default() for initial compaction filter behavior consistency + let to_slot = to_slot.checked_add(1).unwrap(); + self.db.set_oldest_slot(to_slot); + } + pub fn purge_and_compact_slots(&self, from_slot: Slot, to_slot: Slot) { self.purge_slots(from_slot, to_slot, PurgeType::Exact); if let Err(e) = self.compact_storage(from_slot, to_slot) { @@ -122,6 +135,10 @@ impl Blockstore { .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok() + & self + .db + .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .is_ok() & self .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) @@ -165,6 +182,10 @@ impl Blockstore { & self .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .is_ok() + & self + .db + .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok(); let mut w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); @@ -180,6 +201,13 @@ impl Blockstore { to_slot, )?; } + PurgeType::CompactionFilter => { + // No explicit action is required here because this purge type completely and + // indefinitely relies on the proper working of compaction filter for those + // special column families, never toggling the primary index from the current + // one. Overall, this enables well uniformly distributed writes, resulting + // in no spiky periodic huge delete_range for them. 
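`set_max_expired_slot()` above shifts the inclusive end of the purged range by one to get the first alive slot, which is the value the compaction filter compares against; a published value of `Slot::default()` (0) then means "keep everything", matching a freshly opened store. A tiny sketch of that conversion, with a hypothetical function name:

```rust
type Slot = u64;

// Convert "every slot up to and including max_expired is purged" into the
// first alive slot that the compaction filter should keep.
fn oldest_alive_slot(max_expired_slot: Slot) -> Slot {
    max_expired_slot.checked_add(1).expect("slot overflow")
}

fn main() {
    assert_eq!(oldest_alive_slot(0), 1); // slot 0 purged, keep 1..
    assert_eq!(oldest_alive_slot(100), 101); // slots 0..=100 purged, keep 101..
}
```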
+ } } delete_range_timer.stop(); let mut write_timer = Measure::start("write_batch"); @@ -193,6 +221,10 @@ impl Blockstore { write_timer.stop(); purge_stats.delete_range += delete_range_timer.as_us(); purge_stats.write_batch += write_timer.as_us(); + // only drop w_active_transaction_status_index after we do db.write(write_batch); + // otherwise, readers might be confused with inconsistent state between + // self.active_transaction_status_index and RockDb's TransactionStatusIndex contents + drop(w_active_transaction_status_index); Ok(columns_purged) } @@ -236,6 +268,10 @@ impl Blockstore { .orphans_cf .compact_range(from_slot, to_slot) .unwrap_or(false) + && self + .bank_hash_cf + .compact_range(from_slot, to_slot) + .unwrap_or(false) && self .index_cf .compact_range(from_slot, to_slot) @@ -263,6 +299,10 @@ impl Blockstore { && self .perf_samples_cf .compact_range(from_slot, to_slot) + .unwrap_or(false) + && self + .block_height_cf + .compact_range(from_slot, to_slot) .unwrap_or(false); compact_timer.stop(); if !result { @@ -323,18 +363,26 @@ impl Blockstore { w_active_transaction_status_index: &mut u64, to_slot: Slot, ) -> Result<()> { - if let Some(index) = self.toggle_transaction_status_index( + if let Some(purged_index) = self.toggle_transaction_status_index( write_batch, w_active_transaction_status_index, to_slot, )? { *columns_purged &= self .db - .delete_range_cf::(write_batch, index, index + 1) + .delete_range_cf::( + write_batch, + purged_index, + purged_index + 1, + ) .is_ok() & self .db - .delete_range_cf::(write_batch, index, index + 1) + .delete_range_cf::( + write_batch, + purged_index, + purged_index + 1, + ) .is_ok(); } Ok(()) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index b6dd277ea8ac55..72e564006b8568 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -5,9 +5,13 @@ use log::*; use prost::Message; pub use rocksdb::Direction as IteratorDirection; use rocksdb::{ - self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator, DBRecoveryMode, - IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB, + self, + compaction_filter::CompactionFilter, + compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory}, + ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, DBIterator, DBRawIterator, + DBRecoveryMode, IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB, }; + use serde::de::DeserializeOwned; use serde::Serialize; use solana_runtime::hardened_unpack::UnpackError; @@ -17,7 +21,17 @@ use solana_sdk::{ signature::Signature, }; use solana_storage_proto::convert::generated; -use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + ffi::{CStr, CString}, + fs, + marker::PhantomData, + path::Path, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; use thiserror::Error; const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB @@ -33,6 +47,8 @@ const DUPLICATE_SLOTS_CF: &str = "duplicate_slots"; const ERASURE_META_CF: &str = "erasure_meta"; // Column family for orphans data const ORPHANS_CF: &str = "orphans"; +/// Column family for bank hashes +const BANK_HASH_CF: &str = "bank_hashes"; // Column family for root data const ROOT_CF: &str = "root"; /// Column family for indexes @@ -57,6 +73,11 @@ const BLOCKTIME_CF: &str = "blocktime"; const PERF_SAMPLES_CF: &str = "perf_samples"; /// Column family for BlockHeight const BLOCK_HEIGHT_CF: &str = "block_height"; +/// Column 
family for ProgramCosts +const PROGRAM_COSTS_CF: &str = "program_costs"; + +// 1 day is chosen for the same reasoning of DEFAULT_COMPACTION_SLOT_INTERVAL +const PERIODIC_COMPACTION_SECONDS: u64 = 60 * 60 * 24; #[derive(Error, Debug)] pub enum BlockstoreError { @@ -114,6 +135,10 @@ pub mod columns { /// The erasure meta column pub struct ErasureMeta; + #[derive(Debug)] + /// The bank hash column + pub struct BankHash; + #[derive(Debug)] /// The root column pub struct Root; @@ -157,6 +182,10 @@ pub mod columns { #[derive(Debug)] /// The block height column pub struct BlockHeight; + + #[derive(Debug)] + // The program costs column + pub struct ProgramCosts; } pub enum AccessType { @@ -208,8 +237,30 @@ impl From for DBRecoveryMode { } } +#[derive(Default, Clone, Debug)] +struct OldestSlot(Arc); + +impl OldestSlot { + pub fn set(&self, oldest_slot: Slot) { + // this is independently used for compaction_filter without any data dependency. + // also, compaction_filters are created via its factories, creating short-lived copies of + // this atomic value for the single job of compaction. So, Relaxed store can be justified + // in total + self.0.store(oldest_slot, Ordering::Relaxed); + } + + pub fn get(&self) -> Slot { + // copy from the AtomicU64 as a general precaution so that the oldest_slot can not mutate + // across single run of compaction for simpler reasoning although this isn't strict + // requirement at the moment + // also eventual propagation (very Relaxed) load is Ok, because compaction by nature doesn't + // require strictly synchronized semantics in this regard + self.0.load(Ordering::Relaxed) + } +} + #[derive(Debug)] -struct Rocks(rocksdb::DB, ActualAccessType); +struct Rocks(rocksdb::DB, ActualAccessType, OldestSlot); impl Rocks { fn open( @@ -217,11 +268,7 @@ impl Rocks { access_type: AccessType, recovery_mode: Option, ) -> Result { - use columns::{ - AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, - Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, - TransactionStatus, TransactionStatusIndex, - }; + use columns::*; fs::create_dir_all(&path)?; @@ -234,39 +281,83 @@ impl Rocks { db_options.set_wal_recovery_mode(recovery_mode.into()); } + let oldest_slot = OldestSlot::default(); + // Column family names - let meta_cf_descriptor = - ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(&access_type)); - let dead_slots_cf_descriptor = - ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(&access_type)); - let duplicate_slots_cf_descriptor = - ColumnFamilyDescriptor::new(DuplicateSlots::NAME, get_cf_options(&access_type)); - let erasure_meta_cf_descriptor = - ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(&access_type)); - let orphans_cf_descriptor = - ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(&access_type)); - let root_cf_descriptor = - ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(&access_type)); - let index_cf_descriptor = - ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(&access_type)); - let shred_data_cf_descriptor = - ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(&access_type)); - let shred_code_cf_descriptor = - ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(&access_type)); - let transaction_status_cf_descriptor = - ColumnFamilyDescriptor::new(TransactionStatus::NAME, get_cf_options(&access_type)); - let address_signatures_cf_descriptor = - ColumnFamilyDescriptor::new(AddressSignatures::NAME, get_cf_options(&access_type)); - let 
transaction_status_index_cf_descriptor = - ColumnFamilyDescriptor::new(TransactionStatusIndex::NAME, get_cf_options(&access_type)); - let rewards_cf_descriptor = - ColumnFamilyDescriptor::new(Rewards::NAME, get_cf_options(&access_type)); - let blocktime_cf_descriptor = - ColumnFamilyDescriptor::new(Blocktime::NAME, get_cf_options(&access_type)); - let perf_samples_cf_descriptor = - ColumnFamilyDescriptor::new(PerfSamples::NAME, get_cf_options(&access_type)); - let block_height_cf_descriptor = - ColumnFamilyDescriptor::new(BlockHeight::NAME, get_cf_options(&access_type)); + let meta_cf_descriptor = ColumnFamilyDescriptor::new( + SlotMeta::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new( + DeadSlots::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let duplicate_slots_cf_descriptor = ColumnFamilyDescriptor::new( + DuplicateSlots::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let erasure_meta_cf_descriptor = ColumnFamilyDescriptor::new( + ErasureMeta::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let orphans_cf_descriptor = ColumnFamilyDescriptor::new( + Orphans::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let bank_hash_cf_descriptor = ColumnFamilyDescriptor::new( + BankHash::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let root_cf_descriptor = ColumnFamilyDescriptor::new( + Root::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let index_cf_descriptor = ColumnFamilyDescriptor::new( + Index::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let shred_data_cf_descriptor = ColumnFamilyDescriptor::new( + ShredData::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let shred_code_cf_descriptor = ColumnFamilyDescriptor::new( + ShredCode::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let transaction_status_cf_descriptor = ColumnFamilyDescriptor::new( + TransactionStatus::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let address_signatures_cf_descriptor = ColumnFamilyDescriptor::new( + AddressSignatures::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let transaction_status_index_cf_descriptor = ColumnFamilyDescriptor::new( + TransactionStatusIndex::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let rewards_cf_descriptor = ColumnFamilyDescriptor::new( + Rewards::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let blocktime_cf_descriptor = ColumnFamilyDescriptor::new( + Blocktime::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let perf_samples_cf_descriptor = ColumnFamilyDescriptor::new( + PerfSamples::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let block_height_cf_descriptor = ColumnFamilyDescriptor::new( + BlockHeight::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let program_costs_cf_descriptor = ColumnFamilyDescriptor::new( + ProgramCosts::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + // Don't forget to add to both run_purge_with_stats() and + // compact_storage() in ledger/src/blockstore/blockstore_purge.rs!! 
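Every descriptor above is now built through a generic `get_cf_options::<C>()`, so the per-column policy (install the purge filter or skip it) lives in one place instead of being repeated per column family. A compact sketch of that pattern, with local stand-ins rather than the rocksdb crate's types:

```rust
// Local stand-ins; not the rocksdb crate's Options/ColumnFamilyDescriptor.
#[derive(Default)]
struct Options {
    purge_filter: Option<&'static str>, // which column a filter was installed for
}

struct Descriptor {
    name: &'static str,
    options: Options,
}

trait ColumnName {
    const NAME: &'static str;
}

struct SlotMeta;
impl ColumnName for SlotMeta {
    const NAME: &'static str = "meta";
}

struct TransactionStatusIndex;
impl ColumnName for TransactionStatusIndex {
    const NAME: &'static str = "transaction_status_index";
}

fn excludes_from_compaction(cf_name: &str) -> bool {
    cf_name == TransactionStatusIndex::NAME
}

fn get_cf_options<C: ColumnName>(is_primary: bool) -> Options {
    let mut options = Options::default();
    // Only a primary (validator) instance installs the purge filter, and only
    // for columns that participate in slot-based cleanup.
    if is_primary && !excludes_from_compaction(C::NAME) {
        options.purge_filter = Some(C::NAME);
    }
    options
}

fn descriptor<C: ColumnName>(is_primary: bool) -> Descriptor {
    Descriptor { name: C::NAME, options: get_cf_options::<C>(is_primary) }
}

fn main() {
    assert!(descriptor::<SlotMeta>(true).options.purge_filter.is_some());
    assert!(descriptor::<TransactionStatusIndex>(true).options.purge_filter.is_none());
    assert_eq!(descriptor::<SlotMeta>(false).name, "meta");
}
```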
let cfs = vec![ (SlotMeta::NAME, meta_cf_descriptor), @@ -274,6 +365,7 @@ impl Rocks { (DuplicateSlots::NAME, duplicate_slots_cf_descriptor), (ErasureMeta::NAME, erasure_meta_cf_descriptor), (Orphans::NAME, orphans_cf_descriptor), + (BankHash::NAME, bank_hash_cf_descriptor), (Root::NAME, root_cf_descriptor), (Index::NAME, index_cf_descriptor), (ShredData::NAME, shred_data_cf_descriptor), @@ -288,19 +380,20 @@ impl Rocks { (Blocktime::NAME, blocktime_cf_descriptor), (PerfSamples::NAME, perf_samples_cf_descriptor), (BlockHeight::NAME, block_height_cf_descriptor), + (ProgramCosts::NAME, program_costs_cf_descriptor), ]; + let cf_names: Vec<_> = cfs.iter().map(|c| c.0).collect(); // Open the database let db = match access_type { AccessType::PrimaryOnly | AccessType::PrimaryOnlyForMaintenance => Rocks( DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1))?, ActualAccessType::Primary, + oldest_slot, ), AccessType::TryPrimaryThenSecondary => { - let names: Vec<_> = cfs.iter().map(|c| c.0).collect(); - match DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1)) { - Ok(db) => Rocks(db, ActualAccessType::Primary), + Ok(db) => Rocks(db, ActualAccessType::Primary, oldest_slot), Err(err) => { let secondary_path = path.join("solana-secondary"); @@ -312,23 +405,81 @@ impl Rocks { db_options.set_max_open_files(-1); Rocks( - DB::open_cf_as_secondary(&db_options, path, &secondary_path, names)?, + DB::open_cf_as_secondary( + &db_options, + path, + &secondary_path, + cf_names.clone(), + )?, ActualAccessType::Secondary, + oldest_slot, ) } } } }; + // this is only needed for LedgerCleanupService. so guard with PrimaryOnly (i.e. running solana-validator) + if matches!(access_type, AccessType::PrimaryOnly) { + for cf_name in cf_names { + // these special column families must be excluded from LedgerCleanupService's rocksdb + // compactions + if excludes_from_compaction(cf_name) { + continue; + } + + // This is the crux of our write-stall-free storage cleaning strategy with consistent + // state view for higher-layers + // + // For the consistent view, we commit delete_range on pruned slot range by LedgerCleanupService. + // simple story here. + // + // For actual storage cleaning, we employ RocksDB compaction. But default RocksDB compaction + // settings don't work well for us. That's because we're using it rather like a really big + // (100 GBs) ring-buffer. RocksDB is basically assuming uniform data write over the key space for + // efficient compaction, which isn't true for our use as a ring buffer. + // + // So, we customize the compaction strategy with 2 combined tweaks: + // (1) compaction_filter and (2) shortening its periodic cycles. + // + // Via the compaction_filter, we finally reclaim previously delete_range()-ed storage occupied + // by pruned slots. When compaction_filter is set, each SST files are re-compacted periodically + // to hunt for keys newly expired by the compaction_filter re-evaluation. But RocksDb's default + // `periodic_compaction_seconds` is 30 days, which is too long for our case. So, we + // shorten it to a day (24 hours). + // + // As we write newer SST files over time at rather consistent rate of speed, this + // effectively makes each newly-created ssts be re-compacted for the filter at + // well-dispersed different timings. + // As a whole, we rewrite the whole dataset at every PERIODIC_COMPACTION_SECONDS, + // slowly over the duration of PERIODIC_COMPACTION_SECONDS. So, this results in + // amortization. 
+ // So, there is a bit of inefficiency here because we'll rewrite not-so-old SST files + // too. But a longer period would introduce higher variance of ledger storage sizes over + // the long run. And it's much better than the daily IO spike caused by compact_range() in + // the previous implementation. + // + // `ttl` and `compact_range` (`ManualCompaction`) don't work nicely here. That's + // because their original intention is reclaiming disk space from delete_range()s. So compaction tries to merge + // those tombstones with N+1 SST files all the way down to the bottommost SSTs, often leading to a vastly large amount + // (= all) of invalidated SST files, when combined with newer writes happening at the opposite + // edge of the key space. This causes long and heavy disk IO and possible write + // stall and ultimately, the deadly Replay/Banking stage stall at higher layers. db.0.set_options_cf( + db.cf_handle(cf_name), + &[( + "periodic_compaction_seconds", + &format!("{}", PERIODIC_COMPACTION_SECONDS), + )], + ) + .unwrap(); + } + } Ok(db) } fn columns(&self) -> Vec<&'static str> { - use columns::{ - AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, - Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, - TransactionStatus, TransactionStatusIndex, - }; + use columns::*; vec![ ErasureMeta::NAME, @@ -336,6 +487,7 @@ impl Rocks { DuplicateSlots::NAME, Index::NAME, Orphans::NAME, + BankHash::NAME, Root::NAME, SlotMeta::NAME, ShredData::NAME, @@ -347,6 +499,7 @@ impl Rocks { Blocktime::NAME, PerfSamples::NAME, BlockHeight::NAME, + ProgramCosts::NAME, ] } @@ -363,7 +516,7 @@ impl Rocks { } fn get_cf(&self, cf: &ColumnFamily, key: &[u8]) -> Result<Option<Vec<u8>>> { - let opt = self.0.get_cf(cf, key)?.map(|db_vec| db_vec.to_vec()); + let opt = self.0.get_cf(cf, key)?; Ok(opt) } @@ -415,9 +568,13 @@ pub trait Column { fn key(index: Self::Index) -> Vec<u8>; fn index(key: &[u8]) -> Self::Index; - fn primary_index(index: Self::Index) -> Slot; + // this returns a Slot or some other u64 + fn primary_index(index: Self::Index) -> u64; #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> Self::Index; + fn slot(index: Self::Index) -> Slot { + Self::primary_index(index) + } } pub trait ColumnName { @@ -491,6 +648,10 @@ impl Column for columns::TransactionStatus { index.0 } + fn slot(index: Self::Index) -> Slot { + index.2 + } + #[allow(clippy::wrong_self_convention)] fn as_index(index: u64) -> Self::Index { (index, Signature::default(), 0) @@ -528,6 +689,10 @@ impl Column for columns::AddressSignatures { index.0 } + fn slot(index: Self::Index) -> Slot { + index.2 + } + #[allow(clippy::wrong_self_convention)] fn as_index(index: u64) -> Self::Index { (index, Pubkey::default(), 0, Signature::default()) @@ -555,6 +720,10 @@ impl Column for columns::TransactionStatusIndex { index } + fn slot(_index: Self::Index) -> Slot { + unimplemented!() + } + #[allow(clippy::wrong_self_convention)] fn as_index(slot: u64) -> u64 { slot @@ -565,6 +734,14 @@ impl ColumnName for columns::TransactionStatusIndex { const NAME: &'static str = TRANSACTION_STATUS_INDEX_CF; } +impl SlotColumn for columns::BankHash {} +impl ColumnName for columns::BankHash { + const NAME: &'static str = BANK_HASH_CF; +} +impl TypedColumn for columns::BankHash { + type Type = blockstore_meta::FrozenHashVersioned; +} + impl SlotColumn for columns::Rewards {} impl ColumnName for columns::Rewards { const NAME: &'static str = REWARDS_CF; @@ -597,6 +774,39 @@ impl TypedColumn for columns::BlockHeight { type Type = u64; } +impl ColumnName for
columns::ProgramCosts { + const NAME: &'static str = PROGRAM_COSTS_CF; +} +impl TypedColumn for columns::ProgramCosts { + type Type = blockstore_meta::ProgramCost; +} +impl Column for columns::ProgramCosts { + type Index = Pubkey; + + fn key(pubkey: Pubkey) -> Vec { + let mut key = vec![0; 32]; // size_of Pubkey + key[0..32].clone_from_slice(&pubkey.as_ref()[0..32]); + key + } + + fn index(key: &[u8]) -> Self::Index { + Pubkey::new(&key[0..32]) + } + + fn primary_index(_index: Self::Index) -> u64 { + unimplemented!() + } + + fn slot(_index: Self::Index) -> Slot { + unimplemented!() + } + + #[allow(clippy::wrong_self_convention)] + fn as_index(_index: u64) -> Self::Index { + Pubkey::default() + } +} + impl Column for columns::ShredCode { type Index = (u64, u64); @@ -855,6 +1065,10 @@ impl Database { pub fn is_primary_access(&self) -> bool { self.backend.is_primary_access() } + + pub fn set_oldest_slot(&self, oldest_slot: Slot) { + self.backend.2.set(oldest_slot); + } } impl LedgerColumn @@ -1032,7 +1246,63 @@ impl<'a> WriteBatch<'a> { } } -fn get_cf_options(access_type: &AccessType) -> Options { +struct PurgedSlotFilter { + oldest_slot: Slot, + name: CString, + _phantom: PhantomData, +} + +impl CompactionFilter for PurgedSlotFilter { + fn filter(&mut self, _level: u32, key: &[u8], _value: &[u8]) -> CompactionDecision { + use rocksdb::CompactionDecision::*; + + let slot_in_key = C::slot(C::index(key)); + // Refer to a comment about periodic_compaction_seconds, especially regarding implicit + // periodic execution of compaction_filters + if slot_in_key >= self.oldest_slot { + Keep + } else { + Remove + } + } + + fn name(&self) -> &CStr { + &self.name + } +} + +struct PurgedSlotFilterFactory { + oldest_slot: OldestSlot, + name: CString, + _phantom: PhantomData, +} + +impl CompactionFilterFactory for PurgedSlotFilterFactory { + type Filter = PurgedSlotFilter; + + fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter { + let copied_oldest_slot = self.oldest_slot.get(); + PurgedSlotFilter:: { + oldest_slot: copied_oldest_slot, + name: CString::new(format!( + "purged_slot_filter({}, {:?})", + C::NAME, + copied_oldest_slot + )) + .unwrap(), + _phantom: PhantomData::default(), + } + } + + fn name(&self) -> &CStr { + &self.name + } +} + +fn get_cf_options( + access_type: &AccessType, + oldest_slot: &OldestSlot, +) -> Options { let mut options = Options::default(); // 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM options.set_max_write_buffer_number(8); @@ -1046,6 +1316,17 @@ fn get_cf_options(access_type: &AccessType) -> Options { options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32); options.set_max_bytes_for_level_base(total_size_base); options.set_target_file_size_base(file_size_base); + + // TransactionStatusIndex must be excluded from LedgerCleanupService's rocksdb + // compactions.... 
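Two details make the filter machinery above work safely: the cutoff is published through the lock-free `OldestSlot`, and each filter created by the factory snapshots that value once per compaction run, so the decision boundary cannot move mid-compaction. A self-contained sketch of both behaviors, with simplified structs in place of the real rocksdb traits:

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

type Slot = u64;

// The cleaner publishes the oldest alive slot; compaction-filter factories
// read it. Relaxed suffices: no other data synchronizes through this value.
#[derive(Clone, Default)]
struct OldestSlot(Arc<AtomicU64>);

impl OldestSlot {
    fn set(&self, slot: Slot) {
        self.0.store(slot, Ordering::Relaxed);
    }
    fn get(&self) -> Slot {
        self.0.load(Ordering::Relaxed)
    }
}

// Each compaction run snapshots the cutoff once at filter creation, mirroring
// the factory-created filters above.
struct Filter {
    oldest_slot: Slot,
}

impl Filter {
    fn keep(&self, slot_in_key: Slot) -> bool {
        slot_in_key >= self.oldest_slot
    }
}

fn main() {
    let shared = OldestSlot::default();
    let filter = Filter { oldest_slot: shared.get() }; // snapshot: 0
    shared.set(100); // published later; the existing filter is unaffected
    assert!(filter.keep(0));
    let filter = Filter { oldest_slot: shared.get() }; // a new run snapshots 100
    assert!(!filter.keep(99));
    assert!(filter.keep(100));
}
```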
+fn get_cf_options<C: 'static + Column + ColumnName>(
+    access_type: &AccessType,
+    oldest_slot: &OldestSlot,
+) -> Options {
     let mut options = Options::default();
     // 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
     options.set_max_write_buffer_number(8);
@@ -1046,6 +1316,17 @@
     options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32);
     options.set_max_bytes_for_level_base(total_size_base);
     options.set_target_file_size_base(file_size_base);
+
+    // TransactionStatusIndex must be excluded from LedgerCleanupService's rocksdb
+    // compactions....
+    if matches!(access_type, AccessType::PrimaryOnly) && !excludes_from_compaction(C::NAME) {
+        options.set_compaction_filter_factory(PurgedSlotFilterFactory::<C> {
+            oldest_slot: oldest_slot.clone(),
+            name: CString::new(format!("purged_slot_filter_factory({})", C::NAME)).unwrap(),
+            _phantom: PhantomData::default(),
+        });
+    }
+
     if matches!(access_type, AccessType::PrimaryOnlyForMaintenance) {
         options.set_disable_auto_compactions(true);
     }
@@ -1077,3 +1358,79 @@ fn get_db_options(access_type: &AccessType) -> Options {

     options
 }
+
+fn excludes_from_compaction(cf_name: &str) -> bool {
+    // List of column families to be excluded from compaction:
+    let no_compaction_cfs: HashSet<&'static str> = vec![
+        columns::TransactionStatusIndex::NAME,
+        columns::ProgramCosts::NAME,
+    ]
+    .into_iter()
+    .collect();
+
+    no_compaction_cfs.get(cf_name).is_some()
+}
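A usage note (sketch, not from the patch): because `get_cf_options` is now generic over the column type, each column family builds its own `Options`, and the purge filter is wired up only for CFs that are not excluded. A hypothetical call site, assuming the items defined in this file, could look like:

// Illustrative only: build per-CF options from one shared OldestSlot handle.
fn build_cf_options_example(access_type: &AccessType, oldest_slot: &OldestSlot) {
    // Slot-keyed CFs get the purge filter registered inside get_cf_options();
    let _shred_data_opts = get_cf_options::<columns::ShredData>(access_type, oldest_slot);
    // Excluded CFs (TransactionStatusIndex, ProgramCosts) come back without it.
    let _tx_status_index_opts =
        get_cf_options::<columns::TransactionStatusIndex>(access_type, oldest_slot);
}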
+
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+    use crate::blockstore_db::columns::ShredData;
+
+    #[test]
+    fn test_compaction_filter() {
+        // this doesn't implement Clone...
+        let dummy_compaction_filter_context = || CompactionFilterContext {
+            is_full_compaction: true,
+            is_manual_compaction: true,
+        };
+        let oldest_slot = OldestSlot::default();
+
+        let mut factory = PurgedSlotFilterFactory::<ShredData> {
+            oldest_slot: oldest_slot.clone(),
+            name: CString::new("test compaction filter").unwrap(),
+            _phantom: PhantomData::default(),
+        };
+        let mut compaction_filter = factory.create(dummy_compaction_filter_context());
+
+        let dummy_level = 0;
+        let key = ShredData::key(ShredData::as_index(0));
+        let dummy_value = vec![];
+
+        // we can't use assert_matches! because CompactionDecision doesn't implement Debug
+        assert!(matches!(
+            compaction_filter.filter(dummy_level, &key, &dummy_value),
+            CompactionDecision::Keep
+        ));
+
+        // mutating oldest_slot doesn't affect existing compaction filters...
+        oldest_slot.set(1);
+        assert!(matches!(
+            compaction_filter.filter(dummy_level, &key, &dummy_value),
+            CompactionDecision::Keep
+        ));
+
+        // recreating the compaction filter starts to expire the key
+        let mut compaction_filter = factory.create(dummy_compaction_filter_context());
+        assert!(matches!(
+            compaction_filter.filter(dummy_level, &key, &dummy_value),
+            CompactionDecision::Remove
+        ));
+
+        // a newer key shouldn't be removed
+        let key = ShredData::key(ShredData::as_index(1));
+        assert!(matches!(
+            compaction_filter.filter(dummy_level, &key, &dummy_value),
+            CompactionDecision::Keep
+        ));
+    }
+
+    #[test]
+    fn test_excludes_from_compaction() {
+        // currently two CFs are excluded from compaction:
+        assert!(excludes_from_compaction(
+            columns::TransactionStatusIndex::NAME
+        ));
+        assert!(excludes_from_compaction(columns::ProgramCosts::NAME));
+        assert!(!excludes_from_compaction("something else"));
+    }
+}
diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs
index 586a61adb53311..a0a41fcb592d5a 100644
--- a/ledger/src/blockstore_meta.rs
+++ b/ledger/src/blockstore_meta.rs
@@ -1,6 +1,6 @@
 use crate::erasure::ErasureConfig;
 use serde::{Deserialize, Serialize};
-use solana_sdk::clock::Slot;
+use solana_sdk::{clock::Slot, hash::Hash};
 use std::{collections::BTreeSet, ops::RangeBounds};

 #[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
@@ -75,6 +75,33 @@ pub enum ErasureMetaStatus {
     StillNeed(usize),
 }

+#[derive(Deserialize, Serialize, Debug, PartialEq)]
+pub enum FrozenHashVersioned {
+    Current(FrozenHashStatus),
+}
+
+impl FrozenHashVersioned {
+    pub fn frozen_hash(&self) -> Hash {
+        match self {
+            FrozenHashVersioned::Current(frozen_hash_status) => frozen_hash_status.frozen_hash,
+        }
+    }
+
+    pub fn is_duplicate_confirmed(&self) -> bool {
+        match self {
+            FrozenHashVersioned::Current(frozen_hash_status) => {
+                frozen_hash_status.is_duplicate_confirmed
+            }
+        }
+    }
+}
+
+#[derive(Deserialize, Serialize, Debug, PartialEq)]
+pub struct FrozenHashStatus {
+    pub frozen_hash: Hash,
+    pub is_duplicate_confirmed: bool,
+}
+
 impl Index {
     pub(crate) fn new(slot: Slot) -> Self {
         Index {
@@ -253,6 +280,11 @@ pub struct PerfSample {
     pub sample_period_secs: u16,
 }

+#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
+pub struct ProgramCost {
+    pub cost: u64,
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
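A small reader-side sketch (illustrative, not from the patch) of how a consumer of the new BankHash column might unpack `FrozenHashVersioned` via its accessors; `summarize` is a hypothetical helper:

// Sketch: readers go through the accessors so that new variants can be added
// to the enum later without breaking values already serialized into the CF.
fn summarize(frozen_hash: &FrozenHashVersioned) -> (Hash, bool) {
    (
        frozen_hash.frozen_hash(),
        frozen_hash.is_duplicate_confirmed(),
    )
}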
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index f10761d84c53f3..b1799d52f0e390 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -16,6 +16,7 @@ use solana_measure::measure::Measure;
 use solana_metrics::{datapoint_error, inc_new_counter_debug};
 use solana_rayon_threadlimit::get_thread_count;
 use solana_runtime::{
+    accounts_db::AccountShrinkThreshold,
     accounts_index::AccountSecondaryIndexes,
     bank::{
         Bank, ExecuteTimings, InnerInstructionsList, RentDebits, TransactionBalancesSet,
@@ -24,6 +25,7 @@ use solana_runtime::{
     bank_forks::BankForks,
     bank_utils,
     commitment::VOTE_THRESHOLD_SIZE,
+    snapshot_utils::BankFromArchiveTimings,
     transaction_batch::TransactionBatch,
     vote_account::ArcVoteAccount,
     vote_sender_types::ReplayVoteSender,
@@ -110,7 +112,7 @@ fn execute_batch(
     let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

     let pre_token_balances = if record_token_balances {
-        collect_token_balances(&bank, &batch, &mut mint_decimals)
+        collect_token_balances(bank, batch, &mut mint_decimals)
     } else {
         vec![]
     };
@@ -137,7 +139,7 @@ fn execute_batch(
     if let Some(transaction_status_sender) = transaction_status_sender {
         let txs = batch.transactions_iter().cloned().collect();
         let post_token_balances = if record_token_balances {
-            collect_token_balances(&bank, &batch, &mut mint_decimals)
+            collect_token_balances(bank, batch, &mut mint_decimals)
         } else {
             vec![]
         };
@@ -193,6 +195,8 @@ fn execute_batches(
         })
     });

+    timings.total_batches_len += batches.len();
+    timings.num_execute_batches += 1;
     for timing in new_timings {
         timings.accumulate(&timing);
     }
@@ -325,7 +329,7 @@ fn process_entries_with_callback(
         timings,
     )?;
     for hash in tick_hashes {
-        bank.register_tick(&hash);
+        bank.register_tick(hash);
     }
     Ok(())
 }
@@ -373,6 +377,8 @@ pub struct ProcessOptions {
     pub limit_load_slot_count_from_snapshot: Option<usize>,
     pub allow_dead_slots: bool,
     pub accounts_db_test_hash_calculation: bool,
+    pub accounts_db_skip_shrink: bool,
+    pub shrink_ratio: AccountShrinkThreshold,
 }

 pub fn process_blockstore(
@@ -393,13 +399,15 @@ pub fn process_blockstore(
     // Setup bank for slot 0
     let bank0 = Bank::new_with_paths(
-        &genesis_config,
+        genesis_config,
         account_paths,
         &opts.frozen_accounts,
         opts.debug_keys.clone(),
         Some(&crate::builtins::get(opts.bpf_jit)),
         opts.account_indexes.clone(),
         opts.accounts_db_caching_enabled,
+        opts.shrink_ratio,
+        false,
     );
     let bank0 = Arc::new(bank0);
     info!("processing ledger for slot 0...");
@@ -418,6 +426,7 @@ pub fn process_blockstore(
         &recyclers,
         None,
         cache_block_meta_sender,
+        BankFromArchiveTimings::default(),
     )
 }

@@ -429,6 +438,7 @@ pub(crate) fn process_blockstore_from_root(
     recyclers: &VerifyRecyclers,
     transaction_status_sender: Option<&TransactionStatusSender>,
     cache_block_meta_sender: Option<&CacheBlockMetaSender>,
+    timings: BankFromArchiveTimings,
 ) -> BlockstoreProcessorResult {
     do_process_blockstore_from_root(
         blockstore,
@@ -437,6 +447,7 @@ pub(crate) fn process_blockstore_from_root(
         recyclers,
         transaction_status_sender,
         cache_block_meta_sender,
+        timings,
     )
 }

@@ -447,6 +458,7 @@ fn do_process_blockstore_from_root(
     recyclers: &VerifyRecyclers,
     transaction_status_sender: Option<&TransactionStatusSender>,
     cache_block_meta_sender: Option<&CacheBlockMetaSender>,
+    timings: BankFromArchiveTimings,
 ) -> BlockstoreProcessorResult {
     info!("processing ledger from slot {}...", bank.slot());

@@ -474,7 +486,7 @@ fn do_process_blockstore_from_root(
     // ensure start_slot is rooted for correct replay
     if blockstore.is_primary_access() {
         blockstore
-            .set_roots(&[start_slot])
+            .set_roots(std::iter::once(&start_slot))
             .expect("Couldn't set root slot on startup");
     } else if !blockstore.is_root(start_slot) {
         panic!("starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
@@ -526,18 +538,45 @@ fn do_process_blockstore_from_root(
     }

     let bank_forks = BankForks::new_from_banks(&initial_forks, root);
+    let processing_time = now.elapsed();
+
+    let debug_verify = opts.accounts_db_test_hash_calculation;
+    let mut time_cap = Measure::start("capitalization");
+    // We might be promptly restarted after bad capitalization was detected while creating a newer snapshot.
+    // In that case, we're most likely restored from the last good snapshot and replayed up to this root.
+    // So again check here for the bad capitalization to avoid continuing until the next snapshot creation.
+ if !bank_forks + .root_bank() + .calculate_and_verify_capitalization(debug_verify) + { + return Err(BlockstoreProcessorError::RootBankWithMismatchedCapitalization(root)); + } + time_cap.stop(); + datapoint_info!( "process_blockstore_from_root", - ("total_time_us", now.elapsed().as_micros(), i64), + ("total_time_us", processing_time.as_micros(), i64), ("frozen_banks", bank_forks.frozen_banks().len(), i64), ("slot", bank_forks.root(), i64), - ("forks", initial_forks.len(), i64) + ("forks", initial_forks.len(), i64), + ("calculate_capitalization_us", time_cap.as_us(), i64), + ("untar_us", timings.untar_us, i64), + ( + "rebuild_bank_from_snapshots_us", + timings.rebuild_bank_from_snapshots_us, + i64 + ), + ( + "verify_snapshot_bank_us", + timings.verify_snapshot_bank_us, + i64 + ), ); info!("ledger processing timing: {:?}", timing); info!( "ledger processed in {}. root slot is {}, {} fork{} at {}, with {} frozen bank{}", - HumanTime::from(chrono::Duration::from_std(now.elapsed()).unwrap()) + HumanTime::from(chrono::Duration::from_std(processing_time).unwrap()) .to_text_en(Accuracy::Precise, Tense::Present), bank_forks.root(), initial_forks.len(), @@ -555,13 +594,6 @@ fn do_process_blockstore_from_root( ); assert!(bank_forks.active_banks().is_empty()); - // We might be promptly restarted after bad capitalization was detected while creating newer snapshot. - // In that case, we're most likely restored from the last good snapshot and replayed up to this root. - // So again check here for the bad capitalization to avoid to continue until the next snapshot creation. - if !bank_forks.root_bank().calculate_and_verify_capitalization() { - return Err(BlockstoreProcessorError::RootBankWithMismatchedCapitalization(root)); - } - Ok((bank_forks, leader_schedule_cache)) } @@ -760,8 +792,11 @@ pub fn confirm_slot( }; let check_start = Instant::now(); - let check_result = - entries.verify_and_hash_transactions(skip_verification, bank.secp256k1_program_enabled()); + let check_result = entries.verify_and_hash_transactions( + skip_verification, + bank.libsecp256k1_0_5_upgrade_enabled(), + bank.verify_tx_signatures_len_enabled(), + ); if check_result.is_none() { warn!("Ledger proof of history failed at slot: {}", slot); return Err(BlockError::InvalidEntryHash.into()); @@ -867,9 +902,9 @@ fn process_next_slots( // handles any partials if next_meta.is_full() { let next_bank = Arc::new(Bank::new_from_parent( - &bank, + bank, &leader_schedule_cache - .slot_leader_at(*next_slot, Some(&bank)) + .slot_leader_at(*next_slot, Some(bank)) .unwrap(), *next_slot, )); @@ -996,13 +1031,13 @@ fn load_frozen_forks( if new_root_bank.slot() == *root { break; } // Found the last root in the chain, yay! 
                 assert!(new_root_bank.slot() > *root);

-                rooted_slots.push(new_root_bank.slot());
+                rooted_slots.push((new_root_bank.slot(), new_root_bank.hash()));
                 // As noted, the cluster confirmed root should be descended from
                 // our last root; therefore parent should be set
                 new_root_bank = new_root_bank.parent().unwrap();
             }
             inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len());
-            blockstore.set_roots(&rooted_slots).expect("Blockstore::set_roots should succeed");
+            blockstore.set_roots(rooted_slots.iter().map(|(slot, _hash)| slot)).expect("Blockstore::set_roots should succeed");
             Some(cluster_root_bank)
         } else {
             None
@@ -1019,7 +1054,7 @@ fn load_frozen_forks(
                     *root = new_root_bank.slot();
                     last_root = new_root_bank.slot();

-                    leader_schedule_cache.set_root(&new_root_bank);
+                    leader_schedule_cache.set_root(new_root_bank);
                     new_root_bank.squash();

                     if last_free.elapsed() > Duration::from_secs(10) {
@@ -1165,7 +1200,7 @@ pub struct TransactionStatusBatch {
     pub balances: TransactionBalancesSet,
     pub token_balances: TransactionTokenBalancesSet,
     pub inner_instructions: Option<Vec<Option<InnerInstructionsList>>>,
-    pub transaction_logs: Option<Vec<TransactionLogMessages>>,
+    pub transaction_logs: Option<Vec<Option<TransactionLogMessages>>>,
     pub rent_debits: Vec<RentDebits>,
 }

@@ -1184,7 +1219,7 @@ impl TransactionStatusSender {
         balances: TransactionBalancesSet,
         token_balances: TransactionTokenBalancesSet,
         inner_instructions: Vec<Option<InnerInstructionsList>>,
-        transaction_logs: Vec<TransactionLogMessages>,
+        transaction_logs: Vec<Option<TransactionLogMessages>>,
         rent_debits: Vec<RentDebits>,
     ) {
         let slot = bank.slot();
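For clarity (illustrative, not part of the patch): the per-transaction `Option` added to the log type lets one batch distinguish "logs captured" from "logs unavailable" per transaction. A consumer sketch, assuming `TransactionLogMessages` is `Vec<String>` as in solana-runtime:

// Sketch: None = logs were not captured for that transaction at execution
// time; Some(lines) = the captured log lines.
fn count_logged(transaction_logs: &Option<Vec<Option<Vec<String>>>>) -> usize {
    transaction_logs
        .as_ref()
        .map(|logs| logs.iter().filter(|log| log.is_some()).count())
        .unwrap_or(0)
}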
@@ -1456,6 +1491,7 @@ pub mod tests {

         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -1521,6 +1557,7 @@ pub mod tests {

         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -1538,6 +1575,7 @@ pub mod tests {
         */
         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
@@ -1603,10 +1641,11 @@ pub mod tests {
         info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
         info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

-        blockstore.set_roots(&[0, 1, 4]).unwrap();
+        blockstore.set_roots(vec![0, 1, 4].iter()).unwrap();

         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -1682,10 +1721,11 @@ pub mod tests {
         info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
         info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

-        blockstore.set_roots(&[0, 1]).unwrap();
+        blockstore.set_roots(vec![0, 1].iter()).unwrap();

         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -1891,15 +1931,18 @@ pub mod tests {
         }

         // Set a root on the last slot of the last confirmed epoch
-        let rooted_slots: Vec<_> = (0..=last_slot).collect();
-        blockstore.set_roots(&rooted_slots).unwrap();
+        let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
+        blockstore.set_roots(rooted_slots.iter()).unwrap();

         // Set a root on the next slot of the confirmed epoch
-        blockstore.set_roots(&[last_slot + 1]).unwrap();
+        blockstore
+            .set_roots(std::iter::once(&(last_slot + 1)))
+            .unwrap();

         // Check that we can properly restart the ledger / leader scheduler doesn't fail
         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -2044,6 +2087,7 @@ pub mod tests {
             .unwrap();
         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -2073,6 +2117,7 @@ pub mod tests {
         let blockstore = Blockstore::open(&ledger_path).unwrap();
         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -2091,6 +2136,7 @@ pub mod tests {
         let blockstore = Blockstore::open(&ledger_path).unwrap();
         let opts = ProcessOptions {
             override_num_threads: Some(1),
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
@@ -2107,6 +2153,7 @@ pub mod tests {
         let blockstore = Blockstore::open(&ledger_path).unwrap();
         let opts = ProcessOptions {
             full_leader_cache: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (_bank_forks, leader_schedule) =
@@ -2168,6 +2215,7 @@ pub mod tests {
         let opts = ProcessOptions {
             override_num_threads: Some(1),
             entry_callback: Some(entry_callback),
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
@@ -2815,12 +2863,13 @@ pub mod tests {
             genesis_config.ticks_per_slot,
             genesis_config.hash(),
         );
-        blockstore.set_roots(&[0, 1]).unwrap();
+        blockstore.set_roots(vec![0, 1].iter()).unwrap();

         // Specify halting at slot 0
         let opts = ProcessOptions {
             poh_verify: true,
             dev_halt_at_slot: Some(0),
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
@@ -2865,12 +2914,13 @@ pub mod tests {
             last_hash =
                 fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
         }
-        blockstore.set_roots(&[3, 5]).unwrap();
+        blockstore.set_roots(vec![3, 5].iter()).unwrap();

         // Set up bank1
         let bank0 = Arc::new(Bank::new(&genesis_config));
         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let recyclers = VerifyRecyclers::default();
@@ -2890,9 +2940,16 @@ pub mod tests {
         bank1.squash();

         // Test process_blockstore_from_root() from slot 1 onwards
-        let (bank_forks, _leader_schedule) =
-            do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None, None)
-                .unwrap();
+        let (bank_forks, _leader_schedule) = do_process_blockstore_from_root(
+            &blockstore,
+            bank1,
+            &opts,
+            &recyclers,
+            None,
+            None,
+            BankFromArchiveTimings::default(),
+        )
+        .unwrap();

         assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
         assert_eq!(bank_forks.working_bank().slot(), 6);
@@ -3057,13 +3114,15 @@ pub mod tests {
         account_paths: Vec<PathBuf>,
     ) -> EpochSchedule {
         let bank = Bank::new_with_paths(
-            &genesis_config,
+            genesis_config,
             account_paths,
             &[],
             None,
             None,
             AccountSecondaryIndexes::default(),
             false,
+            AccountShrinkThreshold::default(),
+            false,
         );
         *bank.epoch_schedule()
     }
@@ -3236,7 +3295,7 @@ pub mod tests {
         slot_leader_keypair: &Arc<Keypair>,
     ) {
         // Add votes to `last_slot` so that `root` will be confirmed
-        let vote_entry = next_entry(&parent_blockhash, 1, vec![vote_tx]);
+        let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
         let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
         entries.insert(0, vote_entry);
         blockstore
@@ -3247,7 +3306,7 @@ pub mod tests {
                 ticks_per_slot,
                 Some(parent_slot),
                 true,
-                &slot_leader_keypair,
+                slot_leader_keypair,
                 entries,
                 0,
             )
@@ -3311,11 +3370,14 @@ pub mod tests {
         blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash());

         if let Some(blockstore_root) = blockstore_root {
-            blockstore.set_roots(&[blockstore_root]).unwrap();
+            blockstore
+                .set_roots(std::iter::once(&blockstore_root))
+                .unwrap();
         }

         let opts = ProcessOptions {
             poh_verify: true,
+            accounts_db_test_hash_calculation: true,
             ..ProcessOptions::default()
         };
         let (bank_forks, _leader_schedule) =
diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs
index f918bb37bc63ee..f6dfb77e7c09ba 100644
--- a/ledger/src/entry.rs
+++ b/ledger/src/entry.rs
@@ -359,7 +359,8 @@ pub trait EntrySlice {
     fn verify_and_hash_transactions(
         &self,
         skip_verification: bool,
-        secp256k1_program_enabled: bool,
+        libsecp256k1_0_5_upgrade_enabled: bool,
+        verify_tx_signatures_len: bool,
     ) -> Option<Vec<HashedTransaction<'_>>>;
 }

@@ -514,7 +515,8 @@ impl EntrySlice for [Entry] {
     fn verify_and_hash_transactions<'a>(
         &'a self,
         skip_verification: bool,
-        secp256k1_program_enabled: bool,
+        libsecp256k1_0_5_upgrade_enabled: bool,
+        verify_tx_signatures_len: bool,
     ) -> Option<Vec<HashedTransaction<'a>>> {
         let verify_and_hash = |tx: &'a Transaction| -> Option<HashedTransaction<'a>> {
             let message_hash = if !skip_verification {
@@ -522,9 +524,10 @@ impl EntrySlice for [Entry] {
                 if size > PACKET_DATA_SIZE as u64 {
                     return None;
                 }
-                if secp256k1_program_enabled {
-                    // Verify tx precompiles if secp256k1 program is enabled.
-                    tx.verify_precompiles().ok()?;
+                tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled)
+                    .ok()?;
+                if verify_tx_signatures_len && !tx.verify_signatures_len() {
+                    return None;
                 }
                 tx.verify_and_hash_message().ok()?
             } else {
@@ -682,7 +685,7 @@ impl EntrySlice for [Entry] {
 }

 pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
-    let entry = Entry::new(&start, num_hashes, transactions);
+    let entry = Entry::new(start, num_hashes, transactions);
     *start = entry.hash;
     entry
 }
@@ -724,44 +727,20 @@ pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
-    fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
-        let pubkey = keypair.pubkey();
-        let budget_contract = Keypair::new();
-        let budget_pubkey = budget_contract.pubkey();
-        let ixs = budget_instruction::payment(&pubkey, &pubkey, &budget_pubkey, 1);
-        let message = Message::new(&ixs, Some(&pubkey));
-        Transaction::new(&[keypair, &budget_contract], message, hash)
-    }
-
-    fn create_sample_timestamp(keypair: &Keypair, hash: Hash) -> Transaction {
-        let pubkey = keypair.pubkey();
-        let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
-        let message = Message::new(&[ix], Some(&pubkey));
-        Transaction::new(&[keypair], message, hash)
-    }
-
-    fn create_sample_apply_signature(keypair: &Keypair, hash: Hash) -> Transaction {
-        let pubkey = keypair.pubkey();
-        let ix = budget_instruction::apply_signature(&pubkey, &pubkey, &pubkey);
-        let message = Message::new(&[ix], Some(&pubkey));
-        Transaction::new(&[keypair], message, hash)
-    }
-
     #[test]
     fn test_entry_verify() {
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
+        let one = hash(zero.as_ref());
         assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used
         assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
         assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
@@ -819,23 +798,6 @@ mod tests {
         assert!(e0.verify(&zero));
     }

-    #[test]
-    fn test_witness_reorder_attack() {
-        let zero = Hash::default();
-
-        // First, verify entries
-        let keypair =
Keypair::new(); - let tx0 = create_sample_timestamp(&keypair, zero); - let tx1 = create_sample_apply_signature(&keypair, zero); - let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]); - assert!(e0.verify(&zero)); - - // Next, swap two witness transactions and ensure verification fails. - e0.transactions[0] = tx1; // <-- attack - e0.transactions[1] = tx0; - assert!(!e0.verify(&zero)); - } - #[test] fn test_next_entry() { let zero = Hash::default(); @@ -848,7 +810,7 @@ mod tests { assert_eq!(tick.hash, zero); let keypair = Keypair::new(); - let tx0 = create_sample_timestamp(&keypair, zero); + let tx0 = system_transaction::transfer(&keypair, &Pubkey::new_unique(), 42, zero); let entry0 = next_entry(&zero, 1, vec![tx0.clone()]); assert_eq!(entry0.num_hashes, 1); assert_eq!(entry0.hash, next_hash(&zero, 1, &[tx0])); @@ -867,7 +829,7 @@ mod tests { fn test_verify_slice1() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); + let one = hash(zero.as_ref()); assert!(vec![][..].verify(&zero)); // base case assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad @@ -882,8 +844,8 @@ mod tests { fn test_verify_slice_with_hashes1() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); assert!(vec![][..].verify(&one)); // base case assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1 assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad @@ -902,11 +864,12 @@ mod tests { fn test_verify_slice_with_hashes_and_transactions() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); - let alice_pubkey = Keypair::new(); - let tx0 = create_sample_payment(&alice_pubkey, one); - let tx1 = create_sample_timestamp(&alice_pubkey, one); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); + let alice_keypair = Keypair::new(); + let bob_keypair = Keypair::new(); + let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one); + let tx1 = system_transaction::transfer(&bob_keypair, &alice_keypair.pubkey(), 1, one); assert!(vec![][..].verify(&one)); // base case assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one)); // singleton case 1 assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two)); // singleton case 2, bad @@ -925,23 +888,76 @@ mod tests { assert!(!bad_ticks.verify(&one)); // inductive step, bad } + #[test] + fn test_verify_and_hash_transactions_sig_len() { + let mut rng = rand::thread_rng(); + let recent_blockhash = hash_new_rand(&mut rng); + let from_keypair = Keypair::new(); + let to_keypair = Keypair::new(); + let from_pubkey = from_keypair.pubkey(); + let to_pubkey = to_keypair.pubkey(); + + enum TestCase { + AddSignature, + RemoveSignature, + } + + let make_transaction = |case: TestCase| { + let message = Message::new( + &[system_instruction::transfer(&from_pubkey, &to_pubkey, 1)], + Some(&from_pubkey), + ); + let mut tx = Transaction::new(&[&from_keypair], message, recent_blockhash); + assert_eq!(tx.message.header.num_required_signatures, 1); + match case { + TestCase::AddSignature => { + let signature = to_keypair.sign_message(&tx.message.serialize()); + tx.signatures.push(signature); + } + TestCase::RemoveSignature => { + 
tx.signatures.remove(0); + } + } + tx + }; + // No signatures. + { + let tx = make_transaction(TestCase::RemoveSignature); + let entries = vec![next_entry(&recent_blockhash, 1, vec![tx])]; + assert!(entries[..] + .verify_and_hash_transactions(false, false, false) + .is_some()); + assert!(entries[..] + .verify_and_hash_transactions(false, false, true) + .is_none()); + } + // Too many signatures. + { + let tx = make_transaction(TestCase::AddSignature); + let entries = vec![next_entry(&recent_blockhash, 1, vec![tx])]; + assert!(entries[..] + .verify_and_hash_transactions(false, false, false) + .is_some()); + assert!(entries[..] + .verify_and_hash_transactions(false, false, true) + .is_none()); + } + } + #[test] fn test_verify_and_hash_transactions_packet_data_size() { let mut rng = rand::thread_rng(); let recent_blockhash = hash_new_rand(&mut rng); let keypair = Keypair::new(); let pubkey = keypair.pubkey(); - let budget_contract = Keypair::new(); - let budget_pubkey = budget_contract.pubkey(); let make_transaction = |size| { let ixs: Vec<_> = std::iter::repeat_with(|| { - budget_instruction::payment(&pubkey, &pubkey, &budget_pubkey, 1) + system_instruction::transfer(&pubkey, &Pubkey::new_unique(), 1) }) .take(size) - .flat_map(|x| x.into_iter()) .collect(); let message = Message::new(&ixs[..], Some(&pubkey)); - Transaction::new(&[&keypair, &budget_contract], message, recent_blockhash) + Transaction::new(&[&keypair], message, recent_blockhash) }; // Small transaction. { @@ -949,27 +965,27 @@ mod tests { let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])]; assert!(bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64); assert!(entries[..] - .verify_and_hash_transactions(false, false) + .verify_and_hash_transactions(false, false, false) .is_some()); } // Big transaction. { - let tx = make_transaction(15); + let tx = make_transaction(25); let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])]; assert!(bincode::serialized_size(&tx).unwrap() > PACKET_DATA_SIZE as u64); assert!(entries[..] - .verify_and_hash_transactions(false, false) + .verify_and_hash_transactions(false, false, false) .is_none()); } // Assert that verify fails as soon as serialized // size exceeds packet data size. - for size in 1..20 { + for size in 1..30 { let tx = make_transaction(size); let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])]; assert_eq!( bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64, entries[..] - .verify_and_hash_transactions(false, false) + .verify_and_hash_transactions(false, false, false) .is_some(), ); } diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 47df87242824a4..ea21a79b736e7d 100644 --- a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -63,9 +63,9 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) { // Note: Use unstable sort, because we dedup right after to remove the equal elements. 
stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| { if r_stake == l_stake { - r_pubkey.cmp(&l_pubkey) + r_pubkey.cmp(l_pubkey) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }); diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index b7542b47df1b15..de49994ede66ee 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -5,6 +5,7 @@ extern crate solana_bpf_loader_program; pub mod bank_forks_utils; pub mod bigtable_upload; +pub mod bigtable_upload_service; pub mod block_error; #[macro_use] pub mod blockstore; diff --git a/ledger/src/next_slots_iterator.rs b/ledger/src/next_slots_iterator.rs index 945cdfab337201..f3c67a6cf0c643 100644 --- a/ledger/src/next_slots_iterator.rs +++ b/ledger/src/next_slots_iterator.rs @@ -43,7 +43,7 @@ mod tests { fn test_next_slots_iterator() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); - blockstore.set_roots(&[0]).unwrap(); + blockstore.set_roots(std::iter::once(&0)).unwrap(); let ticks_per_slot = 5; /* Build a blockstore in the ledger with the following fork structure: diff --git a/ledger/src/poh.rs b/ledger/src/poh.rs index 0ade8d7a75a444..23521c975900c4 100644 --- a/ledger/src/poh.rs +++ b/ledger/src/poh.rs @@ -63,7 +63,7 @@ impl Poh { let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); for _ in 0..num_hashes { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); } self.num_hashes += num_hashes; self.remaining_hashes -= num_hashes; @@ -77,7 +77,7 @@ impl Poh { return None; // Caller needs to `tick()` first } - self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]); + self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); let num_hashes = self.num_hashes + 1; self.num_hashes = 0; self.remaining_hashes -= 1; @@ -89,7 +89,7 @@ impl Poh { } pub fn tick(&mut self) -> Option { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); self.num_hashes += 1; self.remaining_hashes -= 1; @@ -115,7 +115,7 @@ pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 { let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { - v = hash(&v.as_ref()); + v = hash(v.as_ref()); } start.elapsed().as_nanos() as u64 } @@ -139,11 +139,11 @@ mod tests { assert_ne!(entry.num_hashes, 0); for _ in 1..entry.num_hashes { - current_hash = hash(¤t_hash.as_ref()); + current_hash = hash(current_hash.as_ref()); } current_hash = match mixin { - Some(mixin) => hashv(&[¤t_hash.as_ref(), &mixin.as_ref()]), - None => hash(¤t_hash.as_ref()), + Some(mixin) => hashv(&[current_hash.as_ref(), mixin.as_ref()]), + None => hash(current_hash.as_ref()), }; if current_hash != entry.hash { return false; @@ -192,9 +192,9 @@ mod tests { #[test] fn test_poh_verify() { let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); - let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); + let one_with_zero = hashv(&[zero.as_ref(), zero.as_ref()]); let mut poh = Poh::new(zero, None); assert!(verify( @@ -262,7 +262,7 @@ mod tests { ( PohEntry { num_hashes: 1, - hash: hash(&one_with_zero.as_ref()), + hash: hash(one_with_zero.as_ref()), }, None ) diff --git a/ledger/src/rooted_slot_iterator.rs b/ledger/src/rooted_slot_iterator.rs index 9bd3b77b73301a..cdc6f1c46efe8f 100644 --- a/ledger/src/rooted_slot_iterator.rs +++ b/ledger/src/rooted_slot_iterator.rs @@ -84,7 +84,7 @@ mod tests { fn 
    test_rooted_slot_iterator() {
        let blockstore_path = get_tmp_ledger_path!();
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
-        blockstore.set_roots(&[0]).unwrap();
+        blockstore.set_roots(std::iter::once(&0)).unwrap();
        let ticks_per_slot = 5;
        /*
            Build a blockstore in the ledger with the following fork structure:
@@ -131,7 +131,7 @@ mod tests {
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, fork_point, fork_hash);

        // Set a root
-        blockstore.set_roots(&[1, 2, 3]).unwrap();
+        blockstore.set_roots(vec![1, 2, 3].iter()).unwrap();

        // Trying to get an iterator on a different fork will error
        assert!(RootedSlotIterator::new(4, &blockstore).is_err());
@@ -196,11 +196,11 @@ mod tests {
        }

        // Set roots
-        blockstore.set_roots(&[0, 1, 2, 3]).unwrap();
+        blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap();

        // Create one post-skip slot at 10, simulating starting from a snapshot
        // at 10
-        blockstore.set_roots(&[10]).unwrap();
+        blockstore.set_roots(std::iter::once(&10)).unwrap();

        // Try to get an iterator from before the skip. The post-skip slot
        // should not return a SlotMeta
        let result: Vec<_> = RootedSlotIterator::new(3, &blockstore)
@@ -214,7 +214,7 @@ mod tests {
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 11, 10, Hash::default());

        // Set roots
-        blockstore.set_roots(&[11]).unwrap();
+        blockstore.set_roots(std::iter::once(&11)).unwrap();

        let result: Vec<_> = RootedSlotIterator::new(0, &blockstore)
            .unwrap()
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index f1d6ff92c9d5e4..8549562ebaed4a 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -840,7 +840,7 @@ impl Shredder {
        first_index: usize,
        slot: Slot,
    ) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
-        Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
+        Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?;

        let mut recovered_data = vec![];
        let fec_set_size = num_data + num_coding;
@@ -933,7 +933,7 @@ impl Shredder {
    pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
        use reed_solomon_erasure::Error::TooFewDataShards;
        const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
-        Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
+        Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?;
        let index = shreds.first().ok_or(TooFewDataShards)?.index();
        let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
        let data_complete = {
diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs
index 4f6511078a22ad..42ae66d83d97fa 100644
--- a/ledger/src/sigverify_shreds.rs
+++ b/ledger/src/sigverify_shreds.rs
@@ -312,7 +312,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) {
    );
    let signature = keypair.sign_message(&packet.data[msg_start..msg_end]);
    trace!("signature {:?}", signature);
-    packet.data[0..sig_end].copy_from_slice(&signature.as_ref());
+    packet.data[0..sig_end].copy_from_slice(signature.as_ref());
 }

 pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
@@ -364,7 +364,7 @@ pub fn sign_shreds_gpu(
    let mut elems = Vec::new();
    let offset: usize = pinned_keypair.len();
-    let num_keypair_packets = vec_size_in_packets(&pinned_keypair);
+    let num_keypair_packets = vec_size_in_packets(pinned_keypair);
    let mut num_packets = num_keypair_packets; //should be zero
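`verify_consistent_shred_payload_sizes` (defined elsewhere in shred.rs, outside this excerpt) is the guard both call sites above rely on. A minimal sketch of the property it is assumed to enforce, under the assumption it simply rejects mixed payload lengths:

// Sketch only: erasure coding requires equally sized shards, so a batch with
// mixed payload lengths must be rejected before recovery or deshredding.
fn payload_sizes_consistent(payloads: &[Vec<u8>]) -> bool {
    match payloads.first() {
        None => true,
        Some(first) => payloads.iter().all(|payload| payload.len() == first.len()),
    }
}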
diff --git a/ledger/src/staking_utils.rs b/ledger/src/staking_utils.rs
index 7b7c24bb968003..3c8df05475777c 100644
--- a/ledger/src/staking_utils.rs
+++ b/ledger/src/staking_utils.rs
@@ -74,13 +74,13 @@ pub(crate) mod tests {
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        signers::Signers,
+        stake::{
+            instruction as stake_instruction,
+            state::{Authorized, Delegation, Lockup, Stake},
+        },
        sysvar::stake_history::{self, StakeHistory},
        transaction::Transaction,
    };
-    use solana_stake_program::{
-        stake_instruction,
-        stake_state::{Authorized, Delegation, Lockup, Stake},
-    };
    use solana_vote_program::{
        vote_instruction,
        vote_state::{VoteInit, VoteState, VoteStateVersions},
@@ -201,10 +201,7 @@ pub(crate) mod tests {
        let result: Vec<_> = epoch_stakes_and_lockouts(&bank, first_leader_schedule_epoch);
        assert_eq!(
            result,
-            vec![(
-                leader_stake.stake(first_leader_schedule_epoch, None, true),
-                None
-            )]
+            vec![(leader_stake.stake(first_leader_schedule_epoch, None), None)]
        );

        // epoch stakes and lockouts are saved off for the future epoch, should
@@ -215,14 +212,8 @@ pub(crate) mod tests {
            from_account::<StakeHistory, _>(&bank.get_account(&stake_history::id()).unwrap())
                .unwrap();
        let mut expected = vec![
-            (
-                leader_stake.stake(bank.epoch(), Some(&stake_history), true),
-                None,
-            ),
-            (
-                other_stake.stake(bank.epoch(), Some(&stake_history), true),
-                None,
-            ),
+            (leader_stake.stake(bank.epoch(), Some(&stake_history)), None),
+            (other_stake.stake(bank.epoch(), Some(&stake_history)), None),
        ];

        expected.sort();
diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml
index c72fab22d1a308..0dc6fde2c20645 100644
--- a/local-cluster/Cargo.toml
+++ b/local-cluster/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-local-cluster"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.11"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,26 +12,28 @@ documentation = "https://docs.rs/solana-local-cluster"
 [dependencies]
 crossbeam-channel = "0.4"
 itertools = "0.9.0"
-gag = "0.1.10"
+gag = "1.0.0"
 fs_extra = "1.2.0"
 log = "0.4.11"
 rand = "0.7.0"
 rayon = "1.5.0"
-solana-config-program = { path = "../programs/config", version = "=1.7.0" }
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-download-utils = { path = "../download-utils", version = "=1.7.0" }
-solana-exchange-program = { path = "../programs/exchange", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-config-program = { path = "../programs/config", version = "=1.7.11" }
+solana-core = { path = "../core", version = "=1.7.11" }
+solana-client = { path = "../client", version = "=1.7.11" }
+solana-download-utils = { path = "../download-utils", version = "=1.7.11" }
+solana-exchange-program = { path = "../programs/exchange", version = "=1.7.11" }
+solana-faucet = { path = "../faucet", version = "=1.7.11" }
+solana-gossip = { path = "../gossip", version = "=1.7.11" }
+solana-ledger = { path = "../ledger", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.11" }
+solana-rpc = { path = "../rpc", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-stake-program = { path = "../programs/stake", version = "=1.7.11" }
+solana-streamer = { path = "../streamer", version = "=1.7.11" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.11" }
 tempfile = "3.1.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" }

 [dev-dependencies]
 assert_matches = "1.3.0"
diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs
index fff7d1df907ae7..8211041970cc47 100644
--- a/local-cluster/src/cluster.rs
+++ b/local-cluster/src/cluster.rs
@@ -4,6 +4,7 @@ use solana_core::validator::ValidatorConfig;
 use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::Keypair;
+use solana_streamer::socket::SocketAddrSpace;
 use std::path::PathBuf;
 use std::sync::Arc;

@@ -39,7 +40,12 @@ pub trait Cluster {
     fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
     fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>;
     fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo;
-    fn restart_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
+    fn restart_node(
+        &mut self,
+        pubkey: &Pubkey,
+        cluster_validator_info: ClusterValidatorInfo,
+        socket_addr_space: SocketAddrSpace,
+    );
     fn create_restart_context(
         &mut self,
         pubkey: &Pubkey,
@@ -48,7 +54,13 @@ pub trait Cluster {
     fn restart_node_with_context(
         cluster_validator_info: ClusterValidatorInfo,
         restart_context: (Node, Option<ContactInfo>),
+        socket_addr_space: SocketAddrSpace,
     ) -> ClusterValidatorInfo;
     fn add_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
-    fn exit_restart_node(&mut self, pubkey: &Pubkey, config: ValidatorConfig);
+    fn exit_restart_node(
+        &mut self,
+        pubkey: &Pubkey,
+        config: ValidatorConfig,
+        socket_addr_space: SocketAddrSpace,
+    );
 }
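Illustrative usage (a sketch, not from the patch) of the new `SocketAddrSpace` parameter threading through a restart, assuming some `cluster` implementing the `Cluster` trait above and a validator `pubkey`:

// Sketch: every (re)start path now states explicitly which address space is
// acceptable. Tests on localhost pass SocketAddrSpace::Unspecified; production
// gossip would use SocketAddrSpace::Global to reject private/loopback IPs.
fn bounce_validator<C: Cluster>(cluster: &mut C, pubkey: &Pubkey) {
    let info = cluster.exit_node(pubkey);
    cluster.restart_node(pubkey, info, SocketAddrSpace::Unspecified);
}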
diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs
index e174e4bd8194d4..3e053dd55111ba 100644
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -1,13 +1,12 @@
-use log::*;
 /// Cluster independent integration tests
 ///
 /// All tests must start from an entry point and a funding keypair and
 /// discover the rest of the network.
+use log::*;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
 use solana_client::thin_client::create_client;
 use solana_core::consensus::VOTE_THRESHOLD_DEPTH;
-use solana_core::validator::ValidatorExit;
 use solana_gossip::{
     cluster_info::VALIDATOR_PORT_RANGE, contact_info::ContactInfo, gossip_service::discover_cluster,
 };
@@ -20,6 +19,7 @@ use solana_sdk::{
     clock::{self, Slot, NUM_CONSECUTIVE_LEADER_SLOTS},
     commitment_config::CommitmentConfig,
     epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
+    exit::Exit,
     hash::Hash,
     poh_config::PohConfig,
     pubkey::Pubkey,
@@ -28,6 +28,7 @@ use solana_sdk::{
     timing::duration_as_ms,
     transport::TransportError,
 };
+use solana_streamer::socket::SocketAddrSpace;
 use std::{
     collections::{HashMap, HashSet},
     path::Path,
@@ -42,8 +43,10 @@ pub fn spend_and_verify_all_nodes(
     funding_keypair: &Keypair,
     nodes: usize,
     ignore_nodes: HashSet<Pubkey>,
+    socket_addr_space: SocketAddrSpace,
 ) {
-    let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
+    let cluster_nodes =
+        discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let ignore_nodes = Arc::new(ignore_nodes);
     cluster_nodes.par_iter().for_each(|ingress_node| {
@@ -63,10 +66,10 @@ pub fn spend_and_verify_all_nodes(
         .get_recent_blockhash_with_commitment(CommitmentConfig::confirmed())
         .unwrap();
     let mut transaction =
-        system_transaction::transfer(&funding_keypair, &random_keypair.pubkey(), 1, blockhash);
+        system_transaction::transfer(funding_keypair, &random_keypair.pubkey(), 1, blockhash);
     let confs = VOTE_THRESHOLD_DEPTH + 1;
     let sig = client
-        .retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 10, confs)
+        .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs)
         .unwrap();
     for validator in &cluster_nodes {
         if ignore_nodes.contains(&validator.id) {
@@ -114,14 +117,14 @@ pub fn send_many_transactions(
         let transfer_amount = thread_rng().gen_range(1, max_tokens_per_transfer);

         let mut transaction = system_transaction::transfer(
-            &funding_keypair,
+            funding_keypair,
             &random_keypair.pubkey(),
             transfer_amount,
             blockhash,
         );

         client
-            .retry_transfer(&funding_keypair, &mut transaction, 5)
+            .retry_transfer(funding_keypair, &mut transaction, 5)
             .unwrap();

         expected_balances.insert(random_keypair.pubkey(), transfer_amount);
@@ -178,13 +181,15 @@ pub fn sleep_n_epochs(

 pub fn kill_entry_and_spend_and_verify_rest(
     entry_point_info: &ContactInfo,
-    entry_point_validator_exit: &Arc<RwLock<ValidatorExit>>,
+    entry_point_validator_exit: &Arc<RwLock<Exit>>,
     funding_keypair: &Keypair,
     nodes: usize,
     slot_millis: u64,
+    socket_addr_space: SocketAddrSpace,
 ) {
     info!("kill_entry_and_spend_and_verify_rest...");
-    let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
+    let cluster_nodes =
+        discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE);
     // sleep long enough to make sure we are in epoch 3
@@ -236,7 +241,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
             .get_recent_blockhash_with_commitment(CommitmentConfig::processed())
             .unwrap();
         let mut transaction = system_transaction::transfer(
-            &funding_keypair,
+            funding_keypair,
             &random_keypair.pubkey(),
             1,
             blockhash,
@@ -245,7 +250,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
         let confs = VOTE_THRESHOLD_DEPTH + 1;
         let sig = {
             let sig = client.retry_transfer_until_confirmed(
-                &funding_keypair,
+                funding_keypair,
                 &mut transaction,
                 5,
confs, @@ -260,7 +265,7 @@ pub fn kill_entry_and_spend_and_verify_rest( } }; info!("poll_all_nodes_for_signature()"); - match poll_all_nodes_for_signature(&entry_point_info, &cluster_nodes, &sig, confs) { + match poll_all_nodes_for_signature(entry_point_info, &cluster_nodes, &sig, confs) { Err(e) => { info!("poll_all_nodes_for_signature() failed {:?}", e); result = Err(e); @@ -280,18 +285,23 @@ pub fn check_for_new_roots(num_new_roots: usize, contact_infos: &[ContactInfo], let mut last_print = Instant::now(); let loop_start = Instant::now(); let loop_timeout = Duration::from_secs(60); + let mut num_roots_map = HashMap::new(); while !done { assert!(loop_start.elapsed() < loop_timeout); + for (i, ingress_node) in contact_infos.iter().enumerate() { let client = create_client(ingress_node.client_facing_addr(), VALIDATOR_PORT_RANGE); - let slot = client.get_slot().unwrap_or(0); - roots[i].insert(slot); - let min_node = roots.iter().map(|r| r.len()).min().unwrap_or(0); - done = min_node >= num_new_roots; + let root_slot = client + .get_slot_with_commitment(CommitmentConfig::finalized()) + .unwrap_or(0); + roots[i].insert(root_slot); + num_roots_map.insert(ingress_node.id, roots[i].len()); + let num_roots = roots.iter().map(|r| r.len()).min().unwrap(); + done = num_roots >= num_new_roots; if done || last_print.elapsed().as_secs() > 3 { info!( - "{} {} min observed roots {}/16", - test_name, ingress_node.id, min_node + "{} waiting for {} new roots.. observed: {:?}", + test_name, num_new_roots, num_roots_map ); last_print = Instant::now(); } @@ -372,7 +382,7 @@ fn poll_all_nodes_for_signature( continue; } let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE); - client.poll_for_signature_confirmation(&sig, confs)?; + client.poll_for_signature_confirmation(sig, confs)?; } Ok(()) diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 080adae3473929..af80e7e51073e1 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -29,13 +29,15 @@ use solana_sdk::{ poh_config::PohConfig, pubkey::Pubkey, signature::{Keypair, Signer}, + stake::{ + config as stake_config, instruction as stake_instruction, + state::{Authorized, Lockup}, + }, system_transaction, transaction::Transaction, }; -use solana_stake_program::{ - config as stake_config, stake_instruction, - stake_state::{Authorized, Lockup, StakeState}, -}; +use solana_stake_program::{config::create_account as create_stake_config_account, stake_state}; +use solana_streamer::socket::SocketAddrSpace; use solana_vote_program::{ vote_instruction, vote_state::{VoteInit, VoteState}, @@ -107,6 +109,7 @@ impl LocalCluster { num_nodes: usize, cluster_lamports: u64, lamports_per_node: u64, + socket_addr_space: SocketAddrSpace, ) -> Self { let stakes: Vec<_> = (0..num_nodes).map(|_| lamports_per_node).collect(); let mut config = ClusterConfig { @@ -118,10 +121,10 @@ impl LocalCluster { ), ..ClusterConfig::default() }; - Self::new(&mut config) + Self::new(&mut config, socket_addr_space) } - pub fn new(config: &mut ClusterConfig) -> Self { + pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self { assert_eq!(config.validator_configs.len(), config.node_stakes.len()); let mut validator_keys = { if let Some(ref keys) = config.validator_keys { @@ -190,7 +193,7 @@ impl LocalCluster { // Replace staking config genesis_config.add_account( stake_config::id(), - stake_config::create_account( + create_stake_config_account( 1, &stake_config::Config { 
                    warmup_cooldown_rate: 1_000_000_000.0f64,
@@ -218,6 +221,7 @@ impl LocalCluster {
            &leader_config,
            true, // should_check_duplicate_instance
            Arc::new(RwLock::new(ValidatorStartProgress::default())),
+            socket_addr_space,
        );

        let mut validators = HashMap::new();
@@ -261,22 +265,35 @@ impl LocalCluster {
                *stake,
                key.clone(),
                node_pubkey_to_vote_key.get(&key.pubkey()).cloned(),
+                socket_addr_space,
            );
        }

        let mut listener_config = safe_clone_config(&config.validator_configs[0]);
        listener_config.voting_disabled = true;
        (0..config.num_listeners).for_each(|_| {
-            cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
+            cluster.add_validator(
+                &listener_config,
+                0,
+                Arc::new(Keypair::new()),
+                None,
+                socket_addr_space,
+            );
        });

        discover_cluster(
            &cluster.entry_point_info.gossip,
            config.node_stakes.len() + config.num_listeners as usize,
+            socket_addr_space,
        )
        .unwrap();

-        discover_cluster(&cluster.entry_point_info.gossip, config.node_stakes.len()).unwrap();
+        discover_cluster(
+            &cluster.entry_point_info.gossip,
+            config.node_stakes.len(),
+            socket_addr_space,
+        )
+        .unwrap();

        cluster
    }
@@ -304,6 +321,7 @@ impl LocalCluster {
        stake: u64,
        validator_keypair: Arc<Keypair>,
        mut voting_keypair: Option<Arc<Keypair>>,
+        socket_addr_space: SocketAddrSpace,
    ) -> Pubkey {
        let client = create_client(
            self.entry_point_info.client_facing_addr(),
@@ -360,6 +378,7 @@ impl LocalCluster {
            &config,
            true, // should_check_duplicate_instance
            Arc::new(RwLock::new(ValidatorStartProgress::default())),
+            socket_addr_space,
        );

        let validator_pubkey = validator_keypair.pubkey();
@@ -399,7 +418,12 @@ impl LocalCluster {
        Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
    }

-    pub fn check_for_new_roots(&self, num_new_roots: usize, test_name: &str) {
+    pub fn check_for_new_roots(
+        &self,
+        num_new_roots: usize,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+    ) {
        let alive_node_contact_infos: Vec<_> = self
            .validators
            .values()
@@ -410,6 +434,7 @@ impl LocalCluster {
        let cluster_nodes = discover_cluster(
            &alive_node_contact_infos[0].gossip,
            alive_node_contact_infos.len(),
+            socket_addr_space,
        )
        .unwrap();
        info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@@ -418,7 +443,12 @@ impl LocalCluster {
        info!("{} done waiting for roots", test_name);
    }

-    pub fn check_no_new_roots(&self, num_slots_to_wait: usize, test_name: &str) {
+    pub fn check_no_new_roots(
+        &self,
+        num_slots_to_wait: usize,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+    ) {
        let alive_node_contact_infos: Vec<_> = self
            .validators
            .values()
@@ -429,6 +459,7 @@ impl LocalCluster {
        let cluster_nodes = discover_cluster(
            &alive_node_contact_infos[0].gossip,
            alive_node_contact_infos.len(),
+            socket_addr_space,
        )
        .unwrap();
        info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@@ -447,8 +478,7 @@ impl LocalCluster {
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::processed())
            .unwrap();
-        let mut tx =
-            system_transaction::transfer(&source_keypair, dest_pubkey, lamports, blockhash);
+        let mut tx = system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash);
        info!(
            "executing transfer of {} from {} to {}",
            lamports,
@@ -456,7 +486,7 @@ impl LocalCluster {
            *dest_pubkey
        );
        client
-            .retry_transfer(&source_keypair, &mut tx, 10)
+            .retry_transfer(source_keypair, &mut tx, 10)
            .expect("client transfer");
        client
            .wait_for_balance_with_commitment(
@@ -511,7 +541,7 @@ impl LocalCluster {
                .0,
        );
        client
-            .retry_transfer(&from_account, &mut transaction, 10)
+            .retry_transfer(from_account, &mut transaction, 10)
            .expect("fund vote");
        client
            .wait_for_balance_with_commitment(
@@ -568,7 +598,7 @@ impl LocalCluster {
        ) {
            (Ok(Some(stake_account)), Ok(Some(vote_account))) => {
                match (
-                    StakeState::stake_from(&stake_account),
+                    stake_state::stake_from(&stake_account),
                    VoteState::from(&vote_account),
                ) {
                    (Some(stake_state), Some(vote_state)) => {
@@ -615,7 +645,7 @@ impl Cluster for LocalCluster {
    }

    fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo {
-        let mut node = self.validators.remove(&pubkey).unwrap();
+        let mut node = self.validators.remove(pubkey).unwrap();

        // Shut down the validator
        let mut validator = node.validator.take().expect("Validator must be running");
@@ -630,7 +660,7 @@ impl Cluster for LocalCluster {
        cluster_validator_info: &mut ClusterValidatorInfo,
    ) -> (Node, Option<ContactInfo>) {
        // Update the stored ContactInfo for this node
-        let node = Node::new_localhost_with_pubkey(&pubkey);
+        let node = Node::new_localhost_with_pubkey(pubkey);
        cluster_validator_info.info.contact_info = node.info.clone();
        cluster_validator_info.config.rpc_addrs = Some((node.info.rpc, node.info.rpc_pubsub));
@@ -646,10 +676,18 @@ impl Cluster for LocalCluster {
        (node, entry_point_info)
    }

-    fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
+    fn restart_node(
+        &mut self,
+        pubkey: &Pubkey,
+        mut cluster_validator_info: ClusterValidatorInfo,
+        socket_addr_space: SocketAddrSpace,
+    ) {
        let restart_context = self.create_restart_context(pubkey, &mut cluster_validator_info);
-        let cluster_validator_info =
-            Self::restart_node_with_context(cluster_validator_info, restart_context);
+        let cluster_validator_info = Self::restart_node_with_context(
+            cluster_validator_info,
+            restart_context,
+            socket_addr_space,
+        );
        self.add_node(pubkey, cluster_validator_info);
    }

@@ -660,6 +698,7 @@ impl Cluster for LocalCluster {
    fn restart_node_with_context(
        mut cluster_validator_info: ClusterValidatorInfo,
        (node, entry_point_info): (Node, Option<ContactInfo>),
+        socket_addr_space: SocketAddrSpace,
    ) -> ClusterValidatorInfo {
        // Restart the node
        let validator_info = &cluster_validator_info.info;
@@ -677,15 +716,21 @@ impl Cluster for LocalCluster {
            &safe_clone_config(&cluster_validator_info.config),
            true, // should_check_duplicate_instance
            Arc::new(RwLock::new(ValidatorStartProgress::default())),
+            socket_addr_space,
        );
        cluster_validator_info.validator = Some(restarted_node);
        cluster_validator_info
    }

-    fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
+    fn exit_restart_node(
+        &mut self,
+        pubkey: &Pubkey,
+        validator_config: ValidatorConfig,
+        socket_addr_space: SocketAddrSpace,
+    ) {
        let mut cluster_validator_info = self.exit_node(pubkey);
        cluster_validator_info.config = validator_config;
-        self.restart_node(pubkey, cluster_validator_info);
+        self.restart_node(pubkey, cluster_validator_info, socket_addr_space);
    }

    fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
@@ -708,7 +753,8 @@ mod test {
    fn test_local_cluster_start_and_exit() {
        solana_logger::setup();
        let num_nodes = 1;
-        let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
+        let cluster =
+            LocalCluster::new_with_equal_stakes(num_nodes, 100, 3, SocketAddrSpace::Unspecified);
        assert_eq!(cluster.validators.len(), num_nodes);
    }

@@ -728,7 +774,7 @@ mod test {
            stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH as u64,
            ..ClusterConfig::default()
        };
-        let cluster =
LocalCluster::new(&mut config); + let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); assert_eq!(cluster.validators.len(), NUM_NODES); } } diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 782e4d923f7aed..9f804de1fda498 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -1,4 +1,5 @@ -use solana_core::validator::{ValidatorConfig, ValidatorExit}; +use solana_core::validator::ValidatorConfig; +use solana_sdk::exit::Exit; use std::sync::{Arc, RwLock}; pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { @@ -35,7 +36,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { max_genesis_archive_unpacked_size: config.max_genesis_archive_unpacked_size, wal_recovery_mode: config.wal_recovery_mode.clone(), poh_verify: config.poh_verify, - cuda: config.cuda, require_tower: config.require_tower, tower_path: config.tower_path.clone(), debug_keys: config.debug_keys.clone(), @@ -50,11 +50,13 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { accounts_db_caching_enabled: config.accounts_db_caching_enabled, warp_slot: config.warp_slot, accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation, + accounts_db_skip_shrink: config.accounts_db_skip_shrink, accounts_db_use_index_hash_calculation: config.accounts_db_use_index_hash_calculation, tpu_coalesce_ms: config.tpu_coalesce_ms, - validator_exit: Arc::new(RwLock::new(ValidatorExit::default())), + validator_exit: Arc::new(RwLock::new(Exit::default())), poh_hashes_per_batch: config.poh_hashes_per_batch, no_wait_for_vote_to_start_leader: config.no_wait_for_vote_to_start_leader, + accounts_shrink_ratio: config.accounts_shrink_ratio, } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 7011e1230dd44e..4e1e34eae55d6c 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -12,7 +12,7 @@ use solana_client::{ thin_client::{create_client, ThinClient}, }; use solana_core::{ - broadcast_stage::BroadcastStageType, + broadcast_stage::{BroadcastDuplicatesConfig, BroadcastStageType}, consensus::{Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH}, optimistic_confirmation_verifier::OptimisticConfirmationVerifier, validator::ValidatorConfig, @@ -55,6 +55,7 @@ use solana_sdk::{ timing::timestamp, transaction::Transaction, }; +use solana_streamer::socket::SocketAddrSpace; use solana_vote_program::{ vote_instruction, vote_state::{Vote, MAX_LOCKOUT_HISTORY}, @@ -92,7 +93,7 @@ fn test_ledger_cleanup_service() { validator_configs: make_identical_validator_configs(&validator_config, num_nodes), ..ClusterConfig::default() }; - let mut cluster = LocalCluster::new(&mut config); + let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); // 200ms/per * 100 = 20 seconds, so sleep a little longer than that. 
diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index 7011e1230dd44e..4e1e34eae55d6c 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -12,7 +12,7 @@ use solana_client::{
     thin_client::{create_client, ThinClient},
 };
 use solana_core::{
-    broadcast_stage::BroadcastStageType,
+    broadcast_stage::{BroadcastDuplicatesConfig, BroadcastStageType},
     consensus::{Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH},
     optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
     validator::ValidatorConfig,
@@ -55,6 +55,7 @@ use solana_sdk::{
     timing::timestamp,
     transaction::Transaction,
 };
+use solana_streamer::socket::SocketAddrSpace;
 use solana_vote_program::{
     vote_instruction,
     vote_state::{Vote, MAX_LOCKOUT_HISTORY},
@@ -92,7 +93,7 @@ fn test_ledger_cleanup_service() {
         validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
     // 200ms/per * 100 = 20 seconds, so sleep a little longer than that.
     sleep(Duration::from_secs(60));
@@ -101,6 +102,7 @@ fn test_ledger_cleanup_service() {
         &cluster.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
     cluster.close_preserve_ledgers();
     //check everyone's ledgers and make sure only ~100 slots are stored
@@ -122,12 +124,14 @@ fn test_spend_and_verify_all_nodes_1() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     error!("test_spend_and_verify_all_nodes_1");
     let num_nodes = 1;
-    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    let local =
+        LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -137,12 +141,14 @@ fn test_spend_and_verify_all_nodes_2() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     error!("test_spend_and_verify_all_nodes_2");
     let num_nodes = 2;
-    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    let local =
+        LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -152,12 +158,14 @@ fn test_spend_and_verify_all_nodes_3() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     error!("test_spend_and_verify_all_nodes_3");
     let num_nodes = 3;
-    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    let local =
+        LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -166,7 +174,8 @@ fn test_spend_and_verify_all_nodes_3() {
 fn test_local_cluster_signature_subscribe() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     let num_nodes = 2;
-    let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    let cluster =
+        LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
     let nodes = cluster.get_node_pubkeys();

     // Get non leader
@@ -243,12 +252,14 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() {
         .expect("please set environment variable NUM_NODES")
         .parse()
         .expect("could not parse NUM_NODES as a number");
-    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    let local =
+        LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -266,7 +277,7 @@ fn test_leader_failure_4() {
         validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
         ..ClusterConfig::default()
     };
-    let local = LocalCluster::new(&mut config);
+    let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     cluster_tests::kill_entry_and_spend_and_verify_rest(
         &local.entry_point_info,
@@ -279,6 +290,7 @@ fn test_leader_failure_4() {
         &local.funding_keypair,
         num_nodes,
         config.ticks_per_slot * config.poh_config.target_tick_duration.as_millis() as u64,
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -368,7 +380,7 @@ fn run_cluster_partition(
         "PARTITION_TEST starting cluster with {:?} partitions slots_per_epoch: {}",
         partitions, config.slots_per_epoch,
     );
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     info!("PARTITION_TEST spend_and_verify_all_nodes(), ensure all nodes are caught up");
     cluster_tests::spend_and_verify_all_nodes(
@@ -376,9 +388,15 @@ fn run_cluster_partition(
         &cluster.funding_keypair,
         num_nodes,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );

-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        num_nodes,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();

     // Check epochs have correct number of slots
     info!("PARTITION_TEST sleeping until partition starting condition",);
@@ -425,7 +443,7 @@ fn run_cluster_partition(
 fn test_cluster_partition_1_2() {
     let empty = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
     run_cluster_partition(
         &[vec![1], vec![1, 1]],
@@ -445,7 +463,7 @@ fn test_cluster_partition_1_2() {
 fn test_cluster_partition_1_1() {
     let empty = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
     run_cluster_partition(
         &[vec![1], vec![1]],
@@ -465,7 +483,7 @@ fn test_cluster_partition_1_1() {
 fn test_cluster_partition_1_1_1() {
     let empty = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
     run_cluster_partition(
         &[vec![1], vec![1], vec![1]],
@@ -525,7 +543,7 @@ fn test_kill_heaviest_partition() {
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
         info!("Killing validator with id: {}", validator_to_kill);
         cluster.exit_node(&validator_to_kill);
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
     run_cluster_partition(
         &partitions,
@@ -594,7 +612,7 @@ fn run_kill_partition_switch_threshold(
         .iter()
         .map(|validator_to_kill| {
             info!("Killing validator with id: {}", validator_to_kill);
-            cluster.exit_node(&validator_to_kill)
+            cluster.exit_node(validator_to_kill)
         })
         .collect();
     on_partition_start(
@@ -622,7 +640,7 @@ fn find_latest_replayed_slot_from_ledger(
     mut latest_slot: Slot,
 ) -> (Slot, HashSet<Slot>) {
     loop {
-        let mut blockstore = open_blockstore(&ledger_path);
+        let mut blockstore = open_blockstore(ledger_path);
         // This is kind of a hack because we can't query for new frozen blocks over RPC
         // since the validator is not voting.
         let new_latest_slots: Vec<Slot> = blockstore
@@ -644,7 +662,7 @@ fn find_latest_replayed_slot_from_ledger(
                 break;
             } else {
                 sleep(Duration::from_millis(50));
-                blockstore = open_blockstore(&ledger_path);
+                blockstore = open_blockstore(ledger_path);
             }
         }
         // Check the slot has been replayed
@@ -666,7 +684,7 @@ fn find_latest_replayed_slot_from_ledger(
                 );
             } else {
                 sleep(Duration::from_millis(50));
-                blockstore = open_blockstore(&ledger_path);
+                blockstore = open_blockstore(ledger_path);
             }
         }
     } else {
@@ -870,7 +888,7 @@ fn test_switch_threshold_uses_gossip_votes() {
                     0,
                     crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()),
                 ),
-                &node_keypair,
+                node_keypair,
             )],
             context
                 .dead_validator_info
@@ -880,6 +898,7 @@ fn test_switch_threshold_uses_gossip_votes() {
                .as_ref()
                .unwrap()
                .keypair
                .pubkey(),
             heavier_node_gossip,
+            &SocketAddrSpace::Unspecified,
         )
         .unwrap();

@@ -962,7 +981,7 @@ fn test_kill_partition_switch_threshold_no_progress() {
         |_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
     let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_no_new_roots(400, &"PARTITION_TEST");
+        cluster.check_no_new_roots(400, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };

     // This kills `max_failures_stake`, so no progress should be made
@@ -1015,7 +1034,7 @@ fn test_kill_partition_switch_threshold_progress() {
         |_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
     let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
     run_kill_partition_switch_threshold(
         &[&[(failures_stake as usize, 16)]],
@@ -1203,6 +1222,7 @@ fn test_fork_choice_refresh_old_votes() {
             cluster.restart_node(
                 &context.smallest_validator_key,
                 context.alive_stake3_info.take().unwrap(),
+                SocketAddrSpace::Unspecified,
             );

             loop {
@@ -1246,7 +1266,7 @@ fn test_fork_choice_refresh_old_votes() {
     // for lockouts built during partition to resolve and gives validators an opportunity
     // to try and switch forks)
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| {
-        cluster.check_for_new_roots(16, &"PARTITION_TEST");
+        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };

     run_kill_partition_switch_threshold(
@@ -1273,16 +1293,19 @@ fn test_two_unbalanced_stakes() {
     let num_ticks_per_slot = 10;
     let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;

-    let mut cluster = LocalCluster::new(&mut ClusterConfig {
-        node_stakes: vec![999_990, 3],
-        cluster_lamports: 1_000_000,
-        validator_configs: make_identical_validator_configs(&validator_config, 2),
-        ticks_per_slot: num_ticks_per_slot,
-        slots_per_epoch: num_slots_per_epoch,
-        stakers_slot_offset: num_slots_per_epoch,
-        poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
-        ..ClusterConfig::default()
-    });
+    let mut cluster = LocalCluster::new(
+        &mut ClusterConfig {
+            node_stakes: vec![999_990, 3],
+            cluster_lamports: 1_000_000,
+            validator_configs: make_identical_validator_configs(&validator_config, 2),
+            ticks_per_slot: num_ticks_per_slot,
+            slots_per_epoch: num_slots_per_epoch,
+            stakers_slot_offset: num_slots_per_epoch,
+            poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
+            ..ClusterConfig::default()
+        },
+        SocketAddrSpace::Unspecified,
+    );
     cluster_tests::sleep_n_epochs(
         10.0,
@@ -1307,9 +1330,14 @@ fn test_forwarding() {
         validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 2),
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut config);
+    let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        2,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();
     assert!(cluster_nodes.len() >= 2);

     let leader_pubkey = cluster.entry_point_info.id;
@@ -1320,7 +1348,7 @@ fn test_forwarding() {
         .unwrap();

     // Confirm that transactions were forwarded to and processed by the leader.
-    cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 10, 20);
+    cluster_tests::send_many_transactions(validator_info, &cluster.funding_keypair, 10, 20);
 }

 #[test]
@@ -1331,15 +1359,18 @@ fn test_restart_node() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2;
     let ticks_per_slot = 16;
     let validator_config = ValidatorConfig::default();
-    let mut cluster = LocalCluster::new(&mut ClusterConfig {
-        node_stakes: vec![100; 1],
-        cluster_lamports: 100,
-        validator_configs: vec![safe_clone_config(&validator_config)],
-        ticks_per_slot,
-        slots_per_epoch,
-        stakers_slot_offset: slots_per_epoch,
-        ..ClusterConfig::default()
-    });
+    let mut cluster = LocalCluster::new(
+        &mut ClusterConfig {
+            node_stakes: vec![100; 1],
+            cluster_lamports: 100,
+            validator_configs: vec![safe_clone_config(&validator_config)],
+            ticks_per_slot,
+            slots_per_epoch,
+            stakers_slot_offset: slots_per_epoch,
+            ..ClusterConfig::default()
+        },
+        SocketAddrSpace::Unspecified,
+    );
     let nodes = cluster.get_node_pubkeys();
     cluster_tests::sleep_n_epochs(
         1.0,
@@ -1347,7 +1378,7 @@ fn test_restart_node() {
         clock::DEFAULT_TICKS_PER_SLOT,
         slots_per_epoch,
     );
-    cluster.exit_restart_node(&nodes[0], validator_config);
+    cluster.exit_restart_node(&nodes[0], validator_config, SocketAddrSpace::Unspecified);
     cluster_tests::sleep_n_epochs(
         0.5,
         &cluster.genesis_config.poh_config,
@@ -1372,8 +1403,13 @@ fn test_listener_startup() {
         validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut config);
-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
+    let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        4,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();
     assert_eq!(cluster_nodes.len(), 4);
 }

@@ -1389,8 +1425,13 @@ fn test_mainnet_beta_cluster_type() {
         validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut config);
-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        1,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();
     assert_eq!(cluster_nodes.len(), 1);

     let client = create_client(
@@ -1402,7 +1443,7 @@ fn test_mainnet_beta_cluster_type() {
     for program_id in [
         &solana_config_program::id(),
         &solana_sdk::system_program::id(),
-        &solana_stake_program::id(),
+        &solana_sdk::stake::program::id(),
         &solana_vote_program::id(),
         &solana_sdk::bpf_loader_deprecated::id(),
         &solana_sdk::bpf_loader::id(),
@@ -1497,7 +1538,10 @@ fn test_frozen_account_from_genesis() {
         }],
         ..ClusterConfig::default()
     };
-    generate_frozen_account_panic(LocalCluster::new(&mut config), validator_identity);
+    generate_frozen_account_panic(
+        LocalCluster::new(&mut config, SocketAddrSpace::Unspecified),
+        validator_identity,
+    );
 }

 #[test]
@@ -1521,7 +1565,7 @@ fn test_frozen_account_from_snapshot() {
         ),
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     let snapshot_package_output_path = &snapshot_test_config
         .validator_config
@@ -1532,13 +1576,17 @@ fn test_frozen_account_from_snapshot() {
     trace!("Waiting for snapshot at {:?}", snapshot_package_output_path);
     let (archive_filename, _archive_snapshot_hash) =
-        wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
+        wait_for_next_snapshot(&cluster, snapshot_package_output_path);
     trace!("Found snapshot: {:?}", archive_filename);

     // Restart the validator from a snapshot
     let validator_info = cluster.exit_node(&validator_identity.pubkey());
-    cluster.restart_node(&validator_identity.pubkey(), validator_info);
+    cluster.restart_node(
+        &validator_identity.pubkey(),
+        validator_info,
+        SocketAddrSpace::Unspecified,
+    );

     generate_frozen_account_panic(cluster, validator_identity);
 }
@@ -1565,10 +1613,15 @@ fn test_consistency_halt() {
         ..ClusterConfig::default()
     };

-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     sleep(Duration::from_millis(5000));
-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        1,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();
     info!("num_nodes: {}", cluster_nodes.len());

     // Add a validator with the leader as trusted, it should halt when it detects
@@ -1592,19 +1645,28 @@ fn test_consistency_halt() {
         validator_stake as u64,
         Arc::new(Keypair::new()),
         None,
+        SocketAddrSpace::Unspecified,
     );
     let num_nodes = 2;
     assert_eq!(
-        discover_cluster(&cluster.entry_point_info.gossip, num_nodes)
-            .unwrap()
-            .len(),
+        discover_cluster(
+            &cluster.entry_point_info.gossip,
+            num_nodes,
+            SocketAddrSpace::Unspecified
+        )
+        .unwrap()
+        .len(),
         num_nodes
     );

     // Check for only 1 node on the network.
     let mut encountered_error = false;
     loop {
-        let discover = discover_cluster(&cluster.entry_point_info.gossip, 2);
+        let discover = discover_cluster(
+            &cluster.entry_point_info.gossip,
+            2,
+            SocketAddrSpace::Unspecified,
+        );
         match discover {
             Err(_) => {
                 encountered_error = true;
@@ -1656,7 +1718,7 @@ fn test_snapshot_download() {
         ..ClusterConfig::default()
     };

-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     // Get slot after which this was generated
     let snapshot_package_output_path = &leader_snapshot_test_config
@@ -1668,7 +1730,7 @@ fn test_snapshot_download() {

     trace!("Waiting for snapshot");
     let (archive_filename, archive_snapshot_hash) =
-        wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
+        wait_for_next_snapshot(&cluster, snapshot_package_output_path);
     trace!("found: {:?}", archive_filename);

     let validator_archive_path = snapshot_utils::get_snapshot_archive_path(
@@ -1696,6 +1758,7 @@ fn test_snapshot_download() {
         stake,
         Arc::new(Keypair::new()),
         None,
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -1723,7 +1786,7 @@ fn test_snapshot_restart_tower() {
         ..ClusterConfig::default()
     };

-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     // Let the nodes run for a while, then stop one of the validators
     sleep(Duration::from_millis(5000));
@@ -1743,7 +1806,7 @@ fn test_snapshot_restart_tower() {
         .snapshot_package_output_path;

     let (archive_filename, archive_snapshot_hash) =
-        wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
+        wait_for_next_snapshot(&cluster, snapshot_package_output_path);

     // Copy archive to validator's snapshot output directory
     let validator_archive_path = snapshot_utils::get_snapshot_archive_path(
@@ -1758,17 +1821,18 @@ fn test_snapshot_restart_tower() {

     // Restart validator from snapshot, the validator's tower state in this snapshot
     // will contain slots < the root bank of the snapshot. Validator should not panic.
-    cluster.restart_node(&validator_id, validator_info);
+    cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);

     // Test cluster can still make progress and get confirmations in tower
     // Use the restarted node as the discovery point so that we get updated
     // validator's ContactInfo
     let restarted_node_info = cluster.get_contact_info(&validator_id).unwrap();
     cluster_tests::spend_and_verify_all_nodes(
-        &restarted_node_info,
+        restarted_node_info,
         &cluster.funding_keypair,
         1,
         HashSet::new(),
+        SocketAddrSpace::Unspecified,
     );
 }

@@ -1802,7 +1866,7 @@ fn test_snapshots_blockstore_floor() {
         ..ClusterConfig::default()
     };

-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     trace!("Waiting for snapshot tar to be generated with slot",);

@@ -1831,7 +1895,12 @@ fn test_snapshots_blockstore_floor() {

     // Start up a new node from a snapshot
     let validator_stake = 5;

-    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster_nodes = discover_cluster(
+        &cluster.entry_point_info.gossip,
+        1,
+        SocketAddrSpace::Unspecified,
+    )
+    .unwrap();
     let mut trusted_validators = HashSet::new();
     trusted_validators.insert(cluster_nodes[0].id);
     validator_snapshot_test_config
@@ -1843,6 +1912,7 @@ fn test_snapshots_blockstore_floor() {
         validator_stake,
         Arc::new(Keypair::new()),
         None,
+        SocketAddrSpace::Unspecified,
     );
     let all_pubkeys = cluster.get_node_pubkeys();
     let validator_id = all_pubkeys
@@ -1911,7 +1981,7 @@ fn test_snapshots_restart_validity() {
     // Create and reboot the node from snapshot `num_runs` times
     let num_runs = 3;
     let mut expected_balances = HashMap::new();
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
     for i in 1..num_runs {
         info!("run {}", i);
         // Push transactions to one of the nodes and confirm that transactions were
@@ -1926,7 +1996,7 @@ fn test_snapshots_restart_validity() {

         expected_balances.extend(new_balances);

-        wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
+        wait_for_next_snapshot(&cluster, snapshot_package_output_path);

         // Create new account paths since validator exit is not guaranteed to cleanup RPC threads,
         // which may delete the old accounts on exit at any point
@@ -1941,6 +2011,7 @@ fn test_snapshots_restart_validity() {
         cluster.exit_restart_node(
             &nodes[0],
             safe_clone_config(&snapshot_test_config.validator_config),
+            SocketAddrSpace::Unspecified,
         );

         // Verify account balances on validator
@@ -1954,6 +2025,7 @@ fn test_snapshots_restart_validity() {
             &cluster.funding_keypair,
             1,
             HashSet::new(),
+            SocketAddrSpace::Unspecified,
         );
     }
 }
@@ -1967,39 +2039,59 @@ fn test_fail_entry_verification_leader() {
 }

 #[test]
-#[allow(unused_attributes)]
+#[serial]
 #[ignore]
+#[allow(unused_attributes)]
 fn test_fake_shreds_broadcast_leader() {
     test_faulty_node(BroadcastStageType::BroadcastFakeShreds);
 }

+#[test]
+#[serial]
+#[ignore]
+#[allow(unused_attributes)]
+fn test_duplicate_shreds_broadcast_leader() {
+    test_faulty_node(BroadcastStageType::BroadcastDuplicates(
+        BroadcastDuplicatesConfig {
+            stake_partition: 50,
+            duplicate_send_delay: 1,
+        },
+    ));
+}
+
 fn test_faulty_node(faulty_node_type: BroadcastStageType) {
-    solana_logger::setup_with_default(RUST_LOG_FILTER);
-    let num_nodes = 2;
+    solana_logger::setup_with_default("solana_local_cluster=info");
+    let num_nodes = 3;
+
     let error_validator_config = ValidatorConfig {
         broadcast_stage_type: faulty_node_type,
         ..ValidatorConfig::default()
     };
-    let mut validator_configs = Vec::with_capacity(num_nodes - 1);
+    let mut validator_configs = Vec::with_capacity(num_nodes);
     validator_configs.resize_with(num_nodes - 1, ValidatorConfig::default);
+    validator_configs.push(error_validator_config);
+
+    let mut validator_keys = Vec::with_capacity(num_nodes);
+    validator_keys.resize_with(num_nodes, || (Arc::new(Keypair::new()), true));

-    // Push a faulty_bootstrap = vec![error_validator_config];
-    validator_configs.insert(0, error_validator_config);
-    let node_stakes = vec![300, 100];
+    let node_stakes = vec![60, 50, 60];
     assert_eq!(node_stakes.len(), num_nodes);
+    assert_eq!(validator_keys.len(), num_nodes);
+
     let mut cluster_config = ClusterConfig {
         cluster_lamports: 10_000,
         node_stakes,
         validator_configs,
-        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2,
-        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2,
+        validator_keys: Some(validator_keys),
+        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2u64,
+        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2u64,
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut cluster_config);
+    let cluster = LocalCluster::new(&mut cluster_config, SocketAddrSpace::Unspecified);

     // Check for new roots
-    cluster.check_for_new_roots(16, &"test_faulty_node");
+    cluster.check_for_new_roots(16, "test_faulty_node", SocketAddrSpace::Unspecified);
 }

 #[test]
@@ -2012,7 +2104,7 @@ fn test_wait_for_max_stake() {
         validator_configs: make_identical_validator_configs(&validator_config, 4),
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut config);
+    let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
     let client = RpcClient::new_socket(cluster.entry_point_info.rpc);

     assert!(client
@@ -2036,7 +2128,7 @@ fn test_no_voting() {
         validator_configs: vec![validator_config],
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
     let client = cluster
         .get_validator_client(&cluster.entry_point_info.id)
         .unwrap();
@@ -2090,7 +2182,7 @@ fn test_optimistic_confirmation_violation_detection() {
         skip_warmup_slots: true,
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
     let entry_point_id = cluster.entry_point_info.id;
     // Let the nodes run for a while. Wait for validators to vote on slot `S`
     // so that the vote on `S-1` is definitely in gossip and optimistic confirmation is
@@ -2141,7 +2233,11 @@ fn test_optimistic_confirmation_violation_detection() {
     let buf = std::env::var("OPTIMISTIC_CONF_TEST_DUMP_LOG")
         .err()
         .map(|_| BufferRedirect::stderr().unwrap());
-    cluster.restart_node(&entry_point_id, exited_validator_info);
+    cluster.restart_node(
+        &entry_point_id,
+        exited_validator_info,
+        SocketAddrSpace::Unspecified,
+    );

     // Wait for a root > prev_voted_slot to be set. Because the root is on a
     // different fork than `prev_voted_slot`, then optimistic confirmation is
@@ -2163,11 +2259,21 @@ fn test_optimistic_confirmation_violation_detection() {
         OptimisticConfirmationVerifier::format_optimistic_confirmed_slot_violation_log(
             prev_voted_slot,
         );
+    // Violation detection thread can be behind so poll logs up to 10 seconds
     if let Some(mut buf) = buf {
+        let start = Instant::now();
+        let mut success = false;
         let mut output = String::new();
-        buf.read_to_string(&mut output).unwrap();
-        assert!(output.contains(&expected_log));
+        while start.elapsed().as_secs() < 10 {
+            buf.read_to_string(&mut output).unwrap();
+            if output.contains(&expected_log) {
+                success = true;
+                break;
+            }
+            sleep(Duration::from_millis(10));
+        }
         print!("{}", output);
+        assert!(success);
     } else {
         panic!("dumped log and disabled testing");
     }
@@ -2199,7 +2305,7 @@ fn test_validator_saves_tower() {
         validator_keys: Some(vec![(validator_identity_keypair.clone(), true)]),
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     let validator_client = cluster.get_validator_client(&validator_id).unwrap();

@@ -2232,7 +2338,7 @@ fn test_validator_saves_tower() {
     assert_eq!(tower1.root(), 0);

     // Restart the validator and wait for a new root
-    cluster.restart_node(&validator_id, validator_info);
+    cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
     let validator_client = cluster.get_validator_client(&validator_id).unwrap();

     // Wait for the first root
@@ -2264,7 +2370,7 @@ fn test_validator_saves_tower() {
     // without having to wait for that snapshot to be generated in this test
     tower1.save(&validator_identity_keypair).unwrap();

-    cluster.restart_node(&validator_id, validator_info);
+    cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
     let validator_client = cluster.get_validator_client(&validator_id).unwrap();

     // Wait for a new root, demonstrating the validator was able to make progress from the older `tower1`
@@ -2296,7 +2402,7 @@ fn test_validator_saves_tower() {
     remove_tower(&ledger_path, &validator_id);
     validator_info.config.require_tower = false;

-    cluster.restart_node(&validator_id, validator_info);
+    cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
     let validator_client = cluster.get_validator_client(&validator_id).unwrap();

     // Wait for a couple more slots to pass so another vote occurs
@@ -2334,7 +2440,7 @@ fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) {
 }

 fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
-    let tower = Tower::restore(&ledger_path, &node_pubkey);
+    let tower = Tower::restore(ledger_path, node_pubkey);
     if let Err(tower_err) = tower {
         if tower_err.is_file_missing() {
             return None;
@@ -2343,7 +2449,7 @@ fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
         }
     }
     // actually saved tower must have at least one vote.
-    Tower::restore(&ledger_path, &node_pubkey).ok()
+    Tower::restore(ledger_path, node_pubkey).ok()
 }

 fn last_vote_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> {
@@ -2355,7 +2461,7 @@ fn root_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Slot> {
 }

 fn remove_tower(ledger_path: &Path, node_pubkey: &Pubkey) {
-    fs::remove_file(Tower::get_filename(&ledger_path, &node_pubkey)).unwrap();
+    fs::remove_file(Tower::get_filename(ledger_path, node_pubkey)).unwrap();
 }

 // A bit convoluted test case; but this roughly follows this test theoretical scenario:
@@ -2439,7 +2545,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
         skip_warmup_slots: true,
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     let base_slot = 26; // S2
     let next_slot_on_a = 27; // S3
@@ -2520,7 +2626,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     // Run validator C only to make it produce and vote on its own fork.
     info!("Restart validator C again!!!");
     let val_c_ledger_path = validator_c_info.info.ledger_path.clone();
-    cluster.restart_node(&validator_c_pubkey, validator_c_info);
+    cluster.restart_node(
+        &validator_c_pubkey,
+        validator_c_info,
+        SocketAddrSpace::Unspecified,
+    );

     let mut votes_on_c_fork = std::collections::BTreeSet::new(); // S4 and S5
     for _ in 0..100 {
@@ -2542,7 +2652,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     // Step 4:
     // verify whether there was violation or not
     info!("Restart validator A again!!!");
-    cluster.restart_node(&validator_a_pubkey, validator_a_info);
+    cluster.restart_node(
+        &validator_a_pubkey,
+        validator_a_info,
+        SocketAddrSpace::Unspecified,
+    );

     // monitor for actual votes from validator A
     let mut bad_vote_detected = false;
@@ -2629,7 +2743,7 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
         skip_warmup_slots: true,
         ..ClusterConfig::default()
     };
-    let mut cluster = LocalCluster::new(&mut config);
+    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     let val_a_ledger_path = cluster.ledger_path(&validator_a_pubkey);

@@ -2654,7 +2768,11 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
         purge_slots(&blockstore, purged_slot_before_restart, 100);
     }

-    cluster.restart_node(&validator_a_pubkey, validator_a_info);
+    cluster.restart_node(
+        &validator_a_pubkey,
+        validator_a_info,
+        SocketAddrSpace::Unspecified,
+    );

     let mut newly_rooted = false;
     let some_root_after_restart = purged_slot_before_restart + 25; // 25 is arbitrary; just wait a bit
@@ -2736,7 +2854,10 @@ fn test_hard_fork_invalidates_tower() {
         skip_warmup_slots: true,
         ..ClusterConfig::default()
     };
-    let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(&mut config)));
+    let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(
+        &mut config,
+        SocketAddrSpace::Unspecified,
+    )));

     let val_a_ledger_path = cluster.lock().unwrap().ledger_path(&validator_a_pubkey);

@@ -2781,8 +2902,11 @@ fn test_hard_fork_invalidates_tower() {
             .lock()
             .unwrap()
             .create_restart_context(&validator_a_pubkey, &mut validator_a_info);
-        let restarted_validator_info =
-            LocalCluster::restart_node_with_context(validator_a_info, restart_context);
+        let restarted_validator_info = LocalCluster::restart_node_with_context(
+            validator_a_info,
+            restart_context,
+            SocketAddrSpace::Unspecified,
+        );
         cluster_for_a
             .lock()
             .unwrap()
@@
 -2804,10 +2928,11 @@ fn test_hard_fork_invalidates_tower() {
     }

     // restart validator B normally
-    cluster
-        .lock()
-        .unwrap()
-        .restart_node(&validator_b_pubkey, validator_b_info);
+    cluster.lock().unwrap().restart_node(
+        &validator_b_pubkey,
+        validator_b_info,
+        SocketAddrSpace::Unspecified,
+    );

     // validator A should now start so join its thread here
     thread.join().unwrap();
@@ -2816,7 +2941,7 @@ fn test_hard_fork_invalidates_tower() {
     cluster
         .lock()
         .unwrap()
-        .check_for_new_roots(16, &"hard fork");
+        .check_for_new_roots(16, "hard fork", SocketAddrSpace::Unspecified);
 }

 #[test]
@@ -2875,7 +3000,11 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
     let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {};

     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(20, &"run_test_load_program_accounts_partition");
+        cluster.check_for_new_roots(
+            20,
+            "run_test_load_program_accounts_partition",
+            SocketAddrSpace::Unspecified,
+        );
         exit.store(true, Ordering::Relaxed);
         t_update.join().unwrap();
         t_scan.join().unwrap();
@@ -3050,7 +3179,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
         additional_accounts: starting_accounts,
         ..ClusterConfig::default()
     };
-    let cluster = LocalCluster::new(&mut config);
+    let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);

     // Give the threads a client to use for querying the cluster
     let all_pubkeys = cluster.get_node_pubkeys();
@@ -3066,7 +3195,11 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
     scan_client_sender.send(scan_client).unwrap();

     // Wait for some roots to pass
-    cluster.check_for_new_roots(40, &"run_test_load_program_accounts");
+    cluster.check_for_new_roots(
+        40,
+        "run_test_load_program_accounts",
+        SocketAddrSpace::Unspecified,
+    );

     // Exit and ensure no violations of consistency were found
     exit.store(true, Ordering::Relaxed);
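The recurring mechanical change in the local-cluster diff above is a new `SocketAddrSpace` parameter, imported from `solana_streamer::socket` and threaded through `LocalCluster::new`, `discover_cluster`, the restart helpers, and the root-checking assertions. The tests pass `SocketAddrSpace::Unspecified` everywhere, which is consistent with a cluster bound to loopback addresses. A hedged sketch of what such an address filter plausibly looks like; the names mirror the import, but the body here is illustrative, not the crate's actual implementation:

```rust
// Sketch of a SocketAddrSpace-style filter: `Unspecified` admits any
// address (what the tests above want, since LocalCluster binds to
// 127.0.0.1), while `Global` rejects addresses that are clearly not
// globally routable. Illustrative subset of the real rules.
use std::net::{Ipv4Addr, SocketAddr};

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SocketAddrSpace {
    Unspecified,
    Global,
}

impl SocketAddrSpace {
    pub fn check(&self, addr: &SocketAddr) -> bool {
        match self {
            Self::Unspecified => true,
            Self::Global => {
                let ip = addr.ip();
                !(ip.is_loopback() || ip.is_unspecified() || ip.is_multicast())
            }
        }
    }
}

fn main() {
    let localhost = SocketAddr::from((Ipv4Addr::LOCALHOST, 8000));
    assert!(SocketAddrSpace::Unspecified.check(&localhost));
    assert!(!SocketAddrSpace::Global.check(&localhost));
}
```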
-version = "1.7.0" +version = "1.7.11" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-measure" readme = "../README.md" @@ -12,8 +12,8 @@ edition = "2018" [dependencies] log = "0.4.11" -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-metrics = { path = "../metrics", version = "=1.7.0" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-metrics = { path = "../metrics", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/measure/src/measure.rs b/measure/src/measure.rs index 3b342605235093..26f32b097cd5e4 100644 --- a/measure/src/measure.rs +++ b/measure/src/measure.rs @@ -216,7 +216,7 @@ mod tests { { let some_struct = SomeStruct { x: 42 }; let (result, _measure) = Measure::this( - |(obj, x)| SomeStruct::add_to(&obj, x), + |(obj, x)| SomeStruct::add_to(obj, x), (&some_struct, 4), "test", ); diff --git a/merkle-root-bench/Cargo.toml b/merkle-root-bench/Cargo.toml index 5c471ba40155c7..c55f4a114ac3cb 100644 --- a/merkle-root-bench/Cargo.toml +++ b/merkle-root-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-merkle-root-bench" -version = "1.7.0" +version = "1.7.11" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] log = "0.4.11" -solana-logger = { path = "../logger", version = "=1.7.0" } -solana-runtime = { path = "../runtime", version = "=1.7.0" } -solana-measure = { path = "../measure", version = "=1.7.0" } -solana-sdk = { path = "../sdk", version = "=1.7.0" } -solana-version = { path = "../version", version = "=1.7.0" } +solana-logger = { path = "../logger", version = "=1.7.11" } +solana-runtime = { path = "../runtime", version = "=1.7.11" } +solana-measure = { path = "../measure", version = "=1.7.11" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } +solana-version = { path = "../version", version = "=1.7.11" } clap = "2.33.1" [package.metadata.docs.rs] diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index afae0fded082bd..3dcecab123c587 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-merkle-tree" -version = "1.7.0" +version = "1.7.11" description = "Solana Merkle Tree" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-merkle-tree" edition = "2018" [dependencies] -solana-program = { path = "../sdk/program", version = "=1.7.0" } +solana-program = { path = "../sdk/program", version = "=1.7.11" } fast-math = "0.1" # This can go once the BPF toolchain target Rust 1.42.0+ diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 2904ffaf627f07..d65b89687fb631 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-metrics" -version = "1.7.0" +version = "1.7.11" description = "Solana Metrics" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,7 +15,7 @@ gethostname = "0.2.1" lazy_static = "1.4.0" log = "0.4.11" reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } -solana-sdk = { path = "../sdk", version = "=1.7.0" } +solana-sdk = { path = "../sdk", version = "=1.7.11" } [dev-dependencies] rand = "0.7.0" diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index 1bbc0dd778516e..29b7ecc526d2e9 100644 --- a/metrics/src/counter.rs 
+++ b/metrics/src/counter.rs
@@ -222,7 +222,7 @@ mod tests {
             INIT_HOOK.call_once(|| {
                 ENV_LOCK = Some(RwLock::new(()));
             });
-            &ENV_LOCK.as_ref().unwrap()
+            ENV_LOCK.as_ref().unwrap()
         }
     }

diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs
index 4484e8220bfa24..6d55b89e7e7a66 100644
--- a/metrics/src/metrics.rs
+++ b/metrics/src/metrics.rs
@@ -103,16 +103,23 @@ impl MetricsWriter for InfluxDbMetricsWriter {

             let client = reqwest::blocking::Client::builder()
                 .timeout(Duration::from_secs(5))
-                .build()
-                .unwrap();
+                .build();
+            let client = match client {
+                Ok(client) => client,
+                Err(err) => {
+                    warn!("client instantiation failed: {}", err);
+                    return;
+                }
+            };
+
             let response = client.post(write_url.as_str()).body(line).send();
             if let Ok(resp) = response {
-                if !resp.status().is_success() {
-                    warn!(
-                        "submit response unsuccessful: {} {}",
-                        resp.status(),
-                        resp.text().unwrap()
-                    );
+                let status = resp.status();
+                if !status.is_success() {
+                    let text = resp
+                        .text()
+                        .unwrap_or_else(|_| "[text body empty]".to_string());
+                    warn!("submit response unsuccessful: {} {}", status, text,);
                 }
             } else {
                 warn!("submit error: {}", response.unwrap_err());
diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh
index d0046af995bd36..93c70af06977b2 100755
--- a/multinode-demo/bootstrap-validator.sh
+++ b/multinode-demo/bootstrap-validator.sh
@@ -20,6 +20,7 @@ else
 fi

 no_restart=0
+maybeRequireTower=true

 args=()
 while [[ -n $1 ]]; do
@@ -75,6 +76,12 @@ while [[ -n $1 ]]; do
     elif [[ $1 == --maximum-snapshots-to-retain ]]; then
       args+=("$1" "$2")
       shift 2
+    elif [[ $1 == --accounts-db-skip-shrink ]]; then
+      args+=("$1")
+      shift
+    elif [[ $1 == --skip-require-tower ]]; then
+      maybeRequireTower=false
+      shift
     else
       echo "Unknown argument: $1"
       $program --help
@@ -99,8 +106,11 @@ ledger_dir="$SOLANA_CONFIG_DIR"/bootstrap-validator
   exit 1
 }

+if [[ $maybeRequireTower = true ]]; then
+  args+=(--require-tower)
+fi
+
 args+=(
-  --require-tower
   --ledger "$ledger_dir"
   --rpc-port 8899
   --snapshot-interval-slots 200
diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh
index 224300954cbb40..bb596ee67cf430 100755
--- a/multinode-demo/validator.sh
+++ b/multinode-demo/validator.sh
@@ -45,6 +45,8 @@ EOF
   exit 1
 }

+maybeRequireTower=true
+
 positional_args=()
 while [[ -n $1 ]]; do
   if [[ ${1:0:1} = - ]]; then
@@ -140,10 +142,10 @@ while [[ -n $1 ]]; do
     elif [[ $1 = --log ]]; then
       args+=("$1" "$2")
       shift 2
-    elif [[ $1 = --trusted-validator ]]; then
+    elif [[ $1 = --known-validator ]]; then
       args+=("$1" "$2")
       shift 2
-    elif [[ $1 = --halt-on-trusted-validators-accounts-hash-mismatch ]]; then
+    elif [[ $1 = --halt-on-known-validators-accounts-hash-mismatch ]]; then
       args+=("$1")
       shift
     elif [[ $1 = --max-genesis-archive-unpacked-size ]]; then
@@ -155,6 +157,12 @@ while [[ -n $1 ]]; do
     elif [[ $1 == --expected-bank-hash ]]; then
       args+=("$1" "$2")
       shift 2
+    elif [[ $1 == --accounts-db-skip-shrink ]]; then
+      args+=("$1")
+      shift
+    elif [[ $1 == --skip-require-tower ]]; then
+      maybeRequireTower=false
+      shift
     elif [[ $1 = -h ]]; then
       usage "$@"
     else
@@ -227,7 +235,10 @@ default_arg --identity "$identity"
 default_arg --vote-account "$vote_account"
 default_arg --ledger "$ledger_dir"
 default_arg --log -
-default_arg --require-tower
+
+if [[ $maybeRequireTower = true ]]; then
+  default_arg --require-tower
+fi

 if [[ -n $SOLANA_CUDA ]]; then
   program=$solana_validator_cuda
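One detail in the metrics.rs hunk above is easy to miss: `resp.status()` is hoisted into a local before `resp.text()` is called. In reqwest's blocking API, `Response::text` takes `self` by value, so querying the status after reading the body would not compile. A reduced sketch of the same move-order constraint, using stand-in types rather than reqwest itself:

```rust
// Stand-in response type demonstrating why `status` must be read before
// `text()`: `text` consumes the response, just as reqwest's does.
struct Response {
    status: u16,
    body: String,
}

impl Response {
    fn status(&self) -> u16 {
        self.status
    }
    fn text(self) -> Result<String, ()> {
        Ok(self.body) // takes `self` by value
    }
}

fn main() {
    let resp = Response {
        status: 500,
        body: "oops".to_string(),
    };
    let status = resp.status(); // must happen before `text()` moves `resp`
    let text = resp.text().unwrap_or_else(|_| "[text body empty]".to_string());
    println!("submit response unsuccessful: {} {}", status, text);
}
```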
diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml
index 9e0dea8a6163f5..15259e416e4e2f 100644
--- a/net-shaper/Cargo.toml
+++ b/net-shaper/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-net-shaper"
 description = "The solana cluster network shaping tool"
-version = "1.7.0"
+version = "1.7.11"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,8 +13,8 @@ publish = false
 clap = "2.33.1"
 serde = "1.0.122"
 serde_json = "1.0.56"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
 rand = "0.7.0"

 [[bin]]
diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml
index 5a13571098e6f7..0970c333598a3f 100644
--- a/net-utils/Cargo.toml
+++ b/net-utils/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-net-utils"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana Network Utilities"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -18,9 +18,10 @@ rand = "0.7.0"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 socket2 = "0.3.17"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
 tokio = { version = "1", features = ["full"] }
 url = "2.1.1"

diff --git a/net-utils/src/bin/ip_address_server.rs b/net-utils/src/bin/ip_address_server.rs
index d1b2e60d322ae2..dbf55f8edb63a2 100644
--- a/net-utils/src/bin/ip_address_server.rs
+++ b/net-utils/src/bin/ip_address_server.rs
@@ -19,7 +19,7 @@ fn main() {
         .unwrap_or_else(|_| panic!("Unable to parse {}", port));
     let bind_addr = SocketAddr::from(([0, 0, 0, 0], port));
     let tcp_listener = TcpListener::bind(bind_addr).expect("unable to start tcp listener");
-    let _runtime = solana_net_utils::ip_echo_server(tcp_listener);
+    let _runtime = solana_net_utils::ip_echo_server(tcp_listener, /*shred_version=*/ None);
     loop {
         std::thread::park();
     }
diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs
index df46be35599263..31f0b704ddc718 100644
--- a/net-utils/src/ip_echo_server.rs
+++ b/net-utils/src/ip_echo_server.rs
@@ -1,8 +1,13 @@
 use {
-    crate::{ip_echo_server_reply_length, HEADER_LENGTH},
+    crate::{HEADER_LENGTH, IP_ECHO_SERVER_RESPONSE_LENGTH},
     log::*,
     serde_derive::{Deserialize, Serialize},
-    std::{io, net::SocketAddr, time::Duration},
+    solana_sdk::deserialize_utils::default_on_eof,
+    std::{
+        io,
+        net::{IpAddr, SocketAddr},
+        time::Duration,
+    },
     tokio::{
         io::{AsyncReadExt, AsyncWriteExt},
         net::{TcpListener, TcpStream},
@@ -23,6 +28,15 @@ pub(crate) struct IpEchoServerMessage {
     udp_ports: [u16; MAX_PORT_COUNT_PER_MESSAGE], // Fixed size list of ports to avoid vec serde
 }

+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+pub struct IpEchoServerResponse {
+    // Public IP address of request echoed back to the node.
+    pub(crate) address: IpAddr,
+    // Cluster shred-version of the node running the server.
+    #[serde(deserialize_with = "default_on_eof")]
+    pub(crate) shred_version: Option<u16>,
+}
+
 impl IpEchoServerMessage {
     pub fn new(tcp_ports: &[u16], udp_ports: &[u16]) -> Self {
         let mut msg = Self::default();
@@ -42,7 +56,11 @@ pub(crate) fn ip_echo_server_request_length() -> usize {
         + REQUEST_TERMINUS_LENGTH
 }

-async fn process_connection(mut socket: TcpStream, peer_addr: SocketAddr) -> io::Result<()> {
+async fn process_connection(
+    mut socket: TcpStream,
+    peer_addr: SocketAddr,
+    shred_version: Option<u16>,
+) -> io::Result<()> {
     info!("connection from {:?}", peer_addr);

     let mut data = vec![0u8; ip_echo_server_request_length()];
@@ -113,16 +131,19 @@ async fn process_connection(mut socket: TcpStream, peer_addr: SocketAddr) -> io:
                 let _ = tcp_stream.shutdown();
             }
         }
-
+        let response = IpEchoServerResponse {
+            address: peer_addr.ip(),
+            shred_version,
+        };
         // "\0\0\0\0" header is added to ensure a valid response will never
         // conflict with the first four bytes of a valid HTTP response.
-        let mut bytes = vec![0u8; ip_echo_server_reply_length()];
-        bincode::serialize_into(&mut bytes[HEADER_LENGTH..], &peer_addr.ip()).unwrap();
+        let mut bytes = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH];
+        bincode::serialize_into(&mut bytes[HEADER_LENGTH..], &response).unwrap();
         trace!("response: {:?}", bytes);
         writer.write_all(&bytes).await
 }

-async fn run_echo_server(tcp_listener: std::net::TcpListener) {
+async fn run_echo_server(tcp_listener: std::net::TcpListener, shred_version: Option<u16>) {
     info!("bound to {:?}", tcp_listener.local_addr().unwrap());
     let tcp_listener =
         TcpListener::from_std(tcp_listener).expect("Failed to convert std::TcpListener");
@@ -131,7 +152,7 @@ async fn run_echo_server(tcp_listener: std::net::TcpListener) {
         match tcp_listener.accept().await {
             Ok((socket, peer_addr)) => {
                 runtime::Handle::current().spawn(async move {
-                    if let Err(err) = process_connection(socket, peer_addr).await {
+                    if let Err(err) = process_connection(socket, peer_addr, shred_version).await {
                         info!("session failed: {:?}", err);
                     }
                 });
@@ -143,10 +164,14 @@ async fn run_echo_server(tcp_listener: std::net::TcpListener) {

 /// Starts a simple TCP server on the given port that echos the IP address of any peer that
 /// connects. Used by |get_public_ip_addr|
-pub fn ip_echo_server(tcp_listener: std::net::TcpListener) -> IpEchoServer {
+pub fn ip_echo_server(
+    tcp_listener: std::net::TcpListener,
+    // Cluster shred-version of the node running the server.
+    shred_version: Option<u16>,
+) -> IpEchoServer {
     tcp_listener.set_nonblocking(true).unwrap();

     let runtime = Runtime::new().expect("Failed to create Runtime");
-    runtime.spawn(run_echo_server(tcp_listener));
+    runtime.spawn(run_echo_server(tcp_listener, shred_version));
     runtime
 }
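The `#[serde(deserialize_with = "default_on_eof")]` attribute on `shred_version` is what keeps the wire format compatible in both directions: a reply from an old server simply runs out of bytes where the new field would start, and the field falls back to `None`. The real helper lives in `solana_sdk::deserialize_utils`; the following is an illustrative reimplementation, under the assumption that it recognizes bincode's end-of-input failure by its error message:

```rust
// Illustrative default_on_eof-style adapter (not the sdk's exact code).
// If the input ends before the field starts, substitute T::default(),
// which is how an old server's shorter reply parses as `None` above.
use serde::{Deserialize, Deserializer};

fn default_on_eof<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
    T: Deserialize<'de> + Default,
    D: Deserializer<'de>,
{
    match T::deserialize(deserializer) {
        // Assumption: bincode reports truncated input with an io error
        // whose message mentions "unexpected end of file".
        Err(err) if err.to_string().contains("unexpected end of file") => Ok(T::default()),
        result => result,
    }
}
```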
diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs
index 6c66f3d7fd2488..55d702baeb33b9 100644
--- a/net-utils/src/lib.rs
+++ b/net-utils/src/lib.rs
@@ -15,8 +15,8 @@ use {
 };

 mod ip_echo_server;
-use ip_echo_server::IpEchoServerMessage;
 pub use ip_echo_server::{ip_echo_server, IpEchoServer, MAX_PORT_COUNT_PER_MESSAGE};
+use ip_echo_server::{IpEchoServerMessage, IpEchoServerResponse};

 /// A data type representing a public Udp socket
 pub struct UdpSocketPair {
@@ -28,17 +28,12 @@ pub struct UdpSocketPair {
 pub type PortRange = (u16, u16);

 pub(crate) const HEADER_LENGTH: usize = 4;
-pub(crate) fn ip_echo_server_reply_length() -> usize {
-    let largest_ip_addr = IpAddr::from([0u16; 8]); // IPv6 variant
-    HEADER_LENGTH + bincode::serialized_size(&largest_ip_addr).unwrap() as usize
-}
+pub(crate) const IP_ECHO_SERVER_RESPONSE_LENGTH: usize = HEADER_LENGTH + 23;

 fn ip_echo_server_request(
     ip_echo_server_addr: &SocketAddr,
     msg: IpEchoServerMessage,
-) -> Result<IpAddr, String> {
-    let mut data = vec![0u8; ip_echo_server_reply_length()];
-
+) -> Result<IpEchoServerResponse, String> {
     let timeout = Duration::new(5, 0);
     TcpStream::connect_timeout(ip_echo_server_addr, timeout)
         .and_then(|mut stream| {
@@ -54,9 +49,11 @@ fn ip_echo_server_request(
             stream.set_read_timeout(Some(Duration::new(10, 0)))?;
             stream.write_all(&bytes)?;
             stream.shutdown(std::net::Shutdown::Write)?;
-            stream.read(data.as_mut_slice())
+            let mut data = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH];
+            let _ = stream.read(&mut data[..])?;
+            Ok(data)
         })
-        .and_then(|_| {
+        .and_then(|data| {
             // It's common for users to accidentally confuse the validator's gossip port and JSON
             // RPC port.  Attempt to detect when this occurs by looking for the standard HTTP
             // response header and provide the user with a helpful error message
@@ -102,7 +99,14 @@ fn ip_echo_server_request(
 /// Determine the public IP address of this machine by asking an ip_echo_server at the given
 /// address
 pub fn get_public_ip_addr(ip_echo_server_addr: &SocketAddr) -> Result<IpAddr, String> {
-    ip_echo_server_request(ip_echo_server_addr, IpEchoServerMessage::default())
+    let resp = ip_echo_server_request(ip_echo_server_addr, IpEchoServerMessage::default())?;
+    Ok(resp.address)
+}
+
+pub fn get_cluster_shred_version(ip_echo_server_addr: &SocketAddr) -> Result<u16, String> {
+    let resp = ip_echo_server_request(ip_echo_server_addr, IpEchoServerMessage::default())?;
+    resp.shred_version
+        .ok_or_else(|| String::from("IP echo server does not return a shred-version"))
 }

 // Checks if any of the provided TCP/UDP ports are not reachable by the machine at
@@ -524,6 +528,57 @@ mod tests {
     use super::*;
     use std::net::Ipv4Addr;

+    #[test]
+    fn test_response_length() {
+        let resp = IpEchoServerResponse {
+            address: IpAddr::from([u16::MAX; 8]), // IPv6 variant
+            shred_version: Some(u16::MAX),
+        };
+        let resp_size = bincode::serialized_size(&resp).unwrap();
+        assert_eq!(
+            IP_ECHO_SERVER_RESPONSE_LENGTH,
+            HEADER_LENGTH + resp_size as usize
+        );
+    }
+
+    // Asserts that an old client can parse the response from a new server.
+    #[test]
+    fn test_backward_compat() {
+        let address = IpAddr::from([
+            525u16, 524u16, 523u16, 522u16, 521u16, 520u16, 519u16, 518u16,
+        ]);
+        let response = IpEchoServerResponse {
+            address,
+            shred_version: Some(42),
+        };
+        let mut data = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH];
+        bincode::serialize_into(&mut data[HEADER_LENGTH..], &response).unwrap();
+        data.truncate(HEADER_LENGTH + 20);
+        assert_eq!(
+            bincode::deserialize::<IpAddr>(&data[HEADER_LENGTH..]).unwrap(),
+            address
+        );
+    }
+
+    // Asserts that a new client can parse the response from an old server.
+    #[test]
+    fn test_forward_compat() {
+        let address = IpAddr::from([
+            525u16, 524u16, 523u16, 522u16, 521u16, 520u16, 519u16, 518u16,
+        ]);
+        let mut data = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH];
+        bincode::serialize_into(&mut data[HEADER_LENGTH..], &address).unwrap();
+        let response: Result<IpEchoServerResponse, _> =
+            bincode::deserialize(&data[HEADER_LENGTH..]);
+        assert_eq!(
+            response.unwrap(),
+            IpEchoServerResponse {
+                address,
+                shred_version: None,
+            }
+        );
+    }
+
     #[test]
     fn test_parse_port_or_addr() {
         let p1 = parse_port_or_addr(Some("9000"), SocketAddr::from(([1, 2, 3, 4], 1)));
@@ -624,14 +679,14 @@ mod tests {
         let (_server_port, (server_udp_socket, server_tcp_listener)) =
             bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
-        let _runtime = ip_echo_server(server_tcp_listener);
+        let _runtime = ip_echo_server(server_tcp_listener, /*shred_version=*/ Some(42));

         let server_ip_echo_addr = server_udp_socket.local_addr().unwrap();
         assert_eq!(
             get_public_ip_addr(&server_ip_echo_addr),
             parse_host("127.0.0.1"),
         );
-
+        assert_eq!(get_cluster_shred_version(&server_ip_echo_addr), Ok(42));
         assert!(verify_reachable_ports(&server_ip_echo_addr, vec![], &[],));
     }

@@ -644,14 +699,14 @@ mod tests {
         let (client_port, (client_udp_socket, client_tcp_listener)) =
             bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
-        let _runtime = ip_echo_server(server_tcp_listener);
+        let _runtime = ip_echo_server(server_tcp_listener, /*shred_version=*/ Some(65535));

         let ip_echo_server_addr = server_udp_socket.local_addr().unwrap();
         assert_eq!(
             get_public_ip_addr(&ip_echo_server_addr),
             parse_host("127.0.0.1"),
         );
-
+        assert_eq!(get_cluster_shred_version(&ip_echo_server_addr), Ok(65535));
         assert!(verify_reachable_ports(
             &ip_echo_server_addr,
             vec![(client_port, client_tcp_listener)],
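The magic number in `IP_ECHO_SERVER_RESPONSE_LENGTH: usize = HEADER_LENGTH + 23` follows from bincode 1.x's default fixed-width encoding of `IpEchoServerResponse`, and `test_response_length` above pins it against `bincode::serialized_size`. The arithmetic, spelled out:

```rust
// Worst-case bincode 1.x (fixint) size of IpEchoServerResponse:
// a 4-byte u32 enum tag for IpAddr::V4/V6, the 16-byte IPv6 address
// (which is why the tests build it from [u16; 8]), a 1-byte Option tag,
// and the 2-byte u16 shred-version payload.
fn main() {
    let ip_tag = 4; // u32 discriminant for the IpAddr variant
    let ipv6 = 16; // 8 * u16 segments
    let option_tag = 1; // None = 0u8, Some = 1u8
    let shred_version = 2; // u16 payload
    assert_eq!(ip_tag + ipv6 + option_tag + shred_version, 23);
}
```

The same numbers explain why `test_backward_compat` truncates to `HEADER_LENGTH + 20`: that is exactly a serialized `IpAddr` with no trailing `Option<u16>`, which is all an old client expects to read.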
diff --git a/net/net.sh b/net/net.sh
index 3a25c81f261551..873c81b023da37 100755
--- a/net/net.sh
+++ b/net/net.sh
@@ -102,7 +102,10 @@ Operate a configured testnet
    --cluster-type development|devnet|testnet|mainnet-beta
                                       - Specify whether or not to launch the cluster in "development" mode with all features enabled at epoch 0,
                                         or various other live clusters' feature set (default: development)
-   --warp-slot WARP_SLOT              - Boot from a snapshot that has warped ahead to WARP_SLOT rather than a slot 0 genesis.
+   --slots-per-epoch SLOTS
+                                      - Override the number of slots in an epoch
+   --warp-slot WARP_SLOT
+                                      - Boot from a snapshot that has warped ahead to WARP_SLOT rather than a slot 0 genesis.
 sanity/start-specific options:
   -F                   - Discard validator nodes that didn't bootup successfully
   -o noInstallCheck    - Skip solana-install sanity
@@ -306,7 +309,7 @@ startBootstrapLeader() {
          ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
          ${#clientIpList[@]} \"$benchExchangeExtraArgs\" \
          \"$genesisOptions\" \
-         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority\" \
+         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority $maybeAccountsDbSkipShrink $maybeSkipRequireTower\" \
          \"$gpuMode\" \
          \"$maybeWarpSlot\" \
          \"$waitForNodeInit\" \
@@ -378,7 +381,7 @@ startNode() {
          ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
          ${#clientIpList[@]} \"$benchExchangeExtraArgs\" \
          \"$genesisOptions\" \
-         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority\" \
+         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority $maybeAccountsDbSkipShrink $maybeSkipRequireTower\" \
          \"$gpuMode\" \
          \"$maybeWarpSlot\" \
          \"$waitForNodeInit\" \
@@ -779,6 +782,8 @@ maybeLimitLedgerSize=""
 maybeSkipLedgerVerify=""
 maybeDisableAirdrops=""
 maybeWaitForSupermajority=""
+maybeAccountsDbSkipShrink=""
+maybeSkipRequireTower=""
 debugBuild=false
 doBuild=true
 gpuMode=auto
@@ -822,6 +827,9 @@ while [[ -n $1 ]]; do
       esac
       genesisOptions="$genesisOptions $1 $2"
       shift 2
+    elif [[ $1 = --slots-per-epoch ]]; then
+      genesisOptions="$genesisOptions $1 $2"
+      shift 2
     elif [[ $1 = --no-snapshot-fetch ]]; then
       maybeNoSnapshot="$1"
       shift 1
@@ -900,6 +908,12 @@ while [[ -n $1 ]]; do
     elif [[ $1 == --extra-primordial-stakes ]]; then
       extraPrimordialStakes=$2
       shift 2
+    elif [[ $1 = --accounts-db-skip-shrink ]]; then
+      maybeAccountsDbSkipShrink="$1"
+      shift 1
+    elif [[ $1 = --skip-require-tower ]]; then
+      maybeSkipRequireTower="$1"
+      shift 1
     else
       usage "Unknown long option: $1"
     fi
diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml
index ca81c7de720303..247202af76823e 100644
--- a/notifier/Cargo.toml
+++ b/notifier/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-notifier"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana Notifier"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
diff --git a/perf/Cargo.toml b/perf/Cargo.toml
index 8ab4a157fd224f..9d57b5e2b7b3be 100644
--- a/perf/Cargo.toml
+++ b/perf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-perf"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana Performance APIs"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,20 +10,19 @@ documentation = "https://docs.rs/solana-perf"
 edition = "2018"

 [dependencies]
-rand = "0.7.0"
-dlopen = "0.1.8"
 bincode = "1.3.1"
-rayon = "1.5.0"
-serde = "1.0.122"
+curve25519-dalek = { version = "2" }
+dlopen = "0.1.8"
 dlopen_derive = "0.1.4"
 lazy_static = "1.4.0"
 log = "0.4.11"
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" }
-solana-budget-program = { path = "../programs/budget", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-curve25519-dalek = { version = "2" }
+rand = "0.7.0"
+rayon = "1.5.0"
+serde = "1.0.126"
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-metrics = { path = "../metrics", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.11" }
 
 [lib]
 name = "solana_perf"
diff --git a/perf/src/cuda_runtime.rs b/perf/src/cuda_runtime.rs
index 3b5b054c1aadec..eb0bf6a79f9f09 100644
--- a/perf/src/cuda_runtime.rs
+++ b/perf/src/cuda_runtime.rs
@@ -146,6 +146,10 @@ impl<'a, T: Clone + Send + Sync + Default + Sized> IntoParallelIterator for &'a mut PinnedVec<T> {
 }
 
 impl<T: Clone + Default + Sized> PinnedVec<T> {
+    pub fn reserve(&mut self, size: usize) {
+        self.x.reserve(size);
+    }
+
     pub fn reserve_and_pin(&mut self, size: usize) {
         if self.x.capacity() < size {
             if self.pinned {
diff --git a/perf/src/packet.rs b/perf/src/packet.rs
index 946124e7fe0efb..e73a5ad6b7af25 100644
--- a/perf/src/packet.rs
+++ b/perf/src/packet.rs
@@ -28,11 +28,22 @@ impl Packets {
         Packets { packets }
     }
 
+    pub fn new_unpinned_with_recycler(
+        recycler: PacketsRecycler,
+        size: usize,
+        name: &'static str,
+    ) -> Self {
+        let mut packets = recycler.allocate(name);
+        packets.reserve(size);
+        Packets { packets }
+    }
+
     pub fn new_with_recycler(recycler: PacketsRecycler, size: usize, name: &'static str) -> Self {
         let mut packets = recycler.allocate(name);
         packets.reserve_and_pin(size);
         Packets { packets }
     }
+
     pub fn new_with_recycler_data(
         recycler: &PacketsRecycler,
         name: &'static str,
@@ -43,9 +54,19 @@ impl Packets {
         vec
     }
 
+    pub fn new_unpinned_with_recycler_data(
+        recycler: &PacketsRecycler,
+        name: &'static str,
+        mut packets: Vec<Packet>,
+    ) -> Self {
+        let mut vec = Self::new_unpinned_with_recycler(recycler.clone(), packets.len(), name);
+        vec.packets.append(&mut packets);
+        vec
+    }
+
     pub fn set_addr(&mut self, addr: &SocketAddr) {
         for m in self.packets.iter_mut() {
-            m.meta.set_addr(&addr);
+            m.meta.set_addr(addr);
         }
     }
 
@@ -76,7 +97,7 @@ pub fn to_packets_with_destination<T: Serialize>(
     recycler: PacketsRecycler,
     dests_and_data: &[(SocketAddr, T)],
 ) -> Packets {
-    let mut out = Packets::new_with_recycler(
+    let mut out = Packets::new_unpinned_with_recycler(
         recycler,
         dests_and_data.len(),
         "to_packets_with_destination",
diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs
index a9651071920424..5b6c591482a9cc 100644
--- a/perf/src/sigverify.rs
+++ b/perf/src/sigverify.rs
@@ -118,10 +118,8 @@ fn verify_packet(packet: &mut Packet) {
     let pubkey_end = pubkey_start.saturating_add(size_of::<Pubkey>());
     let sig_end = sig_start.saturating_add(size_of::<Signature>());
 
-    if pubkey_end >= packet.meta.size || sig_end >= packet.meta.size {
-        packet.meta.discard = true;
-        return;
-    }
+    // get_packet_offsets should ensure pubkey_end and sig_end do
+    // not overflow packet.meta.size
 
     let signature = Signature::new(&packet.data[sig_start..sig_end]);
 
@@ -217,6 +215,10 @@ fn do_get_packet_offsets(
         .filter(|v| *v <= packet.meta.size)
         .ok_or(PacketError::InvalidPubkeyLen)?;
 
+    if pubkey_len < sig_len_untrusted {
+        return Err(PacketError::InvalidPubkeyLen);
+    }
+
     let sig_start = current_offset
         .checked_add(sig_size)
         .ok_or(PacketError::InvalidLen)?;
@@ -519,11 +521,11 @@ mod tests {
         let packet_offsets = sigverify::get_packet_offsets(&packet, 0);
         assert_eq!(
-            memfind(&tx_bytes, &tx.signatures[0].as_ref()),
+            memfind(&tx_bytes, tx.signatures[0].as_ref()),
             Some(SIG_OFFSET)
         );
         assert_eq!(
-            memfind(&tx_bytes, &tx.message().account_keys[0].as_ref()),
+            memfind(&tx_bytes, tx.message().account_keys[0].as_ref()),
             Some(packet_offsets.pubkey_start as usize)
         );
         assert_eq!(
@@ -531,7 +533,7 @@ mod tests {
             Some(packet_offsets.msg_start as usize)
         );
         assert_eq!(
-            memfind(&tx_bytes, &tx.signatures[0].as_ref()),
+            memfind(&tx_bytes, tx.signatures[0].as_ref()),
             Some(packet_offsets.sig_start as usize)
         );
         assert_eq!(packet_offsets.sig_len, 1);
@@ -597,6 +599,65 @@ mod tests {
         assert_eq!(res, Err(PacketError::InvalidLen));
     }
 
+    #[test]
+    fn test_pubkey_too_small() {
+        solana_logger::setup();
+        let mut tx = test_tx();
+        let sig = tx.signatures[0];
+        const NUM_SIG: usize = 18;
+        tx.signatures = vec![sig; NUM_SIG];
+        tx.message.account_keys = vec![];
+        tx.message.header.num_required_signatures = NUM_SIG as u8;
+        let mut packet = sigverify::make_packet_from_transaction(tx);
+
+        let res = sigverify::do_get_packet_offsets(&packet, 0);
+        assert_eq!(res, Err(PacketError::InvalidPubkeyLen));
+
+        verify_packet(&mut packet);
+        assert!(packet.meta.discard);
+
+        packet.meta.discard = false;
+        let mut batches = generate_packet_vec(&packet, 1, 1);
+        ed25519_verify(&mut batches);
+        assert!(batches[0].packets[0].meta.discard);
+    }
+
+    #[test]
+    fn test_pubkey_len() {
+        // See that the verify cannot walk off the end of the packet
+        // trying to index into the account_keys to access pubkey.
+        use solana_sdk::signer::{keypair::Keypair, Signer};
+        solana_logger::setup();
+
+        const NUM_SIG: usize = 17;
+        let keypair1 = Keypair::new();
+        let pubkey1 = keypair1.pubkey();
+        let mut message = Message::new(&[], Some(&pubkey1));
+        message.account_keys.push(pubkey1);
+        message.account_keys.push(pubkey1);
+        message.header.num_required_signatures = NUM_SIG as u8;
+        message.recent_blockhash = Hash(pubkey1.to_bytes());
+        let mut tx = Transaction::new_unsigned(message);
+
+        info!("message: {:?}", tx.message_data());
+        info!("tx: {:?}", tx);
+        let sig = keypair1.try_sign_message(&tx.message_data()).unwrap();
+        tx.signatures = vec![sig; NUM_SIG];
+
+        let mut packet = sigverify::make_packet_from_transaction(tx);
+
+        let res = sigverify::do_get_packet_offsets(&packet, 0);
+        assert_eq!(res, Err(PacketError::InvalidPubkeyLen));
+
+        verify_packet(&mut packet);
+        assert!(packet.meta.discard);
+
+        packet.meta.discard = false;
+        let mut batches = generate_packet_vec(&packet, 1, 1);
+        ed25519_verify(&mut batches);
+        assert!(batches[0].packets[0].meta.discard);
+    }
+
     #[test]
     fn test_large_sig_len() {
         let tx = test_tx();
@@ -667,7 +728,7 @@ mod tests {
         let tx_bytes = serialize(&tx0).unwrap();
         assert!(tx_bytes.len() <= PACKET_DATA_SIZE);
         assert_eq!(
-            memfind(&tx_bytes, &tx0.signatures[0].as_ref()),
+            memfind(&tx_bytes, tx0.signatures[0].as_ref()),
             Some(SIG_OFFSET)
         );
         let tx1 = deserialize(&tx_bytes).unwrap();
@@ -748,10 +809,8 @@ mod tests {
         let mut batches = generate_packet_vec(&packet, n, 2);
 
-        let recycler = Recycler::default();
-        let recycler_out = Recycler::default();
         // verify packets
-        sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out);
+        ed25519_verify(&mut batches);
 
         // check result
         let should_discard = modify_data;
             .all(|p| p.meta.discard == should_discard));
     }
 
+    fn ed25519_verify(batches: &mut [Packets]) {
+        let recycler = Recycler::default();
+        let recycler_out = Recycler::default();
+        sigverify::ed25519_verify(batches, &recycler, &recycler_out);
+    }
+
     #[test]
     fn test_verify_tampered_sig_len() {
         let mut tx = test_tx();
@@ -770,10 +835,8 @@ mod tests {
         let mut batches = generate_packet_vec(&packet, 1, 1);
 
-        let recycler = Recycler::default();
-        let recycler_out = Recycler::default();
         // verify packets
-        sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out);
+        ed25519_verify(&mut batches);
         assert!(batches
             .iter()
             .flat_map(|p| &p.packets)
@@ -810,10 +873,8 @@ mod tests {
         batches[0].packets.push(packet);
 
-        let recycler = Recycler::default();
-        let recycler_out = Recycler::default();
         // verify packets
-        sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out);
+        ed25519_verify(&mut batches);
 
         // check result
         let ref_ans = 1u8;
diff --git a/perf/src/test_tx.rs b/perf/src/test_tx.rs
index 6664f684f9b335..a6845317183e1d 100644
--- a/perf/src/test_tx.rs
+++ b/perf/src/test_tx.rs
@@ -1,10 +1,12 @@
-use solana_sdk::hash::Hash;
-use solana_sdk::instruction::CompiledInstruction;
-use solana_sdk::signature::{Keypair, Signer};
-use solana_sdk::system_instruction::SystemInstruction;
-use solana_sdk::system_program;
-use solana_sdk::system_transaction;
-use solana_sdk::transaction::Transaction;
+use solana_sdk::{
+    hash::Hash,
+    instruction::CompiledInstruction,
+    signature::{Keypair, Signer},
+    stake,
+    system_instruction::SystemInstruction,
+    system_program, system_transaction,
+    transaction::Transaction,
+};
 
 pub fn test_tx() -> Transaction {
     let keypair1 = Keypair::new();
@@ -22,7 +24,7 @@ pub fn test_multisig_tx() -> Transaction {
 
     let transfer_instruction = SystemInstruction::Transfer { lamports };
 
-    let program_ids = vec![system_program::id(), solana_budget_program::id()];
+    let program_ids = vec![system_program::id(), stake::program::id()];
 
     let instructions = vec![CompiledInstruction::new(
         0,
diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml
index 02db69035d6de1..2807a6359fbae7 100644
--- a/poh-bench/Cargo.toml
+++ b/poh-bench/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-poh-bench"
-version = "1.7.0"
+version = "1.7.11"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,13 +13,13 @@
 clap = "2.33.1"
 log = "0.4.11"
 rand = "0.7.0"
 rayon = "1.5.0"
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-perf = { path = "../perf", version = "=1.7.0" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-ledger = { path = "../ledger", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.11" }
+solana-measure = { path = "../measure", version = "=1.7.11" }
+solana-version = { path = "../version", version = "=1.7.11" }
+solana-perf = { path = "../perf", version = "=1.7.11" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/poh-bench/src/main.rs b/poh-bench/src/main.rs
index 98e83f4f0fd165..d742d816e2d915 100644
--- a/poh-bench/src/main.rs
+++ b/poh-bench/src/main.rs
@@ -1,6 +1,9 @@
 #![allow(clippy::integer_arithmetic)]
 use clap::{crate_description, crate_name, value_t, App, Arg};
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 use solana_ledger::entry::{self, create_ticks, init_poh, EntrySlice, VerifyRecyclers};
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+use solana_ledger::entry::{create_ticks, init_poh, EntrySlice, VerifyRecyclers};
 use solana_measure::measure::Measure;
 use solana_perf::perf_libs;
 use solana_sdk::hash::hash;
@@ -84,34 +87,40 @@ fn main() {
         time.as_us() / iterations as u64
     );
 
-    if is_x86_feature_detected!("avx2") && entry::api().is_some() {
-        let mut time = Measure::start("time");
-        for _ in 0..iterations {
-            assert!(ticks[..num_entries]
-                .verify_cpu_x86_simd(&start_hash, 8)
-                .finish_verify());
+    // A target_arch check is required here since calling
+    // is_x86_feature_detected from a non-x86_64 arch results in a build
+    // error.
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    {
+        if is_x86_feature_detected!("avx2") && entry::api().is_some() {
+            let mut time = Measure::start("time");
+            for _ in 0..iterations {
+                assert!(ticks[..num_entries]
+                    .verify_cpu_x86_simd(&start_hash, 8)
+                    .finish_verify());
+            }
+            time.stop();
+            println!(
+                "{},cpu_simd_avx2,{}",
+                num_entries,
+                time.as_us() / iterations as u64
+            );
+        }
-        time.stop();
-        println!(
-            "{},cpu_simd_avx2,{}",
-            num_entries,
-            time.as_us() / iterations as u64
-        );
-    }
-    if is_x86_feature_detected!("avx512f") && entry::api().is_some() {
-        let mut time = Measure::start("time");
-        for _ in 0..iterations {
-            assert!(ticks[..num_entries]
-                .verify_cpu_x86_simd(&start_hash, 16)
-                .finish_verify());
+        if is_x86_feature_detected!("avx512f") && entry::api().is_some() {
+            let mut time = Measure::start("time");
+            for _ in 0..iterations {
+                assert!(ticks[..num_entries]
+                    .verify_cpu_x86_simd(&start_hash, 16)
+                    .finish_verify());
+            }
+            time.stop();
+            println!(
+                "{},cpu_simd_avx512,{}",
+                num_entries,
+                time.as_us() / iterations as u64
+            );
         }
-        time.stop();
-        println!(
-            "{},cpu_simd_avx512,{}",
-            num_entries,
-            time.as_us() / iterations as u64
-        );
     }
 
     if perf_libs::api().is_some() {
diff --git a/stake-monitor/.gitignore b/poh/.gitignore
similarity index 100%
rename from stake-monitor/.gitignore
rename to poh/.gitignore
diff --git a/poh/Cargo.toml b/poh/Cargo.toml
new file mode 100644
index 00000000000000..c4865464bd5c90
--- /dev/null
+++ b/poh/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "solana-poh"
+version = "1.7.11"
+description = "Solana PoH"
+authors = ["Solana Maintainers <maintainers@solana.foundation>"]
+repository = "https://github.com/solana-labs/solana"
+license = "Apache-2.0"
+homepage = "https://solana.com/"
+documentation = "https://docs.rs/solana-poh"
+edition = "2018"
+
+[dependencies]
+core_affinity = "0.5.10"
+crossbeam-channel = "0.4"
+log = "0.4.11"
+solana-ledger = { path = "../ledger", version = "=1.7.11" }
+solana-measure = { path = "../measure", version = "=1.7.11" }
+solana-metrics = { path = "../metrics", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-sys-tuner = { path = "../sys-tuner", version = "=1.7.11" }
+thiserror = "1.0"
+
+[dev-dependencies]
+bincode = "1.3.1"
+matches = "0.1.6"
+rand = "0.7.0"
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-perf = { path = "../perf", version = "=1.7.11" }
+
+[lib]
+crate-type = ["lib"]
+name = "solana_poh"
+
+[[bench]]
+name = "poh"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/core/benches/poh.rs b/poh/benches/poh.rs
similarity index 88%
rename from core/benches/poh.rs
rename to poh/benches/poh.rs
index f7a7244a97b67e..9996544022c9d3 100644
--- a/core/benches/poh.rs
+++ b/poh/benches/poh.rs
@@ -3,12 +3,16 @@
 #![feature(test)]
 extern crate test;
 
-use solana_core::poh_service::DEFAULT_HASHES_PER_BATCH;
-use solana_ledger::poh::Poh;
-use solana_sdk::hash::Hash;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, Mutex};
-use test::Bencher;
+use {
+    solana_ledger::poh::Poh,
+    solana_poh::poh_service::DEFAULT_HASHES_PER_BATCH,
+    solana_sdk::hash::Hash,
+    std::sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc, Mutex,
+    },
+    test::Bencher,
+};
 
 const NUM_HASHES: u64 = 30_000; // Should require ~10ms on a 2017 MacBook Pro
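A note on the poh-bench change above: `is_x86_feature_detected!` is only defined for x86/x86_64 targets, so both the `entry::{self, ...}` import and the runtime feature checks have to be fenced with `#[cfg(...)]`. A minimal, self-contained sketch of the same pattern (the function names here are illustrative, not from this patch):

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn simd_lanes() -> usize {
    // Runtime CPU-feature probe; this macro does not compile on non-x86 targets.
    if is_x86_feature_detected!("avx512f") {
        16
    } else if is_x86_feature_detected!("avx2") {
        8
    } else {
        1
    }
}

#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn simd_lanes() -> usize {
    1 // Fallback for e.g. aarch64, where the macro does not exist.
}

fn main() {
    println!("verifying with {} SIMD lanes", simd_lanes());
}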
diff --git a/core/benches/poh_verify.rs b/poh/benches/poh_verify.rs
similarity index 78%
rename from core/benches/poh_verify.rs
rename to poh/benches/poh_verify.rs
index 9cbf551f496e78..b0ae0b7aaecfc6 100644
--- a/core/benches/poh_verify.rs
+++ b/poh/benches/poh_verify.rs
@@ -1,11 +1,15 @@
 #![feature(test)]
 extern crate test;
 
-use solana_ledger::entry::{next_entry_mut, Entry, EntrySlice};
-use solana_sdk::hash::{hash, Hash};
-use solana_sdk::signature::{Keypair, Signer};
-use solana_sdk::system_transaction;
-use test::Bencher;
+use {
+    solana_ledger::entry::{next_entry_mut, Entry, EntrySlice},
+    solana_sdk::{
+        hash::{hash, Hash},
+        signature::{Keypair, Signer},
+        system_transaction,
+    },
+    test::Bencher,
+};
 
 const NUM_HASHES: u64 = 400;
 const NUM_ENTRIES: usize = 800;
@@ -14,7 +18,7 @@ const NUM_ENTRIES: usize = 800;
 fn bench_poh_verify_ticks(bencher: &mut Bencher) {
     solana_logger::setup();
     let zero = Hash::default();
-    let start_hash = hash(&zero.as_ref());
+    let start_hash = hash(zero.as_ref());
     let mut cur_hash = start_hash;
 
     let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
@@ -30,7 +34,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
 #[bench]
 fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
     let zero = Hash::default();
-    let start_hash = hash(&zero.as_ref());
+    let start_hash = hash(zero.as_ref());
     let mut cur_hash = start_hash;
 
     let keypair1 = Keypair::new();
diff --git a/poh/src/lib.rs b/poh/src/lib.rs
new file mode 100644
index 00000000000000..b3980690cbe8df
--- /dev/null
+++ b/poh/src/lib.rs
@@ -0,0 +1,10 @@
+#![allow(clippy::integer_arithmetic)]
+pub mod poh_recorder;
+pub mod poh_service;
+
+#[macro_use]
+extern crate solana_metrics;
+
+#[cfg(test)]
+#[macro_use]
+extern crate matches;
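The recurring `hash(&zero.as_ref())` → `hash(zero.as_ref())` edits in these benches, like the `memfind` changes in the sigverify tests earlier, all remove the same redundant borrow: `.as_ref()` already returns a reference, so the extra `&` yields a `&&[u8]` that only type-checks through an implicit auto-deref (clippy's needless-borrow class of lint). A standalone illustration of the difference, not code from the patch:

fn takes_bytes(bytes: &[u8]) -> usize {
    bytes.len()
}

fn main() {
    let data: Vec<u8> = vec![1, 2, 3];

    // as_ref() already yields &[u8]; no extra borrow is needed.
    let direct: &[u8] = data.as_ref();
    assert_eq!(takes_bytes(direct), 3);

    // An extra & produces &&[u8]; it still compiles, but only via an
    // implicit deref coercion, which is what the lint flags.
    let double: &&[u8] = &direct;
    assert_eq!(takes_bytes(double), 3);
}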
diff --git a/core/src/poh_recorder.rs b/poh/src/poh_recorder.rs
similarity index 96%
rename from core/src/poh_recorder.rs
rename to poh/src/poh_recorder.rs
index 6753049a1ad922..2a17c68d418d27 100644
--- a/core/src/poh_recorder.rs
+++ b/poh/src/poh_recorder.rs
@@ -10,30 +10,32 @@
 //! For Entries:
 //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
 //!
-use crate::poh_service::PohService;
-use crossbeam_channel::{
-    unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
-};
-use solana_ledger::blockstore::Blockstore;
-use solana_ledger::entry::Entry;
-use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
-use solana_ledger::poh::Poh;
-use solana_runtime::bank::Bank;
 pub use solana_sdk::clock::Slot;
-use solana_sdk::clock::NUM_CONSECUTIVE_LEADER_SLOTS;
-use solana_sdk::hash::Hash;
-use solana_sdk::poh_config::PohConfig;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::timing;
-use solana_sdk::transaction::Transaction;
-use std::cmp;
-use std::sync::{
-    atomic::{AtomicBool, Ordering},
-    mpsc::{channel, Receiver, SendError, Sender, SyncSender},
-    {Arc, Mutex},
+use {
+    crate::poh_service::PohService,
+    crossbeam_channel::{
+        unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
+    },
+    log::*,
+    solana_ledger::{
+        blockstore::Blockstore, entry::Entry, leader_schedule_cache::LeaderScheduleCache, poh::Poh,
+    },
+    solana_runtime::bank::Bank,
+    solana_sdk::{
+        clock::NUM_CONSECUTIVE_LEADER_SLOTS, hash::Hash, poh_config::PohConfig, pubkey::Pubkey,
+        timing, transaction::Transaction,
+    },
+    std::{
+        cmp,
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            mpsc::{channel, Receiver, SendError, Sender, SyncSender},
+            {Arc, Mutex},
+        },
+        time::{Duration, Instant},
+    },
+    thiserror::Error,
 };
-use std::time::{Duration, Instant};
-use thiserror::Error;
 
 pub const GRACE_TICKS_FACTOR: u64 = 2;
 pub const MAX_GRACE_SLOTS: u64 = 2;
@@ -726,22 +728,67 @@ impl PohRecorder {
         })
     }
 
-    #[cfg(test)]
+    // Used in tests
     pub fn schedule_dummy_max_height_reached_failure(&mut self) {
        self.reset(Hash::default(), 1, None);
    }
 }
 
+pub fn create_test_recorder(
+    bank: &Arc<Bank>,
+    blockstore: &Arc<Blockstore>,
+    poh_config: Option<PohConfig>,
+) -> (
+    Arc<AtomicBool>,
+    Arc<Mutex<PohRecorder>>,
+    PohService,
+    Receiver<WorkingBankEntry>,
+) {
+    let exit = Arc::new(AtomicBool::new(false));
+    let poh_config = Arc::new(poh_config.unwrap_or_default());
+    let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
+        bank.tick_height(),
+        bank.last_blockhash(),
+        bank.slot(),
+        Some((4, 4)),
+        bank.ticks_per_slot(),
+        &Pubkey::default(),
+        blockstore,
+        &Arc::new(LeaderScheduleCache::new_from_bank(bank)),
+        &poh_config,
+        exit.clone(),
+    );
+    poh_recorder.set_bank(bank);
+
+    let poh_recorder = Arc::new(Mutex::new(poh_recorder));
+    let poh_service = PohService::new(
+        poh_recorder.clone(),
+        &poh_config,
+        &exit,
+        bank.ticks_per_slot(),
+        crate::poh_service::DEFAULT_PINNED_CPU_CORE,
+        crate::poh_service::DEFAULT_HASHES_PER_BATCH,
+        record_receiver,
+    );
+
+    (exit, poh_recorder, poh_service, entry_receiver)
+}
+
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use bincode::serialize;
-    use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
-    use solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path};
-    use solana_perf::test_tx::test_tx;
-    use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
-    use solana_sdk::hash::hash;
-    use std::sync::mpsc::sync_channel;
+    use {
+        super::*,
+        bincode::serialize,
+        solana_ledger::{
+            blockstore::Blockstore,
+            blockstore_meta::SlotMeta,
+            genesis_utils::{create_genesis_config, GenesisConfigInfo},
+            get_tmp_ledger_path,
+        },
+        solana_perf::test_tx::test_tx,
+        solana_sdk::{clock::DEFAULT_TICKS_PER_SLOT, hash::hash},
+        std::sync::mpsc::sync_channel,
+    };
 
     #[test]
     fn test_poh_recorder_no_zero_tick() {
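`create_test_recorder` above was previously a test-only helper inside core; making it public lets other crates spin up a real `PohService` against a `Bank`. A hedged usage sketch, written in the nested `use` style this patch adopts: the genesis and ledger setup mirrors the test imports above, and the teardown order (set the exit flag, then join the service) is assumed from the function's return values rather than stated in this diff:

use {
    solana_ledger::{blockstore::Blockstore, genesis_utils::create_genesis_config, get_tmp_ledger_path},
    solana_poh::poh_recorder::create_test_recorder,
    solana_runtime::bank::Bank,
    std::sync::{atomic::Ordering, Arc},
};

fn main() {
    let genesis = create_genesis_config(2);
    let bank = Arc::new(Bank::new(&genesis.genesis_config));
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

    // None => default PohConfig.
    let (exit, _poh_recorder, poh_service, _entry_receiver) =
        create_test_recorder(&bank, &blockstore, None);

    // ... exercise the recorder here ...

    exit.store(true, Ordering::Relaxed); // signal the tick producer to stop
    poh_service.join().unwrap();
}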
diff --git a/core/src/poh_service.rs b/poh/src/poh_service.rs
similarity index 95%
rename from core/src/poh_service.rs
rename to poh/src/poh_service.rs
index c72bc6b6696a56..4833f16fb158c4 100644
--- a/core/src/poh_service.rs
+++ b/poh/src/poh_service.rs
@@ -1,14 +1,21 @@
 //! The `poh_service` module implements a service that records the passing of
 //! "ticks", a measure of time in the PoH stream
-use crate::poh_recorder::{PohRecorder, Record};
-use crossbeam_channel::Receiver;
-use solana_ledger::poh::Poh;
-use solana_measure::measure::Measure;
-use solana_sdk::poh_config::PohConfig;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, Mutex};
-use std::thread::{self, sleep, Builder, JoinHandle};
-use std::time::{Duration, Instant};
+use {
+    crate::poh_recorder::{PohRecorder, Record},
+    crossbeam_channel::Receiver,
+    log::*,
+    solana_ledger::poh::Poh,
+    solana_measure::measure::Measure,
+    solana_sdk::poh_config::PohConfig,
+    std::{
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, Mutex,
+        },
+        thread::{self, sleep, Builder, JoinHandle},
+        time::{Duration, Instant},
+    },
+};
 
 pub struct PohService {
     tick_producer: JoinHandle<()>,
@@ -348,20 +355,22 @@ impl PohService {
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use crate::poh_recorder::WorkingBank;
-    use rand::{thread_rng, Rng};
-    use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
-    use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
-    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
-    use solana_measure::measure::Measure;
-    use solana_perf::test_tx::test_tx;
-    use solana_runtime::bank::Bank;
-    use solana_sdk::clock;
-    use solana_sdk::hash::hash;
-    use solana_sdk::pubkey::Pubkey;
-    use solana_sdk::timing;
-    use std::time::Duration;
+    use {
+        super::*,
+        crate::poh_recorder::WorkingBank,
+        rand::{thread_rng, Rng},
+        solana_ledger::{
+            blockstore::Blockstore,
+            genesis_utils::{create_genesis_config, GenesisConfigInfo},
+            get_tmp_ledger_path,
+            leader_schedule_cache::LeaderScheduleCache,
+        },
+        solana_measure::measure::Measure,
+        solana_perf::test_tx::test_tx,
+        solana_runtime::bank::Bank,
+        solana_sdk::{clock, hash::hash, pubkey::Pubkey, timing},
+        std::time::Duration,
+    };
 
     #[test]
     #[ignore]
diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml
index 5734842a00adbc..255074a54d303a 100644
--- a/program-test/Cargo.toml
+++ b/program-test/Cargo.toml
@@ -5,28 +5,27 @@
 edition = "2018"
 license = "Apache-2.0"
 name = "solana-program-test"
 repository = "https://github.com/solana-labs/solana"
-version = "1.7.0"
+version = "1.7.11"
 
 [dependencies]
 async-trait = "0.1.42"
 base64 = "0.12.3"
 bincode = "1.3.1"
 chrono = "0.4.19"
-chrono-humanize = "0.1.1"
+chrono-humanize = "0.2.1"
 log = "0.4.11"
 mio = "0.7.6"
 serde = "1.0.112"
 serde_derive = "1.0.103"
-solana-banks-client = { path = "../banks-client", version = "=1.7.0" }
-solana-banks-server = { path = "../banks-server", version = "=1.7.0" }
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
+solana-banks-client = { path = "../banks-client", version = "=1.7.11" }
+solana-banks-server = { path = "../banks-server", version = "=1.7.11" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.11" }
+solana-logger = { path = "../logger", version = "=1.7.11" }
+solana-runtime = { path = "../runtime", version = "=1.7.11" }
+solana-sdk = { path = "../sdk", version = "=1.7.11" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.11" }
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 
 [dev-dependencies]
 assert_matches = "1.3.0"
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs
index b531fb44e4c5f1..636fa68d6040cc 100644
--- a/program-test/src/lib.rs
+++ b/program-test/src/lib.rs
@@ -19,7 +19,6 @@ use {
         clock::{Clock, Slot},
         entrypoint::{ProgramResult, SUCCESS},
         epoch_schedule::EpochSchedule,
-        feature_set::demote_sysvar_write_locks,
         fee_calculator::{FeeCalculator, FeeRateGovernor},
         genesis_config::{ClusterType, GenesisConfig},
         hash::Hash,
@@ -259,14 +258,12 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
             }
             panic!("Program id {} wasn't found in account_infos", program_id);
         };
-        let demote_sysvar_write_locks =
-            invoke_context.is_feature_active(&demote_sysvar_write_locks::id());
         // TODO don't have the caller's keyed_accounts so can't validate writer or signer escalation or deescalation yet
         let caller_privileges = message
             .account_keys
             .iter()
             .enumerate()
-            .map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks))
+            .map(|(i, _)| message.is_writable(i))
             .collect::<Vec<bool>>();
 
         stable_log::program_invoke(&logger, &program_id, invoke_context.invoke_depth());
@@ -290,7 +287,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
         'outer: for key in &message.account_keys {
             for account_info in account_infos {
                 if account_info.unsigned_key() == key {
-                    accounts.push(Rc::new(RefCell::new(ai_to_a(account_info))));
+                    accounts.push((*key, Rc::new(RefCell::new(ai_to_a(account_info)))));
                     continue 'outer;
                 }
             }
@@ -311,7 +308,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
             {
                 let mut program_signer = false;
                 for seeds in signers_seeds.iter() {
-                    let signer = Pubkey::create_program_address(&seeds, &caller).unwrap();
+                    let signer = Pubkey::create_program_address(seeds, &caller).unwrap();
                     if instruction_account.pubkey == signer {
                         program_signer = true;
                         break;
@@ -324,7 +321,7 @@
             }
         }
 
-        invoke_context.record_instruction(&instruction);
+        invoke_context.record_instruction(instruction);
 
         solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction(
             &message,
@@ -336,14 +333,12 @@
             .map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?;
 
         // Copy writeable account modifications back into the caller's AccountInfos
-        for (i, account_pubkey) in message.account_keys.iter().enumerate() {
-            if !message.is_writable(i, true) {
+        for (i, (pubkey, account)) in accounts.iter().enumerate().take(message.account_keys.len()) {
+            if !message.is_writable(i) {
                 continue;
             }
-            for account_info in account_infos {
-                if account_info.unsigned_key() == account_pubkey {
-                    let account = &accounts[i];
+                if account_info.unsigned_key() == pubkey {
                     **account_info.try_borrow_mut_lamports().unwrap() = account.borrow().lamports();
                     let mut data = account_info.try_borrow_mut_data()?;
@@ -393,6 +388,16 @@
 }
 
 pub fn find_file(filename: &str) -> Option<PathBuf> {
+    for dir in default_shared_object_dirs() {
+        let candidate = dir.join(&filename);
+        if candidate.exists() {
+            return Some(candidate);
+        }
+    }
+    None
+}
+
+fn default_shared_object_dirs() -> Vec<PathBuf> {
     let mut search_path = vec![];
     if let Ok(bpf_out_dir) = std::env::var("BPF_OUT_DIR") {
         search_path.push(PathBuf::from(bpf_out_dir));
     }
@@ -401,15 +406,8 @@ pub fn find_file(filename: &str) -> Option<PathBuf> {
     if let Ok(dir) = std::env::current_dir() {
         search_path.push(dir);
     }
-    trace!("search path: {:?}", search_path);
-
-    for path in search_path {
-        let candidate = path.join(&filename);
-        if candidate.exists() {
-            return Some(candidate);
-        }
-    }
-    None
+    trace!("BPF .so search path: {:?}", search_path);
+    search_path
 }
 
 pub fn read_file<P: AsRef<Path>>(path: P) -> Vec<u8> {
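The hunks above split the search-path construction out of `find_file` into `default_shared_object_dirs`, so that `add_program` (below) can also enumerate candidate `.so` files for its diagnostics. A standalone sketch of the lookup logic; the two directories shown (`BPF_OUT_DIR` when set, then the current directory) are the ones visible in this hunk, not necessarily the crate's full search order:

use std::{env, path::PathBuf};

fn shared_object_dirs() -> Vec<PathBuf> {
    let mut dirs = Vec::new();
    if let Ok(out_dir) = env::var("BPF_OUT_DIR") {
        dirs.push(PathBuf::from(out_dir)); // highest priority when set
    }
    if let Ok(cwd) = env::current_dir() {
        dirs.push(cwd); // fallback: the working directory
    }
    dirs
}

fn find_so(filename: &str) -> Option<PathBuf> {
    // First existing candidate wins, mirroring find_file() above.
    shared_object_dirs()
        .into_iter()
        .map(|dir| dir.join(filename))
        .find(|candidate| candidate.exists())
}

fn main() {
    match find_so("my_program.so") {
        Some(path) => println!("found {}", path.display()),
        None => println!("my_program.so not found on the search path"),
    }
}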
@@ -502,6 +500,13 @@ impl Default for ProgramTest {
 }
 
 impl ProgramTest {
+    /// Create a `ProgramTest`.
+    ///
+    /// This is a wrapper around [`default`] and [`add_program`]. See their documentation for more
+    /// details.
+    ///
+    /// [`default`]: #method.default
+    /// [`add_program`]: #method.add_program
     pub fn new(
         program_name: &str,
         program_id: Pubkey,
@@ -579,7 +584,7 @@ impl ProgramTest {
     /// Add a BPF program to the test environment.
     ///
-    /// `program_name` will also used to locate the BPF shared object in the current or fixtures
+    /// `program_name` will also be used to locate the BPF shared object in the current or fixtures
     /// directory.
     ///
     /// If `process_instruction` is provided, the natively built-program may be used instead of the
@@ -590,20 +595,7 @@
         program_id: Pubkey,
         process_instruction: Option<ProcessInstructionWithContext>,
     ) {
-        let loader = solana_sdk::bpf_loader::id();
-        let program_file = find_file(&format!("{}.so", program_name));
-
-        if process_instruction.is_none() && program_file.is_none() {
-            panic!("Unable to add program {} ({})", program_name, program_id);
-        }
-
-        if (program_file.is_some() && self.prefer_bpf) || process_instruction.is_none() {
-            let program_file = program_file.unwrap_or_else(|| {
-                panic!(
-                    "Program file data not available for {} ({})",
-                    program_name, program_id
-                );
-            });
+        let add_bpf = |this: &mut ProgramTest, program_file: PathBuf| {
             let data = read_file(&program_file);
             info!(
                 "\"{}\" BPF program from {}{}",
@@ -627,28 +619,87 @@ impl ProgramTest {
                     .unwrap_or_else(|| "".to_string())
             );
 
-            self.add_account(
+            this.add_account(
                 program_id,
                 Account {
                     lamports: Rent::default().minimum_balance(data.len()).min(1),
                     data,
-                    owner: loader,
+                    owner: solana_sdk::bpf_loader::id(),
                     executable: true,
                     rent_epoch: 0,
                 },
             );
-        } else {
+        };
+
+        let add_native = |this: &mut ProgramTest, process_fn: ProcessInstructionWithContext| {
             info!("\"{}\" program loaded as native code", program_name);
-            self.builtins.push(Builtin::new(
+            this.builtins
+                .push(Builtin::new(program_name, program_id, process_fn));
+        };
+
+        let warn_invalid_program_name = || {
+            let valid_program_names = default_shared_object_dirs()
+                .iter()
+                .filter_map(|dir| dir.read_dir().ok())
+                .flat_map(|read_dir| {
+                    read_dir.filter_map(|entry| {
+                        let path = entry.ok()?.path();
+                        if !path.is_file() {
+                            return None;
+                        }
+                        match path.extension()?.to_str()? {
+                            "so" => Some(path.file_stem()?.to_os_string()),
+                            _ => None,
+                        }
+                    })
+                })
+                .collect::<Vec<_>>();
+
+            if valid_program_names.is_empty() {
+                // This should be unreachable as `test-bpf` should guarantee at least one shared
+                // object exists somewhere.
+                warn!("No BPF shared objects found.");
+                return;
+            }
+
+            warn!(
+                "Possible bogus program name. Ensure the program name ({}) \
+                 matches one of the following recognizable program names:",
                 program_name,
-                program_id,
-                process_instruction.unwrap_or_else(|| {
-                    panic!(
-                        "Program processor not available for {} ({})",
-                        program_name, program_id
-                    );
-                }),
-            ));
+            );
+            for name in valid_program_names {
+                warn!(" - {}", name.to_str().unwrap());
+            }
+        };
+
+        let program_file = find_file(&format!("{}.so", program_name));
+        match (self.prefer_bpf, program_file, process_instruction) {
+            // If BPF is preferred (i.e., `test-bpf` is invoked) and a BPF shared object exists,
+            // use that as the program data.
+            (true, Some(file), _) => add_bpf(self, file),
+
+            // If BPF is not required (i.e., we were invoked with `test`), use the provided
+            // processor function as is.
+            //
+            // TODO: figure out why tests hang if a processor panics when running native code.
+            (false, _, Some(process)) => add_native(self, process),
+
+            // Invalid: `test-bpf` invocation with no matching BPF shared object.
+            (true, None, _) => {
+                warn_invalid_program_name();
+                panic!(
+                    "Program file data not available for {} ({})",
+                    program_name, program_id
+                );
+            }
+
+            // Invalid: regular `test` invocation without a processor.
+            (false, _, None) => {
+                panic!(
+                    "Program processor not available for {} ({})",
+                    program_name, program_id
+                );
+            }
         }
     }
@@ -713,7 +764,7 @@ impl ProgramTest {
         // Add commonly-used SPL programs as a convenience to the user
         for (program_id, account) in programs::spl_programs(&Rent::default()).iter() {
-            bank.store_account(program_id, &account);
+            bank.store_account(program_id, account);
         }
 
         // User-supplied additional builtins
@@ -726,10 +777,10 @@ impl ProgramTest {
         }
 
         for (address, account) in self.accounts.iter() {
-            if bank.get_account(&address).is_some() {
+            if bank.get_account(address).is_some() {
                 info!("Overriding account at {}", address);
             }
-            bank.store_account(&address, &account);
+            bank.store_account(address, account);
         }
         bank.set_capitalization();
         if let Some(max_units) = self.bpf_compute_max_units {
diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs
index 9854834476908f..bb5912328c3c1b 100644
--- a/program-test/src/programs.rs
+++ b/program-test/src/programs.rs
@@ -18,7 +18,7 @@ mod spl_associated_token_account {
 }
 
 static SPL_PROGRAMS: &[(Pubkey, &[u8])] = &[
-    (spl_token::ID, include_bytes!("programs/spl_token-3.1.0.so")),
+    (spl_token::ID, include_bytes!("programs/spl_token-3.2.0.so")),
     (
         spl_memo_1_0::ID,
         include_bytes!("programs/spl_memo-1.0.0.so"),
@@ -29,7 +29,7 @@ static SPL_PROGRAMS: &[(Pubkey, &[u8])] = &[
     ),
     (
         spl_associated_token_account::ID,
-        include_bytes!("programs/spl_associated-token-account-1.0.1.so"),
+        include_bytes!("programs/spl_associated-token-account-1.0.3.so"),
     ),
 ];
diff --git a/program-test/src/programs/spl_associated-token-account-1.0.1.so b/program-test/src/programs/spl_associated-token-account-1.0.1.so
deleted file mode 100644
index c50d9191acee8a..00000000000000
Binary files a/program-test/src/programs/spl_associated-token-account-1.0.1.so and /dev/null differ
diff --git a/program-test/src/programs/spl_associated-token-account-1.0.3.so b/program-test/src/programs/spl_associated-token-account-1.0.3.so
new file mode 100644
index 00000000000000..9b297786912d98
Binary files /dev/null and b/program-test/src/programs/spl_associated-token-account-1.0.3.so differ
diff --git a/program-test/src/programs/spl_token-3.1.0.so b/program-test/src/programs/spl_token-3.1.0.so
deleted file mode 100644
index 53079d03b5ba6a..00000000000000
Binary files a/program-test/src/programs/spl_token-3.1.0.so and /dev/null differ
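For context on the `add_program` dispatch above: the usual entry point is `ProgramTest::new`, which forwards to `add_program`. A hedged usage sketch — the program name and the no-op processor are hypothetical, and the `processor!` invocation follows the crate's documented pattern (under `cargo test-bpf` a matching `.so` on the search path wins; under plain `cargo test` the native processor is registered instead):

use {
    solana_program_test::{processor, ProgramTest},
    solana_sdk::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey},
};

// Hypothetical native entrypoint standing in for a real program.
fn noop_process_instruction(
    _program_id: &Pubkey,
    _accounts: &[AccountInfo],
    _instruction_data: &[u8],
) -> ProgramResult {
    Ok(())
}

fn main() {
    let program_id = Pubkey::new_unique();
    // "noop" is an assumed name: with prefer_bpf set this resolves to
    // noop.so via find_file(); otherwise the processor above is used.
    let _program_test =
        ProgramTest::new("noop", program_id, processor!(noop_process_instruction));
}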
diff --git a/program-test/src/programs/spl_token-3.2.0.so b/program-test/src/programs/spl_token-3.2.0.so
new file mode 100644
index 00000000000000..9b8c9549fceb5f
Binary files /dev/null and b/program-test/src/programs/spl_token-3.2.0.so differ
diff --git a/program-test/tests/warp.rs b/program-test/tests/warp.rs
index 9ed46aac1038b9..13758afd5e4bad 100644
--- a/program-test/tests/warp.rs
+++ b/program-test/tests/warp.rs
@@ -12,6 +12,10 @@ use {
         pubkey::Pubkey,
         rent::Rent,
         signature::{Keypair, Signer},
+        stake::{
+            instruction as stake_instruction,
+            state::{Authorized, Lockup, StakeState},
+        },
         system_instruction, system_program,
         sysvar::{
             clock,
@@ -20,10 +24,6 @@ use {
         },
         transaction::{Transaction, TransactionError},
     },
-    solana_stake_program::{
-        stake_instruction,
-        stake_state::{Authorized, Lockup, StakeState},
-    },
     solana_vote_program::{
         vote_instruction,
         vote_state::{VoteInit, VoteState},
@@ -277,7 +277,7 @@ async fn stake_rewards_from_warp() {
     assert_matches!(
         stake
             .delegation
-            .stake_activating_and_deactivating(clock.epoch, Some(&stake_history), true,),
+            .stake_activating_and_deactivating(clock.epoch, Some(&stake_history)),
         (_, 0, 0)
     );
 }
diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock
index 03fc819703a93e..1f625a662eca20 100644
--- a/programs/bpf/Cargo.lock
+++ b/programs/bpf/Cargo.lock
@@ -42,13 +42,19 @@
 dependencies = [
 "memchr",
 ]
 
+[[package]]
+name = "aliasable"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
+
 [[package]]
 name = "ansi_term"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
 dependencies = [
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -100,7 +106,7 @@
 checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
 dependencies = [
 "hermit-abi",
 "libc",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -217,9 +223,9 @@
 checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
 
 [[package]]
 name = "borsh"
-version = "0.8.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5a26c53ddf60281f18e7a29b20db7ba3db82a9d81b9650bfaa02d646f50d364"
+checksum = "4fcabb02816fdadf90866dc9a7824491ccb63d69f55375a266dc03509ac68d36"
 dependencies = [
 "borsh-derive",
 "hashbrown",
 ]
 
 [[package]]
 name = "borsh-derive"
-version = "0.8.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b637a47728b78a78cd7f4b85bf06d71ef4221840e059a38f048be2422bf673b2"
+checksum = "4bd16f0729b89f0a212b0e2e1d19cc6593df63f771161a11863967780e2d033d"
 dependencies = [
 "borsh-derive-internal",
 "borsh-schema-derive-internal",
@@ -240,9 +246,9 @@
 
 [[package]]
 name = "borsh-derive-internal"
-version = "0.8.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d813fa25eb0bed78c36492cff4415f38c760d6de833d255ba9095bd8ebb7d725"
+checksum = "1e321a130a3ac4b88eb59a6d670bde11eec9721a397b77e0f2079060e2a1b785"
 dependencies = [
 "proc-macro2 1.0.24",
 "quote 1.0.6",
@@ -251,9 +257,9 @@
 
 [[package]]
 name = "borsh-schema-derive-internal"
-version = "0.8.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf78ee4a98c8cb9eba1bac3d3e2a1ea3d7673c719ce691e67b5cbafc472d3b7"
+checksum = "15151a485164b319cc7a5160fe4316dc469a27993f71b73d7617dc9032ff0fd7"
 dependencies = [
 "proc-macro2 1.0.24",
 "quote 1.0.6",
@@ -376,14 +382,14 @@
 dependencies = [
 "num-traits",
 "serde",
 "time",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
 name = "chrono-humanize"
-version = "0.1.2"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8164ae3089baf04ff71f32aeb70213283dcd236dce8bc976d00b17a458f5f71c"
+checksum = "2eddc119501d583fd930cb92144e605f44e0252c38dd89d9247fffa1993375cb"
 dependencies = [
 "chrono",
 ]
@@ -438,15 +444,15 @@
 dependencies = [
 "terminal_size",
 "termios",
 "unicode-width",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 "winapi-util",
 ]
 
 [[package]]
 name = "console"
-version = "0.13.0"
+version = "0.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a50aab2529019abfabfa93f1e6c41ef392f91fbf179b347a7e96abb524884a08"
+checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45"
 dependencies = [
 "encode_unicode",
 "lazy_static",
@@ -454,8 +460,7 @@
 "regex",
 "terminal_size",
 "unicode-width",
- "winapi 0.3.8",
- "winapi-util",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -523,9 +528,9 @@
 dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.7.3"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
+checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed"
 dependencies = [
 "crossbeam-epoch 0.8.2",
 "crossbeam-utils 0.7.2",
@@ -534,9 +539,9 @@
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
+checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
 dependencies = [
 "cfg-if 1.0.0",
 "crossbeam-epoch 0.9.1",
@@ -611,16 +616,6 @@
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
 
-[[package]]
-name = "crypto-mac"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
-dependencies = [
- "generic-array 0.12.3",
- "subtle 1.0.0",
-]
-
 [[package]]
 name = "crypto-mac"
 version = "0.8.0"
@@ -628,7 +623,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
 dependencies = [
 "generic-array 0.14.3",
- "subtle 2.2.2",
+ "subtle",
 ]
 
 [[package]]
@@ -638,7 +633,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "58bcd97a54c7ca5ce2f6eb16f6bede5b0ab5f0055fedc17d2f0b4466e21671ca"
 dependencies = [
 "generic-array 0.14.3",
- "subtle 2.2.2",
+ "subtle",
 ]
 
 [[package]]
@@ -648,7 +643,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6"
 dependencies = [
 "generic-array 0.14.3",
- "subtle 2.2.2",
+ "subtle",
 ]
 
 [[package]]
@@ -660,7 +655,7 @@
 dependencies = [
 "byteorder 1.3.4",
 "digest 0.8.1",
 "rand_core 0.5.1",
- "subtle 2.2.2",
+ "subtle",
 "zeroize",
 ]
 
@@ -673,7 +668,7 @@
 dependencies = [
 "byteorder 1.3.4",
 "digest 0.9.0",
 "rand_core 0.5.1",
- "subtle 2.2.2",
+ "subtle",
 "zeroize",
 ]
 
@@ -764,7 +759,7 @@
 checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
 dependencies = [
 "libc",
 "redox_users",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -794,7 +789,7 @@
 dependencies = [
 "rand 0.7.3",
 "serde",
 "serde_bytes",
- "sha2 0.9.2",
+ "sha2",
 "zeroize",
 ]
 
@@ -808,7 +803,7 @@
 dependencies = [
 "ed25519-dalek",
 "failure",
 "hmac 0.9.0",
- "sha2 0.9.2",
+ "sha2",
 ]
 
 [[package]]
@@ -922,7 +917,7 @@
 dependencies = [
 "cfg-if 0.1.10",
 "libc",
 "redox_syscall 0.1.56",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1078,7 +1073,7 @@
 dependencies = [
 "futures-sink",
 "futures-task",
 "memchr",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
 "pin-utils",
 "proc-macro-hack",
 "proc-macro-nested",
@@ -1112,7 +1107,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e692e296bfac1d2533ef168d0b60ff5897b8b70a4009276834014dd8924cc028"
 dependencies = [
 "libc",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1162,9 +1157,9 @@
 dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.3.1"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78"
+checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726"
 dependencies = [
 "bytes 1.0.1",
 "fnv",
@@ -1174,7 +1169,7 @@
 dependencies = [
 "http",
 "indexmap",
 "slab",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tokio-util",
 "tracing",
 ]
 
@@ -1223,16 +1218,6 @@
 dependencies = [
 "pkg-config",
 ]
 
-[[package]]
-name = "hmac"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695"
-dependencies = [
- "crypto-mac 0.7.0",
- "digest 0.8.1",
-]
-
 [[package]]
 name = "hmac"
 version = "0.8.1"
@@ -1265,13 +1250,13 @@
 dependencies = [
 
 [[package]]
 name = "hmac-drbg"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b"
+checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1"
 dependencies = [
- "digest 0.8.1",
- "generic-array 0.12.3",
- "hmac 0.7.1",
+ "digest 0.9.0",
+ "generic-array 0.14.3",
+ "hmac 0.8.1",
 ]
 
 [[package]]
@@ -1297,15 +1282,15 @@
 dependencies = [
 
 [[package]]
 name = "httparse"
-version = "1.3.4"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9"
+checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68"
 
 [[package]]
 name = "httpdate"
-version = "0.3.2"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47"
+checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440"
 
 [[package]]
 name = "humantime"
@@ -1315,9 +1300,9 @@
 checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a"
 
 [[package]]
 name = "hyper"
-version = "0.14.4"
+version = "0.14.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7"
+checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11"
 dependencies = [
 "bytes 1.0.1",
 "futures-channel",
@@ -1329,9 +1314,9 @@
 dependencies = [
 "httparse",
 "httpdate",
 "itoa",
- "pin-project",
- "socket2",
- "tokio 1.4.0",
+ "pin-project-lite 0.2.7",
+ "socket2 0.4.1",
+ "tokio 1.8.1",
 "tower-service",
 "tracing",
 "want",
 ]
@@ -1347,7 +1332,7 @@
 dependencies = [
 "hyper",
 "log",
 "rustls",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tokio-rustls",
 "webpki",
 ]
 
 [[package]]
@@ -1379,7 +1364,7 @@
 version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4"
 dependencies = [
- "console 0.13.0",
+ "console 0.14.1",
 "lazy_static",
 "number_prefix",
 "regex",
@@ -1500,9 +1485,9 @@
 dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.81"
+version = "0.2.98"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
+checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
 
 [[package]]
 name = "libloading"
@@ -1510,25 +1495,57 @@
 version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2cadb8e769f070c45df05c78c7520eb4cd17061d4ab262e43cfc68b4d00ac71c"
 dependencies = [
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
 name = "libsecp256k1"
-version = "0.3.5"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962"
+checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7"
 dependencies = [
 "arrayref",
- "crunchy",
- "digest 0.8.1",
+ "base64 0.12.3",
+ "digest 0.9.0",
 "hmac-drbg",
+ "libsecp256k1-core",
+ "libsecp256k1-gen-ecmult",
+ "libsecp256k1-gen-genmult",
 "rand 0.7.3",
- "sha2 0.8.2",
- "subtle 2.2.2",
+ "serde",
+ "sha2",
 "typenum",
 ]
 
+[[package]]
+name = "libsecp256k1-core"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee11012b293ea30093c129173cac4335513064094619f4639a25b310fd33c11"
+dependencies = [
+ "crunchy",
+ "digest 0.9.0",
+ "subtle",
+]
+
+[[package]]
+name = "libsecp256k1-gen-ecmult"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32239626ffbb6a095b83b37a02ceb3672b2443a87a000a884fc3c4d16925c9c0"
+dependencies = [
+ "libsecp256k1-core",
+]
+
+[[package]]
+name = "libsecp256k1-gen-genmult"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76acb433e21d10f5f9892b1962c2856c58c7f39a9e4bd68ac82b9436a0ffd5b9"
+dependencies = [
+ "libsecp256k1-core",
+]
+
 [[package]]
 name = "linked-hash-map"
 version = "0.5.4"
@@ -1651,7 +1668,7 @@
 dependencies = [
 "log",
 "miow 0.3.6",
 "ntapi",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1683,8 +1700,8 @@
 version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897"
 dependencies = [
- "socket2",
- "winapi 0.3.8",
+ "socket2 0.3.17",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1713,7 +1730,7 @@
 checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae"
 dependencies = [
 "cfg-if 0.1.10",
 "libc",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1734,7 +1751,7 @@
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2"
 dependencies = [
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1889,21 +1906,23 @@
 dependencies = [
 
 [[package]]
 name = "ouroboros"
-version = "0.5.1"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc04551635026d3ac7bc646698ea1836a85ed2a26b7094fe1d15d8b14854c4a2"
+checksum = "f0e3827c8742f21283e9374adf7905984e7b85731ad94a203137b56955d818b3"
 dependencies = [
+ "aliasable",
 "ouroboros_macro",
 "stable_deref_trait",
 ]
 
 [[package]]
 name = "ouroboros_macro"
-version = "0.5.1"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cec33dfceabec83cd0e95a5ce9d20e76ab3a5cbfef59659b8c927f69b93ed8ae"
+checksum = "1ef06077e08eac7e2aeaacfbd113a25fcb1b9b903437bd89d2bd513da6e04112"
 dependencies = [
 "Inflector",
+ "proc-macro-error",
 "proc-macro2 1.0.24",
 "quote 1.0.6",
 "syn 1.0.67",
@@ -1953,7 +1972,7 @@
 dependencies = [
 "redox_syscall 0.1.56",
 "rustc_version",
 "smallvec 0.6.14",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1967,7 +1986,7 @@
 dependencies = [
 "libc",
 "redox_syscall 0.1.56",
 "smallvec 1.6.1",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1981,7 +2000,7 @@
 dependencies = [
 "libc",
 "redox_syscall 0.1.56",
 "smallvec 1.6.1",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -2045,9 +2064,9 @@
 checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f"
 
 [[package]]
 name = "pin-project-lite"
-version = "0.2.4"
+version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827"
+checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443"
 
 [[package]]
 name = "pin-utils"
@@ -2082,6 +2101,30 @@
 dependencies = [
 "toml",
 ]
 
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2 1.0.24",
+ "quote 1.0.6",
+ "syn 1.0.67",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2 1.0.24",
+ "quote 1.0.6",
+ "version_check",
+]
+
 [[package]]
 name = "proc-macro-hack"
 version = "0.5.19"
@@ -2237,7 +2280,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
 dependencies = [
 "autocfg",
- "crossbeam-deque 0.8.0",
+ "crossbeam-deque 0.8.1",
 "either",
 "rayon-core",
 ]
 
@@ -2249,7 +2292,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
 dependencies = [
 "crossbeam-channel 0.5.0",
- "crossbeam-deque 0.8.0",
+ "crossbeam-deque 0.8.1",
 "crossbeam-utils 0.8.1",
 "lazy_static",
 "num_cpus",
@@ -2282,21 +2325,20 @@
 dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.3.9"
+version = "1.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
+checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759"
 dependencies = [
 "aho-corasick",
 "memchr",
 "regex-syntax",
- "thread_local",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.18"
+version = "0.6.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
 
 [[package]]
 name = "remove_dir_all"
@@ -2304,7 +2346,7 @@
 version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
 dependencies = [
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -2328,12 +2370,12 @@
 dependencies = [
 "log",
 "mime",
 "percent-encoding",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
 "rustls",
 "serde",
 "serde_json",
 "serde_urlencoded",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tokio-rustls",
 "url",
 "wasm-bindgen",
@@ -2355,7 +2397,7 @@
 dependencies = [
 "spin",
 "untrusted",
 "web-sys",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -2365,7 +2407,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f"
 dependencies = [
 "libc",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -2430,7 +2472,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75"
 dependencies = [
 "lazy_static",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -2536,9 +2578,9 @@
 dependencies = [
 
 [[package]]
 name = "serde_bytes"
-version = "0.11.4"
+version = "0.11.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bf487fbf5c6239d7ea2ff8b10cb6b811cd4b5080d1c2aeed1dec18753c06e10"
+checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9"
 dependencies = [
 "serde",
 ]
 
@@ -2601,18 +2643,6 @@
 dependencies = [
 "opaque-debug 0.2.3",
 ]
 
-[[package]]
-name = "sha2"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69"
-dependencies = [
- "block-buffer 0.7.3",
- "digest 0.8.1",
- "fake-simd",
- "opaque-debug 0.2.3",
-]
-
 [[package]]
 name = "sha2"
 version = "0.9.2"
@@ -2683,12 +2713,22 @@
 dependencies = [
 "cfg-if 1.0.0",
 "libc",
 "redox_syscall 0.1.56",
- "winapi 0.3.8",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "socket2"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad"
+dependencies = [
+ "libc",
+ "winapi 0.3.9",
 ]
 
 [[package]]
 name = "solana-account-decoder"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "Inflector",
 "base64 0.12.3",
@@ -2701,7 +2741,6 @@
 dependencies = [
 "serde_json",
 "solana-config-program",
 "solana-sdk",
- "solana-stake-program",
 "solana-vote-program",
 "spl-token",
 "thiserror",
@@ -2710,7 +2749,7 @@
 
 [[package]]
 name = "solana-banks-client"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "borsh",
@@ -2718,16 +2757,16 @@
 dependencies = [
 "futures 0.3.12",
 "mio 0.7.7",
 "solana-banks-interface",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 "solana-sdk",
 "tarpc",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tokio-serde",
 ]
 
 [[package]]
 name = "solana-banks-interface"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "mio 0.7.7",
 "serde",
@@ -2737,7 +2776,7 @@
 
 [[package]]
 name = "solana-banks-server"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "futures 0.3.12",
@@ -2748,17 +2787,18 @@
 dependencies = [
 "solana-runtime",
 "solana-sdk",
 "tarpc",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tokio-serde",
 "tokio-stream",
 ]
 
 [[package]]
 name = "solana-bpf-loader-program"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "byteorder 1.3.4",
+ "libsecp256k1",
 "log",
 "num-derive 0.3.0",
 "num-traits",
@@ -2773,7 +2813,7 @@
 dependencies = [
 
 [[package]]
 name = "solana-bpf-programs"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "byteorder 1.3.4",
@@ -2785,7 +2825,7 @@
 dependencies = [
 "solana-account-decoder",
 "solana-bpf-loader-program",
 "solana-cli-output",
- "solana-logger 1.7.0",
+ "solana-logger 1.7.11",
 "solana-measure",
 "solana-runtime",
 "solana-sdk",
@@ -2796,264 +2836,288 @@
 dependencies = [
 
 [[package]]
 name = "solana-bpf-rust-128bit"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "solana-bpf-rust-128bit-dep",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-128bit-dep"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-alloc"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-call-depth"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-caller-access"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-custom-heap"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-dep-crate"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "byteorder 1.3.4",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-deprecated-loader"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-dup-accounts"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-error-handling"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "num-derive 0.2.5",
 "num-traits",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 "thiserror",
 ]
 
 [[package]]
 name = "solana-bpf-rust-external-spend"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-finalize"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-instruction-introspection"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-invoke"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "solana-bpf-rust-invoked",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-invoke-and-error"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-invoke-and-ok"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-invoke-and-return"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-invoked"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-iter"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-many-args"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "solana-bpf-rust-many-args-dep",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-many-args-dep"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-mem"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
+ "solana-program-test",
+ "solana-sdk",
+]
+
+[[package]]
+name = "solana-bpf-rust-membuiltins"
+version = "1.7.11"
+dependencies = [
+ "solana-bpf-rust-mem",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-noop"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-panic"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-param-passing"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "solana-bpf-rust-param-passing-dep",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-param-passing-dep"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-rand"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "getrandom 0.1.14",
 "rand 0.7.3",
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
+]
+
+[[package]]
+name = "solana-bpf-rust-ro-account_modify"
+version = "1.7.11"
+dependencies = [
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-ro-modify"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-sanity"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
+]
+
+[[package]]
+name = "solana-bpf-rust-secp256k1-recover"
+version = "1.7.11"
+dependencies = [
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-sha"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-spoof1"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-spoof1-system"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-sysvar"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 "solana-program-test",
 "solana-sdk",
 ]
 
 [[package]]
 name = "solana-bpf-rust-upgradeable"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-bpf-rust-upgraded"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
- "solana-program 1.7.0",
+ "solana-program 1.7.11",
 ]
 
 [[package]]
 name = "solana-clap-utils"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "chrono",
 "clap",
@@ -3068,7 +3132,7 @@
 dependencies = [
 
 [[package]]
 name = "solana-cli-config"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "dirs-next",
 "lazy_static",
@@ -3080,12 +3144,13 @@
 dependencies = [
 
 [[package]]
 name = "solana-cli-output"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "Inflector",
 "base64 0.13.0",
 "chrono",
- "console 0.11.3",
+ "clap",
+ "console 0.14.1",
 "humantime",
 "indicatif",
 "serde",
@@ -3095,7 +3160,6 @@
 dependencies = [
 "solana-clap-utils",
 "solana-client",
 "solana-sdk",
- "solana-stake-program",
 "solana-transaction-status",
 "solana-vote-program",
 "spl-memo",
 ]
 
 [[package]]
 name = "solana-client"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "base64 0.13.0",
 "bincode",
@@ -3128,14 +3192,14 @@
 dependencies = [
 "solana-version",
 "solana-vote-program",
 "thiserror",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 "tungstenite",
 "url",
 ]
 
 [[package]]
 name = "solana-config-program"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "chrono",
@@ -3148,7 +3212,7 @@
 dependencies = [
 
 [[package]]
 name = "solana-crate-features"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "backtrace",
 "bytes 0.4.12",
@@ -3166,12 +3230,12 @@
 dependencies = [
 "syn 0.15.44",
 "syn 1.0.67",
 "tokio 0.1.22",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
 name = "solana-faucet"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bincode",
 "byteorder 1.3.4",
@@ -3181,20 +3245,20 @@
 dependencies = [
 "serde_derive",
 "solana-clap-utils",
 "solana-cli-config",
- "solana-logger 1.7.0",
+ "solana-logger 1.7.11",
 "solana-metrics",
 "solana-sdk",
 "solana-version",
 "spl-memo",
 "thiserror",
- "tokio 1.4.0",
+ "tokio 1.8.1",
 ]
 
 [[package]]
 name = "solana-frozen-abi"
-version = "1.6.4"
+version = "1.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f81ac3925c8995d935d3af27390cf7b748a81837a29feb75a00e115f40dae6dc"
+checksum = "b0b98d31e0662fedf3a1ee30919c655713874d578e19e65affe46109b1b927f9"
 dependencies = [
 "bs58",
 "bv",
@@ -3204,15 +3268,15 @@
 dependencies = [
 "rustc_version",
 "serde",
 "serde_derive",
- "sha2 0.9.2",
- "solana-frozen-abi-macro 1.6.4",
- "solana-logger 1.6.4",
+ "sha2",
+ "solana-frozen-abi-macro 1.7.6",
+ "solana-logger 1.7.6",
 "thiserror",
 ]
 
 [[package]]
 name = "solana-frozen-abi"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "bs58",
 "bv",
@@ -3222,19 +3286,18 @@
 dependencies = [
 "rustc_version",
 "serde",
 "serde_derive",
- "sha2 0.9.2",
- "solana-frozen-abi-macro 1.7.0",
- "solana-logger 1.7.0",
+ "sha2",
+ "solana-frozen-abi-macro 1.7.11",
+ "solana-logger 1.7.11",
 "thiserror",
 ]
 
 [[package]]
 name = "solana-frozen-abi-macro"
-version = "1.6.4"
+version = "1.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae448159038e470f3b2dd1ab0d219246211f940a10bf1e656a02cb5c2d897437"
+checksum = "ceac6e8ad1a784c92ff5f3d6ad68a8d664d389b08055b674c38b2b9abb69e6d4"
 dependencies = [
- "lazy_static",
 "proc-macro2 1.0.24",
 "quote 1.0.6",
 "rustc_version",
@@ -3243,7 +3306,7 @@
 dependencies = [
 
 [[package]]
 name = "solana-frozen-abi-macro"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "proc-macro2 1.0.24",
 "quote 1.0.6",
@@ -3253,9 +3316,9 @@
 dependencies = [
 
 [[package]]
 name = "solana-logger"
-version = "1.6.4"
+version = "1.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fa7bddd7b89c26c6e3ef4af9b47d6bc8d60888559affb5160f5ade18c0cd058"
+checksum = "ec7c514fe57f8c5042fa88c19f5711c67f264db723d9d79379fcb78dd1f09bbf"
 dependencies = [
 "env_logger",
 "lazy_static",
@@ -3264,7 +3327,7 @@
 
 [[package]]
 name = "solana-logger"
-version = "1.7.0"
+version = "1.7.11"
 dependencies = [
 "env_logger",
 "lazy_static",
@@ -3273,7 +3336,7 @@
 
 [[package]]
 name = "solana-measure"
-version = 
"1.7.0" +version = "1.7.11" dependencies = [ "log", "solana-metrics", @@ -3282,7 +3345,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.7.0" +version = "1.7.11" dependencies = [ "env_logger", "gethostname", @@ -3294,7 +3357,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "clap", @@ -3303,21 +3366,23 @@ dependencies = [ "rand 0.7.3", "serde", "serde_derive", - "socket2", + "socket2 0.3.17", "solana-clap-utils", - "solana-logger 1.7.0", + "solana-logger 1.7.11", + "solana-sdk", "solana-version", - "tokio 1.4.0", + "tokio 1.8.1", "url", ] [[package]] name = "solana-program" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "231bf4cd42997b6f34e7c74a1449e8a1e8816fd83662fc3a5a0922da48fc667b" +checksum = "3bfe6a5dfc5372c0a946018ecdd8115e38af78cea8275bac48cf3d105c6b1fb3" dependencies = [ "bincode", + "blake3", "borsh", "borsh-derive", "bs58", @@ -3326,6 +3391,7 @@ dependencies = [ "hex", "itertools 0.9.0", "lazy_static", + "libsecp256k1", "log", "num-derive 0.3.0", "num-traits", @@ -3335,17 +3401,18 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "sha2 0.9.2", - "solana-frozen-abi 1.6.4", - "solana-frozen-abi-macro 1.6.4", - "solana-logger 1.6.4", - "solana-sdk-macro 1.6.4", + "sha2", + "sha3", + "solana-frozen-abi 1.7.6", + "solana-frozen-abi-macro 1.7.6", + "solana-logger 1.7.6", + "solana-sdk-macro 1.7.6", "thiserror", ] [[package]] name = "solana-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "blake3", @@ -3357,6 +3424,7 @@ dependencies = [ "hex", "itertools 0.9.0", "lazy_static", + "libsecp256k1", "log", "num-derive 0.3.0", "num-traits", @@ -3366,18 +3434,18 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "sha2 0.9.2", + "sha2", "sha3", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", - "solana-sdk-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", + "solana-sdk-macro 1.7.11", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.7.0" +version = "1.7.11" dependencies = [ "async-trait", "base64 0.12.3", @@ -3391,17 +3459,17 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.7.0", + "solana-logger 1.7.11", "solana-runtime", "solana-sdk", "solana-vote-program", "thiserror", - "tokio 1.4.0", + "tokio 1.8.1", ] [[package]] name = "solana-rayon-threadlimit" -version = "1.7.0" +version = "1.7.11" dependencies = [ "lazy_static", "num_cpus", @@ -3409,10 +3477,10 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.7.0" +version = "1.7.11" dependencies = [ "base32", - "console 0.11.3", + "console 0.14.1", "dialoguer", "hidapi", "log", @@ -3428,7 +3496,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.7.0" +version = "1.7.11" dependencies = [ "arrayref", "bincode", @@ -3458,9 +3526,9 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-measure", "solana-metrics", "solana-rayon-threadlimit", @@ -3477,7 +3545,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.7.0" +version = "1.7.11" dependencies = [ "assert_matches", "bincode", @@ -3510,23 +3578,23 @@ 
dependencies = [ "serde_bytes", "serde_derive", "serde_json", - "sha2 0.9.2", + "sha2", "sha3", "solana-crate-features", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", - "solana-program 1.7.0", - "solana-sdk-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", + "solana-program 1.7.11", + "solana-sdk-macro 1.7.11", "thiserror", "uriparse", ] [[package]] name = "solana-sdk-macro" -version = "1.6.4" +version = "1.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c805f1e32677f8ec0cc0b2f470833a0d5ab0922f068e52be3a4a93b468c9c70" +checksum = "84710ce45a21cccd9f2b09d8e9aad529080bb2540f27b1253874b6e732b465b9" dependencies = [ "bs58", "proc-macro2 1.0.24", @@ -3537,7 +3605,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bs58", "proc-macro2 1.0.24", @@ -3548,20 +3616,14 @@ dependencies = [ [[package]] name = "solana-secp256k1-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ - "bincode", - "digest 0.9.0", - "libsecp256k1", - "rand 0.7.3", - "sha3", - "solana-logger 1.7.0", "solana-sdk", ] [[package]] name = "solana-stake-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "log", @@ -3571,8 +3633,8 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", "solana-metrics", "solana-sdk", "solana-vote-program", @@ -3581,7 +3643,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.7.0" +version = "1.7.11" dependencies = [ "Inflector", "base64 0.12.3", @@ -3594,7 +3656,6 @@ dependencies = [ "solana-account-decoder", "solana-runtime", "solana-sdk", - "solana-stake-program", "solana-vote-program", "spl-associated-token-account", "spl-memo", @@ -3604,21 +3665,21 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.7.0" +version = "1.7.11" dependencies = [ "log", "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-sdk", ] [[package]] name = "solana-vote-program" -version = "1.7.0" +version = "1.7.11" dependencies = [ "bincode", "log", @@ -3627,9 +3688,9 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.7.0", - "solana-frozen-abi-macro 1.7.0", - "solana-logger 1.7.0", + "solana-frozen-abi 1.7.11", + "solana-frozen-abi-macro 1.7.11", + "solana-logger 1.7.11", "solana-metrics", "solana-sdk", "thiserror", @@ -3662,11 +3723,11 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spl-associated-token-account" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4adc47eebe5d2b662cbaaba1843719c28a67e5ec5d0460bc3ca60900a51f74e2" +checksum = "393e2240d521c3dd770806bff25c2c00d761ac962be106e14e22dd912007f428" dependencies = [ - "solana-program 1.6.4", + "solana-program 1.7.6", "spl-token", ] @@ -3676,20 +3737,20 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.6.4", + "solana-program 1.7.6", ] [[package]] name = "spl-token" -version = 
"3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b795e50d15dfd35aa5460b80a16414503a322be115a417a43db987c5824c6798" +checksum = "93bfdd5bd7c869cb565c7d7635c4fafe189b988a0bdef81063cd9585c6b8dc01" dependencies = [ "arrayref", "num-derive 0.3.0", "num-traits", "num_enum", - "solana-program 1.6.4", + "solana-program 1.7.6", "thiserror", ] @@ -3711,12 +3772,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - [[package]] name = "subtle" version = "2.2.2" @@ -3765,13 +3820,12 @@ dependencies = [ [[package]] name = "tar" -version = "0.4.29" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8a4c1d0bee3230179544336c15eefb563cf0302955d962e456542323e8c2e8a" +checksum = "d6f5515d3add52e0bbdcad7b83c388bb36ba7b754dda3b5f5bc2d38640cdba5c" dependencies = [ "filetime", "libc", - "redox_syscall 0.1.56", "xattr", ] @@ -3791,7 +3845,7 @@ dependencies = [ "serde", "static_assertions", "tarpc-plugins", - "tokio 1.4.0", + "tokio 1.8.1", "tokio-serde", "tokio-util", ] @@ -3818,7 +3872,7 @@ dependencies = [ "rand 0.8.2", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3837,7 +3891,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bd2d183bd3fac5f5fe38ddbeb4dc9aec4a39a9d7d59e7491d900302da01cbe1" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3878,15 +3932,6 @@ dependencies = [ "syn 1.0.67", ] -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - [[package]] name = "time" version = "0.1.43" @@ -3894,7 +3939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3909,7 +3954,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.2", + "sha2", "thiserror", "unicode-normalization", "zeroize", @@ -3956,9 +4001,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.4.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985" dependencies = [ "autocfg", "bytes 1.0.1", @@ -3968,10 +4013,10 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot 0.11.1", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.7", "signal-hook-registry", "tokio-macros", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4064,7 +4109,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.4.0", + "tokio 1.8.1", "webpki", ] @@ -4091,8 +4136,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e177a5d8c3bf36de9ebe6d58537d8879e964332f93fb3339e43f618c81361af0" dependencies = [ "futures-core", - "pin-project-lite 
0.2.4", - "tokio 1.4.0", + "pin-project-lite 0.2.7", + "tokio 1.8.1", ] [[package]] @@ -4125,7 +4170,7 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque 0.7.3", + "crossbeam-deque 0.7.4", "crossbeam-queue", "crossbeam-utils 0.7.2", "futures 0.1.29", @@ -4191,8 +4236,8 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite 0.2.4", - "tokio 1.4.0", + "pin-project-lite 0.2.7", + "tokio 1.8.1", ] [[package]] @@ -4378,7 +4423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] @@ -4509,9 +4554,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -4535,7 +4580,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4550,7 +4595,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index 8bb3d8097f413c..5291fdb0fd19b5 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-bpf-programs" description = "Blockchain, Rebuilt for Scale" -version = "1.7.0" +version = "1.7.11" documentation = "https://docs.rs/solana" homepage = "https://solana.com/" readme = "README.md" @@ -26,15 +26,15 @@ itertools = "0.10.0" log = "0.4.11" miow = "0.2.2" net2 = "0.2.37" -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.7.0" } -solana-cli-output = { path = "../../cli-output", version = "=1.7.0" } -solana-logger = { path = "../../logger", version = "=1.7.0" } -solana-measure = { path = "../../measure", version = "=1.7.0" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.7.11" } +solana-cli-output = { path = "../../cli-output", version = "=1.7.11" } +solana-logger = { path = "../../logger", version = "=1.7.11" } +solana-measure = { path = "../../measure", version = "=1.7.11" } solana_rbpf = "=0.2.11" -solana-runtime = { path = "../../runtime", version = "=1.7.0" } -solana-sdk = { path = "../../sdk", version = "=1.7.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.7.0" } -solana-account-decoder = { path = "../../account-decoder", version = "=1.7.0" } +solana-runtime = { path = "../../runtime", version = "=1.7.11" } +solana-sdk = { path = "../../sdk", version = "=1.7.11" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.7.11" } +solana-account-decoder = { path = "../../account-decoder", version = "=1.7.11" } [[bench]] @@ -64,13 +64,16 @@ members = [ "rust/many_args", "rust/many_args_dep", "rust/mem", + 
"rust/membuiltins", "rust/noop", "rust/panic", "rust/param_passing", "rust/param_passing_dep", "rust/rand", "rust/ro_modify", + "rust/ro_account_modify", "rust/sanity", + "rust/secp256k1_recover", "rust/sha", "rust/spoof1", "rust/spoof1_system", diff --git a/programs/bpf/build.rs b/programs/bpf/build.rs index a044671a04175a..63f77b4b2b5853 100644 --- a/programs/bpf/build.rs +++ b/programs/bpf/build.rs @@ -78,12 +78,15 @@ fn main() { "iter", "many_args", "mem", + "membuiltins", "noop", "panic", "param_passing", "rand", "ro_modify", + "ro_account_modify", "sanity", + "secp256k1_recover", "sha", "spoof1", "spoof1_system", diff --git a/programs/bpf/c/src/invoke/invoke.c b/programs/bpf/c/src/invoke/invoke.c index 0ae306220e6ef6..c330576ef2aa02 100644 --- a/programs/bpf/c/src/invoke/invoke.c +++ b/programs/bpf/c/src/invoke/invoke.c @@ -17,6 +17,10 @@ static const uint8_t TEST_INSTRUCTION_META_TOO_LARGE = 10; static const uint8_t TEST_RETURN_ERROR = 11; static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER = 12; static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE = 13; +static const uint8_t TEST_WRITABLE_DEESCALATION_WRITABLE = 14; +static const uint8_t TEST_NESTED_INVOKE_TOO_DEEP = 15; +static const uint8_t TEST_EXECUTABLE_LAMPORTS = 16; +static const uint8_t ADD_LAMPORTS = 17; static const int MINT_INDEX = 0; static const int ARGUMENT_INDEX = 1; @@ -30,10 +34,39 @@ static const int DERIVED_KEY3_INDEX = 8; static const int SYSTEM_PROGRAM_INDEX = 9; static const int FROM_INDEX = 10; +uint64_t do_nested_invokes(uint64_t num_nested_invokes, + SolAccountInfo *accounts, uint64_t num_accounts) { + sol_assert(accounts[ARGUMENT_INDEX].is_signer); + + *accounts[ARGUMENT_INDEX].lamports -= 5; + *accounts[INVOKED_ARGUMENT_INDEX].lamports += 5; + + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, + {accounts[ARGUMENT_INDEX].key, true, true}, + {accounts[INVOKED_PROGRAM_INDEX].key, false, false}}; + uint8_t data[] = {NESTED_INVOKE, num_nested_invokes}; + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + + sol_log("First invoke"); + sol_assert(SUCCESS == sol_invoke(&instruction, accounts, num_accounts)); + sol_log("2nd invoke from first program"); + sol_assert(SUCCESS == sol_invoke(&instruction, accounts, num_accounts)); + + sol_assert(*accounts[ARGUMENT_INDEX].lamports == + 42 - 5 + (2 * num_nested_invokes)); + sol_assert(*accounts[INVOKED_ARGUMENT_INDEX].lamports == + 10 + 5 - (2 * num_nested_invokes)); + + return SUCCESS; +} + extern uint64_t entrypoint(const uint8_t *input) { sol_log("Invoke C program"); - SolAccountInfo accounts[11]; + SolAccountInfo accounts[12]; SolParameters params = (SolParameters){.ka = accounts}; if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(accounts))) { @@ -202,32 +235,9 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); } - sol_log("Test invoke"); + sol_log("Test nested invoke"); { - sol_assert(accounts[ARGUMENT_INDEX].is_signer); - - *accounts[ARGUMENT_INDEX].lamports -= 5; - *accounts[INVOKED_ARGUMENT_INDEX].lamports += 5; - - SolAccountMeta arguments[] = { - {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, - {accounts[ARGUMENT_INDEX].key, true, true}, - {accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false}}; - uint8_t data[] = {NESTED_INVOKE}; - const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, - arguments, 
SOL_ARRAY_SIZE(arguments), - data, SOL_ARRAY_SIZE(data)}; - - sol_log("First invoke"); - sol_assert(SUCCESS == - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); - sol_log("2nd invoke from first program"); - sol_assert(SUCCESS == - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); - - sol_assert(*accounts[ARGUMENT_INDEX].lamports == 42 - 5 + 1 + 1 + 1 + 1); - sol_assert(*accounts[INVOKED_ARGUMENT_INDEX].lamports == - 10 + 5 - 1 - 1 - 1 - 1); + sol_assert(SUCCESS == do_nested_invokes(4, accounts, params.ka_num)); } sol_log("Test privilege deescalation"); @@ -271,24 +281,6 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_assert(accounts[ARGUMENT_INDEX].data[i] == 0); } } - sol_log("Test writable deescalation"); - { - uint8_t buffer[10]; - for (int i = 0; i < 10; i++) { - buffer[i] = accounts[INVOKED_ARGUMENT_INDEX].data[i]; - } - SolAccountMeta arguments[] = { - {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; - uint8_t data[] = {WRITE_ACCOUNT, 10}; - const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, - arguments, SOL_ARRAY_SIZE(arguments), - data, SOL_ARRAY_SIZE(data)}; - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); - - for (int i = 0; i < 10; i++) { - sol_assert(buffer[i] == accounts[INVOKED_ARGUMENT_INDEX].data[i]); - } - } break; } case TEST_PRIVILEGE_ESCALATION_SIGNER: { @@ -521,6 +513,53 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); break; } + case TEST_WRITABLE_DEESCALATION_WRITABLE: { + sol_log("Test writable deescalation"); + uint8_t buffer[10]; + for (int i = 0; i < 10; i++) { + buffer[i] = accounts[INVOKED_ARGUMENT_INDEX].data[i]; + } + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; + uint8_t data[] = {WRITE_ACCOUNT, 10}; + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); + + for (int i = 0; i < 10; i++) { + sol_assert(buffer[i] == accounts[INVOKED_ARGUMENT_INDEX].data[i]); + } + break; + } + case TEST_NESTED_INVOKE_TOO_DEEP: { + do_nested_invokes(5, accounts, params.ka_num); + break; + } + case TEST_EXECUTABLE_LAMPORTS: { + sol_log("Test executable lamports"); + accounts[ARGUMENT_INDEX].executable = true; + *accounts[ARGUMENT_INDEX].lamports -= 1; + *accounts[DERIVED_KEY1_INDEX].lamports +=1; + SolAccountMeta arguments[] = { + {accounts[ARGUMENT_INDEX].key, true, false}, + {accounts[DERIVED_KEY1_INDEX].key, true, false}, + }; + uint8_t data[] = {ADD_LAMPORTS, 0, 0, 0}; + SolPubkey program_id; + sol_memcpy(&program_id, params.program_id, sizeof(SolPubkey)); + const SolInstruction instruction = {&program_id, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); + *accounts[ARGUMENT_INDEX].lamports += 1; + break; + } + case ADD_LAMPORTS: { + *accounts[0].lamports += 1; + break; + } + default: sol_panic(); } diff --git a/programs/bpf/c/src/invoked/invoked.c b/programs/bpf/c/src/invoked/invoked.c index e0b6f7f5515115..273deb9d0607db 100644 --- a/programs/bpf/c/src/invoked/invoked.c +++ b/programs/bpf/c/src/invoked/invoked.c @@ -228,16 +228,17 @@ extern uint64_t entrypoint(const uint8_t *input) { *accounts[INVOKED_ARGUMENT_INDEX].lamports -= 1; *accounts[ARGUMENT_INDEX].lamports += 1; - if (params.ka_num == 3) { + uint8_t remaining_invokes = params.data[1]; + 
if (remaining_invokes > 1) { + sol_log("Invoke again"); SolAccountMeta arguments[] = { {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, - {accounts[ARGUMENT_INDEX].key, true, true}}; - uint8_t data[] = {NESTED_INVOKE}; + {accounts[ARGUMENT_INDEX].key, true, true}, + {accounts[INVOKED_PROGRAM_INDEX].key, false, false}}; + uint8_t data[] = {NESTED_INVOKE, remaining_invokes - 1}; const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, arguments, SOL_ARRAY_SIZE(arguments), data, SOL_ARRAY_SIZE(data)}; - - sol_log("Invoke again"); sol_assert(SUCCESS == sol_invoke(&instruction, accounts, params.ka_num)); } else { sol_log("Last invoked"); diff --git a/programs/bpf/c/src/secp256k1_recover/secp256k1_recover.c b/programs/bpf/c/src/secp256k1_recover/secp256k1_recover.c new file mode 100644 index 00000000000000..f4cd076f7afb44 --- /dev/null +++ b/programs/bpf/c/src/secp256k1_recover/secp256k1_recover.c @@ -0,0 +1,38 @@ +/** + * @brief Secp256k1Recover Syscall test + */ +#include <solana_sdk.h> + +extern uint64_t entrypoint(const uint8_t *input) { + + uint8_t result[SECP256K1_RECOVER_RESULT_LENGTH]; + uint8_t expected[] = { 0x42, 0xcd, 0x27, 0xe4, 0x0f, 0xdf, 0x7c, 0x97, + 0x0a, 0xa2, 0xca, 0x0b, 0x88, 0x5b, 0x96, 0x0f, + 0x8b, 0x62, 0x8a, 0x41, 0xa1, 0x81, 0xe7, 0xe6, + 0x8e, 0x03, 0xea, 0x0b, 0x84, 0x20, 0x58, 0x9b, + 0x32, 0x06, 0xbd, 0x66, 0x2f, 0x75, 0x65, 0xd6, + 0x9d, 0xbd, 0x1d, 0x34, 0x29, 0x6a, 0xd9, 0x35, + 0x38, 0xed, 0x86, 0x9e, 0x99, 0x20, 0x43, 0xc3, + 0xeb, 0xad, 0x65, 0x50, 0xa0, 0x11, 0x6e, 0x5d}; + + uint8_t hash[] = { 0xde, 0xa5, 0x66, 0xb6, 0x94, 0x3b, 0xe0, 0xe9, + 0x62, 0x53, 0xc2, 0x21, 0x5b, 0x1b, 0xac, 0x69, + 0xe7, 0xa8, 0x1e, 0xdb, 0x41, 0xc5, 0x02, 0x8b, + 0x4f, 0x5c, 0x45, 0xc5, 0x3b, 0x49, 0x54, 0xd0}; + uint8_t signature[] = { 0x97, 0xa4, 0xee, 0x31, 0xfe, 0x82, 0x65, 0x72, + 0x9f, 0x4a, 0xa6, 0x7d, 0x24, 0xd4, 0xa7, 0x27, + 0xf8, 0xc3, 0x15, 0xa4, 0xc8, 0xf9, 0x80, 0xeb, + 0x4c, 0x4d, 0x4a, 0xfa, 0x6e, 0xc9, 0x42, 0x41, + 0x5d, 0x10, 0xd9, 0xc2, 0x8a, 0x90, 0xe9, 0x92, + 0x9c, 0x52, 0x4b, 0x2c, 0xfb, 0x65, 0xdf, 0xbc, + 0xf6, 0x8c, 0xfd, 0x68, 0xdb, 0x17, 0xf9, 0x5d, + 0x23, 0x5f, 0x96, 0xd8, 0xf0, 0x72, 0x01, 0x2d}; + uint64_t recovery_id = 1; + + uint64_t result_code = sol_secp256k1_recover(hash, recovery_id, signature, result); + + sol_assert(0 == result_code); + sol_assert(0 == sol_memcmp(result, expected, SECP256K1_RECOVER_RESULT_LENGTH)); + + return SUCCESS; +} diff --git a/programs/bpf/rust/128bit/Cargo.toml b/programs/bpf/rust/128bit/Cargo.toml index bae158418d308f..eed97bf89d2c90 100644 --- a/programs/bpf/rust/128bit/Cargo.toml +++ b/programs/bpf/rust/128bit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } -solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } +solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/128bit_dep/Cargo.toml b/programs/bpf/rust/128bit_dep/Cargo.toml index ba33bc4f652d05..5a0a0aa36c4480 100644 --- a/programs/bpf/rust/128bit_dep/Cargo.toml +++ b/programs/bpf/rust/128bit_dep/Cargo.toml
@@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit-dep" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/alloc/Cargo.toml b/programs/bpf/rust/alloc/Cargo.toml index b0f250134f35b5..c6801acc69c5dd 100644 --- a/programs/bpf/rust/alloc/Cargo.toml +++ b/programs/bpf/rust/alloc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-alloc" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-alloc" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/call_depth/Cargo.toml b/programs/bpf/rust/call_depth/Cargo.toml index 5dea404fabe6dc..fded0b3c78edb6 100644 --- a/programs/bpf/rust/call_depth/Cargo.toml +++ b/programs/bpf/rust/call_depth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-call-depth" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-call-depth" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/caller_access/Cargo.toml b/programs/bpf/rust/caller_access/Cargo.toml index 47473a24c88787..1291eb7897911c 100644 --- a/programs/bpf/rust/caller_access/Cargo.toml +++ b/programs/bpf/rust/caller_access/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-caller-access" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-caller-access" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/caller_access/src/lib.rs b/programs/bpf/rust/caller_access/src/lib.rs index 6db20e433aeb54..abceb6dda24bbd 100644 --- a/programs/bpf/rust/caller_access/src/lib.rs +++ b/programs/bpf/rust/caller_access/src/lib.rs @@ -21,16 +21,8 @@ fn process_instruction( let mut lamports = accounts[0].lamports(); let owner = &accounts[0].owner; let mut data = accounts[0].try_borrow_mut_data()?; - let account = AccountInfo::new( - &key, - false, - false, - &mut lamports, - &mut data, - &owner, - true, - 0, - ); + let account = + AccountInfo::new(&key, false, false, &mut lamports, &mut data, owner, true, 0); msg!("{:?} calling {:?}", program_id, key); invoke(&ix, 
&[account])?; } else { diff --git a/programs/bpf/rust/custom_heap/Cargo.toml b/programs/bpf/rust/custom_heap/Cargo.toml index 6c925232b32190..3a7d9198ce30f7 100644 --- a/programs/bpf/rust/custom_heap/Cargo.toml +++ b/programs/bpf/rust/custom_heap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-custom-heap" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-custom-heap" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [features] default = ["custom-heap"] diff --git a/programs/bpf/rust/dep_crate/Cargo.toml b/programs/bpf/rust/dep_crate/Cargo.toml index be2fd397f9b785..c9d219d930df40 100644 --- a/programs/bpf/rust/dep_crate/Cargo.toml +++ b/programs/bpf/rust/dep_crate/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dep-crate" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] byteorder = { version = "1", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/deprecated_loader/Cargo.toml b/programs/bpf/rust/deprecated_loader/Cargo.toml index 7fb1583673f472..60b828d3a8e060 100644 --- a/programs/bpf/rust/deprecated_loader/Cargo.toml +++ b/programs/bpf/rust/deprecated_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-deprecated-loader" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-deprecated-loader" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/dup_accounts/Cargo.toml b/programs/bpf/rust/dup_accounts/Cargo.toml index 83956e52246dfd..3c1328aff487d0 100644 --- a/programs/bpf/rust/dup_accounts/Cargo.toml +++ b/programs/bpf/rust/dup_accounts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dup-accounts" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-dup-accounts" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/dup_accounts/src/lib.rs b/programs/bpf/rust/dup_accounts/src/lib.rs index 25c1f8bd26aba5..33b55b5ee2ed6c 100644 --- a/programs/bpf/rust/dup_accounts/src/lib.rs +++ b/programs/bpf/rust/dup_accounts/src/lib.rs @@ -71,7 +71,7 @@ fn process_instruction( AccountMeta::new_readonly(*accounts[3].key, true), ], ); - invoke(&instruction, &accounts)?; + invoke(&instruction, 
accounts)?; let instruction = Instruction::new_with_bytes( *program_id, @@ -83,7 +83,7 @@ fn process_instruction( AccountMeta::new(*accounts[3].key, false), ], ); - invoke(&instruction, &accounts)?; + invoke(&instruction, accounts)?; assert_eq!(accounts[2].try_borrow_mut_data()?[0], 3); assert_eq!(accounts[3].try_borrow_mut_data()?[0], 3); } diff --git a/programs/bpf/rust/error_handling/Cargo.toml b/programs/bpf/rust/error_handling/Cargo.toml index 45ae609cf606a5..a9ba80f62c12a3 100644 --- a/programs/bpf/rust/error_handling/Cargo.toml +++ b/programs/bpf/rust/error_handling/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-error-handling" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] num-derive = "0.2" num-traits = "0.2" -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } thiserror = "1.0" [lib] diff --git a/programs/bpf/rust/external_spend/Cargo.toml b/programs/bpf/rust/external_spend/Cargo.toml index 3edb267e0b8a0a..5598fb4da38cac 100644 --- a/programs/bpf/rust/external_spend/Cargo.toml +++ b/programs/bpf/rust/external_spend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-external-spend" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-external-spend" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/finalize/Cargo.toml b/programs/bpf/rust/finalize/Cargo.toml index 246dbac84f7770..1056f93ec3d9ac 100644 --- a/programs/bpf/rust/finalize/Cargo.toml +++ b/programs/bpf/rust/finalize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-finalize" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-finalize" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/instruction_introspection/Cargo.toml b/programs/bpf/rust/instruction_introspection/Cargo.toml index c4ef4dec61efea..4905b38df1bffb 100644 --- a/programs/bpf/rust/instruction_introspection/Cargo.toml +++ b/programs/bpf/rust/instruction_introspection/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-instruction-introspection" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-instruction-introspection" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke/Cargo.toml 
b/programs/bpf/rust/invoke/Cargo.toml index d9637269ae122e..283537311a158d 100644 --- a/programs/bpf/rust/invoke/Cargo.toml +++ b/programs/bpf/rust/invoke/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] solana-bpf-rust-invoked = { path = "../invoked", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke/src/lib.rs b/programs/bpf/rust/invoke/src/lib.rs index e0889766eeb699..dec99fd88eed57 100644 --- a/programs/bpf/rust/invoke/src/lib.rs +++ b/programs/bpf/rust/invoke/src/lib.rs @@ -29,19 +29,56 @@ const TEST_INSTRUCTION_META_TOO_LARGE: u8 = 10; const TEST_RETURN_ERROR: u8 = 11; const TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 12; const TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 13; +const TEST_WRITABLE_DEESCALATION_WRITABLE: u8 = 14; +const TEST_NESTED_INVOKE_TOO_DEEP: u8 = 15; +const TEST_EXECUTABLE_LAMPORTS: u8 = 16; +const ADD_LAMPORTS: u8 = 17; -// const MINT_INDEX: usize = 0; +// const MINT_INDEX: usize = 0; // unused placeholder const ARGUMENT_INDEX: usize = 1; const INVOKED_PROGRAM_INDEX: usize = 2; const INVOKED_ARGUMENT_INDEX: usize = 3; const INVOKED_PROGRAM_DUP_INDEX: usize = 4; -// const ARGUMENT_DUP_INDEX: usize = 5; +// const ARGUMENT_DUP_INDEX: usize = 5; unused placeholder const DERIVED_KEY1_INDEX: usize = 6; const DERIVED_KEY2_INDEX: usize = 7; const DERIVED_KEY3_INDEX: usize = 8; const SYSTEM_PROGRAM_INDEX: usize = 9; const FROM_INDEX: usize = 10; +fn do_nested_invokes(num_nested_invokes: u64, accounts: &[AccountInfo]) -> ProgramResult { + assert!(accounts[ARGUMENT_INDEX].is_signer); + + let pre_argument_lamports = accounts[ARGUMENT_INDEX].lamports(); + let pre_invoke_argument_lamports = accounts[INVOKED_ARGUMENT_INDEX].lamports(); + **accounts[ARGUMENT_INDEX].lamports.borrow_mut() -= 5; + **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() += 5; + + msg!("First invoke"); + let instruction = create_instruction( + *accounts[INVOKED_PROGRAM_INDEX].key, + &[ + (accounts[ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_PROGRAM_INDEX].key, false, false), + ], + vec![NESTED_INVOKE, num_nested_invokes as u8], + ); + invoke(&instruction, accounts)?; + msg!("2nd invoke from first program"); + invoke(&instruction, accounts)?; + + assert_eq!( + accounts[ARGUMENT_INDEX].lamports(), + pre_argument_lamports - 5 + (2 * num_nested_invokes) + ); + assert_eq!( + accounts[INVOKED_ARGUMENT_INDEX].lamports(), + pre_invoke_argument_lamports + 5 - (2 * num_nested_invokes) + ); + Ok(()) +} + entrypoint!(process_instruction); fn process_instruction( program_id: &Pubkey, @@ -226,8 +263,10 @@ fn process_instruction( )?, accounts[DERIVED_KEY1_INDEX].key ); + let not_native_program_id = Pubkey::new_from_array([6u8; 32]); + assert!(!not_native_program_id.is_native_program_id()); assert_eq!( - Pubkey::create_program_address(&[b"You pass butter"], &Pubkey::default()) + Pubkey::create_program_address(&[b"You pass butter"], ¬_native_program_id) .unwrap_err(), PubkeyError::InvalidSeeds ); @@ -239,8 +278,10 @@ fn process_instruction( Pubkey::try_find_program_address(&[b"You pass butter"], 
program_id).unwrap(); assert_eq!(&address, accounts[DERIVED_KEY1_INDEX].key); assert_eq!(bump_seed, bump_seed1); + let not_native_program_id = Pubkey::new_from_array([6u8; 32]); + assert!(!not_native_program_id.is_native_program_id()); assert_eq!( - Pubkey::create_program_address(&[b"You pass butter"], &Pubkey::default()) + Pubkey::create_program_address(&[b"You pass butter"], ¬_native_program_id) .unwrap_err(), PubkeyError::InvalidSeeds ); @@ -281,31 +322,7 @@ fn process_instruction( msg!("Test nested invoke"); { - assert!(accounts[ARGUMENT_INDEX].is_signer); - - **accounts[ARGUMENT_INDEX].lamports.borrow_mut() -= 5; - **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() += 5; - - msg!("First invoke"); - let instruction = create_instruction( - *accounts[INVOKED_PROGRAM_INDEX].key, - &[ - (accounts[ARGUMENT_INDEX].key, true, true), - (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), - (accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false), - (accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false), - ], - vec![NESTED_INVOKE], - ); - invoke(&instruction, accounts)?; - msg!("2nd invoke from first program"); - invoke(&instruction, accounts)?; - - assert_eq!(accounts[ARGUMENT_INDEX].lamports(), 42 - 5 + 1 + 1 + 1 + 1); - assert_eq!( - accounts[INVOKED_ARGUMENT_INDEX].lamports(), - 10 + 5 - 1 - 1 - 1 - 1 - ); + do_nested_invokes(4, accounts)?; } msg!("Test privilege deescalation"); @@ -354,27 +371,6 @@ fn process_instruction( } } - msg!("Test writable deescalation"); - { - const NUM_BYTES: usize = 10; - let mut buffer = [0; NUM_BYTES]; - buffer.copy_from_slice( - &accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES], - ); - - let instruction = create_instruction( - *accounts[INVOKED_PROGRAM_INDEX].key, - &[(accounts[INVOKED_ARGUMENT_INDEX].key, false, false)], - vec![WRITE_ACCOUNT, NUM_BYTES as u8], - ); - let _ = invoke(&instruction, accounts); - - assert_eq!( - buffer, - accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES] - ); - } - msg!("Create account and init data"); { let from_lamports = accounts[FROM_INDEX].lamports(); @@ -603,6 +599,55 @@ fn process_instruction( ); invoke(&invoked_instruction, accounts)?; } + TEST_WRITABLE_DEESCALATION_WRITABLE => { + msg!("Test writable deescalation writable"); + const NUM_BYTES: usize = 10; + let mut buffer = [0; NUM_BYTES]; + buffer + .copy_from_slice(&accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES]); + + let instruction = create_instruction( + *accounts[INVOKED_PROGRAM_INDEX].key, + &[(accounts[INVOKED_ARGUMENT_INDEX].key, false, false)], + vec![WRITE_ACCOUNT, NUM_BYTES as u8], + ); + let _ = invoke(&instruction, accounts); + + assert_eq!( + buffer, + accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES] + ); + } + TEST_NESTED_INVOKE_TOO_DEEP => { + let _ = do_nested_invokes(5, accounts); + } + TEST_EXECUTABLE_LAMPORTS => { + msg!("Test executable lamports"); + let mut accounts = accounts.to_vec(); + + // set account to executable and subtract lamports + accounts[ARGUMENT_INDEX].executable = true; + **(*accounts[ARGUMENT_INDEX].lamports).borrow_mut() -= 1; + // add lamports to dest account + **(*accounts[DERIVED_KEY1_INDEX].lamports).borrow_mut() += 1; + + let instruction = create_instruction( + *program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (accounts[DERIVED_KEY1_INDEX].key, true, false), + ], + vec![ADD_LAMPORTS, 0, 0, 0], + ); + let _ = invoke(&instruction, &accounts); + + // reset executable account + **(*accounts[ARGUMENT_INDEX].lamports).borrow_mut() += 1; + } + 
ADD_LAMPORTS => { + // make sure the total balance is fine + **accounts[0].lamports.borrow_mut() += 1; + } _ => panic!(), } diff --git a/programs/bpf/rust/invoke_and_error/Cargo.toml b/programs/bpf/rust/invoke_and_error/Cargo.toml index 8bdf4e1142b985..1220d7b75ad4f2 100644 --- a/programs/bpf/rust/invoke_and_error/Cargo.toml +++ b/programs/bpf/rust/invoke_and_error/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-error" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-error" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_error/src/lib.rs b/programs/bpf/rust/invoke_and_error/src/lib.rs index 25012107514ab9..6b94e82ca0981e 100644 --- a/programs/bpf/rust/invoke_and_error/src/lib.rs +++ b/programs/bpf/rust/invoke_and_error/src/lib.rs @@ -26,7 +26,7 @@ fn process_instruction( data: instruction_data.to_owned(), program_id: *to_call, }; - let _ = invoke(&instruction, &infos); + let _ = invoke(&instruction, infos); Err(42.into()) } diff --git a/programs/bpf/rust/invoke_and_ok/Cargo.toml b/programs/bpf/rust/invoke_and_ok/Cargo.toml index 54bf58054735a4..374e6a3d8bf691 100644 --- a/programs/bpf/rust/invoke_and_ok/Cargo.toml +++ b/programs/bpf/rust/invoke_and_ok/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-ok" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-ok" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_ok/src/lib.rs b/programs/bpf/rust/invoke_and_ok/src/lib.rs index 99e58f7d9ad66d..06a47ae5ec5371 100644 --- a/programs/bpf/rust/invoke_and_ok/src/lib.rs +++ b/programs/bpf/rust/invoke_and_ok/src/lib.rs @@ -27,7 +27,7 @@ fn process_instruction( data: instruction_data.to_owned(), program_id: *to_call, }; - let _ = invoke(&instruction, &infos); + let _ = invoke(&instruction, infos); Ok(()) } diff --git a/programs/bpf/rust/invoke_and_return/Cargo.toml b/programs/bpf/rust/invoke_and_return/Cargo.toml index 1cb4046ca4566e..b1c4ae4198b7cd 100644 --- a/programs/bpf/rust/invoke_and_return/Cargo.toml +++ b/programs/bpf/rust/invoke_and_return/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-return" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-return" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_return/src/lib.rs b/programs/bpf/rust/invoke_and_return/src/lib.rs index 5698103f11c1e5..924fd40a84fe40 
100644 --- a/programs/bpf/rust/invoke_and_return/src/lib.rs +++ b/programs/bpf/rust/invoke_and_return/src/lib.rs @@ -27,5 +27,5 @@ fn process_instruction( data: instruction_data.to_owned(), program_id: *to_call, }; - invoke(&instruction, &infos) + invoke(&instruction, infos) } diff --git a/programs/bpf/rust/invoked/Cargo.toml b/programs/bpf/rust/invoked/Cargo.toml index c311efcbf50b2b..0b8cd37e9ab6e1 100644 --- a/programs/bpf/rust/invoked/Cargo.toml +++ b/programs/bpf/rust/invoked/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoked" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoked" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [features] default = ["program"] diff --git a/programs/bpf/rust/invoked/src/processor.rs b/programs/bpf/rust/invoked/src/processor.rs index 062ca26c56e3a8..892ba90e631235 100644 --- a/programs/bpf/rust/invoked/src/processor.rs +++ b/programs/bpf/rust/invoked/src/processor.rs @@ -202,21 +202,24 @@ fn process_instruction( msg!("nested invoke"); const ARGUMENT_INDEX: usize = 0; const INVOKED_ARGUMENT_INDEX: usize = 1; - const INVOKED_PROGRAM_INDEX: usize = 3; + const INVOKED_PROGRAM_INDEX: usize = 2; assert!(accounts[INVOKED_ARGUMENT_INDEX].is_signer); + assert!(instruction_data.len() > 1); **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() -= 1; **accounts[ARGUMENT_INDEX].lamports.borrow_mut() += 1; - if accounts.len() > 2 { + let remaining_invokes = instruction_data[1]; + if remaining_invokes > 1 { msg!("Invoke again"); let invoked_instruction = create_instruction( *accounts[INVOKED_PROGRAM_INDEX].key, &[ (accounts[ARGUMENT_INDEX].key, true, true), (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_PROGRAM_INDEX].key, false, false), ], - vec![NESTED_INVOKE], + vec![NESTED_INVOKE, remaining_invokes - 1], ); invoke(&invoked_instruction, accounts)?; } else { diff --git a/programs/bpf/rust/iter/Cargo.toml b/programs/bpf/rust/iter/Cargo.toml index bfe39cd9b52a9c..1d52eebd055464 100644 --- a/programs/bpf/rust/iter/Cargo.toml +++ b/programs/bpf/rust/iter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-iter" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-iter" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/many_args/Cargo.toml b/programs/bpf/rust/many_args/Cargo.toml index 52eb398e3bdfb9..82c02404456356 100644 --- a/programs/bpf/rust/many_args/Cargo.toml +++ b/programs/bpf/rust/many_args/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args" edition = "2018" [dependencies] -solana-program = { path = 
"../../../../sdk/program", version = "=1.7.0" } -solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } +solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/many_args_dep/Cargo.toml b/programs/bpf/rust/many_args_dep/Cargo.toml index 9992955485d5c2..1424ad9a7375c9 100644 --- a/programs/bpf/rust/many_args_dep/Cargo.toml +++ b/programs/bpf/rust/many_args_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args-dep" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/mem/Cargo.toml b/programs/bpf/rust/mem/Cargo.toml index f3d15b6e741952..0810a4981cbd54 100644 --- a/programs/bpf/rust/mem/Cargo.toml +++ b/programs/bpf/rust/mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-mem" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -9,11 +9,18 @@ homepage = "https://solana.com/" documentation = "https://docs.rs/solana-bpf-rust-mem" edition = "2018" +[features] +no-entrypoint = [] + [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } + +[dev-dependencies] +solana-program-test = { path = "../../../../program-test", version = "=1.7.11" } +solana-sdk = { path = "../../../../sdk", version = "=1.7.11" } [lib] -crate-type = ["cdylib"] +crate-type = ["cdylib", "lib"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/mem/src/entrypoint.rs b/programs/bpf/rust/mem/src/entrypoint.rs new file mode 100644 index 00000000000000..65d25ef8f2fd7a --- /dev/null +++ b/programs/bpf/rust/mem/src/entrypoint.rs @@ -0,0 +1,39 @@ +//! 
@brief Test mem functions + +use crate::{run_mem_tests, MemOps}; +use solana_program::{ + account_info::AccountInfo, + entrypoint, + entrypoint::ProgramResult, + program_memory::{sol_memcmp, sol_memcpy, sol_memmove, sol_memset}, + pubkey::Pubkey, +}; + +entrypoint!(process_instruction); +#[allow(clippy::unnecessary_wraps)] +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + // Via syscalls + #[derive(Default)] + struct MemOpSyscalls(); + impl MemOps for MemOpSyscalls { + fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { + sol_memcpy(dst, src, n) + } + unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize) { + sol_memmove(dst, src, n) + } + fn memset(&self, s: &mut [u8], c: u8, n: usize) { + sol_memset(s, c, n) + } + fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { + sol_memcmp(s1, s2, n) + } + } + run_mem_tests(MemOpSyscalls::default()); + + Ok(()) +} diff --git a/programs/bpf/rust/mem/src/lib.rs b/programs/bpf/rust/mem/src/lib.rs index 3817010ad2c64c..91a98a208c86bc 100644 --- a/programs/bpf/rust/mem/src/lib.rs +++ b/programs/bpf/rust/mem/src/lib.rs @@ -1,190 +1,165 @@ -//! @brief Test builtin mem functions +//! @brief Test mem functions -#![cfg(target_arch = "bpf")] -#![feature(rustc_private)] +#[cfg(not(feature = "no-entrypoint"))] +pub mod entrypoint; -extern crate compiler_builtins; -use solana_program::{custom_panic_default, entrypoint::SUCCESS}; +pub trait MemOps { + fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize); + /// # Safety + unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize); + fn memset(&self, s: &mut [u8], c: u8, n: usize); + fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32; +} -#[no_mangle] -pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { - unsafe { - // memcpy - let src = &mut [1_u8; 18]; - let dst = &mut [0_u8; 1]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 1); - assert_eq!(&src[..1], dst); - let dst = &mut [0_u8; 3]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 3); - assert_eq!(&src[..3], dst); - let dst = &mut [0_u8; 8]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 8); - assert_eq!(&src[..8], dst); - let dst = &mut [0_u8; 9]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 9); - assert_eq!(&src[..9], dst); - let dst = &mut [0_u8; 16]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 16); - assert_eq!(&src[..16], dst); - let dst = &mut [0_u8; 18]; - compiler_builtins::mem::memcpy(&mut src[0] as *mut u8, &mut dst[0] as *mut u8, 18); - assert_eq!(&src[..18], dst); - let dst = &mut [0_u8; 18]; - compiler_builtins::mem::memcpy(&mut src[1] as *mut u8, &mut dst[0] as *mut u8, 17); - assert_eq!(&src[1..], &dst[1..]); - let dst = &mut [0_u8; 18]; - compiler_builtins::mem::memcpy(&mut src[1] as *mut u8, &mut dst[1] as *mut u8, 17); - assert_eq!(&src[1..], &dst[..17]); +pub fn run_mem_tests(mem_ops: T) { + // memcpy + let src = &[1_u8; 18]; + let dst = &mut [0_u8; 1]; + mem_ops.memcpy(dst, src, 1); + assert_eq!(&src[..1], dst); + let dst = &mut [0_u8; 3]; + mem_ops.memcpy(dst, src, 3); + assert_eq!(&src[..3], dst); + let dst = &mut [0_u8; 8]; + mem_ops.memcpy(dst, src, 8); + assert_eq!(&src[..8], dst); + let dst = &mut [0_u8; 9]; + mem_ops.memcpy(dst, src, 9); + assert_eq!(&src[..9], dst); + let dst = &mut [0_u8; 16]; + mem_ops.memcpy(dst, src, 16); + 
assert_eq!(&src[..16], dst); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(dst, src, 18); + assert_eq!(&src[..18], dst); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(dst, &src[1..], 17); + assert_eq!(&src[1..], &dst[..17]); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(&mut dst[1..], &src[1..], 17); + assert_eq!(&src[1..], &dst[1..]); - // memmove + // memmove + unsafe { let buf = &mut [1_u8, 0]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[1] as *mut u8, 1); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[1] as *mut u8, 1); assert_eq!(buf[0], buf[1]); let buf = &mut [1_u8, 0]; - compiler_builtins::mem::memmove(&mut buf[1] as *mut u8, &mut buf[0] as *mut u8, 1); + mem_ops.memmove(&mut buf[1] as *mut u8, &mut buf[0] as *mut u8, 1); assert_eq!(buf[0], buf[1]); let buf = &mut [1_u8, 1, 1, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[3] as *mut u8, 3); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[3] as *mut u8, 3); assert_eq!(buf[..3], buf[3..]); let buf = &mut [1_u8, 1, 1, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[3] as *mut u8, &mut buf[0] as *mut u8, 3); + mem_ops.memmove(&mut buf[3] as *mut u8, &mut buf[0] as *mut u8, 3); assert_eq!(buf[..3], buf[3..]); let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[8] as *mut u8, 8); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[8] as *mut u8, 8); assert_eq!(buf[..8], buf[8..]); let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[8] as *mut u8, &mut buf[0] as *mut u8, 8); + mem_ops.memmove(&mut buf[8] as *mut u8, &mut buf[0] as *mut u8, 8); assert_eq!(buf[..8], buf[8..]); let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[9] as *mut u8, 9); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[9] as *mut u8, 9); assert_eq!(buf[..9], buf[9..]); let buf = &mut [0_u8, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - compiler_builtins::mem::memmove(&mut buf[1] as *mut u8, &mut buf[0] as *mut u8, 9); + mem_ops.memmove(&mut buf[1] as *mut u8, &mut buf[0] as *mut u8, 9); assert_eq!(&mut [0_u8, 0, 1, 2, 3, 4, 5, 6, 7, 8], buf); let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - compiler_builtins::mem::memmove(&mut buf[9] as *mut u8, &mut buf[0] as *mut u8, 9); + mem_ops.memmove(&mut buf[9] as *mut u8, &mut buf[0] as *mut u8, 9); assert_eq!(buf[..9], buf[9..]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[16] as *mut u8, 16); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[16] as *mut u8, 16); assert_eq!(buf[..16], buf[16..]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[16] as *mut u8, &mut buf[0] as *mut u8, 16); + mem_ops.memmove(&mut buf[16] as *mut u8, &mut buf[0] as *mut u8, 16); assert_eq!(buf[..16], buf[16..]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[18] as *mut u8, 18); + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[18] as *mut u8, 18); assert_eq!(buf[..18], buf[18..]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[18] as *mut u8, &mut buf[0] as *mut u8, 18); + mem_ops.memmove(&mut buf[18] as *mut u8, &mut buf[0] as *mut u8, 18); assert_eq!(buf[..18], buf[18..]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[1] as *mut u8, &mut buf[18] as *mut u8, 17); + mem_ops.memmove(&mut buf[1] as *mut u8, &mut buf[18] as *mut u8, 17); assert_eq!(buf[1..17], buf[18..34]); let buf = &mut [ 1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - compiler_builtins::mem::memmove(&mut buf[19] as *mut u8, &mut buf[1] as *mut u8, 17); + mem_ops.memmove(&mut buf[19] as *mut u8, &mut buf[1] as *mut u8, 17); assert_eq!(buf[..17], buf[19..]); - - // memset - let exp = &[1_u8; 18]; - let buf = &mut [0_u8; 18]; - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 1); - assert_eq!(exp[..1], buf[..1]); - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 3); - assert_eq!(exp[..3], buf[..3]); - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 8); - assert_eq!(exp[..8], buf[..8]); - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 9); - assert_eq!(exp[..9], buf[..9]); - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 16); - assert_eq!(exp[..16], buf[..16]); - compiler_builtins::mem::memset(&mut buf[0] as *mut u8, 1, 18); - assert_eq!(exp[..18], buf[..18]); - compiler_builtins::mem::memset(&mut buf[1] as *mut u8, 1, 17); - assert_eq!(exp[1..18], buf[1..18]); - - // memcmp - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&[0_u8] as *const u8, &[1_u8] as *const u8, 1) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp( - &[0_u8, 0, 0] as *const u8, - &[0_u8, 0, 1] as *const u8, - 3 - ) - ); - assert_eq!( - 0, - compiler_builtins::mem::memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0] as *const u8, - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0] as *const u8, - 9 - ) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0] as *const u8, - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 1] as *const u8, - 9 - ) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 0] as *const u8, - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 1] as *const u8, - 10 - ) - ); - assert_eq!( - 0, - compiler_builtins::mem::memcmp(&[0_u8; 8] as *const u8, &[0_u8; 8] as *const u8, 8) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&[0_u8; 8] as *const u8, &[1_u8; 8] as *const u8, 8) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&[0_u8; 16] as *const u8, &[1_u8; 16] as *const u8, 16) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&[0_u8; 18] as *const u8, &[1_u8; 18] as *const u8, 18) - ); - let one = &[0_u8; 18]; - let two = &[1_u8; 18]; - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&one[1] as *const u8, &two[0] as *const u8, 17) - ); - assert_eq!( - -1, - compiler_builtins::mem::memcmp(&one[1] as *const u8, &two[1] as *const u8, 17) - ); + let buf = &mut [0_u8, 0, 0, 1, 1, 1, 1, 1, 0]; + mem_ops.memmove(&mut buf[0] as *mut u8, &mut buf[3] as *mut u8, 5); + assert_eq!(buf, &mut [1, 1, 1, 1, 1, 1, 1, 1, 0]); } - SUCCESS -} + // memset + let exp = &[1_u8; 18]; + let buf = &mut [0_u8; 18]; + mem_ops.memset(&mut buf[0..], 1, 1); + assert_eq!(exp[..1], buf[..1]); + mem_ops.memset(&mut buf[0..], 1, 3); + assert_eq!(exp[..3], buf[..3]); + 
mem_ops.memset(&mut buf[0..], 1, 8); + assert_eq!(exp[..8], buf[..8]); + mem_ops.memset(&mut buf[0..], 1, 9); + assert_eq!(exp[..9], buf[..9]); + mem_ops.memset(&mut buf[0..], 1, 16); + assert_eq!(exp[..16], buf[..16]); + mem_ops.memset(&mut buf[0..], 1, 18); + assert_eq!(exp[..18], buf[..18]); + mem_ops.memset(&mut buf[1..], 1, 17); + assert_eq!(exp[1..18], buf[1..18]); -custom_panic_default!(); + // memcmp + assert_eq!(-1, mem_ops.memcmp(&[0_u8], &[1_u8], 1)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8, 0, 0], &[0_u8, 0, 1], 3)); + assert_eq!( + 0, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + 9 + ) + ); + assert_eq!( + -1, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 1], + 9 + ) + ); + assert_eq!( + -1, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 1], + 10 + ) + ); + assert_eq!(0, mem_ops.memcmp(&[0_u8; 8], &[0_u8; 8], 8)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 8], &[1_u8; 8], 8)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 16], &[1_u8; 16], 16)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 18], &[1_u8; 18], 18)); + let one = &[0_u8; 18]; + let two = &[1_u8; 18]; + assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[0..], 17)); + assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[1..], 17)); +} diff --git a/programs/bpf/rust/mem/tests/lib.rs b/programs/bpf/rust/mem/tests/lib.rs new file mode 100644 index 00000000000000..5bc591d728644d --- /dev/null +++ b/programs/bpf/rust/mem/tests/lib.rs @@ -0,0 +1,23 @@ +use solana_bpf_rust_mem::entrypoint::process_instruction; +use solana_program_test::*; +use solana_sdk::{ + instruction::Instruction, pubkey::Pubkey, signature::Signer, transaction::Transaction, +}; + +#[tokio::test] +async fn test_mem() { + let program_id = Pubkey::new_unique(); + let program_test = ProgramTest::new( + "solana_bpf_rust_mem", + program_id, + processor!(process_instruction), + ); + let (mut banks_client, payer, recent_blockhash) = program_test.start().await; + + let mut transaction = Transaction::new_with_payer( + &[Instruction::new_with_bincode(program_id, &(), vec![])], + Some(&payer.pubkey()), + ); + transaction.sign(&[&payer], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); +} diff --git a/programs/bpf/rust/membuiltins/Cargo.toml b/programs/bpf/rust/membuiltins/Cargo.toml new file mode 100644 index 00000000000000..e44acf6b325b30 --- /dev/null +++ b/programs/bpf/rust/membuiltins/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "solana-bpf-rust-membuiltins" +version = "1.7.11" +description = "Solana BPF test program written in Rust" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +documentation = "https://docs.rs/solana-bpf-rust-mem" +edition = "2018" + +[dependencies] +solana-bpf-rust-mem = { path = "../mem", version = "=1.7.11", features = [ "no-entrypoint" ] } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } + +[lib] +crate-type = ["cdylib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/membuiltins/src/lib.rs b/programs/bpf/rust/membuiltins/src/lib.rs new file mode 100644 index 00000000000000..1ce2ed339cc683 --- /dev/null +++ b/programs/bpf/rust/membuiltins/src/lib.rs @@ -0,0 +1,39 @@ +//! 
@brief Test builtin mem functions + +#![cfg(target_arch = "bpf")] +#![feature(rustc_private)] + +extern crate compiler_builtins; +use solana_bpf_rust_mem::{run_mem_tests, MemOps}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS}; + +#[no_mangle] +pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { + #[derive(Default)] + struct MemOpSyscalls(); + impl MemOps for MemOpSyscalls { + fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { + unsafe { + compiler_builtins::mem::memcpy(dst.as_mut_ptr(), src.as_ptr(), n); + } + } + unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize) { + compiler_builtins::mem::memmove(dst, src, n); + } + fn memset(&self, s: &mut [u8], c: u8, n: usize) { + unsafe { + compiler_builtins::mem::memset(s.as_mut_ptr(), c as i32, n); + } + } + fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { + unsafe { compiler_builtins::mem::memcmp(s1.as_ptr(), s2.as_ptr(), n) } + } + } + let mem_ops = MemOpSyscalls::default(); + + run_mem_tests(mem_ops); + + SUCCESS +} + +custom_panic_default!(); diff --git a/programs/bpf/rust/noop/Cargo.toml b/programs/bpf/rust/noop/Cargo.toml index 46773504ae5842..091f34704c3019 100644 --- a/programs/bpf/rust/noop/Cargo.toml +++ b/programs/bpf/rust/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-noop" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-noop" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/panic/Cargo.toml b/programs/bpf/rust/panic/Cargo.toml index 40908be5b58c0c..ac5d469c9c5463 100644 --- a/programs/bpf/rust/panic/Cargo.toml +++ b/programs/bpf/rust/panic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-panic" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-panic" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [features] default = ["custom-panic"] diff --git a/programs/bpf/rust/param_passing/Cargo.toml b/programs/bpf/rust/param_passing/Cargo.toml index d8ffac57ed5c5f..f9c51db57df18f 100644 --- a/programs/bpf/rust/param_passing/Cargo.toml +++ b/programs/bpf/rust/param_passing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } -solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } +solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git 
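With mem now split into the MemOps trait plus the shared run_mem_tests harness, the same assertions run over the syscall backend (entrypoint.rs above) and the compiler_builtins backend (membuiltins above). Any third backend that satisfies the trait gets the suite for free; a minimal host-side sketch over plain std primitives (MemOpsStd is hypothetical, not part of the patch):

struct MemOpsStd;
impl MemOps for MemOpsStd {
    fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) {
        dst[..n].copy_from_slice(&src[..n]); // non-overlapping by construction
    }
    unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize) {
        std::ptr::copy(src, dst, n); // overlap-safe, the libc memmove contract
    }
    fn memset(&self, s: &mut [u8], c: u8, n: usize) {
        for b in &mut s[..n] {
            *b = c;
        }
    }
    fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 {
        // raw difference of the first mismatching pair, matching the syscall
        for i in 0..n {
            if s1[i] != s2[i] {
                return s1[i] as i32 - s2[i] as i32;
            }
        }
        0
    }
}
// run_mem_tests(MemOpsStd); then exercises the identical assertions natively.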
a/programs/bpf/rust/param_passing_dep/Cargo.toml b/programs/bpf/rust/param_passing_dep/Cargo.toml index 085ff287f9b07b..47bc0b105aabb7 100644 --- a/programs/bpf/rust/param_passing_dep/Cargo.toml +++ b/programs/bpf/rust/param_passing_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing-dep" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/rand/Cargo.toml b/programs/bpf/rust/rand/Cargo.toml index bc6ffc9fa1fcf6..650d90fe2dba17 100644 --- a/programs/bpf/rust/rand/Cargo.toml +++ b/programs/bpf/rust/rand/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-rand" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] getrandom = { version = "0.1.14", features = ["dummy"] } rand = "0.7" -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/ro_account_modify/Cargo.toml b/programs/bpf/rust/ro_account_modify/Cargo.toml new file mode 100644 index 00000000000000..0967a80015c8a6 --- /dev/null +++ b/programs/bpf/rust/ro_account_modify/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-bpf-rust-ro-account_modify" +version = "1.7.11" +description = "Solana BPF test program written in Rust" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +documentation = "https://docs.rs/solana-bpf-rust-ro-modify" +edition = "2018" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } + +[lib] +crate-type = ["cdylib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/ro_account_modify/src/lib.rs b/programs/bpf/rust/ro_account_modify/src/lib.rs new file mode 100644 index 00000000000000..8a69f09a89740a --- /dev/null +++ b/programs/bpf/rust/ro_account_modify/src/lib.rs @@ -0,0 +1,70 @@ +use solana_program::{ + account_info::AccountInfo, + entrypoint, + entrypoint::ProgramResult, + instruction::{AccountMeta, Instruction}, + msg, + program::invoke, + pubkey::Pubkey, +}; + +const ARGUMENT_INDEX: usize = 0; + +const INSTRUCTION_MODIFY: u8 = 0; +const INSTRUCTION_INVOKE_MODIFY: u8 = 1; +const INSTRUCTION_MODIFY_INVOKE: u8 = 2; +const INSTRUCTION_VERIFY_MODIFIED: u8 = 3; + +entrypoint!(process_instruction); +fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + assert!(!accounts[ARGUMENT_INDEX].is_writable); + + match instruction_data[0] { + INSTRUCTION_MODIFY => { + msg!("modify ro account"); + assert_eq!(0, accounts[ARGUMENT_INDEX].try_borrow_data()?[0]); + accounts[ARGUMENT_INDEX].try_borrow_mut_data()?[0] = 1; + } + INSTRUCTION_INVOKE_MODIFY => { + msg!("invoke and modify ro account"); + + assert_eq!(0, 
accounts[ARGUMENT_INDEX].try_borrow_data()?[0]); + + let instruction = Instruction { + program_id: *program_id, + accounts: vec![AccountMeta::new_readonly( + *accounts[ARGUMENT_INDEX].key, + false, + )], + data: vec![INSTRUCTION_MODIFY], + }; + invoke(&instruction, accounts)?; + } + INSTRUCTION_MODIFY_INVOKE => { + msg!("modify and invoke ro account"); + + assert_eq!(0, accounts[ARGUMENT_INDEX].try_borrow_data()?[0]); + accounts[ARGUMENT_INDEX].try_borrow_mut_data()?[0] = 1; + + let instruction = Instruction { + program_id: *program_id, + accounts: vec![AccountMeta::new_readonly( + *accounts[ARGUMENT_INDEX].key, + false, + )], + data: vec![INSTRUCTION_VERIFY_MODIFIED], + }; + invoke(&instruction, accounts)?; + } + INSTRUCTION_VERIFY_MODIFIED => { + msg!("verify modified"); + assert_eq!(1, accounts[ARGUMENT_INDEX].try_borrow_data()?[0]) + } + _ => panic!("Unknown instruction"), + } + Ok(()) +} diff --git a/programs/bpf/rust/ro_modify/Cargo.toml b/programs/bpf/rust/ro_modify/Cargo.toml index 1e3db09e3135e7..bac21ef9813b5b 100644 --- a/programs/bpf/rust/ro_modify/Cargo.toml +++ b/programs/bpf/rust/ro_modify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-ro-modify" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sanity/Cargo.toml b/programs/bpf/rust/sanity/Cargo.toml index 574fe56ab93a4b..d09474490a9dc6 100644 --- a/programs/bpf/rust/sanity/Cargo.toml +++ b/programs/bpf/rust/sanity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sanity" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-sanity" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/secp256k1_recover/Cargo.toml b/programs/bpf/rust/secp256k1_recover/Cargo.toml new file mode 100644 index 00000000000000..840d1006da9318 --- /dev/null +++ b/programs/bpf/rust/secp256k1_recover/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-bpf-rust-secp256k1-recover" +version = "1.7.11" +description = "Solana BPF test program written in Rust" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +documentation = "https://docs.rs/solana-bpf-rust-secp256k1-recover" +edition = "2018" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } + +[lib] +crate-type = ["cdylib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/secp256k1_recover/src/lib.rs b/programs/bpf/rust/secp256k1_recover/src/lib.rs new file mode 100644 index 00000000000000..f452bbbf759939 --- /dev/null +++ b/programs/bpf/rust/secp256k1_recover/src/lib.rs @@ -0,0 +1,44 @@ +//! 
@brief Secp256k1Recover Syscall test + +extern crate solana_program; +use solana_program::{custom_panic_default, msg}; + +fn test_secp256k1_recover() { + use solana_program::secp256k1_recover::secp256k1_recover; + + let expected: [u8; 64] = [ + 0x42, 0xcd, 0x27, 0xe4, 0x0f, 0xdf, 0x7c, 0x97, 0x0a, 0xa2, 0xca, 0x0b, 0x88, 0x5b, 0x96, + 0x0f, 0x8b, 0x62, 0x8a, 0x41, 0xa1, 0x81, 0xe7, 0xe6, 0x8e, 0x03, 0xea, 0x0b, 0x84, 0x20, + 0x58, 0x9b, 0x32, 0x06, 0xbd, 0x66, 0x2f, 0x75, 0x65, 0xd6, 0x9d, 0xbd, 0x1d, 0x34, 0x29, + 0x6a, 0xd9, 0x35, 0x38, 0xed, 0x86, 0x9e, 0x99, 0x20, 0x43, 0xc3, 0xeb, 0xad, 0x65, 0x50, + 0xa0, 0x11, 0x6e, 0x5d, + ]; + + let hash: [u8; 32] = [ + 0xde, 0xa5, 0x66, 0xb6, 0x94, 0x3b, 0xe0, 0xe9, 0x62, 0x53, 0xc2, 0x21, 0x5b, 0x1b, 0xac, + 0x69, 0xe7, 0xa8, 0x1e, 0xdb, 0x41, 0xc5, 0x02, 0x8b, 0x4f, 0x5c, 0x45, 0xc5, 0x3b, 0x49, + 0x54, 0xd0, + ]; + let recovery_id: u8 = 1; + let signature: [u8; 64] = [ + 0x97, 0xa4, 0xee, 0x31, 0xfe, 0x82, 0x65, 0x72, 0x9f, 0x4a, 0xa6, 0x7d, 0x24, 0xd4, 0xa7, + 0x27, 0xf8, 0xc3, 0x15, 0xa4, 0xc8, 0xf9, 0x80, 0xeb, 0x4c, 0x4d, 0x4a, 0xfa, 0x6e, 0xc9, + 0x42, 0x41, 0x5d, 0x10, 0xd9, 0xc2, 0x8a, 0x90, 0xe9, 0x92, 0x9c, 0x52, 0x4b, 0x2c, 0xfb, + 0x65, 0xdf, 0xbc, 0xf6, 0x8c, 0xfd, 0x68, 0xdb, 0x17, 0xf9, 0x5d, 0x23, 0x5f, 0x96, 0xd8, + 0xf0, 0x72, 0x01, 0x2d, + ]; + + let public_key = secp256k1_recover(&hash[..], recovery_id, &signature[..]).unwrap(); + assert_eq!(public_key.to_bytes(), expected); +} + +#[no_mangle] +pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { + msg!("secp256k1_recover"); + + test_secp256k1_recover(); + + 0 +} + +custom_panic_default!(); diff --git a/programs/bpf/rust/sha/Cargo.toml b/programs/bpf/rust/sha/Cargo.toml index 06e8eddfb39da7..21c69ee5a1d708 100644 --- a/programs/bpf/rust/sha/Cargo.toml +++ b/programs/bpf/rust/sha/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sha" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-sha" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1/Cargo.toml b/programs/bpf/rust/spoof1/Cargo.toml index 47c18bb99619ff..1a84e08f8cb865 100644 --- a/programs/bpf/rust/spoof1/Cargo.toml +++ b/programs/bpf/rust/spoof1/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-spoof1" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1_system/Cargo.toml b/programs/bpf/rust/spoof1_system/Cargo.toml index c4221a0d0674b8..0e2abea6ebdce2 100644 --- a/programs/bpf/rust/spoof1_system/Cargo.toml +++ b/programs/bpf/rust/spoof1_system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-spoof1-system" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = 
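The 64 bytes asserted here are the SEC1 uncompressed public key with its leading 0x04 tag dropped, which is exactly what Ethereum-style address derivation consumes; a follow-on sketch (the address step is illustrative, not something this test performs):

use solana_program::keccak;
// keccak the recovered 64-byte key; the low 20 bytes are the Ethereum-style address
let mut eth_address = [0u8; 20];
eth_address.copy_from_slice(&keccak::hash(&public_key.to_bytes()).0[12..]);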
"https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1-system" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sysvar/Cargo.toml b/programs/bpf/rust/sysvar/Cargo.toml index 90d4b77c8df0b3..81940fb331766e 100644 --- a/programs/bpf/rust/sysvar/Cargo.toml +++ b/programs/bpf/rust/sysvar/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sysvar" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,11 +10,11 @@ documentation = "https://docs.rs/solana-bpf-rust-sysvar" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [dev-dependencies] -solana-program-test = { path = "../../../../program-test", version = "=1.7.0" } -solana-sdk = { path = "../../../../sdk", version = "=1.7.0" } +solana-program-test = { path = "../../../../program-test", version = "=1.7.11" } +solana-sdk = { path = "../../../../sdk", version = "=1.7.11" } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/bpf/rust/sysvar/tests/lib.rs b/programs/bpf/rust/sysvar/tests/lib.rs index 370cd9e3018418..bb0fbd59ae768b 100644 --- a/programs/bpf/rust/sysvar/tests/lib.rs +++ b/programs/bpf/rust/sysvar/tests/lib.rs @@ -12,7 +12,7 @@ use solana_sdk::{ }; #[tokio::test] -async fn test_noop() { +async fn test_sysvars() { let program_id = Pubkey::new_unique(); let program_test = ProgramTest::new( "solana_bpf_rust_sysvar", diff --git a/programs/bpf/rust/upgradeable/Cargo.toml b/programs/bpf/rust/upgradeable/Cargo.toml index 0d326957a30cc0..f5e7c7ab1c22af 100644 --- a/programs/bpf/rust/upgradeable/Cargo.toml +++ b/programs/bpf/rust/upgradeable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgradeable" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgradeable" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] name = "solana_bpf_rust_upgradeable" diff --git a/programs/bpf/rust/upgraded/Cargo.toml b/programs/bpf/rust/upgraded/Cargo.toml index 872ca3e5f53140..852e53a4f333f8 100644 --- a/programs/bpf/rust/upgraded/Cargo.toml +++ b/programs/bpf/rust/upgraded/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgraded" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgraded" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.7.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.7.11" } [lib] name = "solana_bpf_rust_upgraded" diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index b95e4d7fca2484..c394f72b98e5ca 100644 --- a/programs/bpf/tests/programs.rs +++ 
b/programs/bpf/tests/programs.rs @@ -17,7 +17,7 @@ use solana_bpf_loader_program::{ use solana_cli_output::display::println_transaction; use solana_rbpf::{ static_analysis::Analysis, - vm::{Config, Executable, Tracer} + vm::{Config, Executable, Tracer}, }; use solana_runtime::{ bank::{Bank, ExecuteTimings, NonceRollbackInfo, TransactionBalancesSet, TransactionResults}, @@ -278,7 +278,6 @@ fn run_program( &bpf_loader::id(), parameter_accounts, parameter_bytes.as_slice(), - true, ) .unwrap(); } @@ -293,26 +292,24 @@ fn process_transaction_and_record_inner( let signature = tx.signatures.get(0).unwrap().clone(); let txs = vec![tx]; let tx_batch = bank.prepare_batch(txs.iter()); - let (mut results, _, mut inner, _transaction_logs) = bank.load_execute_and_commit_transactions( - &tx_batch, - MAX_PROCESSING_AGE, - false, - true, - false, - &mut ExecuteTimings::default(), - ); - let inner_instructions = if inner.is_empty() { - Some(vec![vec![]]) - } else { - inner.swap_remove(0) - }; + let (mut results, _, mut inner_instructions, _transaction_logs) = bank + .load_execute_and_commit_transactions( + &tx_batch, + MAX_PROCESSING_AGE, + false, + true, + false, + &mut ExecuteTimings::default(), + ); let result = results .fee_collection_results .swap_remove(0) .and_then(|_| bank.get_signature_status(&signature).unwrap()); ( result, - inner_instructions.expect("cpi recording should be enabled"), + inner_instructions + .swap_remove(0) + .expect("cpi recording should be enabled"), ) } @@ -330,8 +327,8 @@ fn execute_transactions(bank: &Bank, txs: &[Transaction]) -> Vec Vec Vec = inner_instructions[0] .iter() @@ -875,6 +871,9 @@ fn test_program_bpf_invoke_sanity() { invoked_program_id.clone(), invoked_program_id.clone(), invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), ], Languages::Rust => vec![ solana_sdk::system_program::id(), @@ -895,6 +894,9 @@ fn test_program_bpf_invoke_sanity() { invoked_program_id.clone(), invoked_program_id.clone(), invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), solana_sdk::system_program::id(), ], }; @@ -930,7 +932,7 @@ fn test_program_bpf_invoke_sanity() { .iter() .map(|ix| message.account_keys[ix.program_id_index as usize].clone()) .collect(); - assert_eq!(result.unwrap_err(), expected_error); + assert_eq!(result, Err(expected_error)); assert_eq!(invoked_programs, expected_invoked_programs); }; @@ -1000,6 +1002,30 @@ fn test_program_bpf_invoke_sanity() { &[invoked_program_id.clone()], ); + do_invoke_failure_test_local( + TEST_WRITABLE_DEESCALATION_WRITABLE, + TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified), + &[invoked_program_id.clone()], + ); + + do_invoke_failure_test_local( + TEST_NESTED_INVOKE_TOO_DEEP, + TransactionError::InstructionError(0, InstructionError::CallDepth), + &[ + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + ], + ); + + do_invoke_failure_test_local( + TEST_EXECUTABLE_LAMPORTS, + TransactionError::InstructionError(0, InstructionError::ExecutableLamportChange), + &[invoke_program_id.clone()], + ); + // Check resulting state assert_eq!(43, bank.get_balance(&derived_key1)); @@ -1252,11 +1278,12 @@ fn assert_instruction_count() { ("alloc", 1137), ("bpf_to_bpf", 13), ("multiple_static", 8), - ("noop", 5), - ("noop++", 5), + ("noop", 42), + ("noop++", 42), ("relative_call", 10), - ("sanity", 169), - 
("sanity++", 168), + ("sanity", 174), + ("sanity++", 174), + ("secp256k1_recover", 357), ("sha", 694), ("struct_pass", 8), ("struct_ret", 22), @@ -1266,18 +1293,20 @@ fn assert_instruction_count() { { programs.extend_from_slice(&[ ("solana_bpf_rust_128bit", 584), - ("solana_bpf_rust_alloc", 4967), - ("solana_bpf_rust_custom_heap", 365), + ("solana_bpf_rust_alloc", 8906), + ("solana_bpf_rust_custom_heap", 539), ("solana_bpf_rust_dep_crate", 2), - ("solana_bpf_rust_external_spend", 334), - ("solana_bpf_rust_iter", 8), - ("solana_bpf_rust_many_args", 189), - ("solana_bpf_rust_mem", 1665), - ("solana_bpf_rust_noop", 322), + ("solana_bpf_rust_external_spend", 521), + ("solana_bpf_rust_iter", 724), + ("solana_bpf_rust_many_args", 237), + ("solana_bpf_rust_mem", 3166), + ("solana_bpf_rust_membuiltins", 4069), + ("solana_bpf_rust_noop", 495), ("solana_bpf_rust_param_passing", 46), - ("solana_bpf_rust_rand", 325), - ("solana_bpf_rust_sanity", 587), - ("solana_bpf_rust_sha", 22417), + ("solana_bpf_rust_rand", 498), + ("solana_bpf_rust_sanity", 917), + ("solana_bpf_rust_secp256k1_recover", 306), + ("solana_bpf_rust_sha", 29131), ]); } @@ -2454,3 +2483,68 @@ fn test_program_bpf_finalize() { TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) ); } + +#[cfg(feature = "bpf_rust")] +#[test] +fn test_program_bpf_ro_account_modify() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(50); + let mut bank = Bank::new(&genesis_config); + let (name, id, entrypoint) = solana_bpf_loader_program!(); + bank.add_builtin(&name, id, entrypoint); + let bank = Arc::new(bank); + let bank_client = BankClient::new_shared(&bank); + + let program_id = load_bpf_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_bpf_rust_ro_account_modify", + ); + + let argument_keypair = Keypair::new(); + let account = AccountSharedData::new(42, 100, &program_id); + bank.store_account(&argument_keypair.pubkey(), &account); + + let from_keypair = Keypair::new(); + let account = AccountSharedData::new(84, 0, &solana_sdk::system_program::id()); + bank.store_account(&from_keypair.pubkey(), &account); + + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new_readonly(argument_keypair.pubkey(), false), + AccountMeta::new_readonly(program_id, false), + ]; + + let instruction = Instruction::new_with_bytes(program_id, &[0], account_metas.clone()); + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let result = bank_client.send_and_confirm_message(&[&mint_keypair], message); + println!("result: {:?}", result); + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified) + ); + + let instruction = Instruction::new_with_bytes(program_id, &[1], account_metas.clone()); + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let result = bank_client.send_and_confirm_message(&[&mint_keypair], message); + println!("result: {:?}", result); + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified) + ); + + let instruction = Instruction::new_with_bytes(program_id, &[2], account_metas.clone()); + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let result = bank_client.send_and_confirm_message(&[&mint_keypair], message); + println!("result: {:?}", result); + assert_eq!( + result.unwrap_err().unwrap(), + 
TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified) + ); +} diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index ba7c4970bc2c03..929a7a0f1c8d07 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-loader-program" -version = "1.7.0" +version = "1.7.11" description = "Solana BPF loader" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,10 +16,11 @@ log = "0.4.11" num-derive = "0.3" num-traits = "0.2" rand_core = "0.6.2" +libsecp256k1 = "0.5.0" sha3 = "0.9.1" -solana-measure = { path = "../../measure", version = "=1.7.0" } -solana-runtime = { path = "../../runtime", version = "=1.7.0" } -solana-sdk = { path = "../../sdk", version = "=1.7.0" } +solana-measure = { path = "../../measure", version = "=1.7.11" } +solana-runtime = { path = "../../runtime", version = "=1.7.11" } +solana-sdk = { path = "../../sdk", version = "=1.7.11" } solana_rbpf = "=0.2.11" thiserror = "1.0" diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index e99c953b496d4d..472c7953053aa2 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -107,9 +107,9 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -128,9 +128,9 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 2cad10a0b5e263..7bb85a7e89d2c2 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -31,14 +31,15 @@ use solana_sdk::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::Clock, - entrypoint::SUCCESS, - feature_set::{skip_ro_deserialization, upgradeable_close_instruction}, + entrypoint::{HEAP_LENGTH, SUCCESS}, + feature_set::{add_missing_program_error_mappings, upgradeable_close_instruction}, ic_logger_msg, ic_msg, instruction::InstructionError, keyed_account::{from_keyed_account, keyed_account_at_index}, loader_instruction::LoaderInstruction, loader_upgradeable_instruction::UpgradeableLoaderInstruction, process_instruction::{stable_log, ComputeMeter, Executor, InvokeContext}, + program_error::{ACCOUNT_NOT_RENT_EXEMPT, BORSH_IO_ERROR}, program_utils::limited_deserialize, pubkey::Pubkey, rent::Rent, @@ -128,7 +129,7 @@ fn write_program_data( ); return Err(InstructionError::AccountDataTooSmall); } - data[program_data_offset..program_data_offset + len].copy_from_slice(&bytes); + data[program_data_offset..program_data_offset + len].copy_from_slice(bytes); Ok(()) } @@ -138,10 +139,6 @@ fn check_loader_id(id: &Pubkey) -> bool { || bpf_loader_upgradeable::check_id(id) } -/// Default program heap size, allocators -/// are expected to enforce this -const DEFAULT_HEAP_SIZE: usize = 32 * 1024; - /// Create the BPF virtual machine pub fn create_vm<'a>( loader_id: &'a 
Pubkey, @@ -149,7 +146,11 @@ pub fn create_vm<'a>( parameter_bytes: &mut [u8], invoke_context: &'a mut dyn InvokeContext, ) -> Result, EbpfError> { - let heap = AlignedMemory::new_with_size(DEFAULT_HEAP_SIZE, HOST_ALIGN); + let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); + let heap = AlignedMemory::new_with_size( + bpf_compute_budget.heap_size.unwrap_or(HEAP_LENGTH), + HOST_ALIGN, + ); let heap_region = MemoryRegion::new_from_slice(heap.as_slice(), MM_HEAP_START, 0, true); let mut vm = EbpfVm::new(program, parameter_bytes, &[heap_region])?; syscalls::bind_syscall_context_objects(loader_id, &mut vm, invoke_context, heap)?; @@ -369,7 +370,7 @@ fn process_loader_upgradeable_instruction( // Create ProgramData account let (derived_address, bump_seed) = - Pubkey::find_program_address(&[program.unsigned_key().as_ref()], &program_id); + Pubkey::find_program_address(&[program.unsigned_key().as_ref()], program_id); if derived_address != *programdata.unsigned_key() { ic_logger_msg!(logger, "ProgramData address is not derived"); return Err(InstructionError::InvalidArgument); @@ -751,13 +752,15 @@ impl Executor for BpfExecutor { ) -> Result<(), InstructionError> { let logger = invoke_context.get_logger(); let invoke_depth = invoke_context.invoke_depth(); + let add_missing_program_error_mappings = + invoke_context.is_feature_active(&add_missing_program_error_mappings::id()); invoke_context.remove_first_keyed_account()?; let mut serialize_time = Measure::start("serialize"); let keyed_accounts = invoke_context.get_keyed_accounts()?; let mut parameter_bytes = - serialize_parameters(loader_id, program_id, keyed_accounts, &instruction_data)?; + serialize_parameters(loader_id, program_id, keyed_accounts, instruction_data)?; serialize_time.stop(); let mut create_vm_time = Measure::start("create_vm"); let mut execute_time; @@ -804,7 +807,14 @@ impl Executor for BpfExecutor { match result { Ok(status) => { if status != SUCCESS { - let error: InstructionError = status.into(); + let error: InstructionError = if !add_missing_program_error_mappings + && (status == ACCOUNT_NOT_RENT_EXEMPT || status == BORSH_IO_ERROR) + { + // map originally missing error mappings to InvalidError + InstructionError::InvalidError + } else { + status.into() + }; stable_log::program_failure(&logger, program_id, &error); return Err(error); } @@ -827,12 +837,7 @@ impl Executor for BpfExecutor { } let mut deserialize_time = Measure::start("deserialize"); let keyed_accounts = invoke_context.get_keyed_accounts()?; - deserialize_parameters( - loader_id, - keyed_accounts, - parameter_bytes.as_slice(), - invoke_context.is_feature_active(&skip_ro_deserialization::id()), - )?; + deserialize_parameters(loader_id, keyed_accounts, parameter_bytes.as_slice())?; deserialize_time.stop(); invoke_context.update_timing( serialize_time.as_us(), @@ -1128,6 +1133,7 @@ mod tests { programs: vec![], accounts: vec![], sysvars: vec![], + disabled_features: vec![].into_iter().collect(), }; assert_eq!( Err(InstructionError::ProgramFailedToComplete), @@ -2188,8 +2194,10 @@ mod tests { let upgrade_authority_address = Pubkey::new_unique(); let buffer_address = Pubkey::new_unique(); let program_address = Pubkey::new_unique(); - let (programdata_address, _) = - Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let (programdata_address, _) = Pubkey::find_program_address( + &[program_address.as_ref()], + &bpf_loader_upgradeable::id(), + ); let spill_address = Pubkey::new_unique(); let upgrade_authority_account = 
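create_vm above now sizes the heap from the compute budget instead of the removed DEFAULT_HEAP_SIZE constant; since the SDK's HEAP_LENGTH is the same 32 KiB, behavior only changes when a budget actually sets heap_size. The selection logic, as a small sketch (effective_heap_len is hypothetical):

fn effective_heap_len(heap_size: Option<usize>) -> usize {
    // None keeps the historical 32 KiB heap; Some(n) is a budget override
    heap_size.unwrap_or(HEAP_LENGTH)
}
assert_eq!(32 * 1024, effective_heap_len(None));
assert_eq!(256 * 1024, effective_heap_len(Some(256 * 1024)));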
AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let rent_id = sysvar::rent::id(); @@ -2224,7 +2232,7 @@ mod tests { .unwrap(); buffer_account.borrow_mut().data_as_mut_slice() [UpgradeableLoaderState::buffer_data_offset().unwrap()..] - .copy_from_slice(&elf_new); + .copy_from_slice(elf_new); let programdata_account = AccountSharedData::new_ref( min_programdata_balance, UpgradeableLoaderState::programdata_len(elf_orig.len().max(elf_new.len())).unwrap(), @@ -2842,8 +2850,10 @@ mod tests { let new_upgrade_authority_address = Pubkey::new_unique(); let new_upgrade_authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let program_address = Pubkey::new_unique(); - let (programdata_address, _) = - Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let (programdata_address, _) = Pubkey::find_program_address( + &[program_address.as_ref()], + &bpf_loader_upgradeable::id(), + ); let programdata_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::programdata_len(0).unwrap(), diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 31683274e5950b..4f842abebb36d7 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -40,12 +40,11 @@ pub fn deserialize_parameters( loader_id: &Pubkey, keyed_accounts: &[KeyedAccount], buffer: &[u8], - skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { if *loader_id == bpf_loader_deprecated::id() { - deserialize_parameters_unaligned(keyed_accounts, buffer, skip_ro_deserialization) + deserialize_parameters_unaligned(keyed_accounts, buffer) } else { - deserialize_parameters_aligned(keyed_accounts, buffer, skip_ro_deserialization) + deserialize_parameters_aligned(keyed_accounts, buffer) } } @@ -105,7 +104,7 @@ pub fn serialize_parameters_unaligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.write_all(keyed_account.owner()?.as_ref()) .map_err(|_| InstructionError::InvalidArgument)?; @@ -127,33 +126,28 @@ pub fn serialize_parameters_unaligned( pub fn deserialize_parameters_unaligned( keyed_accounts: &[KeyedAccount], buffer: &[u8], - skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (i, keyed_account) in keyed_accounts.iter().enumerate() { let (is_dup, _) = is_dup(&keyed_accounts[..i], keyed_account); start += 1; // is_dup if !is_dup { - if keyed_account.is_writable() || !skip_ro_deserialization { - start += size_of::(); // is_signer - start += size_of::(); // is_writable - start += size_of::(); // key - keyed_account - .try_account_ref_mut()? - .set_lamports(LittleEndian::read_u64(&buffer[start..])); - start += size_of::() // lamports + start += size_of::(); // is_signer + start += size_of::(); // is_writable + start += size_of::(); // key + keyed_account + .try_account_ref_mut()? + .set_lamports(LittleEndian::read_u64(&buffer[start..])); + start += size_of::() // lamports + size_of::(); // data length - let end = start + keyed_account.data_len()?; - keyed_account - .try_account_ref_mut()? - .set_data_from_slice(&buffer[start..end]); - start += keyed_account.data_len()? // data + let end = start + keyed_account.data_len()?; + keyed_account + .try_account_ref_mut()? 
+ .set_data_from_slice(&buffer[start..end]); + start += keyed_account.data_len()? // data + size_of::() // owner + size_of::() // executable + size_of::(); // rent_epoch - } else { - start += get_serialized_account_size_unaligned(keyed_account)?; - } } } Ok(()) @@ -229,7 +223,7 @@ pub fn serialize_parameters_aligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.resize( MAX_PERMITTED_DATA_INCREASE @@ -253,7 +247,6 @@ pub fn serialize_parameters_aligned( pub fn deserialize_parameters_aligned( keyed_accounts: &[KeyedAccount], buffer: &[u8], - skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (i, keyed_account) in keyed_accounts.iter().enumerate() { @@ -261,7 +254,7 @@ pub fn deserialize_parameters_aligned( start += size_of::(); // position if is_dup { start += 7; // padding to 64-bit aligned - } else if keyed_account.is_writable() || !skip_ro_deserialization { + } else { let mut account = keyed_account.try_account_ref_mut()?; start += size_of::() // is_signer + size_of::() // is_writable @@ -286,8 +279,6 @@ pub fn deserialize_parameters_aligned( start += pre_len + MAX_PERMITTED_DATA_INCREASE; // data start += (start as *const u8).align_offset(align_of::()); start += size_of::(); // rent_epoch - } else { - start += get_serialized_account_size_aligned(keyed_account)?; } } Ok(()) @@ -391,9 +382,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -448,30 +439,19 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); - deserialize_parameters( - &bpf_loader::id(), - &de_keyed_accounts, - serialized.as_slice(), - true, - ) - .unwrap(); + deserialize_parameters(&bpf_loader::id(), &de_keyed_accounts, serialized.as_slice()) + .unwrap(); for ((account, de_keyed_account), key) in accounts.iter().zip(de_keyed_accounts).zip(keys.clone()) { assert_eq!(key, *de_keyed_account.unsigned_key()); let account = account.borrow(); - assert_eq!(account.lamports(), de_keyed_account.lamports().unwrap()); - assert_eq!( - account.data(), - de_keyed_account.try_account_ref().unwrap().data() - ); - assert_eq!(*account.owner(), de_keyed_account.owner().unwrap()); assert_eq!(account.executable(), de_keyed_account.executable().unwrap()); assert_eq!(account.rent_epoch(), de_keyed_account.rent_epoch().unwrap()); } @@ -507,9 +487,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i < accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -517,7 +497,6 @@ mod tests { &bpf_loader_deprecated::id(), &de_keyed_accounts, serialized.as_slice(), - true, ) .unwrap(); for ((account, de_keyed_account), key) in diff --git 
a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 15406fe1ed3e48..bbc507ec683cf9 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -19,9 +19,10 @@ use solana_sdk::{ entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, epoch_schedule::EpochSchedule, feature_set::{ - cpi_data_cost, cpi_share_ro_and_exec_accounts, demote_sysvar_write_locks, - enforce_aligned_host_addrs, keccak256_syscall_enabled, - set_upgrade_authority_via_cpi_enabled, sysvar_via_syscall, update_data_on_realloc, + cpi_data_cost, enforce_aligned_host_addrs, keccak256_syscall_enabled, + libsecp256k1_0_5_upgrade_enabled, mem_overlap_fix, memory_ops_syscalls, + secp256k1_recover_syscall_enabled, set_upgrade_authority_via_cpi_enabled, + sysvar_via_syscall, update_data_on_realloc, }, hash::{Hasher, HASH_BYTES}, ic_msg, @@ -32,6 +33,9 @@ use solana_sdk::{ process_instruction::{self, stable_log, ComputeMeter, InvokeContext, Logger}, pubkey::{Pubkey, PubkeyError, MAX_SEEDS}, rent::Rent, + secp256k1_recover::{ + Secp256k1RecoverError, SECP256K1_PUBLIC_KEY_LENGTH, SECP256K1_SIGNATURE_LENGTH, + }, sysvar::{self, fees::Fees, Sysvar, SysvarId}, }; use std::{ @@ -74,6 +78,8 @@ pub enum SyscallError { InstructionTooLarge(usize, usize), #[error("Too many accounts passed to inner instruction")] TooManyAccounts, + #[error("Overlapping copy")] + CopyOverlapping, } impl From for EbpfError { fn from(error: SyscallError) -> Self { @@ -132,6 +138,11 @@ pub fn register_syscalls( syscall_registry.register_syscall_by_name(b"sol_keccak256", SyscallKeccak256::call)?; } + if invoke_context.is_feature_active(&secp256k1_recover_syscall_enabled::id()) { + syscall_registry + .register_syscall_by_name(b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; + } + if invoke_context.is_feature_active(&sysvar_via_syscall::id()) { syscall_registry .register_syscall_by_name(b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; @@ -145,10 +156,20 @@ pub fn register_syscalls( .register_syscall_by_name(b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; } + if invoke_context.is_feature_active(&memory_ops_syscalls::id()) { + syscall_registry.register_syscall_by_name(b"sol_memcpy_", SyscallMemcpy::call)?; + syscall_registry.register_syscall_by_name(b"sol_memmove_", SyscallMemmove::call)?; + syscall_registry.register_syscall_by_name(b"sol_memcmp_", SyscallMemcmp::call)?; + syscall_registry.register_syscall_by_name(b"sol_memset_", SyscallMemset::call)?; + } + + // Cross-program invocation syscalls syscall_registry .register_syscall_by_name(b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; syscall_registry .register_syscall_by_name(b"sol_invoke_signed_rust", SyscallInvokeSignedRust::call)?; + + // Memory allocator syscall_registry.register_syscall_by_name(b"sol_alloc_free_", SyscallAllocFree::call)?; Ok(syscall_registry) @@ -268,6 +289,56 @@ pub fn bind_syscall_context_objects<'a>( }), ); + bind_feature_gated_syscall_context_object!( + vm, + invoke_context.is_feature_active(&memory_ops_syscalls::id()), + Box::new(SyscallMemcpy { + cost: invoke_context.get_bpf_compute_budget().cpi_bytes_per_unit, + compute_meter: invoke_context.get_compute_meter(), + loader_id, + mem_overlap_fix: invoke_context.is_feature_active(&mem_overlap_fix::id()), + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + invoke_context.is_feature_active(&memory_ops_syscalls::id()), + Box::new(SyscallMemmove { + cost: invoke_context.get_bpf_compute_budget().cpi_bytes_per_unit, + compute_meter: 
invoke_context.get_compute_meter(), + loader_id, + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + invoke_context.is_feature_active(&memory_ops_syscalls::id()), + Box::new(SyscallMemcmp { + cost: invoke_context.get_bpf_compute_budget().cpi_bytes_per_unit, + compute_meter: invoke_context.get_compute_meter(), + loader_id, + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + invoke_context.is_feature_active(&memory_ops_syscalls::id()), + Box::new(SyscallMemset { + cost: invoke_context.get_bpf_compute_budget().cpi_bytes_per_unit, + compute_meter: invoke_context.get_compute_meter(), + loader_id, + }), + ); + + bind_feature_gated_syscall_context_object!( + vm, + invoke_context.is_feature_active(&secp256k1_recover_syscall_enabled::id()), + Box::new(SyscallSecp256k1Recover { + cost: bpf_compute_budget.secp256k1_recover_cost, + compute_meter: invoke_context.get_compute_meter(), + loader_id, + libsecp256k1_0_5_upgrade_enabled: invoke_context + .is_feature_active(&libsecp256k1_0_5_upgrade_enabled::id()), + }), + ); + let is_sysvar_via_syscall_active = invoke_context.is_feature_active(&sysvar_via_syscall::id()); let invoke_context = Rc::new(RefCell::new(invoke_context)); @@ -322,7 +393,6 @@ pub fn bind_syscall_context_objects<'a>( )?; // Memory allocator - vm.bind_syscall_context_object( Box::new(SyscallAllocFree { aligned: *loader_id != bpf_loader_deprecated::id(), @@ -535,7 +605,7 @@ impl<'a> SyscallObject<BpfError> for SyscallPanic<'a> { memory_mapping, file, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()), ); @@ -566,7 +636,7 @@ impl<'a> SyscallObject<BpfError> for SyscallLog<'a> { memory_mapping, addr, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| { stable_log::program_log(&self.logger, string); @@ -1137,6 +1207,252 @@ impl<'a> SyscallObject<BpfError> for SyscallKeccak256<'a> { } } +fn check_overlapping(src_addr: u64, dst_addr: u64, n: u64) -> bool { + (src_addr <= dst_addr && src_addr + n > dst_addr) + || (dst_addr <= src_addr && dst_addr + n > src_addr) +} + +/// memcpy +pub struct SyscallMemcpy<'a> { + cost: u64, + compute_meter: Rc<RefCell<dyn ComputeMeter>>, + loader_id: &'a Pubkey, + mem_overlap_fix: bool, +} +impl<'a> SyscallObject<BpfError> for SyscallMemcpy<'a> { + fn call( + &mut self, + dst_addr: u64, + src_addr: u64, + n: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result<u64, EbpfError<BpfError>>, + ) { + if if self.mem_overlap_fix { + check_overlapping(src_addr, dst_addr, n) + } else { + dst_addr + n > src_addr && src_addr > dst_addr + } { + *result = Err(SyscallError::CopyOverlapping.into()); + return; + } + + question_mark!(self.compute_meter.consume(n / self.cost), result); + let dst = question_mark!( + translate_slice_mut::<u8>(memory_mapping, dst_addr, n, self.loader_id, true), + result + ); + let src = question_mark!( + translate_slice::<u8>(memory_mapping, src_addr, n, self.loader_id, true), + result + ); + unsafe { + std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), n as usize); + } + *result = Ok(0); + } +} +/// memmove +pub struct SyscallMemmove<'a> { + cost: u64, + compute_meter: Rc<RefCell<dyn ComputeMeter>>, + loader_id: &'a Pubkey, +} +impl<'a> SyscallObject<BpfError> for SyscallMemmove<'a> { + fn call( + &mut self, + dst_addr: u64, + src_addr: u64, + n: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result<u64, EbpfError<BpfError>>, + ) { + question_mark!(self.compute_meter.consume(n / self.cost), result); + let dst = question_mark!( 
+/// memmove
+pub struct SyscallMemmove<'a> {
+    cost: u64,
+    compute_meter: Rc<RefCell<dyn ComputeMeter>>,
+    loader_id: &'a Pubkey,
+}
+impl<'a> SyscallObject<BpfError> for SyscallMemmove<'a> {
+    fn call(
+        &mut self,
+        dst_addr: u64,
+        src_addr: u64,
+        n: u64,
+        _arg4: u64,
+        _arg5: u64,
+        memory_mapping: &MemoryMapping,
+        result: &mut Result<u64, EbpfError<BpfError>>,
+    ) {
+        question_mark!(self.compute_meter.consume(n / self.cost), result);
+        let dst = question_mark!(
+            translate_slice_mut::<u8>(memory_mapping, dst_addr, n, self.loader_id, true),
+            result
+        );
+        let src = question_mark!(
+            translate_slice::<u8>(memory_mapping, src_addr, n, self.loader_id, true),
+            result
+        );
+        unsafe {
+            std::ptr::copy(src.as_ptr(), dst.as_mut_ptr(), n as usize);
+        }
+        *result = Ok(0);
+    }
+}
+/// memcmp
+pub struct SyscallMemcmp<'a> {
+    cost: u64,
+    compute_meter: Rc<RefCell<dyn ComputeMeter>>,
+    loader_id: &'a Pubkey,
+}
+impl<'a> SyscallObject<BpfError> for SyscallMemcmp<'a> {
+    fn call(
+        &mut self,
+        s1_addr: u64,
+        s2_addr: u64,
+        n: u64,
+        cmp_result_addr: u64,
+        _arg5: u64,
+        memory_mapping: &MemoryMapping,
+        result: &mut Result<u64, EbpfError<BpfError>>,
+    ) {
+        question_mark!(self.compute_meter.consume(n / self.cost), result);
+        let s1 = question_mark!(
+            translate_slice::<u8>(memory_mapping, s1_addr, n, self.loader_id, true),
+            result
+        );
+        let s2 = question_mark!(
+            translate_slice::<u8>(memory_mapping, s2_addr, n, self.loader_id, true),
+            result
+        );
+        let cmp_result = question_mark!(
+            translate_type_mut::<i32>(memory_mapping, cmp_result_addr, self.loader_id, true),
+            result
+        );
+        let mut i = 0;
+        while i < n as usize {
+            let a = s1[i];
+            let b = s2[i];
+            if a != b {
+                *cmp_result = a as i32 - b as i32;
+                *result = Ok(0);
+                return;
+            }
+            i += 1;
+        }
+        *cmp_result = 0;
+        *result = Ok(0);
+    }
+}
+/// memset
+pub struct SyscallMemset<'a> {
+    cost: u64,
+    compute_meter: Rc<RefCell<dyn ComputeMeter>>,
+    loader_id: &'a Pubkey,
+}
+impl<'a> SyscallObject<BpfError> for SyscallMemset<'a> {
+    fn call(
+        &mut self,
+        s_addr: u64,
+        c: u64,
+        n: u64,
+        _arg4: u64,
+        _arg5: u64,
+        memory_mapping: &MemoryMapping,
+        result: &mut Result<u64, EbpfError<BpfError>>,
+    ) {
+        question_mark!(self.compute_meter.consume(n / self.cost), result);
+        let s = question_mark!(
+            translate_slice_mut::<u8>(memory_mapping, s_addr, n, self.loader_id, true),
+            result
+        );
+        for val in s.iter_mut().take(n as usize) {
+            *val = c as u8;
+        }
+        *result = Ok(0);
+    }
+}
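Annotation, not part of the patch: these four objects back the `sol_memcpy_`/`sol_memmove_`/`sol_memcmp_`/`sol_memset_` symbols registered above, and each charges `n / cost` compute units with `cost = cpi_bytes_per_unit`, i.e. one unit per `cpi_bytes_per_unit` bytes. Assuming the companion wrappers in `solana_program::program_memory` from the matching SDK release (their signatures are not shown in this diff), program-side usage would look roughly like:

```rust
// Hedged sketch: assumes solana_program::program_memory exposes
// sol_memcpy/sol_memcmp/sol_memset wrappers as in the 1.7-era SDK. On-chain
// these lower to the syscalls above; off-chain they fall back to pure Rust.
use solana_program::program_memory::{sol_memcmp, sol_memcpy, sol_memset};

fn demo() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];

    sol_memcpy(&mut dst, &src, 4); // would abort the program on overlap
    assert_eq!(sol_memcmp(&dst, &src, 4), 0); // 0 means the prefixes match
    sol_memset(&mut dst, 0xAA, 4); // fill with a single byte value
    assert_eq!(dst, [0xAA; 4]);
}
```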
+
+/// secp256k1_recover
+pub struct SyscallSecp256k1Recover<'a> {
+    cost: u64,
+    compute_meter: Rc<RefCell<dyn ComputeMeter>>,
+    loader_id: &'a Pubkey,
+    libsecp256k1_0_5_upgrade_enabled: bool,
+}
+
+impl<'a> SyscallObject<BpfError> for SyscallSecp256k1Recover<'a> {
+    fn call(
+        &mut self,
+        hash_addr: u64,
+        recovery_id_val: u64,
+        signature_addr: u64,
+        result_addr: u64,
+        _arg5: u64,
+        memory_mapping: &MemoryMapping,
+        result: &mut Result<u64, EbpfError<BpfError>>,
+    ) {
+        question_mark!(self.compute_meter.consume(self.cost), result);
+
+        let hash = question_mark!(
+            translate_slice::<u8>(
+                memory_mapping,
+                hash_addr,
+                keccak::HASH_BYTES as u64,
+                self.loader_id,
+                true,
+            ),
+            result
+        );
+        let signature = question_mark!(
+            translate_slice::<u8>(
+                memory_mapping,
+                signature_addr,
+                SECP256K1_SIGNATURE_LENGTH as u64,
+                self.loader_id,
+                true,
+            ),
+            result
+        );
+        let secp256k1_recover_result = question_mark!(
+            translate_slice_mut::<u8>(
+                memory_mapping,
+                result_addr,
+                SECP256K1_PUBLIC_KEY_LENGTH as u64,
+                self.loader_id,
+                true,
+            ),
+            result
+        );
+
+        let message = match libsecp256k1::Message::parse_slice(hash) {
+            Ok(msg) => msg,
+            Err(_) => {
+                *result = Ok(Secp256k1RecoverError::InvalidHash.into());
+                return;
+            }
+        };
+        let recovery_id = match libsecp256k1::RecoveryId::parse(recovery_id_val as u8) {
+            Ok(id) => id,
+            Err(_) => {
+                *result = Ok(Secp256k1RecoverError::InvalidRecoveryId.into());
+                return;
+            }
+        };
+        let sig_parse_result = if self.libsecp256k1_0_5_upgrade_enabled {
+            libsecp256k1::Signature::parse_standard_slice(signature)
+        } else {
+            libsecp256k1::Signature::parse_overflowing_slice(signature)
+        };
+
+        let signature = match sig_parse_result {
+            Ok(sig) => sig,
+            Err(_) => {
+                *result = Ok(Secp256k1RecoverError::InvalidSignature.into());
+                return;
+            }
+        };
+
+        let public_key = match libsecp256k1::recover(&message, &signature, &recovery_id) {
+            Ok(key) => key.serialize(),
+            Err(_) => {
+                *result = Ok(Secp256k1RecoverError::InvalidSignature.into());
+                return;
+            }
+        };
+
+        secp256k1_recover_result.copy_from_slice(&public_key[1..65]);
+        *result = Ok(SUCCESS);
+    }
+}
+
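Annotation, not part of the patch: note that parse failures are reported as `Ok(error_code)` rather than `Err(..)`, so a bad hash, recovery id, or signature becomes a non-zero return value the calling program can inspect instead of an aborted VM. A sketch of the program-side wrapper this syscall backs, assuming the `solana_program::secp256k1_recover` and `keccak` modules of the matching SDK release:

```rust
// Hedged sketch: recover an Ethereum-style signer and compare it to an
// expected 64-byte public key (the 0x04 prefix is stripped, mirroring
// `public_key[1..65]` above). Module paths are assumed from the 1.7 SDK.
use solana_program::{
    keccak,
    secp256k1_recover::{secp256k1_recover, Secp256k1RecoverError},
};

fn verify_signer(
    message: &[u8],
    recovery_id: u8,
    signature: &[u8; 64],
    expected_pubkey: &[u8; 64],
) -> Result<bool, Secp256k1RecoverError> {
    let hash = keccak::hash(message); // 32 bytes, matching keccak::HASH_BYTES
    let recovered = secp256k1_recover(&hash.0, recovery_id, signature)?;
    Ok(recovered.to_bytes() == *expected_pubkey)
}
```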
 // Cross-program invocation syscalls
 
 struct AccountReferences<'a> {
@@ -1152,7 +1468,7 @@ type TranslatedAccount<'a> = (
     Rc<RefCell<AccountSharedData>>,
     Option<AccountReferences<'a>>,
 );
 type TranslatedAccounts<'a> = (
-    Vec<Rc<RefCell<AccountSharedData>>>,
+    Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>,
     Vec<Option<AccountReferences<'a>>>,
 );
@@ -1169,7 +1485,6 @@ trait SyscallInvokeSigned<'a> {
     fn translate_accounts(
         &self,
         account_keys: &[Pubkey],
-        caller_write_privileges: &[bool],
         program_account_index: usize,
         account_infos_addr: u64,
         account_infos_len: u64,
@@ -1246,7 +1561,6 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> {
     fn translate_accounts(
         &self,
         account_keys: &[Pubkey],
-        caller_write_privileges: &[bool],
         program_account_index: usize,
         account_infos_addr: u64,
         account_infos_len: u64,
@@ -1368,7 +1682,6 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> {
 
         get_translated_accounts(
             account_keys,
-            caller_write_privileges,
             program_account_index,
             &account_info_keys,
             account_infos,
@@ -1585,7 +1898,6 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> {
     fn translate_accounts(
         &self,
         account_keys: &[Pubkey],
-        caller_write_privileges: &[bool],
         program_account_index: usize,
         account_infos_addr: u64,
         account_infos_len: u64,
@@ -1689,7 +2001,6 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> {
 
         get_translated_accounts(
             account_keys,
-            caller_write_privileges,
             program_account_index,
             &account_info_keys,
             account_infos,
@@ -1779,7 +2090,6 @@ impl<'a> SyscallObject<BpfError> for SyscallInvokeSignedC<'a> {
 
 fn get_translated_accounts<'a, T, F>(
     account_keys: &[Pubkey],
-    caller_write_privileges: &[bool],
     program_account_index: usize,
     account_info_keys: &[&Pubkey],
     account_infos: &[T],
@@ -1792,7 +2102,7 @@ where
     let mut accounts = Vec::with_capacity(account_keys.len());
     let mut refs = Vec::with_capacity(account_keys.len());
     for (i, ref account_key) in account_keys.iter().enumerate() {
-        let account = invoke_context.get_account(&account_key).ok_or_else(|| {
+        let account = invoke_context.get_account(account_key).ok_or_else(|| {
             ic_msg!(
                 invoke_context,
                 "Instruction references an unknown account {}",
@@ -1801,13 +2111,9 @@ where
             SyscallError::InstructionError(InstructionError::MissingAccount)
         })?;
 
-        if i == program_account_index
-            || account.borrow().executable()
-            || (invoke_context.is_feature_active(&cpi_share_ro_and_exec_accounts::id())
-                && !caller_write_privileges[i])
-        {
+        if i == program_account_index || account.borrow().executable() {
             // Use the known account
-            accounts.push(account);
+            accounts.push((**account_key, account));
             refs.push(None);
         } else if let Some(account_info) = account_info_keys
@@ -1822,7 +2128,7 @@ where
             })
         {
             let (account, account_ref) = do_translate(account_info, invoke_context)?;
-            accounts.push(account);
+            accounts.push((**account_key, account));
             refs.push(account_ref);
         } else {
             ic_msg!(
@@ -1935,14 +2241,7 @@ fn call<'a>(
     signers_seeds_len: u64,
     memory_mapping: &MemoryMapping,
 ) -> Result<u64, EbpfError<BpfError>> {
-    let (
-        message,
-        executables,
-        accounts,
-        account_refs,
-        caller_write_privileges,
-        demote_sysvar_write_locks,
-    ) = {
+    let (message, executables, accounts, account_refs, caller_write_privileges) = {
         let invoke_context = syscall.get_context()?;
 
         invoke_context
@@ -1960,7 +2259,7 @@ fn call<'a>(
 
         let instruction = syscall.translate_instruction(
             instruction_addr,
-            &memory_mapping,
+            memory_mapping,
             enforce_aligned_host_addrs,
         )?;
         let signers = syscall.translate_signers(
@@ -2000,7 +2299,6 @@ fn call<'a>(
         check_authorized_program(&callee_program_id, &instruction.data, &invoke_context)?;
         let (accounts, account_refs) = syscall.translate_accounts(
             &message.account_keys,
-            &caller_write_privileges,
             callee_program_id_index,
             account_infos_addr,
             account_infos_len,
@@ -2015,6 +2313,7 @@ fn call<'a>(
                 ic_msg!(invoke_context, "Unknown program {}", callee_program_id,);
                 SyscallError::InstructionError(InstructionError::MissingAccount)
             })?
+            .1
            .clone();
         let programdata_executable =
             get_upgradeable_executable(&callee_program_id, &program_account, &invoke_context)?;
@@ -2033,7 +2332,6 @@ fn call<'a>(
             accounts,
             account_refs,
             caller_write_privileges,
-            invoke_context.is_feature_active(&demote_sysvar_write_locks::id()),
         )
     };
 
@@ -2056,10 +2354,10 @@ fn call<'a>(
     // Copy results back to caller
     {
         let invoke_context = syscall.get_context()?;
-        for (i, (account, account_ref)) in accounts.iter().zip(account_refs).enumerate() {
+        for (i, ((_key, account), account_ref)) in accounts.iter().zip(account_refs).enumerate() {
             let account = account.borrow();
             if let Some(mut account_ref) = account_ref {
-                if message.is_writable(i, demote_sysvar_write_locks) && !account.executable() {
+                if message.is_writable(i) && !account.executable() {
                     *account_ref.lamports = account.lamports();
                     *account_ref.owner = *account.owner();
                     if account_ref.data.len() != account.data().len() {
@@ -3090,4 +3388,15 @@ mod tests {
         assert_eq!(got_rent, src_rent);
         }
     }
+
+    #[test]
+    fn test_overlapping() {
+        assert!(!check_overlapping(10, 7, 3));
+        assert!(check_overlapping(10, 8, 3));
+        assert!(check_overlapping(10, 9, 3));
+        assert!(check_overlapping(10, 10, 3));
+        assert!(check_overlapping(10, 11, 3));
+        assert!(check_overlapping(10, 12, 3));
+        assert!(!check_overlapping(10, 13, 3));
+    }
 }
diff --git a/programs/budget/Cargo.toml b/programs/budget/Cargo.toml
index a6e3ff3ec981f1..0c0113ccb7d3a8 100644
--- a/programs/budget/Cargo.toml
+++ b/programs/budget/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-budget-program"
-version = "1.7.0"
+version = "1.7.11"
 description = "Solana Budget program"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -17,11 +17,11 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.122"
 serde_derive = "1.0.103"
-solana-sdk = { path = "../../sdk", version = "=1.7.0" }
+solana-sdk = { path = "../../sdk", version = "=1.7.11" }
 thiserror = "1.0"
 
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "=1.7.0" }
+solana-runtime = { path = "../../runtime", version = "=1.7.11" }
 
 [lib]
 crate-type = ["lib", "cdylib"]
diff --git a/programs/budget/src/budget_expr.rs b/programs/budget/src/budget_expr.rs
deleted file mode 100644
index c5584eee092715..00000000000000
--- a/programs/budget/src/budget_expr.rs
+++ /dev/null
@@ -1,390 +0,0 @@
-//! The `budget_expr` module provides a domain-specific language for payment plans. Users create BudgetExpr objects that
-//! are given to an interpreter. The interpreter listens for `Witness` transactions,
-//! which it uses to reduce the payment plan. When the budget is reduced to a
-//! `Payment`, the payment is executed.
-
-use chrono::prelude::*;
-use serde_derive::{Deserialize, Serialize};
-use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
-
-/// The types of events a payment plan can process.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub enum Witness {
-    /// The current time.
-    Timestamp(DateTime<Utc>),
-
-    /// A signature from Pubkey.
-    Signature,
-
-    /// Account snapshot.
-    AccountData(Hash, Pubkey),
-}
-
-/// Some amount of lamports that should be sent to the `to` `Pubkey`.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub struct Payment {
-    /// Amount to be paid.
-    pub lamports: u64,
-
-    /// The `Pubkey` that `lamports` should be paid to.
-    pub to: Pubkey,
-}
-
-/// The account constraints a Condition would wait on.
-/// Note: ideally this would be a function that accepts an Account and returns
-/// a bool, but we don't have a way to pass functions over the wire. To simulate
-/// higher order programming, create your own program that includes an instruction
-/// that sets account data to a boolean. Pass that account key and program_id here.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub struct AccountConstraints {
-    /// The account holder.
-    pub key: Pubkey,
-
-    /// The program id that must own the account at `key`.
-    pub program_id: Pubkey,
-
-    /// The hash of the data in the account at `key`.
-    pub data_hash: Hash,
-}
-
-/// A data type representing a `Witness` that the payment plan is waiting on.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub enum Condition {
-    /// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
-    Timestamp(DateTime<Utc>, Pubkey),
-
-    /// Wait for a `Signature` `Witness` from `Pubkey`.
-    Signature(Pubkey),
-
-    /// Wait for the account with the given constraints.
-    AccountData(AccountConstraints),
-}
-
-impl Condition {
-    /// Return true if the given Witness satisfies this Condition.
-    pub fn is_satisfied(&self, witness: &Witness, from: &Pubkey) -> bool {
-        match (self, witness) {
-            (Condition::Signature(pubkey), Witness::Signature) => pubkey == from,
-            (Condition::Timestamp(dt, pubkey), Witness::Timestamp(last_time)) => {
-                pubkey == from && dt <= last_time
-            }
-            (
-                Condition::AccountData(constraints),
-                Witness::AccountData(actual_hash, program_id),
-            ) => {
-                constraints.program_id == *program_id
-                    && constraints.key == *from
-                    && constraints.data_hash == *actual_hash
-            }
-            _ => false,
-        }
-    }
-}
-
-/// A data type representing a payment plan.
-#[repr(C)]
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub enum BudgetExpr {
-    /// Make a payment.
-    Pay(Payment),
-
-    /// Make a payment after some condition.
-    After(Condition, Box<BudgetExpr>),
-
-    /// Either make a payment after one condition or a different payment after another
-    /// condition, whichever condition is satisfied first.
-    Or((Condition, Box<BudgetExpr>), (Condition, Box<BudgetExpr>)),
-
-    /// Make a payment after both of two conditions are satisfied.
-    And(Condition, Condition, Box<BudgetExpr>),
-}
-
-impl BudgetExpr {
-    /// Create the simplest budget - one that pays `lamports` to Pubkey.
-    pub fn new_payment(lamports: u64, to: &Pubkey) -> Self {
-        BudgetExpr::Pay(Payment { lamports, to: *to })
-    }
-
-    /// Create a budget that pays `lamports` to `to` after being witnessed by `from`.
-    pub fn new_authorized_payment(from: &Pubkey, lamports: u64, to: &Pubkey) -> Self {
-        BudgetExpr::After(
-            Condition::Signature(*from),
-            Box::new(Self::new_payment(lamports, to)),
-        )
-    }
-
-    /// Create a budget that pays `lamports` to `to` after witnessing account data in `account_pubkey` with the given hash.
-    pub fn new_payment_when_account_data(
-        account_pubkey: &Pubkey,
-        account_program_id: &Pubkey,
-        account_hash: Hash,
-        lamports: u64,
-        to: &Pubkey,
-    ) -> Self {
-        BudgetExpr::After(
-            Condition::AccountData(AccountConstraints {
-                key: *account_pubkey,
-                program_id: *account_program_id,
-                data_hash: account_hash,
-            }),
-            Box::new(Self::new_payment(lamports, to)),
-        )
-    }
-
-    /// Create a budget that pays `lamports` to `to` after being witnessed by `witness` unless
-    /// canceled with a signature from `from`.
-    pub fn new_cancelable_authorized_payment(
-        witness: &Pubkey,
-        lamports: u64,
-        to: &Pubkey,
-        from: Option<Pubkey>,
-    ) -> Self {
-        if from.is_none() {
-            return Self::new_authorized_payment(witness, lamports, to);
-        }
-        let from = from.unwrap();
-        BudgetExpr::Or(
-            (
-                Condition::Signature(*witness),
-                Box::new(BudgetExpr::new_payment(lamports, to)),
-            ),
-            (
-                Condition::Signature(from),
-                Box::new(BudgetExpr::new_payment(lamports, &from)),
-            ),
-        )
-    }
-
-    /// Create a budget that pays `lamports` to `to` after being witnessed by 2x `from`s.
-    pub fn new_2_2_multisig_payment(
-        from0: &Pubkey,
-        from1: &Pubkey,
-        lamports: u64,
-        to: &Pubkey,
-    ) -> Self {
-        BudgetExpr::And(
-            Condition::Signature(*from0),
-            Condition::Signature(*from1),
-            Box::new(Self::new_payment(lamports, to)),
-        )
-    }
-
-    /// Create a budget that pays `lamports` to `to` after the given DateTime signed
-    /// by `dt_pubkey`.
-    pub fn new_future_payment(
-        dt: DateTime<Utc>,
-        dt_pubkey: &Pubkey,
-        lamports: u64,
-        to: &Pubkey,
-    ) -> Self {
-        BudgetExpr::After(
-            Condition::Timestamp(dt, *dt_pubkey),
-            Box::new(Self::new_payment(lamports, to)),
-        )
-    }
-
-    /// Create a budget that pays `lamports` to `to` after the given DateTime
-    /// signed by `dt_pubkey` unless canceled by `from`.
-    pub fn new_cancelable_future_payment(
-        dt: DateTime<Utc>,
-        dt_pubkey: &Pubkey,
-        lamports: u64,
-        to: &Pubkey,
-        from: Option<Pubkey>,
-    ) -> Self {
-        if from.is_none() {
-            return Self::new_future_payment(dt, dt_pubkey, lamports, to);
-        }
-        let from = from.unwrap();
-        BudgetExpr::Or(
-            (
-                Condition::Timestamp(dt, *dt_pubkey),
-                Box::new(Self::new_payment(lamports, to)),
-            ),
-            (
-                Condition::Signature(from),
-                Box::new(Self::new_payment(lamports, &from)),
-            ),
-        )
-    }
-
-    /// Return Payment if the budget requires no additional Witnesses.
-    pub fn final_payment(&self) -> Option<Payment> {
-        match self {
-            BudgetExpr::Pay(payment) => Some(payment.clone()),
-            _ => None,
-        }
-    }
-
-    /// Return true if the budget spends exactly `spendable_lamports`.
-    pub fn verify(&self, spendable_lamports: u64) -> bool {
-        match self {
-            BudgetExpr::Pay(payment) => payment.lamports == spendable_lamports,
-            BudgetExpr::After(_, sub_expr) | BudgetExpr::And(_, _, sub_expr) => {
-                sub_expr.verify(spendable_lamports)
-            }
-            BudgetExpr::Or(a, b) => {
-                a.1.verify(spendable_lamports) && b.1.verify(spendable_lamports)
-            }
-        }
-    }
-
-    /// Apply a witness to the budget to see if the budget can be reduced.
-    /// If so, modify the budget in-place.
-    pub fn apply_witness(&mut self, witness: &Witness, from: &Pubkey) {
-        let new_expr = match self {
-            BudgetExpr::After(cond, sub_expr) if cond.is_satisfied(witness, from) => {
-                Some(sub_expr.clone())
-            }
-            BudgetExpr::Or((cond, sub_expr), _) if cond.is_satisfied(witness, from) => {
-                Some(sub_expr.clone())
-            }
-            BudgetExpr::Or(_, (cond, sub_expr)) if cond.is_satisfied(witness, from) => {
-                Some(sub_expr.clone())
-            }
-            BudgetExpr::And(cond0, cond1, sub_expr) => {
-                if cond0.is_satisfied(witness, from) {
-                    Some(Box::new(BudgetExpr::After(cond1.clone(), sub_expr.clone())))
-                } else if cond1.is_satisfied(witness, from) {
-                    Some(Box::new(BudgetExpr::After(cond0.clone(), sub_expr.clone())))
-                } else {
-                    None
-                }
-            }
-            _ => None,
-        };
-        if let Some(expr) = new_expr {
-            *self = *expr;
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_signature_satisfied() {
-        let from = Pubkey::default();
-        assert!(Condition::Signature(from).is_satisfied(&Witness::Signature, &from));
-    }
-
-    #[test]
-    fn test_timestamp_satisfied() {
-        let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
-        let from = Pubkey::default();
-        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt1), &from));
-        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt2), &from));
-        assert!(!Condition::Timestamp(dt2, from).is_satisfied(&Witness::Timestamp(dt1), &from));
-    }
-
-    #[test]
-    fn test_verify() {
-        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = Pubkey::default();
-        let to = Pubkey::default();
-        assert!(BudgetExpr::new_payment(42, &to).verify(42));
-        assert!(BudgetExpr::new_authorized_payment(&from, 42, &to).verify(42));
-        assert!(BudgetExpr::new_future_payment(dt, &from, 42, &to).verify(42));
-        assert!(
-            BudgetExpr::new_cancelable_future_payment(dt, &from, 42, &to, Some(from)).verify(42)
-        );
-    }
-
-    #[test]
-    fn test_authorized_payment() {
-        let from = Pubkey::default();
-        let to = Pubkey::default();
-
-        let mut expr = BudgetExpr::new_authorized_payment(&from, 42, &to);
-        expr.apply_witness(&Witness::Signature, &from);
-        assert_eq!(expr, BudgetExpr::new_payment(42, &to));
-    }
-
-    #[test]
-    fn test_future_payment() {
-        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = solana_sdk::pubkey::new_rand();
-        let to = solana_sdk::pubkey::new_rand();
-
-        let mut expr = BudgetExpr::new_future_payment(dt, &from, 42, &to);
-        expr.apply_witness(&Witness::Timestamp(dt), &from);
-        assert_eq!(expr, BudgetExpr::new_payment(42, &to));
-    }
-
-    #[test]
-    fn test_unauthorized_future_payment() {
-        // Ensure timestamp will only be acknowledged if it came from the
-        // whitelisted public key.
-        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = solana_sdk::pubkey::new_rand();
-        let to = solana_sdk::pubkey::new_rand();
-
-        let mut expr = BudgetExpr::new_future_payment(dt, &from, 42, &to);
-        let orig_expr = expr.clone();
-        expr.apply_witness(&Witness::Timestamp(dt), &to); // <-- Attack!
-        assert_eq!(expr, orig_expr);
-    }
-
-    #[test]
-    fn test_cancelable_future_payment() {
-        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = Pubkey::default();
-        let to = Pubkey::default();
-
-        let mut expr = BudgetExpr::new_cancelable_future_payment(dt, &from, 42, &to, Some(from));
-        expr.apply_witness(&Witness::Timestamp(dt), &from);
-        assert_eq!(expr, BudgetExpr::new_payment(42, &to));
-
-        let mut expr = BudgetExpr::new_cancelable_future_payment(dt, &from, 42, &to, Some(from));
-        expr.apply_witness(&Witness::Signature, &from);
-        assert_eq!(expr, BudgetExpr::new_payment(42, &from));
-    }
-
-    #[test]
-    fn test_2_2_multisig_payment() {
-        let from0 = solana_sdk::pubkey::new_rand();
-        let from1 = solana_sdk::pubkey::new_rand();
-        let to = Pubkey::default();
-
-        let mut expr = BudgetExpr::new_2_2_multisig_payment(&from0, &from1, 42, &to);
-        expr.apply_witness(&Witness::Signature, &from0);
-        assert_eq!(expr, BudgetExpr::new_authorized_payment(&from1, 42, &to));
-    }
-
-    #[test]
-    fn test_multisig_after_sig() {
-        let from0 = solana_sdk::pubkey::new_rand();
-        let from1 = solana_sdk::pubkey::new_rand();
-        let from2 = solana_sdk::pubkey::new_rand();
-        let to = Pubkey::default();
-
-        let expr = BudgetExpr::new_2_2_multisig_payment(&from0, &from1, 42, &to);
-        let mut expr = BudgetExpr::After(Condition::Signature(from2), Box::new(expr));
-
-        expr.apply_witness(&Witness::Signature, &from2);
-        expr.apply_witness(&Witness::Signature, &from0);
-        assert_eq!(expr, BudgetExpr::new_authorized_payment(&from1, 42, &to));
-    }
-
-    #[test]
-    fn test_multisig_after_ts() {
-        let from0 = solana_sdk::pubkey::new_rand();
-        let from1 = solana_sdk::pubkey::new_rand();
-        let dt = Utc.ymd(2014, 11, 11).and_hms(7, 7, 7);
-        let to = Pubkey::default();
-
-        let expr = BudgetExpr::new_2_2_multisig_payment(&from0, &from1, 42, &to);
-        let mut expr = BudgetExpr::After(Condition::Timestamp(dt, from0), Box::new(expr));
-
-        expr.apply_witness(&Witness::Timestamp(dt), &from0);
-        assert_eq!(
-            expr,
-            BudgetExpr::new_2_2_multisig_payment(&from0, &from1, 42, &to)
-        );
-
-        expr.apply_witness(&Witness::Signature, &from0);
-        assert_eq!(expr, BudgetExpr::new_authorized_payment(&from1, 42, &to));
-    }
-}
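Annotation, not part of the patch: for readers tracing what this deletion removes, the module above implemented a small payment-plan DSL in which witnesses progressively reduce an expression until only a bare `Payment` remains. A sketch against the removed API (it no longer compiles on this branch):

```rust
// For reference only: driving the deleted budget_expr DSL by hand.
use solana_budget_program::budget_expr::{BudgetExpr, Witness};

fn main() {
    let payer = solana_sdk::pubkey::new_rand();
    let payee = solana_sdk::pubkey::new_rand();

    // "Pay 42 lamports to `payee` once `payer` signs."
    let mut expr = BudgetExpr::new_authorized_payment(&payer, 42, &payee);
    assert!(expr.final_payment().is_none()); // still waiting on a Witness

    // A Signature witness from `payer` reduces the plan to a bare Payment.
    expr.apply_witness(&Witness::Signature, &payer);
    let payment = expr.final_payment().expect("fully reduced");
    assert_eq!((payment.lamports, payment.to), (42, payee));
}
```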
diff --git a/programs/budget/src/budget_instruction.rs b/programs/budget/src/budget_instruction.rs
deleted file mode 100644
index e61bdfcce0bd29..00000000000000
--- a/programs/budget/src/budget_instruction.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use crate::{budget_expr::BudgetExpr, budget_state::BudgetState, id};
-use bincode::serialized_size;
-use chrono::prelude::{DateTime, Utc};
-use num_derive::{FromPrimitive, ToPrimitive};
-use serde_derive::{Deserialize, Serialize};
-use solana_sdk::{
-    decode_error::DecodeError,
-    hash::Hash,
-    instruction::{AccountMeta, Instruction},
-    pubkey::Pubkey,
-    system_instruction,
-};
-use thiserror::Error;
-
-#[derive(Error, Debug, Clone, PartialEq, FromPrimitive, ToPrimitive)]
-pub enum BudgetError {
-    #[error("destination missing")]
-    DestinationMissing,
-}
-
-impl DecodeError<BudgetError> for BudgetError {
-    fn type_of() -> &'static str {
-        "BudgetError"
-    }
-}
-
-/// An instruction to progress the smart contract.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub enum BudgetInstruction {
-    /// Declare and instantiate a `BudgetExpr`.
-    InitializeAccount(Box<BudgetExpr>),
-
-    /// Tell a payment plan to acknowledge that the given `DateTime` has passed.
-    ApplyTimestamp(DateTime<Utc>),
-
-    /// Tell the budget that the `InitializeAccount` with `Signature` has been
-    /// signed by the containing transaction's `Pubkey`.
-    ApplySignature,
-
-    /// Load an account and pass its data to the budget for inspection.
-    ApplyAccountData,
-}
-
-fn initialize_account(contract: &Pubkey, expr: BudgetExpr) -> Instruction {
-    let mut keys = vec![];
-    if let BudgetExpr::Pay(payment) = &expr {
-        keys.push(AccountMeta::new(payment.to, false));
-    }
-    keys.push(AccountMeta::new(*contract, false));
-    Instruction::new_with_bincode(
-        id(),
-        &BudgetInstruction::InitializeAccount(Box::new(expr)),
-        keys,
-    )
-}
-
-pub fn create_account(
-    from: &Pubkey,
-    contract: &Pubkey,
-    lamports: u64,
-    expr: BudgetExpr,
-) -> Vec<Instruction> {
-    if !expr.verify(lamports) {
-        panic!("invalid budget expression");
-    }
-    let space = serialized_size(&BudgetState::new(expr.clone())).unwrap();
-    vec![
-        system_instruction::create_account(&from, contract, lamports, space, &id()),
-        initialize_account(contract, expr),
-    ]
-}
-
-/// Create a new payment script.
-pub fn payment(from: &Pubkey, to: &Pubkey, contract: &Pubkey, lamports: u64) -> Vec<Instruction> {
-    let expr = BudgetExpr::new_payment(lamports, to);
-    create_account(from, &contract, lamports, expr)
-}
-
-/// Create a future payment script.
-pub fn on_date(
-    from: &Pubkey,
-    to: &Pubkey,
-    contract: &Pubkey,
-    dt: DateTime<Utc>,
-    dt_pubkey: &Pubkey,
-    cancelable: Option<Pubkey>,
-    lamports: u64,
-) -> Vec<Instruction> {
-    let expr = BudgetExpr::new_cancelable_future_payment(dt, dt_pubkey, lamports, to, cancelable);
-    create_account(from, contract, lamports, expr)
-}
-
-/// Create a multisig payment script.
-pub fn when_signed(
-    from: &Pubkey,
-    to: &Pubkey,
-    contract: &Pubkey,
-    witness: &Pubkey,
-    cancelable: Option<Pubkey>,
-    lamports: u64,
-) -> Vec<Instruction> {
-    let expr = BudgetExpr::new_cancelable_authorized_payment(witness, lamports, to, cancelable);
-    create_account(from, contract, lamports, expr)
-}
-
-/// Make a payment when an account has the given data.
-pub fn when_account_data(
-    from: &Pubkey,
-    to: &Pubkey,
-    contract: &Pubkey,
-    account_pubkey: &Pubkey,
-    account_program_id: &Pubkey,
-    account_hash: Hash,
-    lamports: u64,
-) -> Vec<Instruction> {
-    let expr = BudgetExpr::new_payment_when_account_data(
-        account_pubkey,
-        account_program_id,
-        account_hash,
-        lamports,
-        to,
-    );
-    create_account(from, contract, lamports, expr)
-}
-
-pub fn apply_timestamp(
-    from: &Pubkey,
-    contract: &Pubkey,
-    to: &Pubkey,
-    dt: DateTime<Utc>,
-) -> Instruction {
-    let mut account_metas = vec![
-        AccountMeta::new(*from, true),
-        AccountMeta::new(*contract, false),
-    ];
-    if from != to {
-        account_metas.push(AccountMeta::new(*to, false));
-    }
-    Instruction::new_with_bincode(id(), &BudgetInstruction::ApplyTimestamp(dt), account_metas)
-}
-
-pub fn apply_signature(from: &Pubkey, contract: &Pubkey, to: &Pubkey) -> Instruction {
-    let mut account_metas = vec![
-        AccountMeta::new(*from, true),
-        AccountMeta::new(*contract, false),
-    ];
-    if from != to {
-        account_metas.push(AccountMeta::new(*to, false));
-    }
-    Instruction::new_with_bincode(id(), &BudgetInstruction::ApplySignature, account_metas)
-}
-/// Apply account data to a contract waiting on an AccountData witness.
-pub fn apply_account_data(witness_pubkey: &Pubkey, contract: &Pubkey, to: &Pubkey) -> Instruction {
-    let account_metas = vec![
-        AccountMeta::new_readonly(*witness_pubkey, false),
-        AccountMeta::new(*contract, false),
-        AccountMeta::new(*to, false),
-    ];
-    Instruction::new_with_bincode(id(), &BudgetInstruction::ApplyAccountData, account_metas)
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::budget_expr::BudgetExpr;
-
-    #[test]
-    fn test_budget_instruction_verify() {
-        let alice_pubkey = solana_sdk::pubkey::new_rand();
-        let bob_pubkey = solana_sdk::pubkey::new_rand();
-        let budget_pubkey = solana_sdk::pubkey::new_rand();
-        payment(&alice_pubkey, &bob_pubkey, &budget_pubkey, 1); // No panic! indicates success.
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_budget_instruction_overspend() {
-        let alice_pubkey = solana_sdk::pubkey::new_rand();
-        let bob_pubkey = solana_sdk::pubkey::new_rand();
-        let budget_pubkey = solana_sdk::pubkey::new_rand();
-        let expr = BudgetExpr::new_payment(2, &bob_pubkey);
-        create_account(&alice_pubkey, &budget_pubkey, 1, expr);
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_budget_instruction_underspend() {
-        let alice_pubkey = solana_sdk::pubkey::new_rand();
-        let bob_pubkey = solana_sdk::pubkey::new_rand();
-        let budget_pubkey = solana_sdk::pubkey::new_rand();
-        let expr = BudgetExpr::new_payment(1, &bob_pubkey);
-        create_account(&alice_pubkey, &budget_pubkey, 2, expr);
-    }
-}
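Annotation, not part of the patch: each deleted builder returned the two-instruction sequence (system `create_account`, then `InitializeAccount`) that a client wrapped in a transaction, as the removed tests above did. For reference, against the removed API:

```rust
// For reference only: how a client assembled a budget payment before this
// deletion. Uses the removed API, so it no longer compiles on this branch.
use solana_budget_program::budget_instruction;
use solana_sdk::message::Message;

fn main() {
    let alice = solana_sdk::pubkey::new_rand(); // funding payer
    let bob = solana_sdk::pubkey::new_rand(); // payee
    let contract = solana_sdk::pubkey::new_rand(); // budget state account

    // system_instruction::create_account + BudgetInstruction::InitializeAccount
    let instructions = budget_instruction::payment(&alice, &bob, &contract, 42);
    assert_eq!(instructions.len(), 2);

    // A client would sign and submit this message with alice as fee payer.
    let message = Message::new(&instructions, Some(&alice));
    assert_eq!(message.instructions.len(), 2);
}
```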
diff --git a/programs/budget/src/budget_processor.rs b/programs/budget/src/budget_processor.rs
deleted file mode 100644
index 1df0ea8592c750..00000000000000
--- a/programs/budget/src/budget_processor.rs
+++ /dev/null
@@ -1,627 +0,0 @@
-//! budget program
-use crate::{
-    budget_expr::Witness,
-    budget_instruction::{BudgetError, BudgetInstruction},
-    budget_state::BudgetState,
-};
-use chrono::prelude::{DateTime, Utc};
-use log::*;
-use solana_sdk::{
-    account::{ReadableAccount, WritableAccount},
-    hash::hash,
-    instruction::InstructionError,
-    keyed_account::{keyed_account_at_index, KeyedAccount},
-    process_instruction::InvokeContext,
-    program_utils::limited_deserialize,
-    pubkey::Pubkey,
-};
-
-/// Process a Witness Signature. Any payment plans waiting on this signature
-/// will progress one step.
-fn apply_signature(
-    budget_state: &mut BudgetState,
-    witness_keyed_account: &KeyedAccount,
-    contract_keyed_account: &KeyedAccount,
-    to_keyed_account: Result<&KeyedAccount, InstructionError>,
-) -> Result<(), InstructionError> {
-    let mut final_payment = None;
-    if let Some(ref mut expr) = budget_state.pending_budget {
-        let key = witness_keyed_account.signer_key().unwrap();
-        expr.apply_witness(&Witness::Signature, key);
-        final_payment = expr.final_payment();
-    }
-
-    if let Some(payment) = final_payment {
-        if let Some(key) = witness_keyed_account.signer_key() {
-            if &payment.to == key {
-                budget_state.pending_budget = None;
-                contract_keyed_account
-                    .try_account_ref_mut()?
-                    .checked_sub_lamports(payment.lamports)?;
-                witness_keyed_account
-                    .try_account_ref_mut()?
-                    .checked_add_lamports(payment.lamports)?;
-                return Ok(());
-            }
-        }
-        let to_keyed_account = to_keyed_account?;
-        if &payment.to != to_keyed_account.unsigned_key() {
-            trace!("destination missing");
-            return Err(BudgetError::DestinationMissing.into());
-        }
-        budget_state.pending_budget = None;
-        contract_keyed_account
-            .try_account_ref_mut()?
-            .checked_sub_lamports(payment.lamports)?;
-        to_keyed_account
-            .try_account_ref_mut()?
-            .checked_add_lamports(payment.lamports)?;
-    }
-    Ok(())
-}
-
-/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
-/// will progress one step.
-fn apply_timestamp(
-    budget_state: &mut BudgetState,
-    witness_keyed_account: &KeyedAccount,
-    contract_keyed_account: &KeyedAccount,
-    to_keyed_account: Result<&KeyedAccount, InstructionError>,
-    dt: DateTime<Utc>,
-) -> Result<(), InstructionError> {
-    // Check to see if any timelocked transactions can be completed.
-    let mut final_payment = None;
-
-    if let Some(ref mut expr) = budget_state.pending_budget {
-        let key = witness_keyed_account.signer_key().unwrap();
-        expr.apply_witness(&Witness::Timestamp(dt), key);
-        final_payment = expr.final_payment();
-    }
-
-    if let Some(payment) = final_payment {
-        let to_keyed_account = to_keyed_account?;
-        if &payment.to != to_keyed_account.unsigned_key() {
-            trace!("destination missing");
-            return Err(BudgetError::DestinationMissing.into());
-        }
-        budget_state.pending_budget = None;
-        contract_keyed_account
-            .try_account_ref_mut()?
-            .checked_sub_lamports(payment.lamports)?;
-        to_keyed_account
-            .try_account_ref_mut()?
-            .checked_add_lamports(payment.lamports)?;
-    }
-    Ok(())
-}
-
-/// Process an AccountData Witness and any payment waiting on it.
-fn apply_account_data(
-    budget_state: &mut BudgetState,
-    witness_keyed_account: &KeyedAccount,
-    contract_keyed_account: &KeyedAccount,
-    to_keyed_account: Result<&KeyedAccount, InstructionError>,
-) -> Result<(), InstructionError> {
-    // Check to see if any timelocked transactions can be completed.
-    let mut final_payment = None;
-
-    if let Some(ref mut expr) = budget_state.pending_budget {
-        let key = witness_keyed_account.unsigned_key();
-        let program_id = witness_keyed_account.owner()?;
-        let actual_hash = hash(&witness_keyed_account.try_account_ref()?.data());
-        expr.apply_witness(&Witness::AccountData(actual_hash, program_id), key);
-        final_payment = expr.final_payment();
-    }
-
-    if let Some(payment) = final_payment {
-        let to_keyed_account = to_keyed_account?;
-        if &payment.to != to_keyed_account.unsigned_key() {
-            trace!("destination missing");
-            return Err(BudgetError::DestinationMissing.into());
-        }
-        budget_state.pending_budget = None;
-        contract_keyed_account
-            .try_account_ref_mut()?
-            .checked_sub_lamports(payment.lamports)?;
-        to_keyed_account
-            .try_account_ref_mut()?
-            .checked_add_lamports(payment.lamports)?;
-    }
-    Ok(())
-}
-
-pub fn process_instruction(
-    _program_id: &Pubkey,
-    data: &[u8],
-    invoke_context: &mut dyn InvokeContext,
-) -> Result<(), InstructionError> {
-    let keyed_accounts = invoke_context.get_keyed_accounts()?;
-
-    let instruction = limited_deserialize(data)?;
-
-    trace!("process_instruction: {:?}", instruction);
-
-    match instruction {
-        BudgetInstruction::InitializeAccount(expr) => {
-            let contract_keyed_account = keyed_account_at_index(keyed_accounts, 0)?;
-
-            if let Some(payment) = expr.final_payment() {
-                let to_keyed_account = contract_keyed_account;
-                let contract_keyed_account = keyed_account_at_index(keyed_accounts, 1)?;
-                contract_keyed_account
-                    .try_account_ref_mut()?
-                    .set_lamports(0);
-                to_keyed_account
-                    .try_account_ref_mut()?
-                    .checked_add_lamports(payment.lamports)?;
-                return Ok(());
-            }
-            let existing =
-                BudgetState::deserialize(&contract_keyed_account.try_account_ref_mut()?.data())
-                    .ok();
-            if Some(true) == existing.map(|x| x.initialized) {
-                trace!("contract already exists");
-                return Err(InstructionError::AccountAlreadyInitialized);
-            }
-            let budget_state = BudgetState {
-                pending_budget: Some(*expr),
-                initialized: true,
-            };
-            budget_state.serialize(
-                &mut contract_keyed_account
-                    .try_account_ref_mut()?
-                    .data_as_mut_slice(),
-            )
-        }
-        BudgetInstruction::ApplyTimestamp(dt) => {
-            let witness_keyed_account = keyed_account_at_index(keyed_accounts, 0)?;
-            let contract_keyed_account = keyed_account_at_index(keyed_accounts, 1)?;
-            let mut budget_state =
-                BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?;
-            if !budget_state.is_pending() {
-                return Ok(()); // Nothing to do here.
-            }
-            if !budget_state.initialized {
-                trace!("contract is uninitialized");
-                return Err(InstructionError::UninitializedAccount);
-            }
-            if witness_keyed_account.signer_key().is_none() {
-                return Err(InstructionError::MissingRequiredSignature);
-            }
-            trace!("apply timestamp");
-            apply_timestamp(
-                &mut budget_state,
-                witness_keyed_account,
-                contract_keyed_account,
-                keyed_account_at_index(keyed_accounts, 2),
-                dt,
-            )?;
-            trace!("apply timestamp committed");
-            budget_state.serialize(
-                &mut contract_keyed_account
-                    .try_account_ref_mut()?
-                    .data_as_mut_slice(),
-            )
-        }
-        BudgetInstruction::ApplySignature => {
-            let witness_keyed_account = keyed_account_at_index(keyed_accounts, 0)?;
-            let contract_keyed_account = keyed_account_at_index(keyed_accounts, 1)?;
-            let mut budget_state =
-                BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?;
-            if !budget_state.is_pending() {
-                return Ok(()); // Nothing to do here.
-            }
-            if !budget_state.initialized {
-                trace!("contract is uninitialized");
-                return Err(InstructionError::UninitializedAccount);
-            }
-            if witness_keyed_account.signer_key().is_none() {
-                return Err(InstructionError::MissingRequiredSignature);
-            }
-            trace!("apply signature");
-            apply_signature(
-                &mut budget_state,
-                witness_keyed_account,
-                contract_keyed_account,
-                keyed_account_at_index(keyed_accounts, 2),
-            )?;
-            trace!("apply signature committed");
-            budget_state.serialize(
-                &mut contract_keyed_account
-                    .try_account_ref_mut()?
-                    .data_as_mut_slice(),
-            )
-        }
-        BudgetInstruction::ApplyAccountData => {
-            let witness_keyed_account = keyed_account_at_index(keyed_accounts, 0)?;
-            let contract_keyed_account = keyed_account_at_index(keyed_accounts, 1)?;
-            let mut budget_state =
-                BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?;
-            if !budget_state.is_pending() {
-                return Ok(()); // Nothing to do here.
-            }
-            if !budget_state.initialized {
-                trace!("contract is uninitialized");
-                return Err(InstructionError::UninitializedAccount);
-            }
-            apply_account_data(
-                &mut budget_state,
-                witness_keyed_account,
-                contract_keyed_account,
-                keyed_account_at_index(keyed_accounts, 2),
-            )?;
-            trace!("apply account data committed");
-            budget_state.serialize(
-                &mut contract_keyed_account
-                    .try_account_ref_mut()?
-                    .data_as_mut_slice(),
-            )
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::budget_instruction;
-    use crate::id;
-    use solana_runtime::bank::Bank;
-    use solana_runtime::bank_client::BankClient;
-    use solana_sdk::account::{Account, AccountSharedData};
-    use solana_sdk::client::SyncClient;
-    use solana_sdk::genesis_config::create_genesis_config;
-    use solana_sdk::hash::hash;
-    use solana_sdk::instruction::InstructionError;
-    use solana_sdk::message::Message;
-    use solana_sdk::signature::{Keypair, Signer};
-    use solana_sdk::transaction::TransactionError;
-
-    fn create_bank(lamports: u64) -> (Bank, Keypair) {
-        let (genesis_config, mint_keypair) = create_genesis_config(lamports);
-        let mut bank = Bank::new(&genesis_config);
-        bank.add_builtin("budget_program", id(), process_instruction);
-        (bank, mint_keypair)
-    }
-
-    #[test]
-    fn test_initialize_no_panic() {
-        let (bank, alice_keypair) = create_bank(1);
-        let bank_client = BankClient::new(bank);
-
-        let alice_pubkey = alice_keypair.pubkey();
-        let budget_keypair = Keypair::new();
-        let budget_pubkey = budget_keypair.pubkey();
-        let bob_pubkey = solana_sdk::pubkey::new_rand();
-
-        let mut instructions =
-            budget_instruction::payment(&alice_pubkey, &bob_pubkey, &budget_pubkey, 1);
-        instructions[1].accounts = vec![]; //